Example #1
def main():
    loop = asyncio.get_event_loop()
    # Connect to the demo host at memoria.ndn.ucla.edu.
    face = ThreadsafeFace(loop, "128.97.98.8")

    # The Counter will stop the event loop after callbacks for all 3 expressed interests.
    counter = Counter(loop, 3)

    # Try to fetch anything.
    name1 = Name("/")
    dump("Express name ", name1.toUri())
    # This call to expressInterest is thread safe because face is a ThreadsafeFace.
    face.expressInterest(name1, counter.onData, counter.onTimeout)

    # Try to fetch using a known name.
    name2 = Name("/ndn/edu/ucla/remap/demo/ndn-js-test/hello.txt/%FDX%DC5%1F")
    dump("Express name ", name2.toUri())
    face.expressInterest(name2, counter.onData, counter.onTimeout)

    # Expect this to time out.
    name3 = Name("/test/timeout")
    dump("Express name ", name3.toUri())
    face.expressInterest(name3, counter.onData, counter.onTimeout)

    # Run until the Counter calls stop().
    loop.run_forever()
    face.shutdown()
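This example (and Example #3 below) assumes a dump helper and a Counter class whose onData/onTimeout callbacks stop the loop after a fixed count. The originals are not shown in this listing; a minimal sketch inferred from the comments:

def dump(*args):
    # Print all arguments separated by spaces, like the NDN test utilities.
    print(" ".join(str(a) for a in args))

class Counter(object):
    def __init__(self, loop, maxCallbackCount):
        self._loop = loop
        self._maxCallbackCount = maxCallbackCount
        self._callbackCount = 0

    def onData(self, interest, data):
        dump("Got data packet with name", data.getName().toUri())
        self._countAndMaybeStop()

    def onTimeout(self, interest):
        dump("Time out for interest", interest.getName().toUri())
        self._countAndMaybeStop()

    def _countAndMaybeStop(self):
        # Stop the event loop once all expected callbacks have fired.
        self._callbackCount += 1
        if self._callbackCount >= self._maxCallbackCount:
            self._loop.call_soon(self._loop.stop)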
Example #2
    def start(self):
        """
        Begins the event loop. After this, the node's Face is set up and it can
        send/receive interests+data
        """
        self.log.info("Starting up")
        self.loop = asyncio.get_event_loop()
        
        if self.faceTransport is None or self.faceTransport == '':
            self.face = ThreadsafeFace(self.loop)
        else:
            self.face = ThreadsafeFace(self.loop, self.faceTransport, self.faceConn)
        
        self.face.setCommandSigningInfo(self._keyChain, self.getDefaultCertificateName())
        
        self._keyChain.setFace(self.face)

        self._isStopped = False
        self.beforeLoopStart()
        
        try:
            self.loop.run_forever()
        except Exception as e:
            self.log.exception(e)
        finally:
            self.stop()
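A hypothetical usage sketch for the start() method above (the class, prefix, and handler names are invented for illustration; BaseNode and Name are assumed to be in scope):

class MyNode(BaseNode):
    def beforeLoopStart(self):
        # start() calls this hook after the Face exists, so prefixes can be
        # registered here. Both handlers are assumed to be defined on the
        # subclass.
        self.face.registerPrefix(Name("/home/my-node"),
                                 self._onCommandReceived,
                                 self.onRegisterFailed)

node = MyNode()
node.start()  # blocks in loop.run_forever() until stop() is called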
Example #3
def main():
    # Silence the warning from Interest wire encode.
    Interest.setDefaultCanBePrefix(True)

    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop, "memoria.ndn.ucla.edu")

    # The Counter will stop the event loop after callbacks for all 3 expressed interests.
    counter = Counter(loop, 3)

    # Try to fetch anything.
    name1 = Name("/")
    dump("Express name ", name1.toUri())
    # This call to expressInterest is thread safe because face is a ThreadsafeFace.
    face.expressInterest(name1, counter.onData, counter.onTimeout)

    # Try to fetch using a known name.
    name2 = Name("/ndn/edu/ucla/remap/demo/ndn-js-test/hello.txt/%FDU%8D%9DM")
    dump("Express name ", name2.toUri())
    face.expressInterest(name2, counter.onData, counter.onTimeout)

    # Expect this to time out.
    name3 = Name("/test/timeout")
    dump("Express name ", name3.toUri())
    face.expressInterest(name3, counter.onData, counter.onTimeout)

    # Run until the Counter calls stop().
    loop.run_forever()
    face.shutdown()
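As an alternative to the global setDefaultCanBePrefix(True) call above, CanBePrefix can also be set per interest; a brief sketch (expressInterest accepts an Interest object in place of a Name):

interest = Interest(Name("/"))
interest.setCanBePrefix(True)
face.expressInterest(interest, counter.onData, counter.onTimeout)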
Example #4
    def start(self):
        """
        Begins the event loop. After this, the node's Face is set up and it can
        send/receive interests+data
        """
        self.log.info("Starting up")
        self.loop = asyncio.get_event_loop()
        
        if self.faceTransport is None or self.faceTransport == '':
            self.face = ThreadsafeFace(self.loop)
        else:
            self.face = ThreadsafeFace(self.loop, self.faceTransport, self.faceConn)
        
        self.face.setCommandSigningInfo(self._keyChain, self.getDefaultCertificateName())
        
        self._keyChain.setFace(self.face)

        self._isStopped = False
        self.beforeLoopStart()
        
        try:
            self.loop.run_forever()
        except Exception as e:
            self.log.exception(e)
        finally:
            self.stop()
Example #5
    def __init__(self, host):
        self.__name__ = __name__
        self.keyChain = KeyChain()
        self.isDone = False
        self.counter = 0
        loop = asyncio.get_event_loop()
        self.face = ThreadsafeFace(loop, host)
        self.a = {}
        self.methods = {}
Example #6
class NdnClient:
    def __init__(self,localhub_ip):
        self.loop = asyncio.get_event_loop()
        self.face = ThreadsafeFace(self.loop, localhub_ip)
         
    def request(self, req, callback, content=None, mustbefresh=False):
        baseName = ndn.Name(req)
        interest = ndn.Interest(ndn.Name(baseName))
        interest.setMustBeFresh(mustbefresh)
        self.callback = callback
        if content is not None:
            interest.setContent(content)
        self.face.expressInterest(interest, self._onData, self._onTimeout)
        self.loop.run_forever()
    
    def _onData(self, interest, data):
        self.callback(data.content)
        self.loop.stop()

    def destroy(self):
        self.face.shutdown()

    def _onTimeout(self, interest):
        print("request timeout: " + interest.name.toUri())
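A hypothetical usage sketch for NdnClient above (the name and hub address are invented; data.content is assumed to be a Blob, as in PyNDN):

def showContent(content):
    print(content.toRawStr())

client = NdnClient("localhost")
client.request("/example/data", showContent, mustbefresh=True)
client.destroy()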
Example #7
def main(): 
    # Params parsing
    parser = argparse.ArgumentParser(description='BMS gateway node to parse or follow Cascade Datahub log and publish to MiniNdn.')
    parser.add_argument('filename', help='datahub log file')
    parser.add_argument('-f', dest='follow', action='store_true', help='follow (tail -f) the log file')  
    parser.add_argument('--namespace', default='/ndn/edu/ucla/remap/bms', help='root of ndn name, no trailing slash')
    args = parser.parse_args()
    
    # Setup logging
    logger = Logger()
    logger.prepareLogging()

    # Face, KeyChain, memoryContentCache and asio event loop initialization
    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop, "128.97.98.7")

    keyChain = KeyChain(IdentityManager(BasicIdentityStorage(), FilePrivateKeyStorage()))
    # For the gateway publisher, we create one identity for it to sign nfd command interests
    #certificateName = keyChain.createIdentityAndCertificate(Name("/ndn/bms/gateway-publisher"))
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())
    print "Using certificate name " + keyChain.getDefaultCertificateName().toUri()
    cache = MemoryContentCache(face)

    dataPublisher = DataPublisher(face, keyChain, loop, cache, args.namespace)
    cache.registerPrefix(Name(args.namespace), dataPublisher.onRegisterFailed, dataPublisher.onDataNotFound)
    
    # Parse csv to decide the mapping between sensor JSON -> <NDN name, data type>
    dataPublisher.populateSensorNDNDictFromCSV('bms-sensor-data-types-sanitized.csv')

    loop.call_later(dataPublisher._restartInterval, dataPublisher.checkAlive)
    if args.follow: 
        #asyncio.async(loop.run_in_executor(executor, followfile, args.filename, args.namespace, cache))
        loop.run_until_complete(dataPublisher.followfile(args.filename))
    else:
        loop.run_until_complete(dataPublisher.readfile(args.filename))
        
    loop.run_forever()
    face.shutdown()
Example #8
def main():
    # Params parsing
    parser = argparse.ArgumentParser(
        description=
        'BMS gateway node to parse or follow Cascade Datahub log and publish to MiniNdn.'
    )
    parser.add_argument('filename', help='datahub log file')
    parser.add_argument('-f',
                        dest='follow',
                        action='store_true',
                        help='follow (tail -f) the log file')
    parser.add_argument('--namespace',
                        default='/ndn/nist/bms',
                        help='root of ndn name, no trailing slash')

    parser.add_argument('--image',
                        dest='image',
                        default='../simulator/res/floor2.jpg',
                        help='the floor plan to publish')
    parser.add_argument('--location',
                        dest='location',
                        default='../simulator/res/locations.txt',
                        help='the locations file to publish')

    args = parser.parse_args()

    # Setup logging
    logger = Logger()
    logger.prepareLogging()

    # Face, KeyChain, memoryContentCache and asio event loop initialization
    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop)

    keyChain = KeyChain(IdentityManager(BasicIdentityStorage()))
    # For the gateway publisher, we create one identity for it to sign nfd command interests
    certificateName = keyChain.createIdentityAndCertificate(
        Name("/ndn/nist/gateway"))
    face.setCommandSigningInfo(keyChain, certificateName)
    cache = MemoryContentCache(face)

    dataPublisher = DataPublisher(face, keyChain, loop, cache, args.namespace,
                                  args.image, args.location)
    cache.registerPrefix(Name(args.namespace), dataPublisher.onRegisterFailed,
                         dataPublisher.onDataNotFound)
    loop.run_until_complete(dataPublisher.publishFloorImage())

    if args.follow:
        #asyncio.async(loop.run_in_executor(executor, followfile, args.filename, args.namespace, cache))
        loop.run_until_complete(dataPublisher.followfile(args.filename))
    else:
        loop.run_until_complete(dataPublisher.readfile(args.filename))

    loop.run_forever()
    face.shutdown()
Example #9
    def __init__(self, applyEDLAdjustment=True):
        # prepare trollius logging
        self.prepareLogging()

        self._events = dict()
        self._running = False
        self._applyEDLAdjustment = applyEDLAdjustment

        # NDN related variables
        self._loop = asyncio.get_event_loop()
        self._face = ThreadsafeFace(self._loop)

        # Use the system default key chain and certificate name to sign commands.
        self._keyChain = KeyChain()
        self._keyChain.setFace(self._face)
        self._certificateName = self._keyChain.getDefaultCertificateName()
        self._face.setCommandSigningInfo(self._keyChain, self._certificateName)
        self._memoryContentCache = MemoryContentCache(self._face)

        # Publishing parameters configuration
        self._translationServiceUrl = "http://the-archive.la/losangeles/services/get-youtube-url"
        self._namePrefixString = "/ndn/edu/ucla/remap/test/edl/"

        self._dataLifetime = 2000
        self._publishBeforeSeconds = 3
        self._translateBeforeSeconds = 60
        self._currentIdx = 0

        # Youtube related variables:
        # Channel Global song: UCSMJaKICZKXkpvr7Gj8pPUg
        # Channel Los Angeles: UCeuQoBBzMW6SWkxd8_1I8NQ
        # self._channelID = 'UCSMJaKICZKXkpvr7Gj8pPUg'
        self._channelID = "UCSMJaKICZKXkpvr7Gj8pPUg"
        self._accessKey = "AIzaSyCe8t7PnmWjMKZ1gBouhP1zARpqNwHAs0s"
        # queryStr = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails,statistics,status&key=' + apiKey + '&id='
        # Video query example
        # https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails,statistics,status&key=AIzaSyDUY_AX1iJQcwCW1mASEp5GcLtq1V9BM1Q&id=_ebELPKANxo
        # Channel query example
        # https://www.googleapis.com/youtube/v3/search?key=AIzaSyCe8t7PnmWjMKZ1gBouhP1zARpqNwHAs0s&channelId=UCSMJaKICZKXkpvr7Gj8pPUg&part=snippet,id&order=date&maxResults=20
        self._videoUrlDict = dict()

        self._edlAdjustmentDict = dict()
        return
Example #10
def main():
    # Params parsing
    parser = argparse.ArgumentParser(
        description=
        'BMS gateway node to parse or follow Cascade Datahub log and publish to MiniNdn.'
    )
    parser.add_argument('filename', help='datahub log file')
    parser.add_argument('-f',
                        dest='follow',
                        action='store_true',
                        help='follow (tail -f) the log file')
    parser.add_argument('--namespace',
                        default='/ndn/edu/ucla/remap/bms',
                        help='root of ndn name, no trailing slash')
    args = parser.parse_args()

    # Setup logging
    logger = Logger()
    logger.prepareLogging()

    # Face, KeyChain, memoryContentCache and asio event loop initialization
    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop, "128.97.98.7")

    keyChain = KeyChain(
        IdentityManager(BasicIdentityStorage(), FilePrivateKeyStorage()))
    # For the gateway publisher, we create one identity for it to sign nfd command interests
    #certificateName = keyChain.createIdentityAndCertificate(Name("/ndn/bms/gateway-publisher"))
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())
    print "Using certificate name " + keyChain.getDefaultCertificateName(
    ).toUri()
    cache = MemoryContentCache(face)

    dataPublisher = DataPublisher(face, keyChain, loop, cache, args.namespace)
    cache.registerPrefix(Name(args.namespace), dataPublisher.onRegisterFailed,
                         dataPublisher.onDataNotFound)

    # Parse csv to decide the mapping between sensor JSON -> <NDN name, data type>
    dataPublisher.populateSensorNDNDictFromCSV(
        'bms-sensor-data-types-sanitized.csv')

    loop.call_later(dataPublisher._restartInterval, dataPublisher.checkAlive)
    if args.follow:
        #asyncio.async(loop.run_in_executor(executor, followfile, args.filename, args.namespace, cache))
        loop.run_until_complete(dataPublisher.followfile(args.filename))
    else:
        loop.run_until_complete(dataPublisher.readfile(args.filename))

    loop.run_forever()
    face.shutdown()
Example #11
    def startPublishing(self):
        # One-time security setup
        self.prepareLogging()

        privateKeyStorage = FilePrivateKeyStorage()
        identityStorage = BasicIdentityStorage()
        policyManager = ConfigPolicyManager(self._trustSchemaFile)

        self._keyChain = KeyChain(
            IdentityManager(identityStorage, privateKeyStorage), policyManager)
        self._certificateName = self._keyChain.createIdentityAndCertificate(
            self._identityName)

        print("My Identity name: " + self._identityName.toUri())
        print("My certificate name: " + self._certificateName.toUri())
        certificateData = self._keyChain.getIdentityManager(
        )._identityStorage.getCertificate(self._certificateName)
        print("My certificate string: " +
              b64encode(certificateData.wireEncode().toBuffer()))
        # self._keyChain.getIdentityCertificate(self._certificateName).)

        self._loop = asyncio.get_event_loop()
        self._face = ThreadsafeFace(self._loop)
        self._keyChain.setFace(self._face)

        self._face.setCommandSigningInfo(self._keyChain, self._certificateName)
        self._memoryContentCache = MemoryContentCache(self._face)

        # We should only ask for cert to be signed upon the first run of a certain aggregator
        if DO_CERT_SETUP:
            if (KeyLocator.getFromSignature(
                    certificateData.getSignature()).getKeyName().equals(
                        self._certificateName.getPrefix(-1))):
                # Need to configure for mini-ndn; aggregation node runs outside of mini-ndn first so that signed cert get installed and mini-ndn won't ask for this again
                print("certificate " + self._certificateName.toUri() +
                      " asking for signature")
                response = urllib2.urlopen(
                    "http://192.168.56.1:5000/bms-cert-hack?cert=" +
                    b64encode(certificateData.wireEncode().toBuffer()) +
                    "&cert_prefix=" + self._identityName.toUri() +
                    '&subject_name=' + self._identityName.toUri()).read()

                signedCertData = Data()
                signedCertData.wireDecode(Blob(b64decode(response)))

                self._memoryContentCache.add(signedCertData)
                cmdline = ['ndnsec-install-cert', '-']
                p = subprocess.Popen(cmdline,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
                # desanitize + sign in GET request
                cert, err = p.communicate(response)
                if p.returncode != 0:
                    raise RuntimeError("ndnsec-install-cert error")
            else:
                self._memoryContentCache.add(certificateData)
        else:
            self._memoryContentCache.add(certificateData)

        dataNode = self.conf.getDataNode()
        childrenNode = self.conf.getChildrenNode()

        self._memoryContentCache.registerPrefix(Name(self._identityName),
                                                self.onRegisterFailed,
                                                self.onDataNotFound)

        # For each type of data, we refresh each type of aggregation according to the interval in the configuration
        for i in range(len(dataNode.subtrees)):
            dataType = dataNode.subtrees.keys()[i]
            aggregationParams = self.conf.getProducingParamsForAggregationType(
                dataNode.subtrees.items()[i][1])

            if childrenNode is None:
                self._dataQueue[dataType] = DataQueue(None, None, None)
                self.generateData(dataType, 2, 0)

            for aggregationType in aggregationParams:
                childrenList = OrderedDict()
                if childrenNode is not None:

                    for j in range(len(childrenNode.subtrees)):
                        if dataType in childrenNode.subtrees.items(
                        )[j][1].subtrees['data'].subtrees:
                            if aggregationType in childrenNode.subtrees.items(
                            )[j][1].subtrees['data'].subtrees[
                                    dataType].subtrees:
                                childrenList[childrenNode.subtrees.items()[j][
                                    0]] = self.conf.getProducingParamsForAggregationType(
                                        childrenNode.subtrees.items()[j]
                                        [1].subtrees['data'].subtrees[dataType]
                                    )[aggregationType]

                self.startPublishingAggregation(
                    aggregationParams[aggregationType], childrenList, dataType,
                    aggregationType)
        return
Example #12
class BmsNode(object):
    def __init__(self):
        self.conf = None
        self._keyChain = None
        self._certificateName = None

        self._dataQueue = dict()
        self._memoryContentCache = None
        self._identityName = None

        self._aggregation = Aggregation()

    def setConfiguration(self, fileName, trustSchemaFile):
        self.conf = BoostInfoParser()
        self.conf.read(fileName)
        self._identityName = Name(self.conf.getNodePrefix())
        self._trustSchemaFile = trustSchemaFile

    def onDataNotFound(self, prefix, interest, face, interestFilterId, filter):
        #print('Data not found for ' + interest.getName().toUri())
        return

    def startPublishing(self):
        # One-time security setup
        self.prepareLogging()

        privateKeyStorage = FilePrivateKeyStorage()
        identityStorage = BasicIdentityStorage()
        policyManager = ConfigPolicyManager(self._trustSchemaFile)

        self._keyChain = KeyChain(IdentityManager(identityStorage, privateKeyStorage), policyManager)
        self._certificateName = self._keyChain.createIdentityAndCertificate(self._identityName)

        print("My Identity name: " + self._identityName.toUri())
        print("My certificate name: " + self._certificateName.toUri())
        certificateData = self._keyChain.getIdentityManager()._identityStorage.getCertificate(self._certificateName, True)
        print("My certificate string: " + b64encode(certificateData.wireEncode().toBuffer()))
        # self._keyChain.getIdentityCertificate(self._certificateName).)

        self._loop = asyncio.get_event_loop()
        self._face = ThreadsafeFace(self._loop)
        self._keyChain.setFace(self._face)

        self._face.setCommandSigningInfo(self._keyChain, self._certificateName)
        self._memoryContentCache = MemoryContentCache(self._face)

        # We should only ask for cert to be signed upon the first run of a certain aggregator
        if DO_CERT_SETUP:
            if (KeyLocator.getFromSignature(certificateData.getSignature()).getKeyName().equals(self._certificateName.getPrefix(-1))):
                # Need to configure for mini-ndn; aggregation node runs outside of mini-ndn first so that signed cert get installed and mini-ndn won't ask for this again
                print("certificate " + self._certificateName.toUri() + " asking for signature")
                response = urllib2.urlopen("http://192.168.56.1:5000/bms-cert-hack?cert=" + b64encode(certificateData.wireEncode().toBuffer()) + "&cert_prefix=" + self._identityName.toUri() + '&subject_name=' + self._identityName.toUri()).read()
                
                signedCertData = Data()
                signedCertData.wireDecode(Blob(b64decode(response)))

                self._memoryContentCache.add(signedCertData)
                cmdline = ['ndnsec-install-cert', '-']
                p = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
                # desanitize + sign in GET request
                cert, err = p.communicate(response)
                if p.returncode != 0:
                    raise RuntimeError("ndnsec-install-cert error")
            else:
                self._memoryContentCache.add(certificateData)
        else:
            self._memoryContentCache.add(certificateData)

        dataNode = self.conf.getDataNode()
        childrenNode = self.conf.getChildrenNode()

        self._memoryContentCache.registerPrefix(Name(self._identityName), self.onRegisterFailed, self.onDataNotFound)

        # For each type of data, we refresh each type of aggregation according to the interval in the configuration
        for i in range(len(dataNode.subtrees)):
            dataType = dataNode.subtrees.keys()[i]
            aggregationParams = self.conf.getProducingParamsForAggregationType(dataNode.subtrees.items()[i][1])

            if childrenNode is None:
                self._dataQueue[dataType] = DataQueue(None, None, None)
                self.generateData(dataType, 2, 0)

            for aggregationType in aggregationParams:
                childrenList = OrderedDict()
                if childrenNode is not None:

                    for j in range(len(childrenNode.subtrees)):
                        if dataType in childrenNode.subtrees.items()[j][1].subtrees['data'].subtrees:
                            if aggregationType in childrenNode.subtrees.items()[j][1].subtrees['data'].subtrees[dataType].subtrees:
                                childrenList[childrenNode.subtrees.items()[j][0]] = self.conf.getProducingParamsForAggregationType(childrenNode.subtrees.items()[j][1].subtrees['data'].subtrees[dataType])[aggregationType]

                self.startPublishingAggregation(aggregationParams[aggregationType], childrenList, dataType, aggregationType)
        return

    def startPublishingAggregation(self, params, childrenList, dataType, aggregationType):
        if __debug__:
            print('Start publishing for ' + dataType + '-' + aggregationType)
        
        # aggregation calculating and publishing mechanism
        publishingPrefix = Name(self._identityName).append(DATA_COMPONENT).append(dataType).append(AGGREGATION_COMPONENT).append(aggregationType)
        self._dataQueue[dataType + aggregationType] = DataQueue(params, childrenList, publishingPrefix)

        if len(childrenList.keys()) == 0:
            # TODO: make start_time optional for leaf nodes
            self._loop.call_later(int(params['producer_interval']), self.calculateAggregation, dataType, aggregationType, childrenList, int(params['start_time']), int(params['producer_interval']), publishingPrefix, True)
        else:
            # express interest for children who produce the same data and aggregation type
            for childName in childrenList.keys():
                name = Name(self._identityName).append(childName).append(DATA_COMPONENT).append(dataType).append(AGGREGATION_COMPONENT).append(aggregationType)
                interest = Interest(name)
                # if start_time is specified, we ask for data starting at start_time; 
                # if not, we ask for the right most child and go from there
                if ('start_time' in childrenList[childName]):
                    endTime = int(childrenList[childName]['start_time']) + int(childrenList[childName]['producer_interval'])
                    interest.getName().append(str(childrenList[childName]['start_time'])).append(str(endTime))
                else:
                    # TODO: For now we are playing with historical data, for each run we don't want to miss any data, thus we start with leftMost
                    interest.setChildSelector(0)
                    interest.setMustBeFresh(True)
                interest.setInterestLifetimeMilliseconds(DEFAULT_INTEREST_LIFETIME)
                if __debug__:
                    print('  Issue interest: ' + interest.getName().toUri())
                self._face.expressInterest(interest, self.onData, self.onTimeout)

        return
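
    # For orientation, the names built above follow this pattern. Assuming
    # (not confirmed by this snippet) DATA_COMPONENT == "data" and
    # AGGREGATION_COMPONENT == "aggregation", with identity /ndn/bms/node1,
    # dataType "temperature" and aggregationType "avg":
    #   publishingPrefix -> /ndn/bms/node1/data/temperature/aggregation/avg
    #   child interest   -> /ndn/bms/node1/<childName>/data/temperature/aggregation/avg
    # and each published packet appends [startTime][endTime] components.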

    # TODO: once one calculation's decided a child has not answered, we should do another calculation
    def calculateAggregation(self, dataType, aggregationType, childrenList, startTime, interval, publishingPrefix, repeat = False):
        doCalc = True
        dataList = []

        # TODO: an intermediate node cannot produce raw data for now
        if len(childrenList.keys()) != 0:
            for childName in childrenList.keys():
                dataDictKey = self.getDataDictKey(startTime, (startTime + interval), childName)
                if dataDictKey in self._dataQueue[dataType + aggregationType]._dataDict:
                    data = self._dataQueue[dataType + aggregationType]._dataDict[dataDictKey]
                    dataList.append(float(data.getContent().toRawStr()))
                else:
                    #print('Child ' + childName + ' has not replied yet')
                    doCalc = False
                    break
        else:
            for inst in self._dataQueue[dataType]._dataDict.keys():
                if int(inst) >= startTime and int(inst) < startTime + interval:
                    dataList.append(self._dataQueue[dataType]._dataDict[inst])
        if doCalc:
            content = self._aggregation.getAggregation(aggregationType, dataList)
            if content:
                publishData = Data(Name(publishingPrefix).append(str(startTime)).append(str(startTime + interval)))
                publishData.setContent(str(content))
                publishData.getMetaInfo().setFreshnessPeriod(DEFAULT_DATA_LIFETIME)
                self._keyChain.sign(publishData, self._certificateName)
                self._memoryContentCache.add(publishData)
                for childName in childrenList.keys():
                    dataDictKey = self.getDataDictKey(startTime, (startTime + interval), childName)
                    if dataDictKey in self._dataQueue[dataType + aggregationType]._dataDict:
                        del self._dataQueue[dataType + aggregationType]._dataDict[dataDictKey]
                if __debug__:
                    print("Produced: " + publishData.getName().toUri() + "; " + publishData.getContent().toRawStr())

        # repetition of this function only happens for raw data producer, otherwise calculateAggregation is called by each onData
        if repeat:
            self._loop.call_later(interval, self.calculateAggregation, dataType, aggregationType, childrenList, startTime + interval, interval, publishingPrefix, repeat)
        return

    def generateData(self, dataType, interval, startTime):
        self._dataQueue[dataType]._dataDict[str(startTime)] = random.randint(0,9)
        self._loop.call_later(interval, self.generateData, dataType, interval, startTime + interval)
        return

    def onRegisterFailed(self, prefix):
        raise RuntimeError("Register failed for prefix", prefix.toUri())

    def onVerified(self, data):
        print('Data verified: ' + data.getName().toUri())
        return

    def onVerifyFailed(self, data):
        print('Data verification failed: ' + data.getName().toUri())
        return

    def onData(self, interest, data):
        self._keyChain.verifyData(data, self.onVerified, self.onVerifyFailed)

        dataName = data.getName()
        dataQueue = None

        if __debug__:
            print("Got data: " + dataName.toUri() + "; " + data.getContent().toRawStr())
        for i in range(0, len(dataName)):
            if dataName.get(i).toEscapedString() == AGGREGATION_COMPONENT:
                dataType = dataName.get(i - 1).toEscapedString()
                aggregationType = dataName.get(i + 1).toEscapedString()
                
                startTime = int(dataName.get(i + 2).toEscapedString())
                endTime = int(dataName.get(i + 3).toEscapedString())
                childName = dataName.get(i - 3).toEscapedString()

                dataAndAggregationType = dataType + aggregationType
                
                dataDictKey = self.getDataDictKey(startTime, endTime, childName)
                dataQueue = self._dataQueue[dataAndAggregationType]
                dataQueue._dataDict[dataDictKey] = data
                break

        # TODO: check what if interval/starttime is misconfigured
        if dataQueue:
            self.calculateAggregation(dataType, aggregationType, dataQueue._childrenList, startTime, endTime - startTime, dataQueue._publishingPrefix)

        # Always ask for the next piece of data when we receive this one; assumes interval does not change; this also assumes there are no more components after endTime
        #newInterestName = dataName.getPrefix(i + 2).append(str(endTime)).append(str(endTime + (endTime - startTime)))
        
        # We don't expect aggregated data name to be continuous within our given time window, so we ask with exclusion instead
        newInterestName = dataName.getPrefix(i + 2)
        newInterest = Interest(interest)
        newInterest.setName(newInterestName)
        newInterest.setChildSelector(0)

        exclude = Exclude()
        exclude.appendAny()
        exclude.appendComponent(dataName.get(i + 2))
        newInterest.setExclude(exclude)

        self._face.expressInterest(newInterest, self.onData, self.onTimeout)
        if __debug__:
            print("  issue interest: " + interest.getName().toUri())

        return

    def onTimeout(self, interest):
        if __debug__:
            print("  interest timeout: " + interest.getName().toUri() + "; reexpress")
            pass
        self._face.expressInterest(interest, self.onData, self.onTimeout)
        return

    def stop(self):
        self._loop.stop()
        if __debug__:
            print("Stopped")
        return
    
    # This creation of dataDictKey means parent and child should not have the same name            
    @staticmethod
    def getDataDictKey(startTime, endTime, childName):
        return str(startTime) + '/' + str(endTime) + '/' + childName
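
    # e.g. getDataDictKey(1500000000, 1500000002, "child1") returns
    # "1500000000/1500000002/child1"; childName is kept as the last component
    # so the startTime/endTime prefix is shared across children.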

##
# Logging
##
    def prepareLogging(self):
        self.log = logging.getLogger(str(self.__class__))
        self.log.setLevel(logging.DEBUG)
        logFormat = "%(asctime)-15s %(name)-20s %(funcName)-20s (%(levelname)-8s):\n\t%(message)s"
        self._console = logging.StreamHandler()
        self._console.setFormatter(logging.Formatter(logFormat))
        self._console.setLevel(logging.INFO)
        # without this, a lot of ThreadsafeFace errors get swallowed up
        logging.getLogger("trollius").addHandler(self._console)
        self.log.addHandler(self._console)

    def setLogLevel(self, level):
        """
        Set the log level that will be output to standard error
        :param level: A log level constant defined in the logging module (e.g. logging.INFO) 
        """
        self._console.setLevel(level)

    def getLogger(self):
        """
        :return: The logger associated with this node
        :rtype: logging.Logger
        """
        return self.log
Example #13
class BaseNode(object):
    """
    This class contains methods/attributes common to both node and controller.
    
    """
    def __init__(self, transport = None, conn = None):
        """
        Initialize the network and security classes for the node
        """
        super(BaseNode, self).__init__()
        self.faceTransport = transport
        self.faceConn = conn
        
        self._identityStorage = IotIdentityStorage()
        self._identityManager = IotIdentityManager(self._identityStorage)
        self._policyManager = IotPolicyManager(self._identityStorage)

        # hopefully there is some private/public key pair available
        self._keyChain = KeyChain(self._identityManager, self._policyManager)

        self._registrationFailures = 0
        self._prepareLogging()

        self._setupComplete = False


##
# Logging
##
    def _prepareLogging(self):
        self.log = logging.getLogger(str(self.__class__))
        self.log.setLevel(logging.DEBUG)
        logFormat = "%(asctime)-15s %(name)-20s %(funcName)-20s (%(levelname)-8s):\n\t%(message)s"
        self._console = logging.StreamHandler()
        self._console.setFormatter(logging.Formatter(logFormat))
        self._console.setLevel(logging.INFO)
        # without this, a lot of ThreadsafeFace errors get swallowed up
        logging.getLogger("trollius").addHandler(self._console)
        self.log.addHandler(self._console)

    def setLogLevel(self, level):
        """
        Set the log level that will be output to standard error
        :param level: A log level constant defined in the logging module (e.g. logging.INFO) 
        """
        self._console.setLevel(level)

    def getLogger(self):
        """
        :return: The logger associated with this node
        :rtype: logging.Logger
        """
        return self.log

###
# Startup and shutdown
###
    def beforeLoopStart(self):
        """
        Called before the event loop starts.
        """
        pass

    def getDefaultCertificateName(self):
        try:
            certName = self._identityStorage.getDefaultCertificateNameForIdentity( 
                self._policyManager.getDeviceIdentity())
        except SecurityException as e:
            # zhehao: in the case of producer's /localhop prefixes, the default key is not defined in ndnsec-public-info.db
            #print(e)
            certName = self._keyChain.getDefaultCertificateName()
        return certName

    def start(self):
        """
        Begins the event loop. After this, the node's Face is set up and it can
        send/receive interests+data
        """
        self.log.info("Starting up")
        self.loop = asyncio.get_event_loop()
        
        if self.faceTransport is None or self.faceTransport == '':
            self.face = ThreadsafeFace(self.loop)
        else:
            self.face = ThreadsafeFace(self.loop, self.faceTransport, self.faceConn)
        
        self.face.setCommandSigningInfo(self._keyChain, self.getDefaultCertificateName())
        
        self._keyChain.setFace(self.face)

        self._isStopped = False
        self.beforeLoopStart()
        
        try:
            self.loop.run_forever()
        except Exception as e:
            self.log.exception(e)
        finally:
            self.stop()

    def stop(self):
        """
        Stops the node, taking it off the network
        """
        self.log.info("Shutting down")
        self._isStopped = True 
        self.loop.stop()
        
###
# Data handling
###
    def signData(self, data):
        """
        Sign the data with our network certificate
        :param pyndn.Data data: The data to sign
        """
        self._keyChain.sign(data, self.getDefaultCertificateName())

    def sendData(self, data, transport, sign=True):
        """
        Reply to an interest with a data packet, optionally signing it.
        :param pyndn.Data data: The response data packet
        :param pyndn.Transport transport: The transport to send the data through. This is 
            obtained from an incoming interest handler
        :param boolean sign: (optional, default=True) Whether the response must be signed. 
        """
        if sign:
            self.signData(data)
        transport.send(data.wireEncode().buf())

###
# 
# 
##
    def onRegisterFailed(self, prefix):
        """
        Called when the node cannot register its name with the forwarder
        :param pyndn.Name prefix: The network name that failed registration
        """
        if self.faceTransport is not None and self.faceConn is not None:
            self.log.warn("Explicit face transport and connectionInfo: Could not register {}; expect a manual or autoreg on the other side.".format(prefix.toUri()))
        elif self._registrationFailures < 5:
            self._registrationFailures += 1
            self.log.warn("Could not register {}, retry: {}/{}".format(prefix.toUri(), self._registrationFailures, 5)) 
            self.face.registerPrefix(self.prefix, self._onCommandReceived, self.onRegisterFailed)
        else:
            self.log.info("Prefix registration failed")
            self.stop()

    def verificationFailed(self, dataOrInterest):
        """
        Called when verification of a data packet or command interest fails.
        :param pyndn.Data or pyndn.Interest: The packet that could not be verified
        """
        self.log.info("Received invalid" + dataOrInterest.getName().toUri())

    @staticmethod
    def getSerial():
        """
        Find and return the serial number of the Raspberry Pi. Provided in case
        you wish to distinguish data from nodes with the same name by serial.
        :return: The serial number extracted from device information in /proc/cpuinfo
        :rtype: str
        """
        with open('/proc/cpuinfo') as f:
            for line in f:
                if line.startswith('Serial'):
                    return line.split(':')[1].strip()
Example #14
class Flask():
    def __init__(self, host):
        self.__name__ = __name__
        self.keyChain = KeyChain()
        self.isDone = False
        self.counter = 0
        loop = asyncio.get_event_loop()
        self.face = ThreadsafeFace(loop, host)
        self.a = {}
        self.methods = {}

    def route(self, uri, methods):
        self.baseName = ndn.Name(uri)
        self.methods[self.baseName] = methods[0]
        return self.dec

    def onInterest(self, prefix, interest, *k):
        print >> sys.stderr, "<< PyNDN %s" % interest.name
        print prefix
        d = self.a[prefix]
        if self.methods[prefix] == "POST":
            content = json.dumps(
                d(interest.getContent().toRawStr().decode('string_escape')))
        else:
            content = json.dumps(d())
        self.counter += 1
        #print interest.getContent().toRawStr().decode('string_escape')
        data = ndn.Data(interest.getName())

        meta = ndn.MetaInfo()
        meta.setFreshnessPeriod(5000)
        data.setMetaInfo(meta)

        data.setContent(content)
        self.keyChain.sign(data, self.keyChain.getDefaultCertificateName())

        self.face.putData(data)

    def _onRegisterFailed(self, prefix):
        print >> sys.stderr, "<< PyNDN: failed to register prefix"

    def run(self):
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)

        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        root.addHandler(ch)
        loop = asyncio.get_event_loop()
        #face = ThreadsafeFace(loop, "172.17.0.1")
        #face = ndn.Face("172.17.0.1","6363")
        server = Server(self.face)

        loop.run_forever()
        self.face.shutdown()

    def dec(self, func):
        self.a[self.baseName] = func
        print self.a
        self.face.registerPrefix(
            self.baseName,
            self.onInterest,
            self._onRegisterFailed,
        )
        return func
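A hypothetical usage sketch for the decorator-style wrapper above (the prefix and handler are invented; run() assumes a Server class from the surrounding module):

app = Flask("localhost")

@app.route("/example/hello", methods=["GET"])
def hello():
    # GET handlers return a JSON-serializable value; onInterest wraps it
    # in a signed Data packet.
    return {"message": "hello"}

app.run()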
Example #15
]

#set up a keyChain
keyChain = KeyChain()
print("Default identity:", keyChain.getDefaultCertificateName())

#main async event loop
loop = asyncio.get_event_loop()

for hub in hubList:
    url = urlparse(hub['site'])
    if (hub['shortname'] not in ['UCLA', 'LIP6']):
        continue

    print("Registering", hub['shortname'], "hostname:", url.hostname)
    face = ThreadsafeFace(loop, url.hostname)
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())
    facePrefix = Name(PREFIX + hub['shortname'])

    print("Face:", facePrefix)
    #shadowing to replace the default PyNDN function that does not support passing options
    # setOrigin(65) must be included to propagate the prefix to other nodes
    face._node._nfdRegisterPrefix = types.MethodType(nfdRegisterPrefix,
                                                     face._node)
    face.registerPrefix(facePrefix,
                        onInterest,
                        onRegisterFailed,
                        onRegisterSuccess=onRegisterSuccess)
    faces[facePrefix] = face

#schedule pings later, so that prefixes have some time to register and propagate
Example #16
    def _onInterest(self, prefix, interest, *k):
        print >> sys.stderr, "<< PyNDN %s" % interest.name

        content = "PyNDN LINE #%d\n" % self.counter
        self.counter += 1

        data = ndn.Data(interest.getName())

        meta = ndn.MetaInfo()
        meta.setFreshnessPeriod(5000)
        data.setMetaInfo(meta)

        data.setContent(content)

        self.keyChain.sign(data, self.keyChain.getDefaultCertificateName())

        self.face.putData(data)

    def _onRegisterFailed(self, prefix):
        print >> sys.stderr, "<< PyNDN: failed to register prefix"


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop, None)
    server = Server(face)

    loop.run_forever()
    face.shutdown()
Example #17
    def __init__(self, localhub_ip):
        self.loop = asyncio.get_event_loop()
        self.face = ThreadsafeFace(self.loop, localhub_ip)
Example #18
        namespaces = args.namespace.split(',')
    else:
        namespaces = [default_prefix]

    if args.security:
        security_option = args.security
    else:
        security_option = default_security_option

    if args.request:
        request_prefix = args.request
    else:
        request_prefix = default_request_prefix

    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop)
    
    bootstrap = Bootstrap(face)
    appName = "flow1"
    
    def startProducers(defaultCertificateName, keyChain):
        if len(addrs) != len(namespaces):
            print "argument length mismatch: addrs(" + str(len(addrs)) + ") and namespaces(" + str(len(namespaces)) + ")"
            return
        for i in range(0, len(addrs)):
            producer = AppProducer(face, defaultCertificateName, keyChain, Name(namespaces[i]))
            producer.start()
            peripheral = BtlePeripheral(addrs[i], producer, loop, receive_uuid, send_uuid, security_option)
            peripheral.start()
        return
Example #19
    def _setup_face(self, ip):
        """Sets up a face that connects to a remote forwarder."""
        udp_connection_info = UdpTransport.ConnectionInfo(ip, 6363)
        udp_transport = UdpTransport()
        return ThreadsafeFace(self._loop, udp_transport, udp_connection_info)
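For reference, a self-contained sketch of the same pattern with the imports it relies on (the forwarder address is a placeholder):

import asyncio
from pyndn.threadsafe_face import ThreadsafeFace
from pyndn.transport.udp_transport import UdpTransport

def setup_udp_face(loop, ip, port=6363):
    # Build a UDP transport plus its connection info and hand both to
    # ThreadsafeFace, which dispatches all Face calls on the given loop.
    udp_connection_info = UdpTransport.ConnectionInfo(ip, port)
    udp_transport = UdpTransport()
    return ThreadsafeFace(loop, udp_transport, udp_connection_info)

face = setup_udp_face(asyncio.get_event_loop(), "192.0.2.1")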
Example #20
    def startPublishing(self):
        # One-time security setup
        self.prepareLogging()

        privateKeyStorage = FilePrivateKeyStorage()
        identityStorage = BasicIdentityStorage()
        policyManager = ConfigPolicyManager(self._trustSchemaFile)

        self._keyChain = KeyChain(IdentityManager(identityStorage, privateKeyStorage), policyManager)
        self._certificateName = self._keyChain.createIdentityAndCertificate(self._identityName)

        print("My Identity name: " + self._identityName.toUri())
        print("My certificate name: " + self._certificateName.toUri())
        certificateData = self._keyChain.getIdentityManager()._identityStorage.getCertificate(self._certificateName, True)
        print("My certificate string: " + b64encode(certificateData.wireEncode().toBuffer()))
        # self._keyChain.getIdentityCertificate(self._certificateName).)

        self._loop = asyncio.get_event_loop()
        self._face = ThreadsafeFace(self._loop)
        self._keyChain.setFace(self._face)

        self._face.setCommandSigningInfo(self._keyChain, self._certificateName)
        self._memoryContentCache = MemoryContentCache(self._face)

        # We should only ask for cert to be signed upon the first run of a certain aggregator
        if DO_CERT_SETUP:
            if (KeyLocator.getFromSignature(certificateData.getSignature()).getKeyName().equals(self._certificateName.getPrefix(-1))):
                # Need to configure for mini-ndn; aggregation node runs outside of mini-ndn first so that signed cert get installed and mini-ndn won't ask for this again
                print("certificate " + self._certificateName.toUri() + " asking for signature")
                response = urllib2.urlopen("http://192.168.56.1:5000/bms-cert-hack?cert=" + b64encode(certificateData.wireEncode().toBuffer()) + "&cert_prefix=" + self._identityName.toUri() + '&subject_name=' + self._identityName.toUri()).read()
                
                signedCertData = Data()
                signedCertData.wireDecode(Blob(b64decode(response)))

                self._memoryContentCache.add(signedCertData)
                cmdline = ['ndnsec-install-cert', '-']
                p = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
                # desanitize + sign in GET request
                cert, err = p.communicate(response)
                if p.returncode != 0:
                    raise RuntimeError("ndnsec-install-cert error")
            else:
                self._memoryContentCache.add(certificateData)
        else:
            self._memoryContentCache.add(certificateData)

        dataNode = self.conf.getDataNode()
        childrenNode = self.conf.getChildrenNode()

        self._memoryContentCache.registerPrefix(Name(self._identityName), self.onRegisterFailed, self.onDataNotFound)

        # For each type of data, we refresh each type of aggregation according to the interval in the configuration
        for i in range(len(dataNode.subtrees)):
            dataType = dataNode.subtrees.keys()[i]
            aggregationParams = self.conf.getProducingParamsForAggregationType(dataNode.subtrees.items()[i][1])

            if childrenNode is None:
                self._dataQueue[dataType] = DataQueue(None, None, None)
                self.generateData(dataType, 2, 0)

            for aggregationType in aggregationParams:
                childrenList = OrderedDict()
                if childrenNode is not None:

                    for j in range(len(childrenNode.subtrees)):
                        if dataType in childrenNode.subtrees.items()[j][1].subtrees['data'].subtrees:
                            if aggregationType in childrenNode.subtrees.items()[j][1].subtrees['data'].subtrees[dataType].subtrees:
                                childrenList[childrenNode.subtrees.items()[j][0]] = self.conf.getProducingParamsForAggregationType(childrenNode.subtrees.items()[j][1].subtrees['data'].subtrees[dataType])[aggregationType]

                self.startPublishingAggregation(aggregationParams[aggregationType], childrenList, dataType, aggregationType)
        return
Example #21
class BmsNode(object):
    def __init__(self):
        self.conf = None
        self._keyChain = None
        self._certificateName = None

        self._dataQueue = dict()
        self._memoryContentCache = None
        self._identityName = None

        self._aggregation = Aggregation()

    def setConfiguration(self, fileName, trustSchemaFile):
        self.conf = BoostInfoParser()
        self.conf.read(fileName)
        self._identityName = Name(self.conf.getNodePrefix())
        self._trustSchemaFile = trustSchemaFile

    def onDataNotFound(self, prefix, interest, face, interestFilterId, filter):
        #print('Data not found for ' + interest.getName().toUri())
        return

    def startPublishing(self):
        # One-time security setup
        self.prepareLogging()

        privateKeyStorage = FilePrivateKeyStorage()
        identityStorage = BasicIdentityStorage()
        policyManager = ConfigPolicyManager(self._trustSchemaFile)

        self._keyChain = KeyChain(
            IdentityManager(identityStorage, privateKeyStorage), policyManager)
        self._certificateName = self._keyChain.createIdentityAndCertificate(
            self._identityName)

        print("My Identity name: " + self._identityName.toUri())
        print("My certificate name: " + self._certificateName.toUri())
        certificateData = self._keyChain.getIdentityManager(
        )._identityStorage.getCertificate(self._certificateName)
        print("My certificate string: " +
              b64encode(certificateData.wireEncode().toBuffer()))
        # self._keyChain.getIdentityCertificate(self._certificateName).)

        self._loop = asyncio.get_event_loop()
        self._face = ThreadsafeFace(self._loop)
        self._keyChain.setFace(self._face)

        self._face.setCommandSigningInfo(self._keyChain, self._certificateName)
        self._memoryContentCache = MemoryContentCache(self._face)

        # We should only ask for cert to be signed upon the first run of a certain aggregator
        if DO_CERT_SETUP:
            if (KeyLocator.getFromSignature(
                    certificateData.getSignature()).getKeyName().equals(
                        self._certificateName.getPrefix(-1))):
                # Need to configure for mini-ndn; aggregation node runs outside of mini-ndn first so that signed cert get installed and mini-ndn won't ask for this again
                print("certificate " + self._certificateName.toUri() +
                      " asking for signature")
                response = urllib2.urlopen(
                    "http://192.168.56.1:5000/bms-cert-hack?cert=" +
                    b64encode(certificateData.wireEncode().toBuffer()) +
                    "&cert_prefix=" + self._identityName.toUri() +
                    '&subject_name=' + self._identityName.toUri()).read()

                signedCertData = Data()
                signedCertData.wireDecode(Blob(b64decode(response)))

                self._memoryContentCache.add(signedCertData)
                cmdline = ['ndnsec-install-cert', '-']
                p = subprocess.Popen(cmdline,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
                # desanitize + sign in GET request
                cert, err = p.communicate(response)
                if p.returncode != 0:
                    raise RuntimeError("ndnsec-install-cert error")
            else:
                self._memoryContentCache.add(certificateData)
        else:
            self._memoryContentCache.add(certificateData)

        dataNode = self.conf.getDataNode()
        childrenNode = self.conf.getChildrenNode()

        self._memoryContentCache.registerPrefix(Name(self._identityName),
                                                self.onRegisterFailed,
                                                self.onDataNotFound)

        # For each type of data, we refresh each type of aggregation according to the interval in the configuration
        for i in range(len(dataNode.subtrees)):
            dataType = dataNode.subtrees.keys()[i]
            aggregationParams = self.conf.getProducingParamsForAggregationType(
                dataNode.subtrees.items()[i][1])

            if childrenNode is None:
                self._dataQueue[dataType] = DataQueue(None, None, None)
                self.generateData(dataType, 2, 0)

            for aggregationType in aggregationParams:
                childrenList = OrderedDict()
                if childrenNode is not None:

                    for j in range(len(childrenNode.subtrees)):
                        if dataType in childrenNode.subtrees.items(
                        )[j][1].subtrees['data'].subtrees:
                            if aggregationType in childrenNode.subtrees.items(
                            )[j][1].subtrees['data'].subtrees[
                                    dataType].subtrees:
                                childrenList[childrenNode.subtrees.items()[j][
                                    0]] = self.conf.getProducingParamsForAggregationType(
                                        childrenNode.subtrees.items()[j]
                                        [1].subtrees['data'].subtrees[dataType]
                                    )[aggregationType]

                self.startPublishingAggregation(
                    aggregationParams[aggregationType], childrenList, dataType,
                    aggregationType)
        return

    def startPublishingAggregation(self, params, childrenList, dataType,
                                   aggregationType):
        if __debug__:
            print('Start publishing for ' + dataType + '-' + aggregationType)

        # aggregation calculating and publishing mechanism
        publishingPrefix = Name(
            self._identityName).append(DATA_COMPONENT).append(dataType).append(
                AGGREGATION_COMPONENT).append(aggregationType)
        self._dataQueue[dataType + aggregationType] = DataQueue(
            params, childrenList, publishingPrefix)

        if len(childrenList.keys()) == 0:
            # TODO: make start_time optional for leaf nodes
            self._loop.call_later(int(params['producer_interval']),
                                  self.calculateAggregation, dataType,
                                  aggregationType, childrenList,
                                  int(params['start_time']),
                                  int(params['producer_interval']),
                                  publishingPrefix, True)
        else:
            # express interest for children who produce the same data and aggregation type
            for childName in childrenList.keys():
                name = Name(self._identityName).append(childName).append(
                    DATA_COMPONENT).append(dataType).append(
                        AGGREGATION_COMPONENT).append(aggregationType)
                interest = Interest(name)
                # if start_time is specified, we ask for data starting at start_time;
                # if not, we ask for the right most child and go from there
                if ('start_time' in childrenList[childName]):
                    endTime = int(childrenList[childName]['start_time']) + int(
                        childrenList[childName]['producer_interval'])
                    interest.getName().append(
                        str(childrenList[childName]['start_time'])).append(
                            str(endTime))
                else:
                    # TODO: For now we are playing with historical data, for each run we don't want to miss any data, thus we start with leftMost
                    interest.setChildSelector(0)
                    interest.setMustBeFresh(True)
                interest.setInterestLifetimeMilliseconds(
                    DEFAULT_INTEREST_LIFETIME)
                if __debug__:
                    print('  Issue interest: ' + interest.getName().toUri())
                self._face.expressInterest(interest, self.onData,
                                           self.onTimeout)

        return

    # TODO: once one calculation's decided a child has not answered, we should do another calculation
    def calculateAggregation(self,
                             dataType,
                             aggregationType,
                             childrenList,
                             startTime,
                             interval,
                             publishingPrefix,
                             repeat=False):
        doCalc = True
        dataList = []

        # TODO: an intermediate node cannot produce raw data for now
        if len(childrenList.keys()) != 0:
            for childName in childrenList.keys():
                dataDictKey = self.getDataDictKey(startTime,
                                                  (startTime + interval),
                                                  childName)
                if dataDictKey in self._dataQueue[dataType +
                                                  aggregationType]._dataDict:
                    data = self._dataQueue[
                        dataType + aggregationType]._dataDict[dataDictKey]
                    dataList.append(float(data.getContent().toRawStr()))
                else:
                    #print('Child ' + childName + ' has not replied yet')
                    doCalc = False
                    break
        else:
            for inst in self._dataQueue[dataType]._dataDict.keys():
                if int(inst) >= startTime and int(inst) < startTime + interval:
                    dataList.append(self._dataQueue[dataType]._dataDict[inst])
        if doCalc:
            content = self._aggregation.getAggregation(aggregationType,
                                                       dataList)
            if content:
                publishData = Data(
                    Name(publishingPrefix).append(str(startTime)).append(
                        str(startTime + interval)))
                publishData.setContent(str(content))
                publishData.getMetaInfo().setFreshnessPeriod(
                    DEFAULT_DATA_LIFETIME)
                self._keyChain.sign(publishData, self._certificateName)
                self._memoryContentCache.add(publishData)
                for childName in childrenList.keys():
                    dataDictKey = self.getDataDictKey(startTime,
                                                      (startTime + interval),
                                                      childName)
                    if dataDictKey in self._dataQueue[
                            dataType + aggregationType]._dataDict:
                        del self._dataQueue[
                            dataType + aggregationType]._dataDict[dataDictKey]
                if __debug__:
                    print("Produced: " + publishData.getName().toUri() + "; " +
                          publishData.getContent().toRawStr())

        # This call repeats only for a raw-data producer; otherwise
        # calculateAggregation is invoked from each onData
        if repeat:
            self._loop.call_later(interval, self.calculateAggregation,
                                  dataType, aggregationType, childrenList,
                                  startTime + interval, interval,
                                  publishingPrefix, repeat)
        return
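
# The leaf-node branch above selects raw samples whose timestamps fall in
# [startTime, startTime + interval). A condensed, self-contained version of
# that windowing (the Aggregation class is not shown here, so a plain average
# stands in for getAggregation):
def windowAverage(samples, startTime, interval):
    window = [v for t, v in samples.items()
              if startTime <= int(t) < startTime + interval]
    return sum(window) / float(len(window)) if window else None

print(windowAverage({"0": 3, "2": 5, "7": 9}, 0, 5))  # -> 4.0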

    def generateData(self, dataType, interval, startTime):
        self._dataQueue[dataType]._dataDict[str(startTime)] = random.randint(
            0, 9)
        self._loop.call_later(interval, self.generateData, dataType, interval,
                              startTime + interval)
        return

    def onRegisterFailed(self, prefix):
        raise RuntimeError("Register failed for prefix", prefix.toUri())

    def onVerified(self, data):
        print('Data verified: ' + data.getName().toUri())
        return

    def onVerifyFailed(self, data):
        print('Data verification failed: ' + data.getName().toUri())
        return

    def onData(self, interest, data):
        self._keyChain.verifyData(data, self.onVerified, self.onVerifyFailed)

        dataName = data.getName()
        dataQueue = None

        if __debug__:
            print("Got data: " + dataName.toUri() + "; " +
                  data.getContent().toRawStr())
        for i in range(0, len(dataName)):
            if dataName.get(i).toEscapedString() == AGGREGATION_COMPONENT:
                dataType = dataName.get(i - 1).toEscapedString()
                aggregationType = dataName.get(i + 1).toEscapedString()

                startTime = int(dataName.get(i + 2).toEscapedString())
                endTime = int(dataName.get(i + 3).toEscapedString())
                childName = dataName.get(i - 3).toEscapedString()

                dataAndAggregationType = dataType + aggregationType

                dataDictKey = self.getDataDictKey(startTime, endTime,
                                                  childName)
                dataQueue = self._dataQueue[dataAndAggregationType]
                dataQueue._dataDict[dataDictKey] = data
                break

        # TODO: check what happens if interval/startTime is misconfigured
        if dataQueue:
            self.calculateAggregation(dataType, aggregationType,
                                      dataQueue._childrenList, startTime,
                                      endTime - startTime,
                                      dataQueue._publishingPrefix)

        # Always ask for the next piece of data when we receive this one;
        # assumes the interval does not change and that there are no more
        # name components after endTime.
        #newInterestName = dataName.getPrefix(i + 2).append(str(endTime)).append(str(endTime + (endTime - startTime)))

        # We don't expect aggregated data name to be continuous within our given time window, so we ask with exclusion instead
        newInterestName = dataName.getPrefix(i + 2)
        newInterest = Interest(interest)
        newInterest.setName(newInterestName)
        newInterest.setChildSelector(0)

        exclude = Exclude()
        exclude.appendAny()
        exclude.appendComponent(dataName.get(i + 2))
        newInterest.setExclude(exclude)

        self._face.expressInterest(newInterest, self.onData, self.onTimeout)
        if __debug__:
            print("  issue interest: " + interest.getName().toUri())

        return
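
# A condensed sketch of the exclusion-based refetch above: keep the prefix up
# to the aggregation type, then ask for the leftmost child name whose start
# time component is later than the one just received. The name below is
# hypothetical.
from pyndn import Name, Interest, Exclude

received = Name("/node1/data/temperature/aggregation/avg/1000/2000")
refetch = Interest(received.getPrefix(-2))   # strip <startTime>/<endTime>
refetch.setChildSelector(0)                  # leftmost remaining match
exclude = Exclude()
exclude.appendAny()
exclude.appendComponent(received.get(-2))    # exclude up through start 1000
refetch.setExclude(exclude)
print(refetch.getName().toUri())  # -> /node1/data/temperature/aggregation/avg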

    def onTimeout(self, interest):
        if __debug__:
            print("  Interest timeout: " + interest.getName().toUri() +
                  "; re-expressing")
        self._face.expressInterest(interest, self.onData, self.onTimeout)
        return

    def stop(self):
        self._loop.stop()
        if __debug__:
            print("Stopped")
        return

    # Note: dataDictKey embeds childName, so a parent and a child must not share the same name
    @staticmethod
    def getDataDictKey(startTime, endTime, childName):
        return str(startTime) + '/' + str(endTime) + '/' + childName


##
# Logging
##

    def prepareLogging(self):
        self.log = logging.getLogger(str(self.__class__))
        self.log.setLevel(logging.DEBUG)
        logFormat = "%(asctime)-15s %(name)-20s %(funcName)-20s (%(levelname)-8s):\n\t%(message)s"
        self._console = logging.StreamHandler()
        self._console.setFormatter(logging.Formatter(logFormat))
        self._console.setLevel(logging.INFO)
        # without this, a lot of ThreadsafeFace errors get swallowed up
        logging.getLogger("trollius").addHandler(self._console)
        self.log.addHandler(self._console)

    def setLogLevel(self, level):
        """
        Set the log level that will be output to standard error
        :param level: A log level constant defined in the logging module (e.g. logging.INFO) 
        """
        self._console.setLevel(level)

    def getLogger(self):
        """
        :return: The logger associated with this node
        :rtype: logging.Logger
        """
        return self.log
Example #22
0
class NaiveEDLParserAndPublisher(object):
    def __init__(self, applyEDLAdjustment=True):
        # prepare trollius logging
        self.prepareLogging()

        self._events = dict()
        self._running = False
        self._applyEDLAdjustment = applyEDLAdjustment

        # NDN related variables
        self._loop = asyncio.get_event_loop()
        self._face = ThreadsafeFace(self._loop)

        # Use the system default key chain and certificate name to sign commands.
        self._keyChain = KeyChain()
        self._keyChain.setFace(self._face)
        self._certificateName = self._keyChain.getDefaultCertificateName()
        self._face.setCommandSigningInfo(self._keyChain, self._certificateName)
        self._memoryContentCache = MemoryContentCache(self._face)

        # Publishing parameters configuration
        self._translationServiceUrl = "http://the-archive.la/losangeles/services/get-youtube-url"
        self._namePrefixString = "/ndn/edu/ucla/remap/test/edl/"

        self._dataLifetime = 2000
        self._publishBeforeSeconds = 3
        self._translateBeforeSeconds = 60
        self._currentIdx = 0

        # Youtube related variables:
        # Channel Global song: UCSMJaKICZKXkpvr7Gj8pPUg
        # Channel Los Angeles: UCeuQoBBzMW6SWkxd8_1I8NQ
        # self._channelID = 'UCSMJaKICZKXkpvr7Gj8pPUg'
        self._channelID = "UCSMJaKICZKXkpvr7Gj8pPUg"
        self._accessKey = "AIzaSyCe8t7PnmWjMKZ1gBouhP1zARpqNwHAs0s"
        # queryStr = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails,statistics,status&key=' + apiKey + '&id='
        # Video query example
        # https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails,statistics,status&key=AIzaSyDUY_AX1iJQcwCW1mASEp5GcLtq1V9BM1Q&id=_ebELPKANxo
        # Channel query example
        # https://www.googleapis.com/youtube/v3/search?key=AIzaSyCe8t7PnmWjMKZ1gBouhP1zARpqNwHAs0s&channelId=UCSMJaKICZKXkpvr7Gj8pPUg&part=snippet,id&order=date&maxResults=20
        self._videoUrlDict = dict()

        self._edlAdjustmentDict = dict()
        return

    def getClipUrlOAuth(self):
        self._videoUrlDict = dict(
            (k.lower(), v) for k, v in getAllVideosFromChannel().iteritems())

    # Old getClipUrl function that looks at the public Youtube channel without using Python API
    def getClipUrl(self, nextPageToken=None):
        options = {"part": "snippet,id", "order": "date", "maxResults": "20"}
        if nextPageToken is not None:
            options["pageToken"] = nextPageToken
        prefix = "https://www.googleapis.com/youtube/v3/search?"

        queryUrl = prefix + "key=" + self._accessKey + "&channelId=" + self._channelID
        for item in options:
            queryUrl += "&" + item + "=" + options[item]
        result = json.loads(urllib.urlopen(queryUrl).read())
        for item in result["items"]:
            if "snippet" in item and "id" in item and "videoId" in item["id"]:
                self._videoUrlDict[item["snippet"]["title"].lower()] = item["id"]["videoId"]
            else:
                print("Unexpected JSON from youtube channel query")
        if "nextPageToken" in result:
            self.getClipUrl(result["nextPageToken"])
        else:
            if __debug__:
                print("Building videoUrl dict finished; number of entries: " + str(len(self._videoUrlDict)))
                # for item in self._videoUrlDict:
                #  print("* " + item)
        return
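
# The recursive pagination in getClipUrl can also be written iteratively;
# this standalone sketch keeps the same query parameters and the same
# Python 2 urllib style used by the class (the key and channel arguments are
# placeholders).
import json
import urllib

def fetchAllVideos(accessKey, channelID):
    videos, pageToken = {}, None
    base = ("https://www.googleapis.com/youtube/v3/search?part=snippet,id"
            "&order=date&maxResults=20&key=" + accessKey +
            "&channelId=" + channelID)
    while True:
        url = base + ("&pageToken=" + pageToken if pageToken else "")
        result = json.loads(urllib.urlopen(url).read())
        for item in result.get("items", []):
            if "snippet" in item and "videoId" in item.get("id", {}):
                videos[item["snippet"]["title"].lower()] = item["id"]["videoId"]
        pageToken = result.get("nextPageToken")
        if pageToken is None:
            return videos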

    def parse(self, fileName):
        isEventBegin = False
        lastEventID = -1
        with open(fileName, "r") as edlFile:
            for line in edlFile:
                if isEventBegin:
                    components = line.split()
                    try:
                        eventID = int(components[0])
                    except ValueError:
                        print("Cannot cast " + components[0] + " to eventID")
                        continue
                    # We seem to have a fixed number of components here;
                    # reference: http://www.edlmax.com/maxguide.html
                    reelName = components[1]
                    channel = components[2]
                    trans = components[3]

                    timeComponentsIdx = len(components) - 4

                    srcStartTime = components[timeComponentsIdx]
                    srcEndTime = components[timeComponentsIdx + 1]
                    dstStartTime = components[timeComponentsIdx + 2]
                    dstEndTime = components[timeComponentsIdx + 3]

                    # Build the event record directly as a dict; formatting a
                    # JSON string would break if a field value contained quotes
                    self._events[eventID] = {
                        "event_id": str(eventID),
                        "reel_name": reelName,
                        "channel": channel,
                        "trans": trans,
                        "src_start_time": srcStartTime,
                        "src_end_time": srcEndTime,
                        "dst_start_time": dstStartTime,
                        "dst_end_time": dstEndTime,
                        "src_url": "none",
                        "translated": "none",
                        "clipName": "n/a",
                        "ytPresent": "n/a",
                    }

                    isEventBegin = False
                    lastEventID = eventID
                elif re.match(r"\s+", line) is not None or line == "":
                    isEventBegin = True
                elif lastEventID > 0:
                    # Skip continuation lines of events that were dropped earlier
                    if eventID not in self._events:
                        print("Line skipped because of missing start time adjustment")
                        continue

                    fromClipNameMatch = re.match(r"\* FROM CLIP NAME: ([^\n]*)\n", line)
                    if fromClipNameMatch is not None:
                        clipName = fromClipNameMatch.group(1).strip()
                        parsedClipName = clipName.lower().replace("_", " ").replace("-", " ")

                        if self._applyEDLAdjustment:
                            if clipName in self._edlAdjustmentDict:
                                startTimeAdjusted = self.getTimeMinus(
                                    self._edlAdjustmentDict[clipName].split(":"),
                                    self._events[eventID]["src_start_time"].split(":"),
                                )
                                endTimeAdjusted = self.getTimeMinus(
                                    self._edlAdjustmentDict[clipName].split(":"),
                                    self._events[eventID]["src_end_time"].split(":"),
                                )
                                self._events[eventID]["src_start_time"] = startTimeAdjusted
                                self._events[eventID]["src_end_time"] = endTimeAdjusted

                                # Skipping events that do not have right offset
                                if startTimeAdjusted == "" or endTimeAdjusted == "":
                                    print(
                                        clipName + " : " + startTimeAdjusted,
                                        " start time incorrect; event " + str(eventID) + " ignored",
                                    )
                                    del self._events[eventID]
                                    continue
                            else:
                                # Skipping events that do not have right offset
                                print(
                                    "Warning: EDL adjustment not found for "
                                    + clipName
                                    + "; event "
                                    + str(eventID)
                                    + " ignored"
                                )
                                del self._events[eventID]
                                continue

                        self._events[eventID]["clipName"] = parsedClipName
                        # Skip audio-only clips (.wav or .mp3) for now
                        if parsedClipName.endswith(".wav") or parsedClipName.endswith(".mp3"):
                            continue
                        else:
                            parsedClipName = (" ").join(parsedClipName.split(".")[:-1])
                            # print(parsedClipName)
                        if parsedClipName in self._videoUrlDict:
                            # we assume one src_url from one FROM CLIP NAME for now
                            self._events[eventID]["src_url"] = (
                                "https://www.youtube.com/watch?v=" + self._videoUrlDict[parsedClipName]
                            )
                            self._events[eventID]["ytPresent"] = "YES"
                            print("src_url is " + self._events[eventID]["src_url"])
                        else:
                            self._events[eventID]["ytPresent"] = "NO"
                            print("Warning: file not found in Youtube channel: " + clipName)
                    else:
                        if "payload" not in self._events[eventID]:
                            self._events[eventID]["payload"] = [line]
                        else:
                            self._events[eventID]["payload"].append(line)

    @asyncio.coroutine
    def startPublishing(self):
        if len(self._events) == 0:
            return
        elif not self._running:
            self._memoryContentCache.registerPrefix(
                Name(self._namePrefixString), self.onRegisterFailed, self.onDataNotFound
            )
            startTime = time.time()

            latestEventTime = 0
            lastEventID = 0
            for event_id in sorted(self._events):
                timeStrs = self._events[event_id]["dst_start_time"].split(":")
                publishingTime = self.getScheduledTime(timeStrs, self._publishBeforeSeconds)
                translationTime = self.getScheduledTime(timeStrs, self._translateBeforeSeconds)
                if publishingTime > latestEventTime:
                    latestEventTime = publishingTime
                self._loop.call_later(translationTime, self.translateUrl, event_id)
                self._loop.call_later(publishingTime, self.publishData, event_id)
                lastEventID = event_id

            # Append an arbitrary 'end' marker event
            lastEventID = lastEventID + 1
            self._events[lastEventID] = {
                "event_id": str(lastEventID),
                "src_url": "end",
                "translated": "not-required",
            }
            startTime = self.getScheduledTime(self._events[lastEventID - 1]["src_start_time"].split(":"), 0)
            endTime = self.getScheduledTime(self._events[lastEventID - 1]["src_end_time"].split(":"), 0)
            print("scheduled end " + str(endTime - startTime) + " sec from now")
            self._loop.call_later(latestEventTime + 1, self.publishData, lastEventID)
            self._loop.call_later(latestEventTime + 2 + (endTime - startTime), self._loop.stop)

            self._running = True

    def translateUrl(self, idx):
        queryUrl = self._translationServiceUrl
        timeStrs = self._events[idx]["src_start_time"].split(":")

        # we don't have the video from Youtube
        if self._events[idx]["src_url"] == "none":
            # print("no video from Youtube")
            # we still publish the data even if src_url is "none", to maintain consecutive sequence numbers
            self._events[idx]["translated"] = "non-existent"
            return

        serviceUrl = self._events[idx]["src_url"]  # + "&t=" + str(self.timeToSeconds(timeStrs)) + "s"

        values = {"url": serviceUrl, "fetchIfNotExist": "true"}

        data = urllibparse.urlencode(values)
        # This synchronous POST may block the execution of publishData;
        # it should be made asynchronous later. (Python 2 urllib issues a
        # POST when a data argument is passed.)
        response = urllib.urlopen(queryUrl, data)
        videoUrl = response.read()

        self._events[idx]["ori_url"] = serviceUrl
        self._events[idx]["src_url"] = videoUrl

        if self._events[idx]["translated"] == "publish":
            # We already missed the scheduled publishing time; should publish as soon as translation finishes
            self.publishData(idx)
        else:
            self._events[idx]["translated"] = "translated"
        return
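
# The translateUrl/publishData handoff above is a two-flag state machine on
# the "translated" field; both callbacks run on the same event-loop thread,
# so no locking is needed. A condensed standalone sketch:
state = {"translated": "none"}

def onTranslated():
    if state["translated"] == "publish":   # the publish timer fired first
        publish()
    else:
        state["translated"] = "translated"

def onPublishTimer():
    if state["translated"] != "none":      # translation already finished
        publish()
    else:
        state["translated"] = "publish"    # let translation trigger publish

def publish():
    print("published")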

    def publishData(self, idx):
        # Translation of the video URL should have finished by the time
        # publishData is called; if not, we set "translated" to "publish" so
        # that translateUrl publishes on completion. This is free of data
        # races since translateUrl and publishData run on the same thread
        if self._events[idx]["translated"] != "none":
            # Order published events sequence numbers by start times in destination
            data = Data(Name(self._namePrefixString + str(self._currentIdx)))

            data.setContent(json.dumps(self._events[idx]))
            data.getMetaInfo().setFreshnessPeriod(self._dataLifetime)
            self._keyChain.sign(data, self._certificateName)
            self._memoryContentCache.add(data)
            self._currentIdx += 1
            if __debug__:
                eventId = str(self._events[idx]["event_id"])
                channel = str(self._events[idx]["channel"])
                srcUrl = str(self._events[idx]["src_url"])
                clipName = str(self._events[idx]["clipName"])
                ytPresent = str(self._events[idx]["ytPresent"])
                clipStartTime = str(self._events[idx]["dst_start_time"])
                clipEndTime = str(self._events[idx]["dst_end_time"])
                print(
                    str(time.time())
                    + " Added event ["
                    + eventId
                    + "-"
                    + channel
                    + "|"
                    + clipName
                    + " YT:"
                    + ytPresent
                    + " "
                    + srcUrl[0:30]
                    + "... "
                    + clipStartTime
                    + "-"
                    + clipEndTime
                    + "] ("
                    + data.getName().toUri()
                    + ")"
                )
        else:
            self._events[idx]["translated"] = "publish"

    def timeToSeconds(self, timeStrs):
        seconds = int(timeStrs[2])
        minutes = int(timeStrs[1])
        hours = int(timeStrs[0])
        ret = hours * 3600 + minutes * 60 + seconds
        return ret

    def getTimeMinus(self, timeStrs1, timeStrs2):
        frameNumber = int(timeStrs1[3])
        seconds = int(timeStrs1[2])
        minutes = int(timeStrs1[1])
        hours = int(timeStrs1[0])

        frameNumber2 = int(timeStrs2[3]) - frameNumber
        seconds2 = int(timeStrs2[2]) - seconds
        minutes2 = int(timeStrs2[1]) - minutes
        hours2 = int(timeStrs2[0]) - hours

        if frameNumber2 < 0:
            # frame rate assumption
            frameNumber2 = 30 + frameNumber2
            seconds2 = seconds2 - 1

        if seconds2 < 0:
            seconds2 = 60 + seconds2
            minutes2 = minutes2 - 1

        if minutes2 < 0:
            minutes2 = 60 + minutes2
            hours2 = hours2 - 1

        if hours2 < 0:
            print("Warning: time minus smaller than 0")
            return ""

        # Arbitrary guard of start times that are off
        if hours2 > 1 or minutes2 > 1:
            return ""

        return ":".join([str(hours2), str(minutes2), str(seconds2), str(frameNumber2)])

    def getScheduledTime(self, timeStrs, beforeSeconds):
        frameNumber = int(timeStrs[3])
        seconds = int(timeStrs[2])
        minutes = int(timeStrs[1])
        hours = int(timeStrs[0])
        ret = hours * 3600 + minutes * 60 + seconds - beforeSeconds
        return 0 if ret < 0 else ret

    def onRegisterFailed(self, prefix):
        raise RuntimeError("Register failed for prefix", prefix.toUri())

    def onDataNotFound(self, prefix, interest, face, interestFilterId, filter):
        # print('Data not found for interest: ' + interest.getName().toUri())
        return

    #############################
    # Logging
    #############################
    def prepareLogging(self):
        self.log = logging.getLogger(str(self.__class__))
        self.log.setLevel(logging.DEBUG)
        logFormat = "%(asctime)-15s %(name)-20s %(funcName)-20s (%(levelname)-8s):\n\t%(message)s"
        self._console = logging.StreamHandler()
        self._console.setFormatter(logging.Formatter(logFormat))
        self._console.setLevel(logging.INFO)
        # without this, a lot of ThreadsafeFace errors get swallowed up
        logging.getLogger("trollius").addHandler(self._console)
        self.log.addHandler(self._console)

    def setLogLevel(self, level):
        """
    Set the log level that will be output to standard error
    :param level: A log level constant defined in the logging module (e.g. logging.INFO) 
    """
        self._console.setLevel(level)

    def getLogger(self):
        """
    :return: The logger associated with this node
    :rtype: logging.Logger
    """
        return self.log

    ############################
    def loadEDLAdjustment(self, csvFile):
        with open(csvFile, "rb") as csvfile:
            reader = csv.reader(csvfile, delimiter=",", quotechar="|")
            for row in reader:
                self._edlAdjustmentDict[row[3]] = row[1]
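
# A plausible driver for the class above (the file names are hypothetical,
# and this assumes a local NDN forwarder is running): startPublishing is an
# asyncio coroutine, so it is scheduled on the loop before running forever.
parser = NaiveEDLParserAndPublisher()
parser.loadEDLAdjustment("adjustments.csv")
parser.getClipUrl()
parser.parse("show.edl")
parser._loop.run_until_complete(parser.startPublishing())
parser._loop.run_forever()
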
Example #23
0
#TODO: Test if DB not initialized, else do
#first_run()
timestamp = int(time.time())
#sys.exit(1)
testbed_json = json.load(open(request.urlretrieve("http://ndndemo.arl.wustl.edu/testbed-nodes.json")[0], "r"))
#Initialize keychain
keychain = KeyChain()
loop = asyncio.get_event_loop()
# loop.call_soon(destroy)
# loop.run_forever()
valid_prefixes = []
for hub_name in testbed_json.keys():
    hub = testbed_json[hub_name]
    valid_prefixes.append(Name(PREFIX + hub["shortname"]))

for hub_name in testbed_json.keys():
    hub = testbed_json[hub_name]
    print("Adding faces to hub: {}".format(hub["name"]))
    # Derive the forwarder address from the site URL (6363 is the NFD port)
    face_base = hub["site"].replace("http://", "").replace(":80/", ":6363")
    for protocol in PROTOCOLS:
        face = ThreadsafeFace(loop, "{}{}".format(protocol, face_base))
        face.setCommandSigningInfo(keychain, keychain.getDefaultCertificateName())
        prefix = Name(PREFIX + hub["shortname"])
        options = RegistrationOptions().setOrigin(65)
        face.registerPrefix(prefix, onInterest, onRegisterFailed,
                            onRegisterSuccess=registration,
                            registrationOptions=options)
        time.sleep(2)
        print("Begin ping")
        loop.call_soon(schedulePings, prefix)

# Run the event loop once all faces are registered and pings are scheduled
loop.run_forever()
#time.sleep(30)
conn.close()
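
# A hedged sketch of what schedulePings (not shown in this snippet) might do:
# express an ndnping-style interest under the hub prefix and reschedule
# itself once per second. The callback names and the 1-second period are
# assumptions, not part of the original script.
import random
from pyndn import Name, Interest

def schedulePings(prefix):
    interest = Interest(Name(prefix).append("ping")
                        .append(str(random.randint(0, 2 ** 31))))
    interest.setMustBeFresh(True)
    face.expressInterest(interest, onPingData, onPingTimeout)
    loop.call_later(1.0, schedulePings, prefix)

def onPingData(interest, data):
    print("Reply from {}".format(data.getName().toUri()))

def onPingTimeout(interest):
    print("Timeout for {}".format(interest.getName().toUri()))
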
Example #24
0
class BaseNode(object):
    """
    This class contains methods/attributes common to both node and controller.
    """
    def __init__(self, transport = None, conn = None):
        """
        Initialize the network and security classes for the node
        """
        super(BaseNode, self).__init__()
        self.faceTransport = transport
        self.faceConn = conn
        
        self._identityStorage = BasicIdentityStorage()

        self._identityManager = IdentityManager(self._identityStorage, FilePrivateKeyStorage())
        self._policyManager = IotPolicyManager(self._identityStorage)

        # hopefully there is some private/public key pair available
        self._keyChain = KeyChain(self._identityManager, self._policyManager)

        self._registrationFailures = 0
        self._prepareLogging()

        self._setupComplete = False


##
# Logging
##
    def _prepareLogging(self):
        self.log = logging.getLogger(str(self.__class__))
        self.log.setLevel(logging.DEBUG)
        logFormat = "%(asctime)-15s %(name)-20s %(funcName)-20s (%(levelname)-8s):\n\t%(message)s"
        self._console = logging.StreamHandler()
        self._console.setFormatter(logging.Formatter(logFormat))
        self._console.setLevel(logging.INFO)
        # without this, a lot of ThreadsafeFace errors get swallowed up
        logging.getLogger("trollius").addHandler(self._console)
        self.log.addHandler(self._console)

    def setLogLevel(self, level):
        """
        Set the log level that will be output to standard error
        :param level: A log level constant defined in the logging module (e.g. logging.INFO) 
        """
        self._console.setLevel(level)

    def getLogger(self):
        """
        :return: The logger associated with this node
        :rtype: logging.Logger
        """
        return self.log

###
# Startup and shutdown
###
    def beforeLoopStart(self):
        """
        Called before the event loop starts.
        """
        pass

    def getDefaultCertificateName(self):
        try:
            certName = self._identityStorage.getDefaultCertificateNameForIdentity( 
                self._policyManager.getDeviceIdentity())
        except SecurityException as e:
            # zhehao: in the case of producer's /localhop prefixes, the default key is not defined in ndnsec-public-info.db
            certName = self._keyChain.createIdentityAndCertificate(self._policyManager.getDeviceIdentity())
            #certName = self._keyChain.getDefaultCertificateName()
            #print(certName.toUri())
        return certName

    def start(self):
        """
        Begins the event loop. After this, the node's Face is set up and it can
        send/receive interests+data
        """
        self.log.info("Starting up")
        self.loop = asyncio.get_event_loop()
        
        if (self.faceTransport == None or self.faceTransport == ''):
            self.face = ThreadsafeFace(self.loop)
        else:
            self.face = ThreadsafeFace(self.loop, self.faceTransport, self.faceConn)
        
        self.face.setCommandSigningInfo(self._keyChain, self.getDefaultCertificateName())
        
        self._keyChain.setFace(self.face)

        self._isStopped = False
        self.beforeLoopStart()
        
        try:
            self.loop.run_forever()
        except Exception:
            self.log.exception("Exception in event loop")
        finally:
            self.stop()

    def stop(self):
        """
        Stops the node, taking it off the network
        """
        self.log.info("Shutting down")
        self._isStopped = True 
        self.loop.stop()
        
###
# Data handling
###
    def signData(self, data):
        """
        Sign the data with our network certificate
        :param pyndn.Data data: The data to sign
        """
        self._keyChain.sign(data, self.getDefaultCertificateName())

    def sendData(self, data, sign=True):
        """
        Reply to an interest with a data packet, optionally signing it.
        :param pyndn.Data data: The response data packet
        :param boolean sign: (optional, default=True) Whether the response must be signed. 
        """
        if sign:
            self.signData(data)
        self.face.putData(data)

###
# 
# 
##
    def onRegisterFailed(self, prefix):
        """
        Called when the node cannot register its name with the forwarder
        :param pyndn.Name prefix: The network name that failed registration
        """
        if self.faceTransport != None and self.faceConn != None:
            self.log.warn("Explicit face transport and connectionInfo: Could not register {}; expect a manual or autoreg on the other side.".format(prefix.toUri()))
        elif self._registrationFailures < 5:
            self._registrationFailures += 1
            self.log.warn("Could not register {}, retry: {}/{}".format(prefix.toUri(), self._registrationFailures, 5)) 
            self.face.registerPrefix(self.prefix, self._onCommandReceived, self.onRegisterFailed)
        else:
            self.log.info("Prefix registration failed")
            self.stop()

    def verificationFailed(self, dataOrInterest):
        """
        Called when verification of a data packet or command interest fails.
        :param pyndn.Data or pyndn.Interest: The packet that could not be verified
        """
        self.log.info("Received invalid" + dataOrInterest.getName().toUri())

    @staticmethod
    def getSerial():
        """
        Find and return the serial number of the Raspberry Pi. Provided in case
        you wish to distinguish data from nodes with the same name by serial.
        :return: The serial number extracted from device information in /proc/cpuinfo
        :rtype: str
        """
        try:
            with open('/proc/cpuinfo') as f:
                for line in f:
                    if line.startswith('Serial'):
                        return line.split(':')[1].strip()
        except IOError:
            # /proc/cpuinfo is unavailable (e.g. not on a Raspberry Pi)
            return "todo"
Example #25
0
class Flask():
    def __init__(self, host):
        self.__name__ = __name__
        self.keyChain = KeyChain()
        self.isDone = False
        self.counter = 0
        loop = asyncio.get_event_loop()
        self.face = ThreadsafeFace(loop, host)
        self.a = {}
        self.methods = {}

    def route(self, uri, methods):
        prefix = uri
        if re.search('<(.*)>', prefix):
            self.baseName = ndn.Name(
                re.sub('<(.*)>', methods[0] + '/<data>', prefix))
        else:
            self.baseName = ndn.Name(prefix + "/" + methods[0])
        self.methods[self.baseName.toUri()] = methods[0]
        return self.dec

    def onInterest(self, prefix, interest, *k):
        print >> sys.stderr, "<< PyNDN %s" % interest.name

        interestUri = interest.name.toUri()
        prefixUri = prefix.toUri()

        parameters = ''
        if interestUri != prefixUri:
            parameters = interestUri[len(prefixUri) + 1:]
            prefixUri += "/%3Cdata%3E"

        d = self.a[prefixUri]
        if self.methods[prefixUri] == "POST":
            content = json.dumps(
                d(interest.getContent().toRawStr().decode('string_escape')))
        else:
            if parameters:
                content = json.dumps(d(parameters))
            else:
                content = json.dumps(d())

        self.counter += 1
        data = ndn.Data(interest.getName())

        meta = ndn.MetaInfo()
        meta.setFreshnessPeriod(5000)
        data.setMetaInfo(meta)

        data.setContent(content)
        self.keyChain.sign(data, self.keyChain.getDefaultCertificateName())

        self.face.putData(data)

    def _onRegisterFailed(self, prefix):
        print >> sys.stderr, "<< PyNDN: failed to register prefix"

    def run(self):
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)

        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        root.addHandler(ch)
        loop = asyncio.get_event_loop()
        server = Server(self.face)

        loop.run_forever()
        self.face.shutdown()

    def dec(self, func):
        self.a[self.baseName.toUri()] = func
        self.face.registerPrefix(
            self.baseName,
            self.onInterest,
            self._onRegisterFailed,
        )
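
# Usage sketch for the Flask-like wrapper above (the host and route are
# hypothetical): the decorator registers the prefix "/hello/GET" and serves
# the function's return value as signed, JSON-encoded data.
app = Flask("localhost")

@app.route("/hello", methods=["GET"])
def hello():
    return {"message": "hello over NDN"}

app.run()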