def main():
    """Publish /example-data/<seq>/_meta and JPEG segments, then serve them forever.

    For each sequence number it writes a ContentMetaInfo `_meta` packet and the
    JPEG split into segments under the CNL Namespace, then runs the Face event
    loop so the packets can be fetched.
    """
    # The default Face will connect using a Unix socket, or to "localhost".
    face = Face()

    # Create an in-memory key chain with default keys.
    keyChain = KeyChain()
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

    streamNamespace = Namespace(Name("/example-data"), keyChain)
    dump("Register prefix", streamNamespace.name)
    # Set the face and register to receive Interests.
    streamNamespace.setFace(
        face, lambda prefixName: dump("Register failed for prefix", prefixName))

    # Read the jpeg file once; it is the same payload for every sequence number,
    # so segmenting parameters are loop-invariant and hoisted out of the loop.
    img_path = os.path.join(root_path, "sample420x236.jpg")
    with open(img_path, "rb") as f:
        data = f.read()
    segment_size = face.getMaxNdnPacketSize() // 2
    # Ceiling division: number of segments needed to cover the whole payload.
    segment_cnt = (len(data) + segment_size - 1) // segment_size

    # /example-data/2~3
    for sequenceNumber in range(2, 4):
        sequenceNamespace = streamNamespace[str(sequenceNumber)]
        dump("Preparing data for", sequenceNamespace.name)

        # Prepare the _meta packet announcing the content type and that
        # the payload follows as segments.
        contentMetaInfo = ContentMetaInfo()
        contentMetaInfo.setContentType("jpeg")
        contentMetaInfo.setTimestamp(Common.getNowMilliseconds())
        contentMetaInfo.setHasSegments(True)
        sequenceNamespace["_meta"].serializeObject(contentMetaInfo.wireEncode())

        # Advertise the last segment number so consumers know when to stop.
        metaInfo = MetaInfo()
        metaInfo.setFinalBlockId(Name().appendSegment(segment_cnt - 1)[0])
        sequenceNamespace.setNewDataMetaInfo(metaInfo)

        for i in range(segment_cnt):
            start_offset = i * segment_size
            end_offset = start_offset + segment_size
            sequenceNamespace[Name.Component.fromSegment(i)].serializeObject(
                Blob(bytearray(data[start_offset:end_offset])))

    while True:
        face.processEvents()
        # We need to sleep for a few milliseconds so we don't use 100% of the CPU.
        time.sleep(0.01)
def test_max_ndn_packet_size(self):
    """expressInterest must raise when the encoded Interest exceeds the limit."""
    # We want an Interest whose wire encoding is exactly one byte over the limit.
    oversize = Face.getMaxNdnPacketSize() + 1

    # First encode with a name component of `oversize` bytes to measure the
    # fixed TLV overhead the encoding adds on top of the component.
    probe = Interest()
    probe.getName().append(bytearray(oversize))
    overhead = probe.wireEncode().size() - oversize

    # Shrink the component by the measured overhead so the full encoding
    # lands exactly on `oversize` bytes.
    probe.setName(Name().append(bytearray(oversize - overhead)))
    encodedSize = probe.wireEncode().size()
    self.assertEqual(oversize, encodedSize,
                     "Wrong interest size for MaxNdnPacketSize")

    with self.assertRaises(RuntimeError):
        # If no error is raised, then expressInterest didn't throw an
        # exception when the interest size exceeds getMaxNdnPacketSize()
        self.face.expressInterest(probe, Mock(), Mock())
def run(self, namespace):
    """Register `namespace` as a served prefix and pump the event loop until done.

    namespace: URI string (or Name-compatible value) to register; Interests
    arriving under it are dispatched to self.onInterest.
    Loops until self.isDone is set (e.g. by a failure callback).
    """
    # NOTE(review): despite the original "Unix socket" comment, this connects
    # to the local forwarder over TCP at 127.0.0.1 (the Face() default that
    # would use a Unix socket is commented out below).
    # face = Face()
    face = Face("127.0.0.1")
    prefix = Name(namespace)
    # Use the system default key chain and certificate name to sign commands.
    cert_name = self.keyChain.getDefaultCertificateName()
    face.setCommandSigningInfo(self.keyChain, cert_name)
    # Also use the default certificate name to sign Data packets.
    face.registerPrefix(prefix, self.onInterest, self.onRegisterFailed)
    print "Registering prefix", prefix.toUri()
    # Cache the practical network-layer packet size limit for later segmenting.
    self.maxNdnPacketSize = face.getMaxNdnPacketSize()
    # Run the event loop forever. Use a short sleep to
    # prevent the Producer from using 100% of the CPU.
    while not self.isDone:
        face.processEvents()
        time.sleep(0.01)
def __init__(self, deeplab_manager, fst_manager, _root_path, storage):
    # type: (DeepLab, Fst, str, IStorage) -> None
    """Wire up the processing managers, fetcher, and storage backend.

    deeplab_manager / fst_manager: processing backends; their on_finished
    hooks are pointed at self.on_process_finished so this object is notified
    when a job completes.
    _root_path: accepted but unused here (kept for interface compatibility).
    storage: persistence backend handed to the Fetcher.
    """
    # Face is created later (e.g. when the server starts); None until then.
    self.face = None
    self.keychain = KeyChain()
    # self.namespace = Namespace(Name(SERVER_PREFIX).append(RESULT_PREFIX), self.keychain)
    # self.namespace.addOnObjectNeeded(self.on_result_interest)
    # Half the max packet size leaves headroom for name/signature overhead.
    self.segment_size = Face.getMaxNdnPacketSize() // 2
    self.running = False
    self._restart = False
    self.deeplab_manager = deeplab_manager
    self.fst_manager = fst_manager
    self.storage = storage
    # Both managers report completion through the same callback.
    deeplab_manager.on_finished = self.on_process_finished
    fst_manager.on_finished = self.on_process_finished
    self.fetcher = Fetcher(self.keychain, self.on_payload, self.storage,
                           self.on_fetch_fail)
    # Filter ids returned by Face.setInterestFilter; 0 = not yet registered.
    self.command_filter_id = 0
    self.result_filter_id = 0
    # Supported operations: every FST model plus the "deeplab" segmenter.
    self.operations_set = self.fst_manager.get_models() | {"deeplab"}
class Decision_Engine_Main(object):
    """Decision-engine node: serves service images on pull and pushes
    deployment commands to SEG nodes over NDN.

    Registers two prefixes on a TCP face to the local forwarder:
    - configPrefix  -> onInterest_PullService (serve image file segments)
    - /picasso/start_de/ -> onInterest_StartDE (trigger a deployment push)
    """

    def __init__(self, namePrefix):
        # Outstanding Interest URIs -> retry count (not used by push-only sends).
        self.outstanding = dict()
        self.isDone = False
        self.keyChain = KeyChain()
        self.face = Face("127.0.0.1")
        self.configPrefix = Name(namePrefix)
        self.script_path = os.path.abspath(
            __file__)  # i.e. /path/to/dir/foobar.py
        self.script_dir = os.path.split(
            self.script_path)[0]  # i.e. /path/to/dir/
        self.interestLifetime = 800000
        #self.Datamessage_size = 1999000
        # Segment size passed to EnumeratePublisher when serving files.
        self.Datamessage_size = 19990000
        folder_name = "SC_repository/"
        rel_path = os.path.join(self.script_dir, folder_name)
        prefix_startDE = "/picasso/start_de/"
        self.prefix_startDE = Name(prefix_startDE)
        self.prefix_deployService = '/picasso/service_deployment_push/'
        # Skeleton service-spec document; values are placeholder descriptions.
        self.json_server_Spec_default = {  # This is only an skeleton
            'par': {  # service parameters
                'serviceName': 'nameOfService',
                'imageName': 'NameOfImageToIstantiateService',
                'imageSize': 'sizeOfImage',
                'maxConReq': 'maxNumConcurrentRequestsThatAnIntanceCanHandle',
                'startUpTime': 'timeToInstatiateService'
            },
            'QoS': {  # QoS parameters expected from the service
                'responseTime': 'resposeTimeExpectedFromService',
                'availability': 'availabilityExpectedFromService',
                'numConReq': 'numConcurrentRequestsToBeHandledByService'
            }
        }
        if not os.path.exists(rel_path):
            os.makedirs(rel_path)

    def run(self):
        """Register both prefixes and pump the event loop until isDone."""
        try:
            ### This face is used to send an image to the SEG
            self.face.setCommandSigningInfo(
                self.keyChain, self.keyChain.getDefaultCertificateName())
            self.face.registerPrefix(self.configPrefix,
                                     self.onInterest_PullService,
                                     self.onRegisterFailed)
            #### This face is used to start the algorithm of decision engine.
            #### The Interest is sent by trigger module
            #### This face is for testing propose
            self.face.setCommandSigningInfo(
                self.keyChain, self.keyChain.getDefaultCertificateName())
            self.face.registerPrefix(self.prefix_startDE,
                                     self.onInterest_StartDE,
                                     self.onRegisterFailed)
            Max = self.face.getMaxNdnPacketSize()
            print 'Maxsize: ', Max
            print "Registered prefix : " + self.configPrefix.toUri()
            while not self.isDone:
                self.face.processEvents()
                time.sleep(0.01)
        except RuntimeError as e:
            print "ERROR: %s" % e
        return True

    def onInterest_StartDE(self, prefix, interest, face, interestFilterId,
                           filter):
        """Handle /picasso/start_de/<service>/<node> and push a deployment
        Interest to the selected SEG node (fire-and-forget)."""
        interestName = interest.getName()
        print "Interest Name: %s" % interestName
        interest_name_components = interestName.toUri().split("/")
        if "start_de" in interest_name_components:
            # The decision-engine selection algorithm is currently disabled;
            # the target node is taken directly from the Interest name.
            #print 'Query database'
            #print 'Call decision engine algorithm'
            #parent_dir = os.path.split(self.script_dir)[0]
            #monitor_path = os.path.join(self.script_dir, parent_dir, 'Monitoring', 'Monitoring_DB')
            #print monitor_path
            #myDE = de(monitor_path)
            #json_lst_dict = myDE.get_lst_of_dictionaries()
            #json_server_Spec = self.json_server_Spec_default
            #node_name = myDE.selectHost_to_deploy_firstInstance(json_lst_dict, json_server_Spec)
            node_name = interest_name_components[
                interest_name_components.index("start_de") + 2]
            print 'Selected Host Name %s' % node_name
            service_name = interest_name_components[
                interest_name_components.index("start_de") + 1]
            print 'service name %s' % service_name
            print 'Start service deployment'
            deployService = self.prefix_deployService + node_name + '/' + service_name
            config_prefix_deployService = Name(deployService)
            interest = Interest(config_prefix_deployService)
            interest.setInterestLifetimeMilliseconds(self.interestLifetime)
            interest.setMustBeFresh(True)
            self.face.expressInterest(
                interest, None, None
            )  ## set None --> sent out only, don't wait for Data and Timeout
            print "Sent Push Interest to SEG %s" % config_prefix_deployService
        else:
            print "Interest name mismatch"

    def onInterest_PullService(self, prefix, interest, face, interestFilterId,
                               filter):
        """Handle a pull request for a service image and reply with the
        requested file segment via sendingFile()."""
        interestName = interest.getName()
        # NOTE(review): this Data object is never used; sendingFile() builds
        # its own Data packet. Looks like dead code — confirm before removing.
        data = Data(interestName)
        print "Interest Name: %s" % interestName
        interest_name_components = interestName.toUri().split("/")
        if "service_deployment_pull" in interest_name_components:
            ## Extract filename from Interest name
            #filename = "uhttpd.tar"
            filename = interest_name_components[
                interest_name_components.index("service_deployment_pull") + 1]
            folder_name = "ServiceRepo/SC_repository/"
            parent_dir = os.path.split(self.script_dir)[0]
            rel_path = os.path.join(parent_dir, folder_name)
            if not os.path.exists(rel_path):
                os.makedirs(rel_path)
            abs_file_path = os.path.join(rel_path, filename)
            freshness = 6000000  # milliseconds; content leaves the cache after the freshness period
            self.sendingFile(abs_file_path, interest, face, freshness)
        else:
            print "Interest name mismatch"

    def onRegisterFailed(self, prefix):
        """Prefix registration failed: log and stop the event loop."""
        print "Register failed for prefix", prefix.toUri()
        self.isDone = True

    def sendingFile(self, file_path, interest, face, freshness):
        """Reply to `interest` with one segment of `file_path`.

        The segment number is taken from the last name component; if absent,
        segment 0 is sent. The final block id advertises the last segment so
        the consumer knows when to stop fetching.
        """
        print "Sending File Function"
        interestName = interest.getName()
        interestNameSize = interestName.size()
        try:
            SegmentNum = (interestName.get(interestNameSize - 1)).toSegment()
            dataName = interestName.getSubName(0, interestNameSize - 1)
        # If no segment number is included in the Interest, send segment 0
        # and keep the full Interest name as the Data name base.
        except RuntimeError as e:
            SegmentNum = 0
            dataName = interestName
        # Put file to the Data message
        try:
            # Due to the overhead of the NDN name and other header values,
            # NDN header overhead + Data packet content must stay < maxNdnPacketSize,
            # hence the configured self.Datamessage_size segment size.
            # EnumeratePublisher splits large files into segments and returns
            # the requested one (segment numbers start from 0).
            dataSegment, last_segment_num = EnumeratePublisher(
                file_path, self.Datamessage_size, SegmentNum).getFileSegment()
            print 'SegmentNum:%s last_segment_num: %s' % (SegmentNum,
                                                          last_segment_num)
            # create the DATA name appending the segment number
            dataName = dataName.appendSegment(SegmentNum)
            data = Data(dataName)
            data.setContent(dataSegment)
            # set the final block ID to the last segment number
            last_segment = (Name.Component()).fromNumber(last_segment_num)
            data.getMetaInfo().setFinalBlockId(last_segment)
            #hourMilliseconds = 600 * 1000
            data.getMetaInfo().setFreshnessPeriod(freshness)
            # Currently Data is signed with the default identity certificate.
            self.keyChain.sign(data, self.keyChain.getDefaultCertificateName())
            # Sending Data message
            # NOTE(review): only ValueError is caught below; an unreadable
            # file would raise IOError and escape — confirm intended.
            face.send(data.wireEncode().toBuffer())
            print "Replied to Interest name: %s" % interestName.toUri()
            print "Replied with Data name: %s" % dataName.toUri()
        except ValueError as err:
            print "ERROR: %s" % err
class ServiceController(object): def __init__(self): # Register all published name prefixes prefix_serviceDiscovery = "/sm/service_discovery" self.prefix_serviceDiscovery = Name(prefix_serviceDiscovery) prefix_serviceRegistration = "/sm/service_registration" self.prefix_serviceRegistration = Name(prefix_serviceRegistration) prefix_serviceMigration = "/sm/service_migration" self.prefix_serviceMigration = Name(prefix_serviceMigration) self.prefix_serviceMigrationPush = "/sm/service_migration/push/" prefix_trigger = "/trigger" self.prefix_trigger = Name(prefix_trigger) self.Path = "/home/pi/carlos/SC_repository/" self.serviceName = "rpi-nano-httpd.tar" self.serviceInfo = { 'uhttpd.tar': { 'image_name': 'fnichol/uhttpd:latest', 'port_host': 8080, 'port_container': 80, 'component': ['ubuntu.tar', 'python.tar', 'java.tar'] }, 'httpd.tar': { 'image_name': 'httpd:latest', 'port_host': 8081, 'port_container': 80, 'component': ['debian.tar', 'python.tar', 'java.tar'] }, 'rpi-nano-httpd.tar': { 'image_name': 'hypriot/rpi-nano-httpd:latest', 'port_host': 8082, 'port_container': 80, 'component': ['debian.tar', 'python.tar', 'java.tar'] }, 'rpi-busybox-httpd.tar': { 'image_name': 'hypriot/rpi-nano-httpd:latest', 'port_host': 8083, 'port_container': 80, 'component': ['debian.tar', 'python.tar', 'java.tar'] } } # Create face to localhost, Default configuration from PyNDN self.outstanding = dict() self.isDone = False self.keyChain = KeyChain() self.face = Face("127.0.0.1") # Set the KeyChain and certificate name used to sign command interests (e.g. for registerPrefix). 
self.face.setCommandSigningInfo(self.keyChain, \ self.keyChain.getDefaultCertificateName()) # Register name prefixes to the face self.face.registerPrefix(self.prefix_serviceDiscovery, self.onInterest_serviceDiscovery, self.onRegisterFailed) print "Registering prefix : " + self.prefix_serviceDiscovery.toUri() self.face.registerPrefix(self.prefix_serviceRegistration, self.onInterest_serviceRegistration, self.onRegisterFailed) print "Registering prefix : " + self.prefix_serviceRegistration.toUri() self.face.registerPrefix(self.prefix_serviceMigration, self.onInterest_serviceMigration, self.onRegisterFailed) print "Registering prefix : " + self.prefix_serviceMigration.toUri() self.face.registerPrefix(self.prefix_trigger, self.onInterest_trigger, self.onRegisterFailed) print "Registering prefix : " + self.prefix_trigger.toUri() def onInterest_trigger(self, prefix, interest, face, interestFilterId, filter): interestName = interest.getName() print "Received Interest name: %s" % interestName.toUri() interest_name_components = interestName.toUri().split("/") serviceName = interest_name_components[2] nodeName = interest_name_components[3] prefix_serviceMigrationPushtoNode = self.prefix_serviceMigrationPush + nodeName + '/' + serviceName print "Sent Push Interest for %s" % prefix_serviceMigrationPushtoNode self.prefix_serviceMigrationPushtoNode = Name( prefix_serviceMigrationPushtoNode) self.sendPushInterest(self.prefix_serviceMigrationPushtoNode) def sendPushInterest(self, name): interest = Interest(name) uri = name.toUri() interest.setInterestLifetimeMilliseconds(4000) interest.setMustBeFresh(True) if uri not in self.outstanding: self.outstanding[uri] = 1 # self.face.expressInterest(interest, self.onData, self._onTimeout) self.face.expressInterest( interest, None, None ) ## set None --> sent out only, don't wait for Data and Timeout print "Sent Push-Interest for %s" % uri def onInterest_serviceDiscovery(self, prefix, interest, face, interestFilterId, filter): print 
"Processing Interest message:/SM/service_discovery" # send a list of available service def onInterest_serviceRegistration(self, prefix, interest, face, interestFilterId, filter): print "Processing Interest message:/SM/service_registration" # upload image file to repository # send ACK to service provider def onInterest_serviceMigration(self, prefix, interest, face, interestFilterId, filter): # Receive command from decision engine to migrate teh service to some specific location # Select image file from the repo # Send to the chosen SEG interestName = interest.getName() interestNameSize = interestName.size() print "Receive Interest message: %s , sending back DATA message" % interestName # try if the received INTEREST include a segment number. If include, extract the segment number and file requested. try: SegmentNum = (interestName.get(interestNameSize - 1)).toSegment() serviceName = (interestName.get(interestNameSize - 2)).toEscapedString() dataName = interestName.getSubName(0, interestNameSize - 1) # If no segment number is included in the INTEREST, set the segment number as 0 and set the file name to configuration script to be sent except RuntimeError as e: SegmentNum = 0 #dataName = (interestName.append(Name("install")).append(Name(self.serviceName))) dataName = interestName # TO BE USED WITH WEB INTERFACE filePath = self.Path + self.serviceName print filePath # Test in terminal # self.filePath = "config_script_tobe_uploaded/" + serviceName # get the practical limit of the size of a network-layer packet : 8800 bytes maxNdnPacketSize = self.face.getMaxNdnPacketSize() # print 'practical limit of the size of a network-layer packet :' + str(maxNdnPacketSize) try: # due to overhead of NDN name and other header values; NDN header overhead + Data packet content = < maxNdnPacketSize # So Here segment size is hard coded to 5000 KB. 
# Class Enumerate publisher is used to split large files into segments and get a required segment ( segment numbers started from 0) dataSegment, last_segment_num = EnumeratePublisher( filePath, 5000, SegmentNum).getFileSegment() # create the DATA name appending the segment number dataName = dataName.appendSegment(SegmentNum) data = Data(dataName) data.setContent(dataSegment) # set the final block ID to the last segment number last_segment = (Name.Component()).fromNumber(last_segment_num) data.getMetaInfo().setFinalBlockId(last_segment) hourMilliseconds = 600 * 1000 data.getMetaInfo().setFreshnessPeriod(hourMilliseconds) # currently Data is signed from the Default Identitiy certificate self.keyChain.sign(data, self.keyChain.getDefaultCertificateName()) face.send(data.wireEncode().toBuffer()) print "Replied to Interest name: %s" % interestName.toUri() print "Replied with Data name: %s" % dataName.toUri() # If configuration manager has sent the last segment of the file, script can be stopped. #if SegmentNum == last_segment_num: #self.isDone = True except ValueError as err: print "ERROR: %s" % err def onRegisterFailed(self, prefix, interest, face, interestFilterId, filter): print "Register failed for prefix", prefix.toUri() self.isDone = True def run(self): try: while not self.isDone: self.face.processEvents() time.sleep(0.01) except RuntimeError as e: print "ERROR: %s" % e
class SEG(object): def __init__(self, node_id): self.node_id = str(node_id) print "Node-name: %s" % self.node_id prefix_serviceMigrationPush = "/sm/service_migration/push/" + self.node_id self.prefix_serviceMigrationPush = Name(prefix_serviceMigrationPush) prefix_serviceMigration = "/sm/service_migration" self.prefix_serviceMigration = Name(prefix_serviceMigration) prefix_serviceMonitoring = "/sm/service_monitoring" + '/' + self.node_id self.prefix_serviceMonitoring = Name(prefix_serviceMonitoring) self.serviceInfo = { 'uhttpd.tar': { 'image_name': 'fnichol/uhttpd:latest', 'port_host': 8080, 'port_container': 80, 'component': ['ubuntu.tar', 'python.tar', 'java.tar'] }, 'httpd.tar': { 'image_name': 'httpd:latest', 'port_host': 8081, 'port_container': 80, 'component': ['debian.tar', 'python.tar', 'java.tar'] }, 'rpi-nano-httpd.tar': { 'image_name': 'hypriot/rpi-nano-httpd:latest', 'port_host': 8082, 'port_container': 80, 'component': ['debian.tar', 'python.tar', 'java.tar'] }, 'rpi-busybox-httpd.tar': { 'image_name': 'hypriot/rpi-nano-httpd:latest', 'port_host': 8083, 'port_container': 80, 'component': ['debian.tar', 'python.tar', 'java.tar'] } } self.pi_status = { 'PiID': '', 'hardResources': {}, 'softResources': { 'OS': 'Linux' }, 'resourceUsage': {}, 'containers': [] } # Default configuration of NDN self.outstanding = dict() self.isDone = False self.keyChain = KeyChain() self.face = Face("127.0.0.1") self.face.setCommandSigningInfo(self.keyChain, \ self.keyChain.getDefaultCertificateName()) self.face.registerPrefix(self.prefix_serviceMigrationPush, self.onInterest_serviceMigrationPush, self.onRegisterFailed) print "Registering prefix : " + self.prefix_serviceMigrationPush.toUri( ) self.face.registerPrefix(self.prefix_serviceMonitoring, self.onInterest_serviceMonitoring, self.onRegisterFailed) print "Registering prefix : " + self.prefix_serviceMonitoring.toUri() def run(self): try: # Run the event loop forever. 
Use a short sleep to prevent the Producer from using 100% of the CPU. while not self.isDone: self.face.processEvents() time.sleep(0.01) except RuntimeError as e: print "ERROR: %s" % e def onInterest_serviceMonitoring(self, prefix, interest, face, interestFilterId, filter): interestName = interest.getName() interestNameSize = interestName.size() interest_name_components = interestName.toUri().split("/") timeStamp = interest_name_components[ interest_name_components.index("service_monitoring") + 2] print "Receive Interest message name: %s" % interestName ##### V2 Data message is JSON file try: SegmentNum = (interestName.get(interestNameSize - 1)).toSegment() dataName = interestName.getSubName(0, interestNameSize - 1) # If no segment number is included in the INTEREST, set the segment number as 0 and set the file name to configuration script to be sent except RuntimeError as e: SegmentNum = 0 dataName = interestName # Put JSON file to the Data message jsonfileName = 'status' + '-' + self.node_id + '.json' print 'Monitoring FileName %s' % jsonfileName putgetfunc.put_PiID(self.pi_status, self.node_id) putgetfunc.put_hardResources_cpu( self.pi_status, "A 1.2GHz 64-bit quad-core ARMv8 CPU") putgetfunc.put_hardResources_mem(self.pi_status, "1GB") putgetfunc.put_hardResources_disk(self.pi_status, "16GB") putgetfunc.put_resourceUsage_cpu(self.pi_status, picheck.pi_cpuUsage()) putgetfunc.put_resourceUsage_cpuLoad(self.pi_status, picheck.pi_cpuLoad()) putgetfunc.put_resourceUsage_mem(self.pi_status, picheck.pi_memUsage()) self.pi_status = dockerctl.container_info(self.pi_status) putgetfunc.put_PiID(self.pi_status, self.node_id) putgetfunc.create_jsonFile(self.pi_status, jsonfileName) # get the practical limit of the size of a network-layer packet : 8800 bytes maxNdnPacketSize = self.face.getMaxNdnPacketSize() #print 'practical limit of the size of a network-layer packet :' + str(maxNdnPacketSize) try: # due to overhead of NDN name and other header values; NDN header overhead + 
Data packet content = < maxNdnPacketSize # So Here segment size is hard coded to 5000 KB. # Class Enumerate publisher is used to split large files into segments and get a required segment ( segment numbers started from 0) dataSegment, last_segment_num = EnumeratePublisher( jsonfileName, 8000, SegmentNum).getFileSegment() # create the DATA name appending the segment number dataName = dataName.appendSegment(SegmentNum) data = Data(dataName) data.setContent(dataSegment) # set the final block ID to the last segment number last_segment = (Name.Component()).fromNumber(last_segment_num) data.getMetaInfo().setFinalBlockId(last_segment) hourMilliseconds = 600 * 1000 data.getMetaInfo().setFreshnessPeriod(hourMilliseconds) # currently Data is signed from the Default Identitiy certificate self.keyChain.sign(data, self.keyChain.getDefaultCertificateName()) # Sending Data message face.send(data.wireEncode().toBuffer()) print "Replied to Interest name: %s" % interestName.toUri() print "Replied with Data name: %s" % dataName.toUri() # If configuration manager has sent the last segment of the file, script can be stopped. 
# if SegmentNum == last_segment_num: # self.isDone = True except ValueError as err: print "ERROR: %s" % err def onInterest_serviceMigrationPush(self, prefix, interest, face, interestFilterId, filter): # receive Push Interest from SC, send another Interest to start service migration interestName = interest.getName() #interestNameSize = interestName.size() print "Receive Interest message name: %s" % interestName interest_name_components = interestName.toUri().split("/") if "push" in interest_name_components: fileName = interest_name_components[ interest_name_components.index("push") + 2] print fileName ## check image is running or not #Ger info from serviceInfo docker_image_name = self.serviceInfo[fileName]['image_name'] docker_port_host = self.serviceInfo[fileName]['port_host'] docker_port_container = self.serviceInfo[fileName]['port_container'] print 'Check docker Image Name: %s ' % docker_image_name print 'Port Host: %d' % docker_port_host print 'Port Container %d' % docker_port_container if dockerctl.is_image_running(docker_image_name) == True: print 'Image: %s is already running' % docker_image_name else: ##image is not running ##check docker client has this image or not print 'Image: %s is NOT running' % docker_image_name if dockerctl.has_image(docker_image_name) == True: ## has image but image is not running print 'Image: %s is already stored' % docker_image_name if dockerctl.run_image(docker_image_name, docker_port_host, docker_port_container) == True: print 'Running docker image %s ...' 
% docker_image_name else: print 'Error: Cannot run image %s' % docker_image_name else: print 'Image: %s is not stored, pull from SC' % docker_image_name ### Call sendNextInterest to SC prefix.requestService = (self.prefix_serviceMigration.append( Name(fileName))) print 'Sending Interest message: %s' % prefix.requestService self.sendNextInterest(prefix.requestService) def sendNextInterest(self, name): interest = Interest(name) uri = name.toUri() interest.setInterestLifetimeMilliseconds(4000) interest.setMustBeFresh(True) if uri not in self.outstanding: self.outstanding[uri] = 1 self.face.expressInterest(interest, self.onData, self.onTimeout) print "Sent Interest for %s" % uri def onData(self, interest, data): # payload = data.getContent() # name = data.getName() # print "Received data: ", payload.toRawStr() # del self.outstanding[name.toUri()] # self.isDone = Truepayload = data.getContent() payload = data.getContent() dataName = data.getName() dataName_size = dataName.size() print "Received data name: ", dataName.toUri() data_name_components = dataName.toUri().split("/") # Check any Configuration script is in the Data...If so Data name include "install" Keyword and name component next to "install" is the configuration script name #if "install" in data_name_components: if "service_migration" in data_name_components: #fileName = data_name_components[data_name_components.index("install") + 1] fileName = data_name_components[ data_name_components.index("service_migration") + 1] # Write the configuration script in the desired location in append mode path = "/home/pi/SM_NDN/SEG_repository" if not os.path.exists(path): os.makedirs(path) with open(os.path.join(path, fileName), 'ab') as temp_file: temp_file.write(payload.toRawStr()) # if recieved Data is a segment of the configuration script, then need to fetch remaing segments # try if segment number is existed in Data Name try: dataSegmentNum = (dataName.get(dataName_size - 1)).toSegment() lastSegmentNum = ( 
data.getMetaInfo().getFinalBlockId()).toNumber() print "dataSegmentNum" + str(dataSegmentNum) print "lastSegmentNum" + str(lastSegmentNum) # If segment number is available and what have recieved is not the FINAL_BLOCK, then fetch the NEXT segment if lastSegmentNum != dataSegmentNum: interestName = dataName.getSubName(0, dataName_size - 1) interestName = interestName.appendSegment(dataSegmentNum + 1) self.sendNextInterest(interestName) # If segment number is available and what have recieved is the FINAL_BLOCK, then EXECUTE the configuration script ### Recieve all chunks of data --> Execute it here if lastSegmentNum == dataSegmentNum: print "Received complete image: %s, EXECUTED !!!!" % fileName #subprocess.call("python " + path + "/" + fileName, shell=True) docker_image_name = self.serviceInfo[fileName][ 'image_name'] docker_port_host = self.serviceInfo[fileName]['port_host'] docker_port_container = self.serviceInfo[fileName][ 'port_container'] dockerctl.load_image(docker_image_name) if dockerctl.run_image(docker_image_name, docker_port_host, docker_port_container) == True: print 'Running docker image %s ...' % docker_image_name else: print 'Error: Cannot run image %s' % docker_image_name # forward_request(webserver, port, s, data) self.isDone = True # If Configuration Manager has sent a file with 'install' key word, but no segment number is available, that DATA packet is invalid. Then just do nothing and exist the program except RuntimeError as e: print "ERROR: %s" % e self.isDone = True currentInterestName = interest.getName() # Delete the Interest name from outstanding INTEREST dict as reply DATA has been received. 
del self.outstanding[currentInterestName.toUri()] def onRegisterFailed(self, prefix): print "Register failed for prefix", prefix.toUri() self.isDone = True def onTimeout(self, interest): name = interest.getName() uri = name.toUri() print "TIMEOUT #%d: %s" % (self.outstanding[uri], uri) self.outstanding[uri] += 1 if self.outstanding[uri] <= 3: self.sendNextInterest(name) else: self.isDone = True