Example #1
    def _update_execute_xml(self):
        # For all testbeds,
        #   For all elements in testbed,
        #       - gather immutable execute-readable attribute lists
        #         asynchronously
        # Generate new design description from design xml
        # (Wait for attribute lists - implicit syncpoint)
        # For all testbeds,
        #   For all elements in testbed,
        #       - gather all immutable execute-readable attribute
        #         values, asynchronously
        # (Wait for attribute values - implicit syncpoint)
        # For all testbeds,
        #   For all elements in testbed,
        #       - inject non-None values into new design
        # Generate execute xml from new design

        attribute_lists = dict(
            (testbed_guid, collections.defaultdict(dict))
            for testbed_guid in self._testbeds
        )
        
        for testbed_guid, testbed in self._testbeds.iteritems():
            guids = self._guids_in_testbed(testbed_guid)
            for guid in guids:
                attribute_lists[testbed_guid][guid] = \
                    testbed.get_attribute_list_deferred(guid, Attribute.ExecImmutable)
        
        parser = XmlExperimentParser()
        execute_data = parser.from_xml_to_data(self._experiment_design_xml)

        attribute_values = dict(
            (testbed_guid, collections.defaultdict(dict))
            for testbed_guid in self._testbeds
        )
        
        for testbed_guid, testbed_attribute_lists in attribute_lists.iteritems():
            testbed = self._testbeds[testbed_guid]
            for guid, attribute_list in testbed_attribute_lists.iteritems():
                attribute_list = _undefer(attribute_list)
                attribute_values[testbed_guid][guid] = dict(
                    (attribute, testbed.get_deferred(guid, attribute))
                    for attribute in attribute_list
                )
        
        for testbed_guid, testbed_attribute_values in attribute_values.iteritems():
            for guid, guid_attribute_values in testbed_attribute_values.iteritems():
                for attribute, value in guid_attribute_values.iteritems():
                    value = _undefer(value)
                    if value is not None:
                        execute_data.add_attribute_data(guid, attribute, value)
        
        self._experiment_execute_xml = parser.to_xml(data=execute_data)
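The comment block at the top of _update_execute_xml describes a two-phase asynchronous gather with implicit syncpoints: the deferred getters return handles immediately, and _undefer blocks only when a value is actually needed. Below is a minimal, hypothetical sketch of that pattern; FakeTestbed, its attribute table, and the Future-based _undefer are assumptions for illustration, not NEPI's actual implementation.

# Hedged sketch of the deferred/undefer pattern assumed above. Getters return
# Future-like handles immediately; _undefer resolves them at the syncpoints.
from concurrent.futures import Future, ThreadPoolExecutor

def _undefer(value):
    # Resolve a deferred result; plain (already-computed) values pass through.
    if isinstance(value, Future):
        return value.result()
    return value

class FakeTestbed(object):
    # Purely illustrative stand-in for a testbed controller with deferred getters.
    def __init__(self):
        self._pool = ThreadPoolExecutor(max_workers=4)
        self._attrs = {1: {"hostname": "node-a", "tun_key": None}}

    def get_attribute_list_deferred(self, guid, flags=None):
        # Schedule the lookup and return immediately (no blocking here).
        return self._pool.submit(lambda: list(self._attrs[guid]))

    def get_deferred(self, guid, attribute):
        return self._pool.submit(lambda: self._attrs[guid][attribute])

if __name__ == "__main__":
    tb = FakeTestbed()
    deferred_list = tb.get_attribute_list_deferred(1)    # phase 1: request the list
    attributes = _undefer(deferred_list)                  # syncpoint: wait for it
    deferred_values = dict((a, tb.get_deferred(1, a)) for a in attributes)
    values = dict((a, _undefer(v)) for a, v in deferred_values.items())
    print(values)  # only non-None values would be injected into the design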
Example #2
    def _update_execute_xml(self):
        # For all testbeds,
        #   For all elements in testbed,
        #       - gather immutable execute-readable attribute lists
        #         asynchronously
        # Generate new design description from design xml
        # (Wait for attribute lists - implicit syncpoint)
        # For all testbeds,
        #   For all elements in testbed,
        #       - gather all immutable execute-readable attribute
        #         values, asynchronously
        # (Wait for attribute values - implicit syncpoint)
        # For all testbeds,
        #   For all elements in testbed,
        #       - inject non-None values into new design
        # Generate execute xml from new design

        attribute_lists = dict((testbed_guid, collections.defaultdict(dict))
                               for testbed_guid in self._testbeds)

        for testbed_guid, testbed in self._testbeds.iteritems():
            guids = self._guids_in_testbed(testbed_guid)
            for guid in guids:
                attribute_lists[testbed_guid][guid] = \
                    testbed.get_attribute_list_deferred(guid, Attribute.ExecImmutable)

        parser = XmlExperimentParser()
        execute_data = parser.from_xml_to_data(self._experiment_design_xml)

        attribute_values = dict((testbed_guid, collections.defaultdict(dict))
                                for testbed_guid in self._testbeds)

        for testbed_guid, testbed_attribute_lists in attribute_lists.iteritems():
            testbed = self._testbeds[testbed_guid]
            for guid, attribute_list in testbed_attribute_lists.iteritems():
                attribute_list = _undefer(attribute_list)
                attribute_values[testbed_guid][guid] = dict(
                    (attribute, testbed.get_deferred(guid, attribute))
                    for attribute in attribute_list)

        for testbed_guid, testbed_attribute_values in attribute_values.iteritems():
            for guid, guid_attribute_values in testbed_attribute_values.iteritems():
                for attribute, value in guid_attribute_values.iteritems():
                    value = _undefer(value)
                    if value is not None:
                        execute_data.add_attribute_data(guid, attribute, value)

        self._experiment_execute_xml = parser.to_xml(data=execute_data)
Example #3
 def from_xml(self, xml):
     parser = XmlExperimentParser()
     parser.from_xml(self, xml)
Example #4
 def to_xml(self):
     parser = XmlExperimentParser()
     return parser.to_xml(self)
Example #5
 def _testbed_recovery_policy(self, guid, data = None):
     if data is None:
         parser = XmlExperimentParser()
         data = parser.from_xml_to_data(self._experiment_design_xml)
     
     return data.get_attribute_data(guid, DC.RECOVERY_POLICY)
Example #6
    def _start(self, recover = False):
        parser = XmlExperimentParser()
        
        if recover:
            xml = self._experiment_execute_xml
        else:
            xml = self._experiment_design_xml
        data = parser.from_xml_to_data(xml)

        # instantiate testbed controllers
        to_recover, to_restart = self._init_testbed_controllers(data, recover)
        all_restart = set(to_restart)
        
        if not recover:
            # persist testbed connection data, for potential recovery
            self._persist_testbed_proxies()
        else:
            # recover recoverable controllers
            for guid in to_recover:
                try:
                    self._testbeds[guid].do_setup()
                    self._testbeds[guid].recover()
                except:
                    self._logger.exception("During recovery of testbed %s", guid)
                    
                    # Mark failed
                    self._failed_testbeds.add(guid)
    
        def steps_to_configure(self, allowed_guids):
            # perform setup in parallel for all test beds,
            # wait for all threads to finish

            self._logger.debug("ExperimentController: Starting parallel do_setup")
            self._parallel([testbed.do_setup 
                            for guid,testbed in self._testbeds.iteritems()
                            if guid in allowed_guids])
       
            # perform create-connect in parallel, wait
            # (internal connections only)
            self._logger.debug("ExperimentController: Starting parallel do_create")
            self._parallel([testbed.do_create
                            for guid,testbed in self._testbeds.iteritems()
                            if guid in allowed_guids])

            self._logger.debug("ExperimentController: Starting parallel do_connect_init")
            self._parallel([testbed.do_connect_init
                            for guid,testbed in self._testbeds.iteritems()
                            if guid in allowed_guids])

            self._logger.debug("ExperimentController: Starting parallel do_connect_fin")
            self._parallel([testbed.do_connect_compl
                            for guid,testbed in self._testbeds.iteritems()
                            if guid in allowed_guids])

            self._logger.debug("ExperimentController: Starting parallel do_preconfigure")
            self._parallel([testbed.do_preconfigure
                            for guid,testbed in self._testbeds.iteritems()
                            if guid in allowed_guids])
            self._clear_caches()
            
            # Store testbed order
            self._testbed_order.append(allowed_guids)

        steps_to_configure(self, to_restart)

        if self._netreffed_testbeds:
            self._logger.debug("ExperimentController: Resolving netreffed testbeds")
            # initially resolve netrefs
            self.do_netrefs(data, fail_if_undefined=False)
            
            # rinse and repeat, for netreffed testbeds
            netreffed_testbeds = set(self._netreffed_testbeds)

            to_recover, to_restart = self._init_testbed_controllers(data, recover)
            all_restart.update(to_restart)
            
            if not recover:
                # persist testbed connection data, for potential recovery
                self._persist_testbed_proxies()
            else:
                # recover recoverable controllers
                for guid in to_recover:
                    try:
                        self._testbeds[guid].do_setup()
                        self._testbeds[guid].recover()
                    except:
                        self._logger.exception("During recovery of testbed %s", guid)

                        # Mark failed
                        self._failed_testbeds.add(guid)

            # configure dependent testbeds
            steps_to_configure(self, to_restart)
        
        all_restart = [ self._testbeds[guid] for guid in all_restart ]
            
        # resolve remaining netrefs (don't fail yet; the final strict pass runs after prestart)
        self._logger.debug("ExperimentController: Resolving do_netrefs")
        self.do_netrefs(data, fail_if_undefined=False)
       
        # Only now that netref dependencies have been resolved is it safe to
        # program cross-connections
        self._logger.debug("ExperimentController: Programming testbed cross-connections")
        self._program_testbed_cross_connections(data)
 
        # perform do_configure in parallel for all testbeds
        # (each testbed's internal configuration)
        self._logger.debug("ExperimentController: Starting parallel do_configure")
        self._parallel([testbed.do_configure
                        for testbed in all_restart])

        self._clear_caches()

        #print >>sys.stderr, "DO IT"
        #import time
        #time.sleep(60)
        
        # cross-connect (cannot be done in parallel)
        self._logger.debug("ExperimentController: Starting cross-connect")
        for guid, testbed in self._testbeds.iteritems():
            cross_data = self._get_cross_data(guid)
            testbed.do_cross_connect_init(cross_data)
        for guid, testbed in self._testbeds.iteritems():
            cross_data = self._get_cross_data(guid)
            testbed.do_cross_connect_compl(cross_data)
       
        self._clear_caches()

        # Last chance to configure (parallel on all testbeds)
        self._logger.debug("ExperimentController: Starting parallel do_prestart")
        self._parallel([testbed.do_prestart
                        for testbed in all_restart])

        # final netref step, fail if anything's left unresolved
        self.do_netrefs(data, fail_if_undefined=True)
 
        self._clear_caches()
        
        if not recover:
            # update execution xml with execution-specific values
            # TODO: BUG! Buggy code! Cannot handle serializing all attribute values (e.g. tun_key, which is non-ASCII)
            self._update_execute_xml()
            self.persist_execute_xml()

        # start experiment (parallel start on all testbeds)
        self._logger.debug("ExperimentController: Starting parallel do_start")
        self._parallel([testbed.start
                        for testbed in all_restart])

        self._clear_caches()
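_start assumes a _parallel helper that runs a batch of callables and returns only when all of them have finished, so each do_* phase behaves as a barrier across testbeds. A minimal sketch of such a helper using plain threads (an illustration of the assumed contract, not the controller's actual implementation, which would also need to collect per-thread errors) could look like:

import threading

def _parallel(callables):
    # Run every callable in its own thread and join them all, so the caller
    # does not proceed until the whole batch has completed (barrier semantics).
    threads = [threading.Thread(target=c) for c in callables]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

# e.g. _parallel([testbed.do_setup for testbed in testbeds])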
Example #7
    def _testbed_recovery_policy(self, guid, data=None):
        if data is None:
            parser = XmlExperimentParser()
            data = parser.from_xml_to_data(self._experiment_design_xml)

        return data.get_attribute_data(guid, DC.RECOVERY_POLICY)
Example #8
    def _start(self, recover=False):
        parser = XmlExperimentParser()

        if recover:
            xml = self._experiment_execute_xml
        else:
            xml = self._experiment_design_xml
        data = parser.from_xml_to_data(xml)

        # instantiate testbed controllers
        to_recover, to_restart = self._init_testbed_controllers(data, recover)
        all_restart = set(to_restart)

        if not recover:
            # persist testbed connection data, for potential recovery
            self._persist_testbed_proxies()
        else:
            # recover recoverable controllers
            for guid in to_recover:
                try:
                    self._testbeds[guid].do_setup()
                    self._testbeds[guid].recover()
                except:
                    self._logger.exception("During recovery of testbed %s",
                                           guid)

                    # Mark failed
                    self._failed_testbeds.add(guid)

        def steps_to_configure(self, allowed_guids):
            # perform setup in parallel for all test beds,
            # wait for all threads to finish

            self._logger.debug(
                "ExperimentController: Starting parallel do_setup")
            self._parallel([
                testbed.do_setup
                for guid, testbed in self._testbeds.iteritems()
                if guid in allowed_guids
            ])

            # perform create-connect in parallel, wait
            # (internal connections only)
            self._logger.debug(
                "ExperimentController: Starting parallel do_create")
            self._parallel([
                testbed.do_create
                for guid, testbed in self._testbeds.iteritems()
                if guid in allowed_guids
            ])

            self._logger.debug(
                "ExperimentController: Starting parallel do_connect_init")
            self._parallel([
                testbed.do_connect_init
                for guid, testbed in self._testbeds.iteritems()
                if guid in allowed_guids
            ])

            self._logger.debug(
                "ExperimentController: Starting parallel do_connect_fin")
            self._parallel([
                testbed.do_connect_compl
                for guid, testbed in self._testbeds.iteritems()
                if guid in allowed_guids
            ])

            self._logger.debug(
                "ExperimentController: Starting parallel do_preconfigure")
            self._parallel([
                testbed.do_preconfigure
                for guid, testbed in self._testbeds.iteritems()
                if guid in allowed_guids
            ])
            self._clear_caches()

            # Store testbed order
            self._testbed_order.append(allowed_guids)

        steps_to_configure(self, to_restart)

        if self._netreffed_testbeds:
            self._logger.debug(
                "ExperimentController: Resolving netreffed testbeds")
            # initially resolve netrefs
            self.do_netrefs(data, fail_if_undefined=False)

            # rinse and repeat, for netreffed testbeds
            netreffed_testbeds = set(self._netreffed_testbeds)

            to_recover, to_restart = self._init_testbed_controllers(
                data, recover)
            all_restart.update(to_restart)

            if not recover:
                # persist testbed connection data, for potential recovery
                self._persist_testbed_proxies()
            else:
                # recover recoverable controllers
                for guid in to_recover:
                    try:
                        self._testbeds[guid].do_setup()
                        self._testbeds[guid].recover()
                    except:
                        self._logger.exception("During recovery of testbed %s",
                                               guid)

                        # Mark failed
                        self._failed_testbeds.add(guid)

            # configure dependent testbeds
            steps_to_configure(self, to_restart)

        all_restart = [self._testbeds[guid] for guid in all_restart]

        # resolve remaining netrefs (don't fail yet; the final strict pass runs after prestart)
        self._logger.debug("ExperimentController: Resolving do_netrefs")
        self.do_netrefs(data, fail_if_undefined=False)

        # Only now that netref dependencies have been resolved is it safe to
        # program cross-connections
        self._logger.debug(
            "ExperimentController: Programming testbed cross-connections")
        self._program_testbed_cross_connections(data)

        # perform do_configure in parallel for all testbeds
        # (each testbed's internal configuration)
        self._logger.debug(
            "ExperimentController: Starting parallel do_configure")
        self._parallel([testbed.do_configure for testbed in all_restart])

        self._clear_caches()

        #print >>sys.stderr, "DO IT"
        #import time
        #time.sleep(60)

        # cross-connect (cannot be done in parallel)
        self._logger.debug("ExperimentController: Starting cross-connect")
        for guid, testbed in self._testbeds.iteritems():
            cross_data = self._get_cross_data(guid)
            testbed.do_cross_connect_init(cross_data)
        for guid, testbed in self._testbeds.iteritems():
            cross_data = self._get_cross_data(guid)
            testbed.do_cross_connect_compl(cross_data)

        self._clear_caches()

        # Last chance to configure (parallel on all testbeds)
        self._logger.debug(
            "ExperimentController: Starting parallel do_prestart")
        self._parallel([testbed.do_prestart for testbed in all_restart])

        # final netref step, fail if anything's left unresolved
        self.do_netrefs(data, fail_if_undefined=True)

        self._clear_caches()

        if not recover:
            # update execution xml with execution-specific values
            # TODO: BUG! Buggy code! Cannot handle serializing all attribute values (e.g. tun_key, which is non-ASCII)
            self._update_execute_xml()
            self.persist_execute_xml()

        # start experiment (parallel start on all testbeds)
        self._logger.debug("ExperimentController: Starting parallel do_start")
        self._parallel([testbed.start for testbed in all_restart])

        self._clear_caches()
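Taken end to end, _start drives every (re)started testbed through a fixed sequence of phases, resolving netrefs and programming cross-connections between them. The sketch below condenses that ordering; run_phases is a hypothetical, serialized stand-in for the parallel driver shown above.

# Phase names taken from the code above. Cross-connect steps are omitted from
# the list because, in _start, they take per-testbed cross_data and run
# serially between do_configure and do_prestart rather than through _parallel.
PARALLEL_PHASES = [
    "do_setup",
    "do_create",
    "do_connect_init",
    "do_connect_compl",
    "do_preconfigure",   # end of steps_to_configure()
    "do_configure",
    "do_prestart",
    "start",
]

def run_phases(testbeds, phases=PARALLEL_PHASES):
    # Hypothetical serial driver for illustration: finish a phase on every
    # testbed before the next begins; the real controller runs each phase
    # through _parallel instead.
    for phase in phases:
        for testbed in testbeds:
            getattr(testbed, phase)()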