Example #1
    def __init__(self):
        """Constructor for the server wrapper."""
        #self._app = Flask(__name__) # imports the named package, in this case this file
        # Imports the named module (package includes "." and this is not nice with PyMongo)
        self.config = ConfParser("flask.conf")
        self.general_section = self.config.get("general")
        self.template_folder = self.general_section.get("template_folder")
        self.fcgi_section = self.config.get("fcgi")
        self.certificates_section = self.config.get("certificates")
        self._app = Flask(__name__.split(".")[-1],
                          template_folder=self.template_folder)
        self._mongo = PyMongo(self._app)
        # Added in order to be able to execute "before_request" method
        app = self._app

        # Setup debugging for app
        cDebug = self.general_section.get("debug")
        if cDebug:  # log all actions on the XML-RPC interface

            def log_request(sender, **extra):
                logger.info(">>> REQUEST %s:\n%s" %
                            (request.path, request.data))

            request_started.connect(log_request, self._app)

            def log_response(sender, response, **extra):
                logger.info(">>> RESPONSE %s:\n%s" %
                            (response.status, response.data))

            request_finished.connect(log_response, self._app)

        @app.before_request
        def before_request():
            # "Attach" objects within the "g" object. This is passed to each view method
            g.mongo = self._mongo
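
This constructor (repeated in Examples #8, #25 and #29) wires request/response logging through Flask's signals, which are backed by the blinker package. A minimal, self-contained sketch of the same pattern, with illustrative names only:

import logging

from flask import Flask, request, request_started, request_finished

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)

def log_request(sender, **extra):
    # request_started fires before the view runs; sender is the app.
    logger.info(">>> REQUEST %s:\n%s", request.path, request.data)

def log_response(sender, response, **extra):
    # request_finished passes the outgoing response as a keyword argument.
    logger.info(">>> RESPONSE %s:\n%s", response.status, response.data)

request_started.connect(log_request, app)
request_finished.connect(log_response, app)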
Example #2
 def __init__(self):
     super(GENIv3Delegate, self).__init__()
     self._resource_manager = rm_adaptor
     self._allowed_peers = AllowedPeers.get_peers()
     self._mro_enabled =\
         ast.literal_eval(ConfParser("ro.conf").get("master_ro").
                          get("mro_enabled"))
     self._interdomain_available_to_user =\
         ast.literal_eval(ConfParser("ro.conf").get("resources").
                          get("interdomain_available_to_user"))
Example #3
 def __init__(self):
     super(SliceMonitoring, self).__init__()
     ms = ConfParser("ro.conf").get("monitoring")
     self.__ms_url = "%s://%s:%s%s" %\
         (ms.get("protocol"), ms.get("address"),
          ms.get("port"), ms.get("endpoint"))
     self.__topologies = etree.Element("topology_list")
     self.__stored = {}
     self.__mapping_c_interface = {}
     # self.__sdn_links = []
     self.__se_links = []
     self.__tn_links = []
     self.__hybrid_links = []
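
The topology containers above are lxml Element objects. A standalone sketch of how such a topology_list can be grown and serialized (element and attribute names here are illustrative, not taken from the project):

from lxml import etree

topologies = etree.Element("topology_list")
topology = etree.SubElement(topologies, "topology")
topology.set("type", "slice")  # illustrative attribute
se_link = etree.SubElement(topology, "link")
se_link.set("type", "se")

print(etree.tostring(topologies, pretty_print=True).decode())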
Example #4
 def __init__(self):
     super(BaseMonitoring, self).__init__()
     self.peers = [p for p in db_sync_manager.get_configured_peers()]
     self.peers_info = [p for p in db_sync_manager.get_info_peers()]
     self.domain_urn = ""
     self.domain_last_update = ""
     self.topology_list = etree.Element("topology_list")
     self.topology = etree.SubElement(self.topology_list, "topology")
     ## Operation mode
     self.config = ConfParser("ro.conf")
     master_ro = self.config.get("master_ro")
     self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))
     self.software_stacks = {
         "ofelia": "ocf",
         "felix": "fms",
     }
     self.urn_type_resources = {
         "crm": "vtam",
         "sdnrm": "openflow",
         "serm": "se",
         "tnrm": "tn",
     }
     self.urn_type_resources_variations = {
         "crm": ["vtam"],
         "sdnrm": ["openflow", "ofam"],
         "serm": ["se"],
         "tnrm": ["tn", "NSI"],
     }
     self.management_type_resources = {
         "crm": "server",
         "sdnrm": "switch",
         "serm": "se",
     }
     self.monitoring_expected_nodes = {
         "crm": "",
         "sdnrm": "switch",
         "serm": "se",
         "tnrm": "tn",
     }
     self.peers_by_domain = {}
     self.__group_peers_by_domain()
     ## Configurations
     # CRM config
     self.config_crm = core.config.JSONParser("crm.json")
     self.crm_mgmt_info = self.config_crm.get("device_management_info")
     # SDNRM config
     self.config_sdnrm = core.config.JSONParser("sdnrm.json")
     self.sdnrm_mgmt_info = self.config_sdnrm.get("device_management_info")
     # SERM config
     self.config_serm = core.config.JSONParser("serm.json")
     self.serm_mgmt_info = self.config_serm.get("device_management_info")
Example #5
 def __init__(self):
     super(MonitoringManager, self).__init__("monitoring")
     self.config = ConfParser("ro.conf")
     self.monitoring_section = self.config.get("monitoring")
     self.protocol = self.monitoring_section.get("protocol")
     self.address = self.monitoring_section.get("address")
     self.port = self.monitoring_section.get("port")
     self.endpoint = self.monitoring_section.get("endpoint")
     self.monitoring_server = {
         "protocol": self.protocol,
         "address": self.address,
         "port": self.port,
         "endpoint": self.endpoint,
     }
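
The monitoring_server dict assembled here carries exactly the four pieces that Example #3 interpolates into the MS URL. A sketch with placeholder values (not real settings from ro.conf):

# Placeholder values standing in for the [monitoring] section of ro.conf.
monitoring_server = {
    "protocol": "http",
    "address": "127.0.0.1",
    "port": "8448",
    "endpoint": "/monitoring-system/topology",
}

ms_url = "%s://%s:%s%s" % (monitoring_server["protocol"],
                           monitoring_server["address"],
                           monitoring_server["port"],
                           monitoring_server["endpoint"])
print(ms_url)  # -> http://127.0.0.1:8448/monitoring-system/topology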
Example #6
 def __init__(self):
     GenericCommand.__init__(self)
     self.type_ = None
     self.addr_ = None
     self.port_ = None
     self.protocol_ = None
     self.endpoint_ = None
     self.user_ = None
     self.password_ = None
     self.am_type_ = None
     self.am_version_ = None
     self.config = ConfParser("ro.conf")
     self.master_ro = self.config.get("master_ro")
     self.mro_enabled = ast.literal_eval(self.master_ro.get("mro_enabled"))
Example #7
class MonitoringManager(ResourceDetector):
    """
    Periodically communicates both physical and slice data to the MS.
    """

    def __init__(self):
        super(MonitoringManager, self).__init__("monitoring")
        self.config = ConfParser("ro.conf")
        self.monitoring_section = self.config.get("monitoring")
        self.protocol = self.monitoring_section.get("protocol")
        self.address = self.monitoring_section.get("address")
        self.port = self.monitoring_section.get("port")
        self.endpoint = self.monitoring_section.get("endpoint")
        self.monitoring_server = {"protocol": self.protocol,
                                    "address": self.address,
                                    "port": self.port,
                                    "endpoint": self.endpoint,
                                }

    def physical_topology(self):
        try:
            return PhysicalMonitoring().send_topology(self.monitoring_server)
        except Exception as e:
            logger.error("Physical topology - Could not send topology. Details: %s" % e)
            logger.error(traceback.format_exc())
    def slice_topology(self):
        return SliceMonitoring().send_topology(self.monitoring_server)
Example #8
    def __init__(self):
        """Constructor for the server wrapper."""
        #self._app = Flask(__name__) # imports the named package, in this case this file
        # Imports the named module (package includes "." and this is not nice with PyMongo)
        self.config = ConfParser("flask.conf")
        self.general_section = self.config.get("general")
        self.template_folder = self.general_section.get("template_folder")
        self.fcgi_section = self.config.get("fcgi")
        self.certificates_section = self.config.get("certificates")
        self._app = Flask(__name__.split(".")[-1],
                          template_folder=self.template_folder)
        self._mongo = PyMongo(self._app)
        # Added in order to be able to execute "before_request" method
        app = self._app

        # Setup debugging for app
        cDebug = self.general_section.get("debug")
        if cDebug: # log all actions on the XML-RPC interface
            def log_request(sender, **extra):
                logger.info(">>> REQUEST %s:\n%s" % (request.path, request.data))
            request_started.connect(log_request, self._app)
            def log_response(sender, response, **extra):
                logger.info(">>> RESPONSE %s:\n%s" % (response.status, response.data))
            request_finished.connect(log_response, self._app)

        @app.before_request
        def before_request():
            # "Attach" objects within the "g" object. This is passed to each view method
            g.mongo = self._mongo
Example #9
class MonitoringManager(ResourceDetector):
    """
    Periodically communicates both physical and slice data to the MS.
    """
    def __init__(self):
        super(MonitoringManager, self).__init__("monitoring")
        self.config = ConfParser("ro.conf")
        self.monitoring_section = self.config.get("monitoring")
        self.protocol = self.monitoring_section.get("protocol")
        self.address = self.monitoring_section.get("address")
        self.port = self.monitoring_section.get("port")
        self.endpoint = self.monitoring_section.get("endpoint")
        self.monitoring_server = {
            "protocol": self.protocol,
            "address": self.address,
            "port": self.port,
            "endpoint": self.endpoint,
        }

    def physical_topology(self):
        try:
            return PhysicalMonitoring().send_topology(self.monitoring_server)
        except Exception as e:
            logger.error(
                "Physical topology - Could not send topology. Details: %s" % e)
            logger.error(traceback.format_exc())

    def slice_topology(self):
        return SliceMonitoring().send_topology(self.monitoring_server)
Example #10
 def __init__(self):
     super(GENIv3Handler, self).__init__(logger)
     self._delegate = None
     self._verify_users =\
         ast.literal_eval(ConfParser("auth.conf").get("certificates").
                          get("verify_users"))
     self.__credential_manager = GCFCredentialManager()
Example #11
    def __init__(self, path=None):
        super(GENIv3Delegate, self).__init__()

        self.SEResources = SEConfigurator.seConfigurator()

        self.SESlices = seSlicesWithSlivers()
        self._verify_users =\
            ast.literal_eval(ConfParser("geniv3.conf").get("certificates").get("verify_users"))
Example #12
 def __init__(self):
     self.__abs_path = dirname(dirname(dirname(abspath(__file__))))
     self.__trusted_certs = abspath(join(
         self.__abs_path,
         ast.literal_eval(
             ConfParser("auth.conf").get("certificates").get("cert_root"))))
     self.__root_cert = join(dirname(self.__trusted_certs), "server.crt")
     self.__root_cert = open(self.__root_cert, "r").read()
     self.__auth = CredentialVerifier(self.__trusted_certs)
     self.__define_config_object()
     self.__auth = CredentialVerifier(self.__config.TRUSTED_ROOTS_DIR)
     self.SFA_CREDENTIAL_TYPE = "geni_sfa"
Example #13
    def __init__(self):
        """
        Constructor of the service.
        """
        self.config = ConfParser("ro.conf")
        self.scheduler = self.config.get("scheduler")
        interval = int(self.scheduler.get("frequency"))
        master_ro = self.config.get("master_ro")
        mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))
        db_name = "felix_ro"
        if mro_enabled:
            db_name = "felix_mro"
        global ro_scheduler
        ro_scheduler = BackgroundScheduler()
        ro_scheduler.add_jobstore(
            MongoDBJobStore(database=db_name, collection="scheduler.jobs"))
        ro_scheduler.start()

        super(ROSchedulerService, self).__init__(
            "ROSchedulerService", interval)
        self.first_time = True
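
The scheduler bootstrap above persists jobs in MongoDB so they survive restarts. A standalone sketch of the same wiring, assuming APScheduler 3.x and a local mongod (the job function is illustrative):

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.triggers.interval import IntervalTrigger

def heartbeat():
    # Stand-in for the resource-detector/monitoring jobs of Example #20.
    print("scheduled job ran")

scheduler = BackgroundScheduler()
scheduler.add_jobstore(
    MongoDBJobStore(database="felix_ro", collection="scheduler.jobs"))
scheduler.start()

# replace_existing avoids duplicate-id errors when the job is already stored.
scheduler.add_job(heartbeat, trigger=IntervalTrigger(minutes=1),
                  id="heartbeat", replace_existing=True)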
Example #14
 def __init__(self):
     super(MonitoringManager, self).__init__("monitoring")
     self.config = ConfParser("ro.conf")
     self.monitoring_section = self.config.get("monitoring")
     self.protocol = self.monitoring_section.get("protocol")
     self.address = self.monitoring_section.get("address")
     self.port = self.monitoring_section.get("port")
     self.endpoint = self.monitoring_section.get("endpoint")
     self.monitoring_server = {"protocol": self.protocol,
                                 "address": self.address,
                                 "port": self.port,
                                 "endpoint": self.endpoint,
                             }
Example #15
 def __init__(self):
     super(BaseMonitoring, self).__init__()
     self.peers = [p for p in db_sync_manager.get_configured_peers()]
     self.peers_info = [p for p in db_sync_manager.get_info_peers()]
     self.domain_urn = ""
     self.domain_last_update = ""
     self.topology_list = etree.Element("topology_list")
     self.topology = etree.SubElement(self.topology_list, "topology")
     ## Operation mode
     self.config = ConfParser("ro.conf")
     master_ro = self.config.get("master_ro")
     self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))
     self.software_stacks = {
         "ofelia": "ocf",
         "felix": "fms",
     }
     self.urn_type_resources = {
         "crm": "vtam",
         "sdnrm": "openflow",
         "serm": "se",
         "tnrm": "tn",
     }
     self.urn_type_resources_variations = {
         "crm": ["vtam"],
         "sdnrm": ["openflow", "ofam"],
         "serm": ["se"],
         "tnrm": ["tn", "NSI"],
     }
     self.management_type_resources = {
         "crm": "server",
         "sdnrm": "switch",
         "serm": "se",
     }
     self.monitoring_expected_nodes = {
         "crm": "",
         "sdnrm": "switch",
         "serm": "se",
         "tnrm": "tn",
     }
     self.peers_by_domain = {}
     self.__group_peers_by_domain()
     ## Configurations
     # CRM config
     self.config_crm = core.config.JSONParser("crm.json")
     self.crm_mgmt_info = self.config_crm.get("device_management_info")
     # SDNRM config
     self.config_sdnrm = core.config.JSONParser("sdnrm.json")
     self.sdnrm_mgmt_info = self.config_sdnrm.get("device_management_info")
     # SERM config
     self.config_serm = core.config.JSONParser("serm.json")
     self.serm_mgmt_info = self.config_serm.get("device_management_info")
Example #16
File: manage.py Project: HalasNet/felix
 def __init__(self):
     GenericCommand.__init__(self)
     self.type_ = None
     self.addr_ = None
     self.port_ = None
     self.protocol_ = None
     self.endpoint_ = None
     self.user_ = None
     self.password_ = None
     self.am_type_ = None
     self.am_version_ = None
     self.config = ConfParser("ro.conf")
     self.master_ro = self.config.get("master_ro")
     self.mro_enabled = ast.literal_eval(self.master_ro.get("mro_enabled"))
Example #17
    def __init__(self):
        """
        Constructor of the service.
        """
        self.config = ConfParser("ro.conf")
        master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))

        self.TABLES = {
            "domain.routing": self.__get_table("domain.routing"),
            "resource.com.node": self.__get_table("resource.com.node"),
            "resource.com.link": self.__get_table("resource.com.link"),
            "resource.of.node": self.__get_table("resource.of.node"),
            "resource.of.link": self.__get_table("resource.of.link"),
            "resource.se.link": self.__get_table("resource.se.link"),
            "resource.se.node": self.__get_table("resource.se.node"),
            "resource.tn.link": self.__get_table("resource.tn.link"),
            "resource.tn.node": self.__get_table("resource.tn.node"),
            "topology.slice": self.__get_table("topology.slice"),
            "topology.slice.sdn": self.__get_table("topology.slice.sdn"),
            "scheduler.jobs": self.__get_table("scheduler.jobs"),
            "domain.info": self.__get_table("domain.info"),
            "topology.physical": self.__get_table("topology.physical"),
        }
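
__get_table (shown in Examples #24, #27 and #28) resolves these names with nested getattr calls; pymongo also accepts subscript access, which reads more naturally for dotted collection names such as "domain.routing". An equivalent sketch, assuming a local mongod:

import pymongo

def get_table(table_name, mro_enabled=False):
    # Same felix_ro/felix_mro switch the examples derive from ro.conf.
    db_name = "felix_mro" if mro_enabled else "felix_ro"
    return pymongo.MongoClient()[db_name][table_name]

routing_table = get_table("domain.routing")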
Example #18
 def __init__(self):
     super(SliceMonitoring, self).__init__()
     ms = ConfParser("ro.conf").get("monitoring")
     self.__ms_url = "%s://%s:%s%s" %\
         (ms.get("protocol"), ms.get("address"),
          ms.get("port"), ms.get("endpoint"))
     self.__topologies = etree.Element("topology_list")
     self.__stored = {}
     self.__mapping_c_interface = {}
     #self.__sdn_links = []
     self.__se_links = []
     self.__tn_links = []
     self.__hybrid_links = []
Example #19
File: action_db.py Project: HalasNet/felix
    def __init__(self):
        """
        Constructor of the service.
        """
        self.config = ConfParser("ro.conf")
        master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))

        self.TABLES = {
            "domain.routing":
                self.__get_table("domain.routing"),
            "resource.com.node":
                self.__get_table("resource.com.node"),
            "resource.com.link":
                self.__get_table("resource.com.link"),
            "resource.of.node":
                self.__get_table("resource.of.node"),
            "resource.of.link":
                self.__get_table("resource.of.link"),
            "resource.se.link":
                self.__get_table("resource.se.link"),
            "resource.se.node":
                self.__get_table("resource.se.node"),
            "resource.tn.link":
                self.__get_table("resource.tn.link"),
            "resource.tn.node":
                self.__get_table("resource.tn.node"),
            "topology.slice":
                self.__get_table("topology.slice"),
            "topology.slice.sdn":
                self.__get_table("topology.slice.sdn"),
            "scheduler.jobs":
                self.__get_table("scheduler.jobs"),
            "domain.info":
                self.__get_table("domain.info"),
            "topology.physical":
                self.__get_table("topology.physical"),
        }
Example #20
class ROSchedulerService(Service):
    def __init__(self):
        """
        Constructor of the service.
        """
        self.config = ConfParser("ro.conf")
        self.scheduler = self.config.get("scheduler")
        interval = int(self.scheduler.get("frequency"))
        master_ro = self.config.get("master_ro")
        mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))
        self.tnrm_refresh = self.config.get("tnrm").get("refresh_timeout")
        db_name = "felix_ro"
        if mro_enabled:
            db_name = "felix_mro"
        global ro_scheduler
        ro_scheduler = BackgroundScheduler()
        ro_scheduler.add_jobstore(
            MongoDBJobStore(database=db_name, collection="scheduler.jobs"))
        ro_scheduler.start()

        super(ROSchedulerService, self).__init__("ROSchedulerService",
                                                 interval)
        self.first_time = True

    @staticmethod
    def get_scheduler():
        return ro_scheduler

    def do_action(self):
        jobs = ro_scheduler.get_jobs()
        logger.debug("Scheduled Jobs=%s" % (jobs, ))

        if self.first_time:
            self.__oneshot_jobs()
            self.__cron_jobs()
            self.first_time = False

            if self.tnrm_refresh:
                self.__add_interval_tn_refresh(self.tnrm_refresh)

    def stop(self):
        # remove the stored jobs!
        self.__remove(ro_scheduler.get_jobs())
        ro_scheduler.shutdown()
        logger.info("ro_scheduler shutdown")
        super(ROSchedulerService, self).stop()

    def __remove(self, jobs):
        for job in jobs:
            job.remove()

    def __add_interval_tn_refresh(self, minutes):
        try:
            ro_scheduler.add_job(tn_resource_refresh,
                                 trigger=IntervalTrigger(minutes=int(minutes)),
                                 id="interval_tn_refresh",
                                 replace_existing=True)
        except Exception as e:
            logger.warning("interval_jobs failure: %s" % (e, ))

    def __add_oneshot(self, secs_, func_, id_):
        try:
            run_time = datetime.now() + timedelta(seconds=secs_)
            ro_scheduler.add_job(func_,
                                 trigger=DateTrigger(run_date=run_time),
                                 id=id_)

        except Exception as e:
            logger.warning("oneshot_jobs failure: %s" % (e, ))

    def __oneshot_jobs(self):
        self.__add_oneshot(int(self.scheduler.get("oneshot_ro")),
                           ro_resource_detector, "oneshot_ro_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_com")),
                           com_resource_detector, "oneshot_com_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_sdn")),
                           sdn_resource_detector, "oneshot_sdn_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_se")),
                           se_resource_detector, "oneshot_se_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_tn")),
                           tn_resource_detector, "oneshot_tn_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_phy-monit")),
                           physical_monitoring, "oneshot_physical_monitoring")
        self.__add_oneshot(int(self.scheduler.get("oneshot_slice-monit")),
                           slice_monitoring, "oneshot_slice_monitoring")

    def __add_cron(self, func_, id_, hour_, min_, sec_):
        try:
            tr_ = CronTrigger(hour=hour_, minute=min_, second=sec_)
            ro_scheduler.add_job(func_, trigger=tr_, id=id_)
        except Exception as e:
            logger.warning("cron_jobs failure: %s" % (e, ))

    def __cron_jobs(self):
        self.__add_cron(ro_resource_detector, "cron_ro_rd", 0, 1, 0)
        self.__add_cron(com_resource_detector, "cron_com_rd", 0, 11, 0)
        self.__add_cron(sdn_resource_detector, "cron_sdn_rd", 0, 21, 0)
        self.__add_cron(se_resource_detector, "cron_se_rd", 0, 31, 0)
        self.__add_cron(tn_resource_detector, "cron_tn_rd", 0, 41, 0)
        self.__add_cron(physical_monitoring, "cron_physical_monitoring", 0, 51,
                        0)
        self.__add_cron(slice_monitoring, "cron_slice_monitoring", 1, 1, 0)
Example #21
 def __init__(self):
     self.__mutex = threading.Lock()
     self.config = ConfParser("ro.conf")
     master_ro = self.config.get("master_ro")
     self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))
Example #23
 def __init__(self):
     super(GENIv3DelegateBase, self).__init__()
     self.config = ConfParser("geniv3.conf")
     self.general_section = self.config.get("general")
     self.certificates_section = self.config.get("certificates")
Example #24
File: manage.py Project: HalasNet/felix
class RoutingTableCommand(GenericCommand):

    def __init__(self):
        GenericCommand.__init__(self)
        self.type_ = None
        self.addr_ = None
        self.port_ = None
        self.protocol_ = None
        self.endpoint_ = None
        self.user_ = None
        self.password_ = None
        self.am_type_ = None
        self.am_version_ = None
        self.config = ConfParser("ro.conf")
        self.master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(self.master_ro.get("mro_enabled"))

    def __get_table(self, table_name):
        db_name = "felix_ro"
        if self.mro_enabled:
            db_name = "felix_mro"
        return getattr(getattr(pymongo.MongoClient(), db_name), table_name)

    def updateType(self, type_):
        self.type_ = type_

    def updateAddress(self, address_):
        self.addr_ = address_

    def updatePort(self, port_):
        self.port_ = port_

    def updateProtocol(self, protocol_):
        self.protocol_ = protocol_

    def updateEndpoint(self, endpoint_):
        self.endpoint_ = endpoint_

    def updateUser(self, user_):
        self.user_ = user_

    def updatePassword(self, password_):
        self.password_ = password_

    def updateAMType(self, am_type_):
        self.am_type_ = am_type_

    def updateAMVersion(self, am_version_):
        self.am_version_ = am_version_

    def checkAllNone(self):
        if self.type_ is not None:
            raise AttributeError("Type argument is NOT allowed!")

        if self.addr_ is not None:
            raise AttributeError("Address argument is NOT allowed!")

        if self.port_ is not None:
            raise AttributeError("Port argument is NOT allowed!")

    def getTable(self):
        db_name = "felix_ro"
        if self.mro_enabled:
            db_name = "felix_mro"
        return getattr(getattr(pymongo.MongoClient(), db_name), "domain.routing")
Example #25
class FlaskServer(object):
    """
    Encapsulates a Flask server instance.
    It also exports/defines the rpcservice interface.

    When a request comes in the following chain is walked through:
        --http--> nginx webserver --fcgi--> WSGIServer --WSGI--> FlaskApp
    When using the development server:
        werkzeug server --WSGI--> FlaskApp
    """

    def __init__(self):
        """Constructor for the server wrapper."""
        #self._app = Flask(__name__) # imports the named package, in this case this file
        # Imports the named module (package includes "." and this is not nice with PyMongo)
        self.config = ConfParser("flask.conf")
        self.general_section = self.config.get("general")
        self.template_folder = self.general_section.get("template_folder")
        self.fcgi_section = self.config.get("fcgi")
        self.certificates_section = self.config.get("certificates")
        self._app = Flask(__name__.split(".")[-1],
                          template_folder=self.template_folder)
        self._mongo = PyMongo(self._app)
        # Added in order to be able to execute "before_request" method
        app = self._app

        # Setup debugging for app
        cDebug = self.general_section.get("debug")
        if cDebug: # log all actions on the XML-RPC interface
            def log_request(sender, **extra):
                logger.info(">>> REQUEST %s:\n%s" % (request.path, request.data))
            request_started.connect(log_request, self._app)
            def log_response(sender, response, **extra):
                logger.info(">>> RESPONSE %s:\n%s" % (response.status, response.data))
            request_finished.connect(log_response, self._app)

        @app.before_request
        def before_request():
            # "Attach" objects within the "g" object. This is passed to each view method
            g.mongo = self._mongo

    @property
    def app(self):
        """Returns the flask instance (not part of the service interface, since it is specific to flask)."""
        return self._app

    def add_routes(self):
        """
        New method. Allows registering URLs from the views file.
        """
#        from server.flask import views as flask_views
#        flask_views_custom_methods = filter(lambda x: x.startswith("view_"), dir(flask_views))
#        for custom_method in flask_views_custom_methods:
#            # Retrieve data needed to add the URL rule to the Flask app
#            view_method = getattr(locals()["flask_views"], custom_method)
#            docstring = getattr(view_method, "__doc__")
#            index_start = docstring.index("@app.route")
#            index_end = index_start + len("@app.route") + 1
#            custom_method_url = docstring[index_end:].replace(" ","").replace("\n","")
#            # Get: (a) method URL to bind flask app, (b), method name, (c) method object to invoke
#            self._app.add_url_rule(custom_method_url, custom_method, view_func=view_method(self._mongo))
        self._app.register_blueprint(ro_flask_views)

    def runServer(self, services=[]):
        """Starts up the server. It (will) support different config options via the config plugin."""
        self.add_routes()
        debug = self.general_section.get("debug")
        host = self.general_section.get("host")
        app_port = int(self.general_section.get("port"))
        template_folder = self.general_section.get("template_folder")
        cFCGI = ast.literal_eval(self.fcgi_section.get("enabled"))
        fcgi_port = int(self.fcgi_section.get("port"))
        must_have_client_cert = ast.literal_eval(self.certificates_section.get("force_client_certificate"))
        if cFCGI:
            logger.info("registering fcgi server at %s:%i", host, fcgi_port)
            from flup.server.fcgi import WSGIServer
            WSGIServer(self._app, bindAddress=(host, fcgi_port)).run()
        else:
            logger.info("registering app server at %s:%i", host, app_port)
            # this workaround makes sure that the client cert can be acquired later (even when running the development server)
            # copied all this stuff from the actual flask implementation, so we can intervene and adjust the ssl context
            # self._app.run(host=host, port=app_port, ssl_context='adhoc', debug=debug, request_handler=ClientCertHTTPRequestHandler)

            # the code from flask's `run...`
            # see https://github.com/mitsuhiko/flask/blob/master/flask/app.py
            options = {}
            try:
                # now the code from werkzeug's `run_simple(host, app_port, self._app, **options)`
                # see https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/serving.py
                from werkzeug.debug import DebuggedApplication
                import socket
                application = DebuggedApplication(self._app, True)
                
                # Set up an SSL context
                from OpenSSL import SSL
                context = SSL.Context(SSL.SSLv23_METHOD)
                certs_path = os.path.normpath(os.path.join(os.path.dirname(__file__), "../../..", "cert"))
                context_crt = os.path.join(certs_path, "server.crt")
                context_key = os.path.join(certs_path, "server.key")
                try:
                    context.use_certificate_file(context_crt)
                    context.use_privatekey_file(context_key)
                except Exception as e:
                    logger.critical("error starting flask server. Cert or key is missing under %s", certs_path)
                    sys.exit(e)
                
                def inner():
                    #server = serving.make_server(host, app_port, self._app, False, 1, ClientCertHTTPRequestHandler, False, 'adhoc')
                    server = serving.make_server(host, app_port, self._app, False, 1, ClientCertHTTPRequestHandler, False, ssl_context=context)
                    #server = serving.make_server(host, app_port, self._app, False, 1, ClientCertHTTPRequestHandler, False, ssl_context=(context_crt, context_key))
                    # The following line is the reason why I copied all that code!
                    if must_have_client_cert:
                        # FIXME: what works with web app does not work with cli. Check this out
                        server.ssl_context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, lambda a,b,c,d,e: True)
                    # before enter in the loop, start the supplementary services
                    for s in services:
                        s.start()
                    # That's it
                    server.serve_forever()
                address_family = serving.select_ip_version(host, app_port)
                test_socket = socket.socket(address_family, socket.SOCK_STREAM)
                test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                test_socket.bind((host, app_port))
                test_socket.close()
                serving.run_with_reloader(inner, None, 1)
            finally:
                self._app._got_first_request = False
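
The SSL setup inside runServer is the part worth isolating: it builds a pyOpenSSL context and, when force_client_certificate is set, demands a client certificate. A minimal sketch of just that context, assuming server.crt/server.key exist in the working directory:

from OpenSSL import SSL

context = SSL.Context(SSL.SSLv23_METHOD)
context.use_certificate_file("server.crt")
context.use_privatekey_file("server.key")

def accept_any_cert(conn, cert, errnum, depth, ok):
    # Mirrors the example's permissive lambda; a real deployment should
    # verify the certificate chain instead of returning True unconditionally.
    return True

context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
                   accept_any_cert)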
Example #26
File: tn.py Project: HalasNet/felix
 def __init__(self):
     super(TNUtils, self).__init__()
     w_ = ConfParser("ro.conf").get("tnrm")
     self.__workaround_split_allocation =\
         ast.literal_eval(w_.get("split_workaround"))
Example #27
class CommandMgr(object):
    def __init__(self):
        """
        Constructor of the service.
        """
        self.config = ConfParser("ro.conf")
        master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))

        self.TABLES = {
            "domain.routing": self.__get_table("domain.routing"),
            "resource.com.node": self.__get_table("resource.com.node"),
            "resource.com.link": self.__get_table("resource.com.link"),
            "resource.of.node": self.__get_table("resource.of.node"),
            "resource.of.link": self.__get_table("resource.of.link"),
            "resource.se.link": self.__get_table("resource.se.link"),
            "resource.se.node": self.__get_table("resource.se.node"),
            "resource.tn.link": self.__get_table("resource.tn.link"),
            "resource.tn.node": self.__get_table("resource.tn.node"),
            "topology.slice": self.__get_table("topology.slice"),
            "topology.slice.sdn": self.__get_table("topology.slice.sdn"),
            "scheduler.jobs": self.__get_table("scheduler.jobs"),
            "domain.info": self.__get_table("domain.info"),
            "topology.physical": self.__get_table("topology.physical"),
        }

    def __get_table(self, table_name):
        db_name = "felix_ro"
        if self.mro_enabled:
            db_name = "felix_mro"
        return getattr(getattr(pymongo.MongoClient(), db_name), table_name)

    def __select(self, table, name):
        print("\n\n" + "(RO) %s has %d rows\n" % (name, table.count()))
        for row in table.find():
            print("%s" % (row))

    def __delete(self, table, name):
        table.remove()
        print("\n\n" + "Deleted all rows of (RO) %s" % (name))

    def list_tables(self):
        print("\n\nManaged Tables: %s\n\n" % self.TABLES.keys())

    def select_routing_table(self):
        self.__select(self.TABLES["domain.routing"], "domain.routing")

    def select_ofdatapath_table(self):
        self.__select(self.TABLES["resource.of.node"], "resource.of.node")

    def select_oflink_table(self):
        self.__select(self.TABLES["resource.of.link"], "resource.of.link")

    def delete_routing_table(self):
        self.__delete(self.TABLES["domain.routing"], "domain.routing")

    def delete_ofdatapath_table(self):
        self.__delete(self.TABLES["resource.of.node"], "resource.of.node")

    def delete_oflink_table(self):
        self.__delete(self.TABLES["resource.of.link"], "resource.of.link")

    def delete_all_tables(self):
        for table, mongo_table in self.TABLES.items():
            self.__delete(mongo_table, table)
Example #28
File: action_db.py Project: HalasNet/felix
class CommandMgr(object):

    def __init__(self):
        """
        Constructor of the service.
        """
        self.config = ConfParser("ro.conf")
        master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))

        self.TABLES = {
            "domain.routing":
                self.__get_table("domain.routing"),
            "resource.com.node":
                self.__get_table("resource.com.node"),
            "resource.com.link":
                self.__get_table("resource.com.link"),
            "resource.of.node":
                self.__get_table("resource.of.node"),
            "resource.of.link":
                self.__get_table("resource.of.link"),
            "resource.se.link":
                self.__get_table("resource.se.link"),
            "resource.se.node":
                self.__get_table("resource.se.node"),
            "resource.tn.link":
                self.__get_table("resource.tn.link"),
            "resource.tn.node":
                self.__get_table("resource.tn.node"),
            "topology.slice":
                self.__get_table("topology.slice"),
            "topology.slice.sdn":
                self.__get_table("topology.slice.sdn"),
            "scheduler.jobs":
                self.__get_table("scheduler.jobs"),
            "domain.info":
                self.__get_table("domain.info"),
            "topology.physical":
                self.__get_table("topology.physical"),
        }

    def __get_table(self, table_name):
        db_name = "felix_ro"
        if self.mro_enabled:
            db_name = "felix_mro"
        return getattr(getattr(pymongo.MongoClient(), db_name), table_name)

    def __select(self, table, name):
        print("\n\n" + "(RO) %s has %d rows\n" % (name, table.count()))
        for row in table.find():
            print("%s" % (row))

    def __delete(self, table, name):
        table.remove()
        print("\n\n" + "Deleted all rows of (RO) %s" % (name))

    def list_tables(self):
        print("\n\nManaged Tables: %s\n\n" % self.TABLES.keys())

    def select_routing_table(self):
        self.__select(self.TABLES["domain.routing"],
                      "domain.routing")

    def select_ofdatapath_table(self):
        self.__select(self.TABLES["resource.of.node"],
                      "resource.of.node")

    def select_oflink_table(self):
        self.__select(self.TABLES["resource.of.link"],
                      "resource.of.link")

    def delete_routing_table(self):
        self.__delete(self.TABLES["domain.routing"],
                      "domain.routing")

    def delete_ofdatapath_table(self):
        self.__delete(self.TABLES["resource.of.node"],
                      "resource.of.node")

    def delete_oflink_table(self):
        self.__delete(self.TABLES["resource.of.link"],
                      "resource.of.link")

    def delete_all_tables(self):
        for table, mongo_table in self.TABLES.items():
            self.__delete(mongo_table, table)
Example #29
class FlaskServer(object):
    """
    Encapsulates a Flask server instance.
    It also exports/defines the rpcservice interface.

    When a request comes in the following chain is walked through:
        --http--> nginx webserver --fcgi--> WSGIServer --WSGI--> FlaskApp
    When using the development server:
        werkzeug server --WSGI--> FlaskApp
    """
    def __init__(self):
        """Constructor for the server wrapper."""
        #self._app = Flask(__name__) # imports the named package, in this case this file
        # Imports the named module (package includes "." and this is not nice with PyMongo)
        self.config = ConfParser("flask.conf")
        self.general_section = self.config.get("general")
        self.template_folder = self.general_section.get("template_folder")
        self.fcgi_section = self.config.get("fcgi")
        self.certificates_section = self.config.get("certificates")
        self._app = Flask(__name__.split(".")[-1],
                          template_folder=self.template_folder)
        self._mongo = PyMongo(self._app)
        # Added in order to be able to execute "before_request" method
        app = self._app

        # Setup debugging for app
        cDebug = self.general_section.get("debug")
        if cDebug:  # log all actions on the XML-RPC interface

            def log_request(sender, **extra):
                logger.info(">>> REQUEST %s:\n%s" %
                            (request.path, request.data))

            request_started.connect(log_request, self._app)

            def log_response(sender, response, **extra):
                logger.info(">>> RESPONSE %s:\n%s" %
                            (response.status, response.data))

            request_finished.connect(log_response, self._app)

        @app.before_request
        def before_request():
            # "Attach" objects within the "g" object. This is passed to each view method
            g.mongo = self._mongo

    @property
    def app(self):
        """Returns the flask instance (not part of the service interface, since it is specific to flask)."""
        return self._app

    def add_routes(self):
        """
        New method. Allows registering URLs from the views file.
        """
        #        from server.flask import views as flask_views
        #        flask_views_custom_methods = filter(lambda x: x.startswith("view_"), dir(flask_views))
        #        for custom_method in flask_views_custom_methods:
        #            # Retrieve data needed to add the URL rule to the Flask app
        #            view_method = getattr(locals()["flask_views"], custom_method)
        #            docstring = getattr(view_method, "__doc__")
        #            index_start = docstring.index("@app.route")
        #            index_end = index_start + len("@app.route") + 1
        #            custom_method_url = docstring[index_end:].replace(" ","").replace("\n","")
        #            # Get: (a) method URL to bind flask app, (b), method name, (c) method object to invoke
        #            self._app.add_url_rule(custom_method_url, custom_method, view_func=view_method(self._mongo))
        self._app.register_blueprint(ro_flask_views)

    def runServer(self, services=[]):
        """Starts up the server. It (will) support different config options via the config plugin."""
        self.add_routes()
        debug = self.general_section.get("debug")
        host = self.general_section.get("host")
        app_port = int(self.general_section.get("port"))
        template_folder = self.general_section.get("template_folder")
        cFCGI = ast.literal_eval(self.fcgi_section.get("enabled"))
        fcgi_port = int(self.fcgi_section.get("port"))
        must_have_client_cert = ast.literal_eval(
            self.certificates_section.get("force_client_certificate"))
        if cFCGI:
            logger.info("registering fcgi server at %s:%i", host, fcgi_port)
            from flup.server.fcgi import WSGIServer
            WSGIServer(self._app, bindAddress=(host, fcgi_port)).run()
        else:
            logger.info("registering app server at %s:%i", host, app_port)
            # this workaround makes sure that the client cert can be acquired later (even when running the development server)
            # copied all this stuff from the actual flask implementation, so we can intervene and adjust the ssl context
            # self._app.run(host=host, port=app_port, ssl_context='adhoc', debug=debug, request_handler=ClientCertHTTPRequestHandler)

            # the code from flask's `run...`
            # see https://github.com/mitsuhiko/flask/blob/master/flask/app.py
            options = {}
            try:
                # now the code from werkzeug's `run_simple(host, app_port, self._app, **options)`
                # see https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/serving.py
                from werkzeug.debug import DebuggedApplication
                import socket
                application = DebuggedApplication(self._app, True)

                # Set up an SSL context
                from OpenSSL import SSL
                context = SSL.Context(SSL.SSLv23_METHOD)
                certs_path = os.path.normpath(
                    os.path.join(os.path.dirname(__file__), "../../..",
                                 "cert"))
                context_crt = os.path.join(certs_path, "server.crt")
                context_key = os.path.join(certs_path, "server.key")
                try:
                    context.use_certificate_file(context_crt)
                    context.use_privatekey_file(context_key)
                except Exception as e:
                    logger.critical(
                        "error starting flask server. Cert or key is missing under %s",
                        certs_path)
                    sys.exit(e)

                def inner():
                    #server = serving.make_server(host, app_port, self._app, False, 1, ClientCertHTTPRequestHandler, False, 'adhoc')
                    server = serving.make_server(host,
                                                 app_port,
                                                 self._app,
                                                 False,
                                                 1,
                                                 ClientCertHTTPRequestHandler,
                                                 False,
                                                 ssl_context=context)
                    #server = serving.make_server(host, app_port, self._app, False, 1, ClientCertHTTPRequestHandler, False, ssl_context=(context_crt, context_key))
                    # The following line is the reason why I copied all that code!
                    if must_have_client_cert:
                        # FIXME: what works with web app does not work with cli. Check this out
                        server.ssl_context.set_verify(
                            SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
                            lambda a, b, c, d, e: True)
                    # before enter in the loop, start the supplementary services
                    for s in services:
                        s.start()
                    # That's it
                    server.serve_forever()

                address_family = serving.select_ip_version(host, app_port)
                test_socket = socket.socket(address_family, socket.SOCK_STREAM)
                test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
                                       1)
                test_socket.bind((host, app_port))
                test_socket.close()
                serving.run_with_reloader(inner, None, 1)
            finally:
                self._app._got_first_request = False
Example #30
class RoutingTableCommand(GenericCommand):
    def __init__(self):
        GenericCommand.__init__(self)
        self.type_ = None
        self.addr_ = None
        self.port_ = None
        self.protocol_ = None
        self.endpoint_ = None
        self.user_ = None
        self.password_ = None
        self.am_type_ = None
        self.am_version_ = None
        self.config = ConfParser("ro.conf")
        self.master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(self.master_ro.get("mro_enabled"))

    def __get_table(self, table_name):
        db_name = "felix_ro"
        if self.mro_enabled:
            db_name = "felix_mro"
        return getattr(getattr(pymongo.MongoClient(), db_name), table_name)

    def updateType(self, type_):
        self.type_ = type_

    def updateAddress(self, address_):
        self.addr_ = address_

    def updatePort(self, port_):
        self.port_ = port_

    def updateProtocol(self, protocol_):
        self.protocol_ = protocol_

    def updateEndpoint(self, endpoint_):
        self.endpoint_ = endpoint_

    def updateUser(self, user_):
        self.user_ = user_

    def updatePassword(self, password_):
        self.password_ = password_

    def updateAMType(self, am_type_):
        self.am_type_ = am_type_

    def updateAMVersion(self, am_version_):
        self.am_version_ = am_version_

    def checkAllNone(self):
        if self.type_ is not None:
            raise AttributeError("Type argument is NOT allowed!")

        if self.addr_ is not None:
            raise AttributeError("Address argument is NOT allowed!")

        if self.port_ is not None:
            raise AttributeError("Port argument is NOT allowed!")

    def getTable(self):
        db_name = "felix_ro"
        if self.mro_enabled:
            db_name = "felix_mro"
        return getattr(getattr(pymongo.MongoClient(), db_name),
                       "domain.routing")
Example #31
File: tn.py Project: dana-i2cat/felix
 def __init__(self):
     super(TNUtils, self).__init__()
     w_ = ConfParser("ro.conf").get("tnrm")
     self.__workaround_split_allocation =\
         ast.literal_eval(w_.get("split_workaround"))
Example #32
class GENIv3DelegateBase(object):
    """
    Please find more information about the concept of Handlers and Delegates via the wiki (e.g. https://github.com/motine/AMsoil/wiki/GENI).
    
    The GENIv3 handler (see above) assumes that this class uses RSpec version 3 when interacting with the client.
    For creating a new RSpec type/extension, please see the wiki via https://github.com/motine/AMsoil/wiki/RSpec.
    
    General parameters for all following methods:
    {client_cert} The client's certificate. See [flaskrpcs]XMLRPCDispatcher.requestCertificate(). Also see http://groups.geni.net/geni/wiki/GeniApiCertificates
    {credentials} A list of credentials in the format specified at http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#credentials

    Dates are converted to UTC and then made timezone-unaware (see http://docs.python.org/2/library/datetime.html#datetime.datetime.astimezone).
    """

    ALLOCATION_STATE_UNALLOCATED = 'geni_unallocated'
    """The sliver does not exist. (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#SliverAllocationStates)"""
    ALLOCATION_STATE_ALLOCATED = 'geni_allocated'
    """The sliver is offered/promissed, but it does not consume actual resources. This state shall time out at some point in time."""
    ALLOCATION_STATE_PROVISIONED = 'geni_provisioned'
    """The sliver is/has been instanciated. Operational states apply here."""

    OPERATIONAL_STATE_PENDING_ALLOCATION = 'geni_pending_allocation'
    """Required for aggregates to support. A transient state."""
    OPERATIONAL_STATE_NOTREADY = 'geni_notready'
    """Optional. A stable state."""
    OPERATIONAL_STATE_CONFIGURING = 'geni_configuring'
    """Optional. A transient state."""
    OPERATIONAL_STATE_STOPPING = 'geni_stopping'
    """Optional. A transient state."""
    OPERATIONAL_STATE_READY = 'geni_ready'
    """Optional. A stable state."""
    OPERATIONAL_STATE_READY_BUSY = 'geni_ready_busy'
    """Optional. A transient state."""
    OPERATIONAL_STATE_FAILED = 'geni_failed'
    """Optional. A stable state."""

    OPERATIONAL_ACTION_START = 'geni_start'
    """Sliver shall become geni_ready. The AM developer may define more states (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#SliverOperationalActions)"""
    OPERATIONAL_ACTION_RESTART = 'geni_restart'
    """Sliver shall become geni_ready again."""
    OPERATIONAL_ACTION_STOP = 'geni_stop'
    """Sliver shall become geni_notready."""
    def __init__(self):
        super(GENIv3DelegateBase, self).__init__()
        self.config = ConfParser("geniv3.conf")
        self.general_section = self.config.get("general")
        self.certificates_section = self.config.get("certificates")

    def get_request_extensions_list(self):
        """Not to overwrite by AM developer. Should retrun a list of request extensions (XSD schemas) to be sent back by GetVersion."""
        return [
            uri
            for prefix, uri in self.get_request_extensions_mapping().items()
        ]

    def get_request_extensions_mapping(self):
        """Overwrite by AM developer. Should return a dict of namespace names and request extensions (XSD schema's URLs as string).
        Format: {xml_namespace_prefix : namespace_uri, ...}
        """
        return {}

    def get_manifest_extensions_mapping(self):
        """Overwrite by AM developer. Should return a dict of namespace names and manifest extensions (XSD schema's URLs as string).
        Format: {xml_namespace_prefix : namespace_uri, ...}
        """
        return {}

    def get_ad_extensions_list(self):
        """Not to overwrite by AM developer. Should retrun a list of request extensions (XSD schemas) to be sent back by GetVersion."""
        return [
            uri for prefix, uri in self.get_ad_extensions_mapping().items()
        ]

    def get_ad_extensions_mapping(self):
        """Overwrite by AM developer. Should return a dict of namespace names and advertisement extensions (XSD schema URLs as string) to be sent back by GetVersion.
        Format: {xml_namespace_prefix : namespace_uri, ...}
        """
        return {}

    def is_single_allocation(self):
        """Overwrite by AM developer. Shall return a True or False. When True (not default), and performing one of (Describe, Allocate, Renew, Provision, Delete), such an AM requires you to include either the slice urn or the urn of all the slivers in the same state.
        see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#OperationsonIndividualSlivers"""
        return False

    def get_allocation_mode(self):
        """Overwrite by AM developer. Shall return a either 'geni_single', 'geni_disjoint', 'geni_many'.
        It defines whether this AM allows adding slivers to slices at an AM (i.e. calling Allocate multiple times, without first deleting the allocated slivers).
        For description of the options see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#OperationsonIndividualSlivers"""
        return 'geni_single'

    def list_resources(self, client_cert, credentials, geni_available):
        """Overwrite by AM developer. Shall return an RSpec version 3 (advertisement) or raise an GENIv3...Error.
        If {geni_available} is set, only return available resources.
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#ListResources"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def describe(self, urns, client_cert, credentials):
        """Overwrite by AM developer. Shall return an RSpec version 3 (manifest) or raise an GENIv3...Error.
        {urns} contains a list of slice identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).

        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Describe"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def allocate(self,
                 slice_urn,
                 client_cert,
                 credentials,
                 rspec,
                 end_time=None):
        """Overwrite by AM developer. 
        Shall return the two following values or raise an GENIv3...Error.
        - a RSpec version 3 (manifest) of newly allocated slivers 
        - a list of slivers of the format:
            [{'geni_sliver_urn' : String,
              'exceptionspires'    : Python-Date,
              'geni_allocation_status' : one of the ALLOCATION_STATE_xxx}, 
             ...]
        Please return like so: "return respecs, slivers"
        {slice_urn} contains a slice identifier (e.g. 'urn:publicid:IDN+ofelia:eict:gcf+slice+myslice').
        {end_time} Optional. A python datetime object which determines the desired expiry date of this allocation (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#geni_end_time).
        >>> This is the first part of what CreateSliver used to do in previous versions of the AM API. The second part is now done by Provision, and the final part is done by PerformOperationalAction.
        
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Allocate"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def renew(self, urns, client_cert, credentials, expiration_time,
              best_effort):
        """Overwrite by AM developer. 
        Shall return a list of slivers of the following format or raise an GENIv3...Error:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]
        
        {urns} contains a list of slice identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {expiration_time} is a python datetime object
        {best_effort} determines if the method shall fail in case that not all of the urns can be renewed (best_effort=False).

        If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Renew"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def provision(self, urns, client_cert, credentials, best_effort, end_time,
                  geni_users):
        """Overwrite by AM developer. 
        Shall return the two following values or raise an GENIv3...Error.
        - a RSpec version 3 (manifest) of slivers 
        - a list of slivers of the format:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]
        Please return like so: "return respecs, slivers"

        {urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {best_effort} determines if the method shall fail in case that not all of the urns can be provisioned (best_effort=False)
        {end_time} Optional. A python datetime object which determines the desired expiry date of this provision (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#geni_end_time).
        {geni_users} is a list of the format: [ { 'urn' : ..., 'keys' : [sshkey, ...]}, ...]
        
        If the transactional behaviour of {best_effort}=False cannot be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Provision"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def status(self, urns, client_cert, credentials):
        """Overwrite by AM developer. 
        Shall return the two following values or raise an GENIv3...Error.
        - a slice urn
        - a list of slivers of the format:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]
        Please return like so: "return slice_urn, slivers"

        {urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Status"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def perform_operational_action(self, urns, client_cert, credentials,
                                   action, best_effort):
        """Overwrite by AM developer. 
        Shall return a list of slivers of the following format or raise an GENIv3...Error:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]

        {urns} contains a list of slice or sliver identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {action} an arbitrary string, but the following should be possible: "geni_start", "geni_stop", "geni_restart"
        {best_effort} determines if the method shall fail in case that not all of the urns can be changed (best_effort=False)

        If the transactional behaviour of {best_effort}=False cannot be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#PerformOperationalAction"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def delete(self, urns, client_cert, credentials, best_effort):
        """Overwrite by AM developer. 
        Shall return a list of slivers of the following format or raise an GENIv3...Error:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]

        {urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {best_effort} determines if the method shall fail in case that not all of the urns can be deleted (best_effort=False)
        
        If the transactional behaviour of {best_effort}=False cannot be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Delete"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def shutdown(self, slice_urn, client_cert, credentials):
        """Overwrite by AM developer. 
        Shall return True or False or raise an GENIv3...Error.

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Shutdown"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def auth(self, client_cert, credentials, slice_urn=None, privileges=()):
        """
        This method authenticates and authorizes.
        It returns the client's urn, uuid, email (extracted from the {client_cert}). Example call: "urn, uuid, email = self.auth(...)"
        Be aware that the email is not required in the certificate, hence it might be empty.
        If the validation fails, a GENIv3ForbiddenError is thrown.
        
        The credentials are checked so the user has all the required privileges (success if any credential fits all privileges).
        The client certificate is not checked: this is usually done via the webserver configuration.
        This method only treats certificates of type 'geni_sfa'.
        
        Here is a list of possible privileges (format: right_in_credential: [privilege1, privilege2, ...]):
            "authority" : ["register", "remove", "update", "resolve", "list", "getcredential", "*"],
            "refresh"   : ["remove", "update"],
            "resolve"   : ["resolve", "list", "getcredential"],
            "sa"        : ["getticket", "redeemslice", "redeemticket", "createslice", "createsliver", "deleteslice", "deletesliver", "updateslice",
                           "getsliceresources", "getticket", "loanresources", "stopslice", "startslice", "renewsliver",
                            "deleteslice", "deletesliver", "resetslice", "listslices", "listnodes", "getpolicy", "sliverstatus"],
            "embed"     : ["getticket", "redeemslice", "redeemticket", "createslice", "createsliver", "renewsliver", "deleteslice", 
                           "deletesliver", "updateslice", "sliverstatus", "getsliceresources", "shutdown"],
            "bind"      : ["getticket", "loanresources", "redeemticket"],
            "control"   : ["updateslice", "createslice", "createsliver", "renewsliver", "sliverstatus", "stopslice", "startslice", 
                           "deleteslice", "deletesliver", "resetslice", "getsliceresources", "getgids"],
            "info"      : ["listslices", "listnodes", "getpolicy"],
            "ma"        : ["setbootstate", "getbootstate", "reboot", "getgids", "gettrustedcerts"],
            "operator"  : ["gettrustedcerts", "getgids"],                   
            "*"         : ["createsliver", "deletesliver", "sliverstatus", "renewsliver", "shutdown"]
            
        When using the gcf clearinghouse implementation the credentials will have the rights:
        - user: "******", "resolve", "info" (which resolves to the privileges: "remove", "update", "resolve", "list", "getcredential", "listslices", "listnodes", "getpolicy").
        - slice: "refresh", "embed", "bind", "control", "info" (well, do the resolving yourself...)        
        """
        # check variables
        if not isinstance(privileges, tuple):
            raise TypeError("Privileges need to be a tuple.")
        # collect credentials (only GENI certs, version ignored)
        geni_credentials = []
        for c in credentials:
            if c['geni_type'] == 'geni_sfa':
                geni_credentials.append(c['geni_value'])

        # Get the cert_root from the configuration settings
        root_path = os.path.normpath(
            os.path.join(os.path.dirname(__file__), "../../../../"))
        cert_root = os.path.join(root_path,
                                 self.certificates_section.get("cert_root"))
        logger.debug("client_certificate trusted, present at: %s" %
                     str(cert_root))
        logger.debug("client_certificate:\n%s" % str(client_cert))

        if client_cert is None:
            raise exceptions.GENIv3ForbiddenError(
                "Could not determine the client SSL certificate")
        # test the credential
        try:
            cred_verifier = extensions.geni.util.cred_util.CredentialVerifier(
                cert_root)
            cred_verifier.verify_from_strings(client_cert, geni_credentials,
                                              slice_urn, privileges)
        except Exception as e:
            raise exceptions.GENIv3ForbiddenError(str(e))

        user_gid = extensions.sfa.trust.gid.GID(string=client_cert)
        user_urn = user_gid.get_urn()
        user_uuid = user_gid.get_uuid()
        user_email = user_gid.get_email()
        return user_urn, user_uuid, user_email  # TODO document return
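
    # A hedged sketch of how a concrete delegate method typically guards
    # itself with auth(); the privilege name is one entry from the table in
    # the docstring above, and the body is illustrative only:
    #
    #   def status(self, urns, client_cert, credentials):
    #       urn, uuid, email = self.auth(client_cert, credentials,
    #                                    privileges=("sliverstatus",))
    #       ...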

    def urn_type(self, urn):
        """Returns the type of the urn (e.g. slice, sliver).
        For the possible types see: http://groups.geni.net/geni/wiki/GeniApiIdentifiers#ExamplesandUsage"""
        return urn.split('+')[2].strip()

    def lxml_ad_root(self):
        """Returns a xml root node with the namespace extensions specified by self.get_ad_extensions_mapping."""
        return etree.Element('rspec',
                             self.get_ad_extensions_mapping(),
                             type='advertisement')

    def lxml_manifest_root(self):
        """Returns a xml root node with the namespace extensions specified by self.get_manifest_extensions_mapping."""
        return etree.Element('rspec',
                             self.get_manifest_extensions_mapping(),
                             type='manifest')

    def lxml_to_string(self, rspec):
        """Converts a lxml root node to string (for returning to the client)."""
        return etree.tostring(rspec, pretty_print=True)

    def lxml_ad_element_maker(self, prefix):
        """Returns a lxml.builder.ElementMaker configured for avertisements and the namespace given by {prefix}."""
        ext = self.get_ad_extensions_mapping()
        return ElementMaker(namespace=ext[prefix], nsmap=ext)

    def lxml_manifest_element_maker(self, prefix):
        """Returns a lxml.builder.ElementMaker configured for manifests and the namespace given by {prefix}."""
        ext = self.get_manifest_extensions_mapping()
        return ElementMaker(namespace=ext[prefix], nsmap=ext)

    def lxml_parse_rspec(self, rspec_string):
        """Returns a the root element of the given {rspec_string} as lxml.Element.
        If the config key is set, the rspec is validated with the schemas found at the URLs specified in schemaLocation of the the given RSpec."""
        # parse
        rspec_root = etree.fromstring(rspec_string)
        # validate RSpec against specified schemaLocations
        should_validate = ast.literal_eval(
            self.general_section.get("rspec_validation"))

        if should_validate:
            schema_locations = rspec_root.get(
                "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation")
            if schema_locations:
                schema_location_list = schema_locations.split(" ")
                schema_location_list = map(
                    lambda x: x.strip(),
                    schema_location_list)  # strip whitespaces
                for sl in schema_location_list:
                    try:
                        xmlschema_contents = urllib2.urlopen(
                            sl)  # try to download the schema
                        xmlschema_doc = etree.parse(xmlschema_contents)
                        xmlschema = etree.XMLSchema(xmlschema_doc)
                        xmlschema.validate(rspec_root)
                    except Exception as e:
                        logger.warning(
                            "RSpec validation failed failed (%s: %s)" % (
                                sl,
                                str(e),
                            ))
            else:
                logger.warning("RSpec does not specify any schema locations")
        return rspec_root

    def lxml_elm_has_request_prefix(self, lxml_elm, ns_name):
        """Determines if the tag of {lxml_elm} starts with the request namespace given by {ns_name}."""
        return str(lxml_elm.tag).startswith(
            "{%s}" % (self.get_request_extensions_mapping()[ns_name], ))

    def lxml_elm_equals_request_tag(self, lxml_elm, ns_name, tagname):
        """Determines if the given tag by {ns_name} and {tagname} equals lxml_tag. The namespace URI is looked up via get_request_extensions_mapping()['ns_name']"""
        return ("{%s}%s" % (self.get_request_extensions_mapping()[ns_name],
                            tagname)) == str(lxml_elm.tag)
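
# A minimal, self-contained sketch (separate from the delegate above) of the
# lxml pattern its lxml_* helpers wrap: build a namespaced RSpec root with
# lxml's ElementMaker and serialize it. The "ex" prefix and its URI are
# illustrative assumptions, not real GENI extension namespaces.
def _lxml_helpers_demo():
    from lxml import etree
    from lxml.builder import ElementMaker

    nsmap = {"ex": "http://example.org/rspec/ext"}  # assumed demo namespace
    maker = ElementMaker(namespace=nsmap["ex"], nsmap=nsmap)
    rspec = etree.Element("rspec", nsmap=nsmap, type="advertisement")
    # Namespaced child element: serializes as <ex:node .../>
    rspec.append(maker.node(id="urn:publicid:IDN+example+node+n1"))
    return etree.tostring(rspec, pretty_print=True)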
Example #33
0
class BaseMonitoring(object):
    """
    Base class for both physical and slice topology sent to the MS.
    """
    def __init__(self):
        super(BaseMonitoring, self).__init__()
        self.peers = [p for p in db_sync_manager.get_configured_peers()]
        self.peers_info = [p for p in db_sync_manager.get_info_peers()]
        self.domain_urn = ""
        self.domain_last_update = ""
        self.topology_list = etree.Element("topology_list")
        self.topology = etree.SubElement(self.topology_list, "topology")
        ## Operation mode
        self.config = ConfParser("ro.conf")
        master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))
        self.software_stacks = {
            "ofelia": "ocf",
            "felix": "fms",
        }
        self.urn_type_resources = {
            "crm": "vtam",
            "sdnrm": "openflow",
            "serm": "se",
            "tnrm": "tn",
        }
        self.urn_type_resources_variations = {
            "crm": ["vtam"],
            "sdnrm": ["openflow", "ofam"],
            "serm": ["se"],
            "tnrm": ["tn", "NSI"],
        }
        self.management_type_resources = {
            "crm": "server",
            "sdnrm": "switch",
            "serm": "se",
        }
        self.monitoring_expected_nodes = {
            "crm": "",
            "sdnrm": "switch",
            "serm": "se",
            "tnrm": "tn",
        }
        self.peers_by_domain = {}
        self.__group_peers_by_domain()
        ## Configurations
        # CRM config
        self.config_crm = core.config.JSONParser("crm.json")
        self.crm_mgmt_info = self.config_crm.get("device_management_info")
        # SDNRM config
        self.config_sdnrm = core.config.JSONParser("sdnrm.json")
        self.sdnrm_mgmt_info = self.config_sdnrm.get("device_management_info")
        # SERM config
        self.config_serm = core.config.JSONParser("serm.json")
        self.serm_mgmt_info = self.config_serm.get("device_management_info")

    def _get_timestamp(self):
        # Return integer part as a string
        return str(int(time.time()))

    def __group_peers_by_domain(self):
        for peer in self.peers_info:
            filter_params = {
                "_id": peer.get("_id"),
            }
            domain_peer = db_sync_manager.get_domain_info(filter_params)
            peer_domain_urn = domain_peer.get("domain_urn")
            authority = URNUtils.get_felix_authority_from_urn(peer_domain_urn)
            # If authority (domain name) does not exist yet, create
            if not self.peers_by_domain.get(authority):
                self.peers_by_domain[authority] = []
            # Extend list of peers with new one
            self.peers_by_domain[authority].append(peer_domain_urn)

            # Stores last_update_time for the physical topology
            # on a given domain
            try:
                last_update_time = self._get_timestamp()
                db_sync_manager.store_physical_info(peer_domain_urn,
                                                    last_update_time)
            except Exception as e:
                logger.error(
                    "Error storing last_update_time for phy-topology.")
                logger.error("Exception: %s" % e)

        # XXX: (M)MS assumes one TNRM per island
        # With this, (M)MS receives at least one TNRM per island
        type_resource_peer_tnrm = self.urn_type_resources_variations.get(
            "tnrm")
        for peer in self.peers:
            filter_params = {
                "_ref_peer": peer.get("_id"),
            }
            domain_peer = db_sync_manager.get_domain_info(filter_params)
            peer_domain_urn = domain_peer.get("domain_urn")
            peer_is_tnrm = any(
                [rt in peer_domain_urn for rt in type_resource_peer_tnrm])
            # MRO: TNRM added at this level. Use information from peer to add it as a TNRM per domain
            if peer_is_tnrm:
                # Add the TNRM peer to each authority that does not have it yet
                for authority in self.peers_by_domain:
                    if peer_domain_urn not in self.peers_by_domain.get(
                            authority):
                        self.peers_by_domain[authority].append(peer_domain_urn)

    def _send(self, xml_data, peer=None):
        try:
            if not peer:
                peer = self.monitoring_system

            url = "%s:%s/%s" % (peer.get("address"), peer.get("port"),
                                peer.get("endpoint"))
            # Post-process URL to remove N slashes in a row
            url = re.sub("/{2,}", "/", url)
            # And add protocol (with 2 slashes)
            url = "%s://%s" % (peer.get("protocol"), url)
            logger.info("url=%s" % (url, ))
            logger.info("data=%s" % (xml_data, ))

            # NOTE This may require certificates or BasicAuth at some point
            reply = requests.post(url=url,
                                  headers={
                                      "Content-Type": "application/xml"
                                  },
                                  data=xml_data).text
            logger.info("Reply=%s" % (reply, ))

        except Exception as e:
            logger.error("Could not connect to %s. Exception: %s" % (
                url,
                e,
            ))
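
    # For illustration (values are assumptions): with address="10.0.0.1",
    # port="8080" and endpoint="//monitoring/", the code above first builds
    # "10.0.0.1:8080///monitoring/", collapses the repeated slashes to
    # "10.0.0.1:8080/monitoring/" and finally prepends the protocol, giving
    # "http://10.0.0.1:8080/monitoring/".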

    ##########################
    # Set and return topology
    ##########################

    def set_topology_tree(self, topology_list_tree):
        # Replace the whole list of topologies
        try:
            # If serialization works, 'topology_list_tree' is a proper XML tree
            etree.tostring(topology_list_tree)
            self.topology_list = topology_list_tree
        except Exception:
            # Ignore input that is not a valid lxml tree
            pass

    def get_topology_tree(self):
        # Return whole list of topologies
        return self.topology_list

    def get_topology(self):
        if self.get_topology_tree() is not None:
            return etree.tostring(self.get_topology_tree())

    def get_topology_pretty(self):
        # XML not in proper format: need to convert to lxml, then pretty-print
        return etree.tostring(etree.fromstring(self.get_topology()),
                              pretty_print=True)

    def flush_topology(self):
        # Create a new sub-element of the list of topologies
        self.topology = etree.SubElement(self.topology_list, "topology")

    def remove_empty_nodes(self):
        """Remove empty 'topology' nodes from the list/tree of topologies"""
        for node in self.topology_list.findall(".//topology"):
            if len(node.getchildren()) == 0:
                node.getparent().remove(node)

    ##########
    # Helpers
    ##########

    def _update_topology_name(self):
        filter_string = "[@last_update_time='%s']" % str(
            self.domain_last_update)
        filtered_nodes = self.get_topology_tree().xpath("//topology%s" %
                                                        filter_string)
        # There should only be one matching topology node
        filtered_nodes[0].set("name", str(self.domain_urn))

    def _remove_empty_topologies(self,
                                 filter_name=None,
                                 filter_update_time=None):
        filter_string = ""
        if filter_name:
            filter_string += "[@name='%s']" % str(filter_name)
        if filter_update_time:
            filter_string += "[@last_update_time='%s']" % str(
                filter_update_time)
        topology_tree = etree.fromstring(self.get_topology())
        filtered_nodes = topology_tree.xpath("//topology%s" % filter_string)
        for filtered_node in filtered_nodes:
            # Remove any node whose length is 0 (=> no content inside)
            if len(filtered_node) == 0:
                filtered_node.getparent().remove(filtered_node)

    def _get_management_data_devices(self, parent_node):
        configuration_data = {}
        if self.urn_type_resources.get("crm") in parent_node.get("id"):
            configuration_data = self.crm_mgmt_info
        elif self.urn_type_resources.get("sdnrm") in parent_node.get("id"):
            configuration_data = self.sdnrm_mgmt_info
        elif self.urn_type_resources.get("serm") in parent_node.get("id"):
            configuration_data = self.serm_mgmt_info
        return configuration_data

    def _add_management_section(self, parent_node):
        management = etree.Element("management")
        #        resource_management_info = db_sync_manager.get_management_info(
        #                                        component_id=parent_node.get("component_id"))
        management.set("type", "snmp")
        address = etree.SubElement(management, "address")
        address.text = ""
        port = etree.SubElement(management, "port")
        port.text = ""
        auth_id = etree.SubElement(management, "auth_id")
        auth_id.text = "public"
        auth_pass = etree.SubElement(management, "auth_pass")
        auth_pass.text = ""
        try:
            configuration_data = self._get_management_data_devices(parent_node)
            if configuration_data is not None:
                # Possible mismatch between the URN of the *RM configured in the *rm.json config file
                # and the URN directly received from the RM. Issue a descriptive warning here
                if parent_node.get("id") not in configuration_data:
                    raise Exception(
                        "Mismatch between configuration device URN and received URN for URN='%s'. Please check the settings of your RMs under RO's configuration folder"
                        % parent_node.get("id"))
                address.text = configuration_data.get(
                    parent_node.get("id")).get("ip")
                port.text = configuration_data.get(
                    parent_node.get("id")).get("port")
                auth_id.text = configuration_data.get(
                    parent_node.get("id")).get("snmp").get("id")
                auth_pass.text = configuration_data.get(
                    parent_node.get("id")).get("snmp").get("password")
        except Exception as e:
            logger.warning(
                "Physical monitoring. Cannot add management data on '%s'. Details: %s"
                % (etree.tostring(parent_node), e))
        return management
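
    # The section built above serializes roughly as follows (the address,
    # port and auth values are illustrative; they are read from the *rm.json
    # files and stay empty when no configuration matches):
    #
    #   <management type="snmp">
    #     <address>10.0.0.2</address>
    #     <port>161</port>
    #     <auth_id>public</auth_id>
    #     <auth_pass>...</auth_pass>
    #   </management>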

    def _add_generic_node(self, parent_tag, node, node_type):
        n = etree.SubElement(parent_tag, "node")
        n.set("id", node.get("component_id"))
        n.set("type", node_type)
        # Generate management section for node
        # This is only active for normal RO operation (MRO should
        # probably not send this information to MMS)
        # XXX In case it should, MRO would store the full topology_list
        # from each RO and send them to MMS
        if not self.mro_enabled:
            if node_type in self.management_type_resources.values():
                try:
                    management = self._add_management_section(n)
                    n.append(management)
                except Exception as e:
                    logger.warning(
                        "Physical topology - Cannot add management section. Details: %s"
                        % e)
        return n

    #################
    # C-RM resources
    #################

    def _add_com_info(self, parent_node=None):
        # If no parent node passed, COM info is attached to root topology node
        if parent_node is None:
            parent_node = self.topology
        # 1. Nodes
        nodes = [
            n for n in db_sync_manager.get_com_nodes_by_domain(self.domain_urn)
        ]
        for node in nodes:
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "node", "server", node.get("component_id")):
                break
            logger.debug("com-node=%s" % (node, ))
            n = self._add_generic_node(parent_node, node, "server")
            # Output interfaces (avoid "*") per server
            node_ifaces = filter(lambda x: x != "*", node.get("interfaces"))
            logger.debug("com-node-interfaces=%s" % node_ifaces)
            for iface in node_ifaces:
                interface = etree.SubElement(n, "interface")
                # NOTE this is extending the "interface" URN
                interface.set("id", "%s+interface+%s" % (n.get("id"), iface))
        # 2. Links
        links = [
            l for l in db_sync_manager.get_com_links_by_domain(self.domain_urn)
        ]
        logger.debug("com-links=%s" % (links, ))
        for link in links:
            self._add_com_link(link, parent_node)

    def _add_com_link(self, link, parent_node=None):
        if parent_node is None:
            parent_node = self.topology
        logger.debug("com-links=%s" % (link, ))
        #l = etree.SubElement(parent_node, "link")
        l = etree.Element("link")
        # NOTE that this cannot be empty
        l.set("type", MonitoringUtilsLinks._translate_link_type(link))
        link_id = ""
        links = link.get("links")
        link_exists = False
        for link_i in links:
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "link", "lan",
                [link_i.get("source_id"),
                 link_i.get("dest_id")]):
                link_exists = True
                break
            # Modify link on-the-fly to add the DPID port as needed
            link_i = MonitoringUtilsLinks._set_dpid_port_from_link(
                link.get("component_id"), link_i)
            # Source
            iface_source = etree.SubElement(l, "interface_ref")
            iface_source.set("client_id", link_i.get("source_id"))
            # Destination
            iface_dest = etree.SubElement(l, "interface_ref")
            iface_dest.set("client_id", link_i.get("dest_id"))
            # - Prepare link ID for CRM-SDNRM link
            link_id = MonitoringUtilsLinks.get_id_for_link_crm_sdnrm(link_i)
        # Finally, add it as subelement
        if not link_exists:
            l.set("id", link_id)
            parent_node.append(l)

    ###################
    # SDN-RM resources
    ###################

    def _add_sdn_info(self, parent_node=None):
        # If no parent node passed, SDN info is attached to root topology node
        if parent_node is None:
            parent_node = self.topology
        # 1. Nodes
        datapaths = [
            d for d in db_sync_manager.get_sdn_datapaths_by_domain(
                self.domain_urn)
        ]
        for dp in datapaths:
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "node", "switch", dp.get("component_id")):
                break
            logger.debug("sdn-datapath=%s" % (dp, ))
            switch = self._add_generic_node(parent_node, dp, "switch")
            for p in dp.get("ports"):
                iface = etree.SubElement(switch, "interface")
                iface.set("id", "%s_%s" % (switch.get("id"), p.get("num")))
                port = etree.SubElement(iface, "port")
                port.set("num", p.get("num"))
        # 2. Links
        (sdn_links, fed_links) = db_sync_manager.get_sdn_links_by_domain(
            self.domain_urn)
        for sdn_link in sdn_links:
            logger.debug("sdn-link=%s" % (sdn_link, ))
            self._add_sdn_link(sdn_link, parent_node)
        for sdn_fed_link in fed_links:
            logger.debug("fed-sdn-link=%s" % (sdn_fed_link, ))

    def _add_sdn_link(self, link, parent_node=None):
        if parent_node is None:
            parent_node = self.topology
        auth1 = link.get("dpids")[0].get("component_manager_id").replace(
            "authority+cm", "datapath")
        dpid1 = auth1 + "+" + link.get("dpids")[0].get(
            "dpid") + "_" + link.get("ports")[0].get("port_num")
        auth2 = link.get("dpids")[1].get("component_manager_id").replace(
            "authority+cm", "datapath")
        dpid2 = auth2 + "+" + link.get("dpids")[1].get(
            "dpid") + "_" + link.get("ports")[1].get("port_num")
        if MonitoringUtils.check_existing_tag_in_topology(
                parent_node, "link", "lan", [dpid1, dpid2]):
            return
        l = etree.SubElement(parent_node, "link")
        # NOTE that this cannot be empty
        l.set("type", MonitoringUtilsLinks._translate_link_type(link))
        link_id = ""
        ports = link.get("ports")
        dpids = link.get("dpids")
        try:
            for dpid_port in zip(dpids, ports):
                iface = etree.SubElement(l, "interface_ref")
                dpid = dpid_port[0]["component_id"]
                port = dpid_port[1]["port_num"]
                iface.set("client_id", "%s_%s" % (dpid, port))
        except Exception as e:
            logger.warning(
                "Physical topology - Cannot add SDN interface %s. Details: %s"
                % (link.get("component_id", "(unknown)"), e))
        try:
            # - Prepare link ID for SDNRM-SDNRM link
            link_id = MonitoringUtilsLinks.get_id_for_link_sdnrm_sdnrm(
                zip(dpids, ports))
            l.set("id", link_id)
        except Exception as e:
            logger.warning(
                "Physical topology - Cannot add SDN link ID %s. Details: %s" %
                (link.get("component_id", "(unknown)"), e))

    ##################
    # TN-RM resources
    ##################

    def _add_tn_info(self, parent_node=None):
        # If no parent node passed, TN info is attached to root topology node
        if parent_node is None:
            parent_node = self.topology
        # 1. Nodes
        # XXX: (M)MS assumes one TNRM per island
        # This retrieves TN information from AIST instance
        # (providing MRO has TNRM as peer, or its information in its DB)
        felix_tn_urn = "urn:publicid:IDN+fms:aist:tnrm"
        nodes = [
            d for d in db_sync_manager.get_tn_nodes_by_domain(felix_tn_urn)
        ]
        #        nodes = [ d for d in db_sync_manager.get_tn_nodes_by_domain(self.domain_urn) ]
        #        nodes = [ d for d in db_sync_manager.get_tn_nodes() ]
        for node in nodes:
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "node", "tn", node.get("component_id"),
                    self.domain_urn):
                break
            logger.debug("tn-node=%s" % (node, ))
            n = self._add_generic_node(parent_node, node, "tn")
            # Output interfaces per node
            logger.debug("tn-node-interfaces=%s" % node.get("interfaces"))
            for iface in node.get("interfaces"):
                interface = etree.SubElement(n, "interface")
                interface.set("id", iface.get("component_id"))
#        # 2. Links
#        links = [ l for l in db_sync_manager.get_tn_links_by_domain(self.domain_urn) ]

    ##################
    # SE-RM resources
    ##################

    def _add_se_info(self, parent_node=None):
        # If no parent node passed, SE info is attached to root topology node
        if parent_node is None:
            parent_node = self.topology
        # 1. Nodes
        nodes = [
            d for d in db_sync_manager.get_se_nodes_by_domain(self.domain_urn)
        ]
        for node in nodes:
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "node", "se", node.get("component_id")):
                break
            logger.debug("se-node=%s" % (node, ))
            n = self._add_generic_node(parent_node, node, "se")
            # Output interfaces per node
            logger.debug("se-node-interfaces=%s" % node.get("interfaces"))
            for iface in node.get("interfaces"):
                interface = etree.SubElement(n, "interface")
                # Parse the component_id to get URN of SE and the port per interface
                component_id = iface.get("component_id")
                try:
                    interface.set("id", component_id)
                    #                    interface.attrib["id"] = component_id.split("_")[0]
                    port = etree.SubElement(interface, "port")
                    port.set("num", component_id.split("_")[1])
                except Exception as e:
                    logger.warning(
                        "Physical topology - Cannot add SE interface %s. Details: %s"
                        % (component_id, e))
        # 2. Links
        links = [
            l for l in db_sync_manager.get_se_links_by_domain(self.domain_urn)
        ]
        logger.debug("se-links=%s" % (links, ))
        for link in links:
            dpid1 = link.get("interface_ref")[0].get("component_id")
            dpid2 = link.get("interface_ref")[1].get("component_id")
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "link", "lan", [dpid1, dpid2]):
                break
            logger.debug("se-links=%s" % (link, ))
            self._add_se_link(link)

    def _add_se_link(self, link):
        # Special case: links to be filtered in POST {(M)RO -> (M)MS}
        SE_FILTERED_LINKS = ["*"]
        interfaces_cid = [
            i.get("component_id") for i in link.get("interface_ref")
        ]
        interface_cid_in_filter = [
            f for f in SE_FILTERED_LINKS if f in interfaces_cid
        ]
        # Avoid reinserting existing link tags in the topology
        if MonitoringUtils.check_existing_tag_in_topology(
                self.topology, "link", "lan", link.get("component_id")):
            return
        if not interface_cid_in_filter:
            l = etree.SubElement(self.topology, "link")
            # NOTE that this cannot be empty
            l.set("type", MonitoringUtilsLinks._translate_link_type(link))
            link_id = ""
            links = link.get("interface_ref")
            for link in links:
                # SE link
                iface = etree.SubElement(l, "interface_ref")
                iface.set("client_id", link.get("component_id"))
            # - Prepare link ID for SERM-SDNRM link
            link_id = MonitoringUtilsLinks.get_id_for_link_serm_sdnrm_tnrm(
                links)
            l.set("id", link_id)
Example #34
0
class DBManager(object):
    """
    This object is a wrapper for MongoClient to communicate to the RO
    (local) mongo-db
    """

    def __init__(self):
        self.__mutex = threading.Lock()
        self.config = ConfParser("ro.conf")
        master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))

    def __get_table(self, table_name):
        db_name = "felix_ro"
        if self.mro_enabled:
            db_name = "felix_mro"
        return getattr(getattr(pymongo.MongoClient(), db_name), table_name)
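
    # Illustration: __get_table("domain.routing") resolves to the
    # "domain.routing" collection of MongoClient().felix_ro (or felix_mro
    # when mro_enabled is True); pymongo creates databases and collections
    # lazily, so no setup call is needed.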

    def __check_return_rows(self, rows, custom_table, filter_params={}):
        if rows is None:
            raise Exception("%s -- could not find entry with params=%s!" %
                            (custom_table.full_name, filter_params))
        return rows

    def __get_one(self, custom_table, filter_params={}):
        table = custom_table
        try:
            self.__mutex.acquire()
            row = table.find_one(filter_params)
            return self.__check_return_rows(row, custom_table, filter_params)
        finally:
            self.__mutex.release()

    def __get_all(self, custom_table, filter_params={}):
        table = custom_table
        try:
            self.__mutex.acquire()
            rows = table.find(filter_params)
            return self.__check_return_rows(rows, custom_table, filter_params)
        finally:
            self.__mutex.release()

    def __set_update(self, custom_table, object_id, fields_dict={}):
        table = custom_table
        try:
            self.__mutex.acquire()
            table.update({"_id": object_id},
                         {"$set": fields_dict})
        finally:
            self.__mutex.release()

    # (felix_ro) domain.routing
    def get_configured_peers(self):
        """
        Collection that stores peers (either RMs or ROs)
        """
        table = self.__get_table("domain.routing")
        return self.__get_all(table)

    def get_configured_peer(self, filter_params):
        table = self.__get_table("domain.routing")
        return self.__get_one(table, filter_params)

    def get_configured_peer_by_routing_key(self, key):
        filter_params = {"_id": key}
        return self.get_configured_peer(filter_params)

    def get_configured_peer_by_urn(self, domain_urn):
        filter_params = {"domain_urn": domain_urn}
        peer_domain_info = self.get_domain_info(filter_params)
        peer_domain_ref = peer_domain_info.get("_ref_peer")
        return self.get_configured_peer_by_routing_key(peer_domain_ref)

    def get_configured_peer_by_uri(self, rm_url, extra_filter={}):
        # Parse URL in order to filtering entry in domain.routing collection
        rm_url = urlparse.urlparse(rm_url)
        rm_endpoint = rm_url.path
        if rm_endpoint:
            # Remove internal slashes
            if rm_endpoint[0] == "/":
                rm_endpoint = rm_endpoint[1:]
            if rm_endpoint[-1] == "/":
                rm_endpoint = rm_endpoint[:-1]
        # Prepare "rm_endpoint" for "like" query (as regexp)
        # rm_endpoint = rm_endpoint.replace("/","\/")
        rm_endpoint_re = self.__get_regexp_for_query(rm_endpoint)
        url_port = rm_url.netloc
        # NOTE: The following is a corner-case for non-standard RMs
        # using basic-auth (user:password)
        if "@" in rm_url.netloc:
            auth, url_port = rm_url.netloc.split("@")
        rm_address, rm_port = url_port.split(":")
        rm_protocol = rm_url.scheme
        # Copy to avoid mutating the (shared) mutable default argument
        filter_params = dict(extra_filter)
        filter_params.update(
            {"protocol": rm_protocol, "address": rm_address,
             # Port is stored as an integer, but the query looks for a string..
             # "endpoint": {"$regex": rm_endpoint_re}, })
             "port": rm_port, "endpoint": {"$regex": rm_endpoint_re}, })
        peer = self.get_configured_peer(filter_params)
        return peer
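
    # For illustration (URL is an assumption): an rm_url such as
    #   "https://user:pass@10.0.0.1:8440/xmlrpc/geni/3/"
    # is decomposed into protocol="https", address="10.0.0.1", port="8440",
    # and the endpoint regexp is built from "xmlrpc/geni/3" (outer slashes
    # stripped).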

    # TODO Consider making this more flexible by passing
    # a dictionary with any parameter
    def update_peer_info(self, object_id, am_type, am_version):
        table = self.__get_table("domain.routing")
        fields_dict = {"am_type": am_type,
                       "am_version": am_version}
        self.__set_update(table, object_id, fields_dict)

    def set_peer_urn(self, object_id, domain_urn):
        table = self.__get_table("domain.routing")
        fields_dict = {"domain_urn": domain_urn}
        self.__set_update(table, object_id, fields_dict)

    # (felix_ro) domain.info
    def get_info_peers(self, filter_params={}):
        table = self.__get_table("domain.info")
        return self.__get_all(table, filter_params)

    def get_domains_info(self, filter_params):
        table = self.__get_table("domain.info")
        return self.__get_all(table, filter_params)

    def get_domain_info(self, filter_params):
        table = self.__get_table("domain.info")
        return self.__get_one(table, filter_params)

    def store_domain_info(self, rm_url, domain_urn):
        table = self.__get_table("domain.info")
        # Search for entry in domain.routing first
        peer = self.get_configured_peer_by_uri(rm_url)
        # Search in domain.routing for any RM matching the filtering parameters
        try:
            self.__mutex.acquire()
#            row = table.find_one({"_ref_peer": peer.get("_id")})
            # There may be: 1 domain URN per RO, N domain URNs per MRO
            row = table.find_one({"_ref_peer": peer.get("_id"),
                                  "domain_urn": domain_urn})
            if not row:
                entry = {"domain_urn": domain_urn,
                         "_ref_peer": peer.get("_id")}
                return table.insert(entry)
        finally:
            self.__mutex.release()

    def get_domain_urn(self, filter_params):
        return self.get_domain_info(filter_params).get("domain_urn")

    def get_domain_urn_from_uri(self, url, filter_params={}):
        peer = self.get_configured_peer_by_uri(url, filter_params)
        peer_id = peer.get("_id")
        return self.get_domain_info({"_ref_peer": peer_id}).get("domain_urn")

    def __get_domain_authority(self, domain_urn):
        # Domain URN = Domain authority
        # Keep the part before "+authority+",
        # then build a RE matching URNs that start this way
        domain_urn = domain_urn.split("+authority+")[0]
        domain_authority = self.__get_regexp_for_query(domain_urn)
        return domain_authority
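
    # __get_regexp_for_query() is defined elsewhere in this class; a
    # plausible sketch (an assumption, not the verbatim implementation) is a
    # MongoDB "starts with" matcher:
    #
    #   def __get_regexp_for_query(self, value):
    #       return re.compile("^%s" % re.escape(value))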

    # (felix_ro) topology.physical
    def store_physical_info(self, domain_urn, last_update):
        """
        Keep track of last update time for physical topology within a domain.
        """
        table = self.__get_table("topology.physical")
        # Get ID of domain related to physical topology
        domain = self.get_domain_info({"domain_urn": domain_urn})
        try:
            self.__mutex.acquire()
            row = table.find_one({"_ref_domain": domain.get("_id")})
            if row is None:
                entry = {"last_update": last_update,
                         "_ref_domain": domain.get("_id"), }
                return table.insert(entry)
        except Exception:
            e = "Cannot store physical information for domain with URN: %s" %\
                str(domain_urn)
            raise Exception(e)
        finally:
            self.__mutex.release()

    def get_physical_info_from_domain(self, domain_id):
        """
        Retrieve physical topology information through domain.info's ID.
        """
        table = self.__get_table("topology.physical")
        filter_params = {"_ref_domain": domain_id}
        return self.__get_one(table, filter_params)

    # (felix_ro) topology.slice
    def store_slice_info(self, urn, slivers):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            row = table.find_one({"slice_urn": urn})
            if row is None:
                entry = {"slice_urn": urn,
                         "slivers": slivers}
                return table.insert(entry)
            # update the slivers list (if needed)
            self.__update_list("slice-table", table, row, "slivers", slivers)
        finally:
            self.__mutex.release()

    def get_slice_routing_keys(self, urns):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            ret = {}
            for u in urns:
                for r in table.find():
                    for s in r.get("slivers"):
                        if (r.get("slice_urn") == u) or\
                           (s.get("geni_sliver_urn") == u):
                            if (s.get("routing_key") in ret) and\
                               (u not in ret[s.get("routing_key")]):
                                ret[s.get("routing_key")].append(u)
                            else:
                                ret[s.get("routing_key")] = [u]
            return ret
        finally:
            self.__mutex.release()

    def get_slice_urn(self, urns):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            for u in urns:
                for r in table.find():
                    if r.get("slice_urn") == u:
                        return u
                    for s in r.get("slivers"):
                        if s.get("geni_sliver_urn") == u:
                            return r.get("slice_urn")
            return None
        finally:
            self.__mutex.release()

    def delete_slice_urns(self, urns):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            # Keep track of the slice containing any of the slivers passed
            # This is done in order to remove the slice once it is empty
            containing_slice = {}
            containing_slice_urn = None
            if len(urns) > 0:
                containing_slice = table.find_one(
                    {"slivers.geni_sliver_urn": urns[0]}) or {}
                if "slice_urn" in containing_slice:
                    containing_slice_urn = containing_slice.get(
                        "slice_urn", None)
            for u in urns:
                for r in table.find():
                    # Passed URN belongs to a slice
                    if r.get("slice_urn") == u:
                        table.remove({"slice_urn": u})
                        logger.info("Removed slice entry: %s" % (u,))
                    else:
                        # Passed URN belongs to a sliver (inside slice)
                        for s in r.get("slivers"):
                            if s.get("geni_sliver_urn") == u:
                                # remove the element from the list
                                self.__delete_sliver_urn(
                                    table, r.get("slice_urn"),
                                    r.get("slivers"), s)
                                logger.info(
                                    "Removed sliver from slice entry: %s" %
                                    (u,))
                                break
            # Remove slice when it does not contain slivers anymore
            containing_slice = table.find_one(
                {"slice_urn": containing_slice_urn}) or {}
            if (len(containing_slice.get("slivers", [])) == 0) and\
               (containing_slice_urn is not None):
                table.remove({"slice_urn": containing_slice_urn})
        finally:
            self.__mutex.release()

    def store_slice_monitoring_info(self, slice_urn, monitoring_info):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            row = table.find_one({"slice_urn": slice_urn})
            if row is not None:
                table.update({"slice_urn": slice_urn},
                             {"slice_urn": slice_urn,
                              "slivers": row.get("slivers"),
                              "slice_monitoring": monitoring_info})
        finally:
            self.__mutex.release()

    def get_slice_monitoring_info(self):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            return [i.get('slice_monitoring') for i in table.find()
                    if i.get('slice_monitoring') is not None]

        finally:
            self.__mutex.release()

    def get_slice_monitoring_from_urn(self, slice_urn):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            row = table.find_one({"slice_urn": slice_urn})
            return row.get("slice_monitoring") if row is not None else None

        finally:
            self.__mutex.release()

    # (felix_ro) resource.com.node
    # TODO Ensure correctness
    def store_com_nodes(self, routingKey, values):
        table = self.__get_table("resource.com.node")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_id": v.get("component_manager_id"), })
                #   "sliver_type_name": v.get("sliver_type_name")})
                if not row:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                    continue
                # update the object (if needed)
                self.__update_list("comnodes-table", table, row, "interfaces",
                                   v.get("interfaces"))
            return ids
        finally:
            self.__mutex.release()

    def get_com_nodes(self, filter_params={}):
        table = self.__get_table("resource.com.node")
        return self.__get_all(table, filter_params)

    def get_com_nodes_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {"component_id": domain_authority, }
        nodes = self.get_com_nodes(filter_params)
        return nodes

    def get_com_node_routing_key(self, cid):
        table = self.__get_table("resource.com.node")
        filter_params = {"component_id": cid}
        return self.__get_one(table, filter_params).get("routing_key")

    # (felix_ro) resource.com.link
    def store_com_links(self, routingKey, values):
        table = self.__get_table("resource.com.link")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id")})
                if not row:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                else:
                    logger.debug(
                        "(link-table) %s already stored!" % (row.get("_id")))
            return ids
        finally:
            self.__mutex.release()

    def get_com_links(self, filter_params={}):
        table = self.__get_table("resource.com.link")
        return self.__get_all(table, filter_params)

    def get_com_links_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {"component_id": domain_authority, }
        links = self.get_com_links(filter_params)
        return links

    def get_com_link_by_sdnkey(self, sdn_link_key):
        table = self.__get_table("resource.com.link")
        for l in self.__get_all(table, {}):
            # if l.get("component_id").endswith(sdn_link_key):
            if sdn_link_key in l.get("component_id"):
                return l
        return None

    def get_com_link_by_src_dst_links(self, link_id):
        table = self.__get_table("resource.com.link")
        com_links = []
        for l in self.__get_all(table, {}):
            links = l.get("links")
            for l_link in links:
                if link_id in l_link.get("source_id") or \
                        link_id in l_link.get("dest_id"):
                    com_links.append(l)
        return com_links

    def get_com_interface_by_nodekey(self, com_node_key):
        table = self.__get_table("resource.com.link")
        ret = []
        try:
            self.__mutex.acquire()
            for row in table.find():
                if row.get("component_id").startswith(com_node_key):
                    for l in row.get("links"):
                        ret.append(l.get("source_id"))
            return ret
        finally:
            self.__mutex.release()

    # (felix_ro) resource.of.node
    def store_sdn_datapaths(self, routingKey, values):
        table = self.__get_table("resource.of.node")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_id": v.get("component_manager_id"),
                    "dpid": v.get("dpid")})
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                    continue
                # update the object (if needed)
                self.__update_list("datapth-table", table, row, "ports",
                                   v.get("ports"))
            return ids
        finally:
            self.__mutex.release()

    def get_sdn_datapaths(self, filter_params={}):
        table = self.__get_table("resource.of.node")
        return self.__get_all(table, filter_params)

    def get_sdn_datapaths_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {"component_id": domain_authority, }
        nodes = self.get_sdn_datapaths(filter_params)
        return nodes

    def get_sdn_datapath_routing_key(self, dpid):
        table = self.__get_table("resource.of.node")
        filter_params =\
            {"component_id": dpid.get("component_id"),
             "component_manager_id": dpid.get("component_manager_id"),
             "dpid": dpid.get("dpid")}
        return self.__get_one(table, filter_params).get("routing_key")

    def get_sdn_datapath_by_componentid(self, cid):
        table = self.__get_table("resource.of.node")
        filter_params = {"component_id": cid}
        return self.__get_one(table, filter_params)

    # (felix_ro) resource.of.link
    def store_sdn_links(self, routingKey, values):
        table = self.__get_table("resource.of.link")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id")})
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                else:
                    logger.debug(
                        "(link-table) %s already stored!" % (row.get("_id")))
            return ids
        finally:
            self.__mutex.release()

    def get_sdn_links(self, filter_params={}):
        table = self.__get_table("resource.of.link")
        (of, fed) = ([], [])
        try:
            self.__mutex.acquire()
            for row in table.find(filter_params):
                if row.get("dpids") is not None:
                    of.append(row)
                elif row.get("interface_ref_id") is not None:
                    fed.append(row)
            return (of, fed)
        finally:
            self.__mutex.release()

    def get_sdn_links_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {"component_id": domain_authority, }
        links = self.get_sdn_links(filter_params)
        return links

    def get_sdn_link_by_sdnkey(self, sdn_link_key):
        table = self.__get_table("resource.of.link")
        for l in self.__get_all(table, {}):
            if l.get('component_id').endswith(sdn_link_key):
                return l
        return None

    # (felix_ro) resource.se.node
    def store_se_nodes(self, routingKey, values):
        table = self.__get_table("resource.se.node")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_id": v.get("component_manager_id"),
                    "sliver_type_name": v.get("sliver_type_name")})
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                    continue
                # update the object (if needed)
                self.__update_list("senodes-table", table, row, "interfaces",
                                   v.get("interfaces"))
            return ids
        finally:
            self.__mutex.release()

    def get_se_node_info(self, routingKey, node_id):
        table = self.__get_table("resource.se.node")
        filter_params = {'routing_key': routingKey, 'component_id': node_id}
        row = self.__get_one(table, filter_params)
        # __get_one raises when nothing matches, so 'row' is valid here
        return {'component_id': row.get('component_id'),
                'component_manager_id': row.get('component_manager_id')}

    def get_se_nodes(self, filter_params={}):
        table = self.__get_table("resource.se.node")
        return self.__get_all(table, filter_params)

    def get_se_nodes_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {"component_id": domain_authority, }
        nodes = self.get_se_nodes(filter_params)
        return nodes

    def get_se_node_routing_key(self, cid):
        table = self.__get_table("resource.se.node")
        filter_params = {"component_id": cid}
        return self.__get_one(table, filter_params).get("routing_key")

    # (felix_ro) resource.se.link
    def store_se_links(self, routingKey, values):
        table = self.__get_table("resource.se.link")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_name": v.get("component_manager_name"),
                    "link_type": v.get("link_type")})
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                else:
                    logger.debug(
                        "(selink-table) %s already stored!" % (row.get("_id")))
            return ids
        finally:
            self.__mutex.release()

    def get_se_link_routing_key(self, values):
        table = self.__get_table("resource.se.link")
        try:
            key, ifs = None, []
            self.__mutex.acquire()
            for r in table.find():
                ifrefs = r.get('interface_ref')
                for i in ifrefs:
                    if i.get('component_id') in values:
                        key = r.get('routing_key')
                        # Keep the opposite endpoint (avoid mutating the
                        # list while iterating over it)
                        ifs.append([x for x in ifrefs if x is not i][0])
                        break
            return key, ifs
        finally:
            self.__mutex.release()

    def get_se_link_info(self, node_port_id):
        table = self.__get_table("resource.se.link")
        try:
            self.__mutex.acquire()
            for r in table.find():
                for ifref in r.get("interface_ref"):
                    if ifref.get("component_id") == node_port_id:
                        return r.get('link_type'), \
                            r.get('component_manager_name')
            return None, None
        finally:
            self.__mutex.release()

    def get_se_links(self, filter_params={}):
        table = self.__get_table("resource.se.link")
        return self.__get_all(table, filter_params)

    def get_se_links_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {"component_id": domain_authority, }
        links = self.get_se_links(filter_params)
        return links

    def get_direct_se_link_routing_key(self, cid, ifrefs):
        try:
            self.__mutex.acquire()
            table = self.__get_table("resource.se.link")
            row = table.find_one({"component_id": cid})
            if row is not None:
                return row.get("routing_key")

            table = self.__get_table("resource.se.node")
            for row in table.find():
                for i in row.get("interfaces"):
                    if i.get("component_id") in ifrefs:
                        return row.get("routing_key")

            raise Exception("Link (%s,%s) owner is not found into RO-DB!" %
                            (cid, ifrefs))
        finally:
            self.__mutex.release()

    def get_interface_ref_by_sekey(self, se_if):
        table = self.__get_table("resource.se.link")
        try:
            self.__mutex.acquire()
            ret = []
            for row in table.find():
                for i in row.get("interface_ref"):
                    if i.get("component_id") == se_if:
                        ret = row.get("interface_ref")
                        break

            for i in ret:
                if i.get("component_id") != se_if:
                    return i.get("component_id")

            return None
        finally:
            self.__mutex.release()

    # (felix_ro) resource.tn.node
    def store_tn_nodes(self, routingKey, values):
        table = self.__get_table("resource.tn.node")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_id": v.get("component_manager_id"),
                    "sliver_type_name": v.get("sliver_type_name")})
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                    continue
                # update the object (if needed)
                self.__update_list("tnnodes-table", table, row, "interfaces",
                                   v.get("interfaces"))
            return ids
        finally:
            self.__mutex.release()

    def update_tn_nodes(self, routingKey, values):
        table = self.__get_table("resource.tn.node")
        try:
            ids = []
            self.__mutex.acquire()
            ret = table.remove({"routing_key": routingKey})
            logger.debug("Remove tn-nodes: %s" % (ret))
            for v in values:
                v["routing_key"] = routingKey
                ids.append(table.insert(v))
            return ids
        finally:
            self.__mutex.release()

    def get_tn_nodes(self, filter_params={}):
        table = self.__get_table("resource.tn.node")
        return self.__get_all(table, filter_params)

    def get_tn_nodes_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {"component_id": domain_authority, }
        nodes = self.get_tn_nodes(filter_params)
        return nodes

    def get_tn_node_routing_key(self, cid):
        table = self.__get_table("resource.tn.node")
        filter_params = {"component_id": cid}
        return [r.get("routing_key")
                for r in self.__get_all(table, filter_params)]

    def get_tn_node_interface(self, filter_params={}):
        found_interfaces = []
        tn_nodes = [n for n in self.get_tn_nodes()]
        if not tn_nodes:
            return found_interfaces
        interfaces = tn_nodes[0].get("interfaces", [])
        for interface in interfaces:
            # Find interfaces that meet all the filter parameters
            # (default to "" so a missing field never matches)
            if all(map(lambda x: x[1] in interface.get(x[0], ""),
                       [(k, v) for k, v in filter_params.iteritems()])):
                found_interfaces.append(interface)
        return found_interfaces

    # (felix_ro) resource.tn.link
    def store_tn_links(self, routingKey, values):
        table = self.__get_table("resource.tn.link")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_name": v.get("component_manager_name")})
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                else:
                    logger.debug(
                        "(tnlink-table) %s already stored!" % (row.get("_id")))
            return ids
        finally:
            self.__mutex.release()

    def get_tn_links(self, filter_params={}):
        table = self.__get_table("resource.tn.link")
        return self.__get_all(table, filter_params)

    def get_tn_links_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {"component_id": domain_authority, }
        links = self.get_tn_links(filter_params)
        return links

    def get_tn_link_routing_key(self, cid, cmid, ifrefs):
        try:
            self.__mutex.acquire()
            table = self.__get_table("resource.tn.link")
            ret = [r.get("routing_key")
                   for r in table.find({"component_id": cid})]
            if ret:
                return ret

            table = self.__get_table("resource.tn.node")
            ret = [r.get("routing_key")
                   for r in table.find({"component_manager_id": cmid})]
            if ret:
                return ret

            ret = set()
            for row in table.find():
                for i in row.get("interfaces"):
                    if i.get("component_id") in ifrefs:
                        ret.add(row.get("routing_key"))

            if ret:
                return ret
            raise Exception("Link (%s,%s,%s) owner is not found into RO-DB!" %
                            (cid, cmid, ifrefs))
        finally:
            self.__mutex.release()

    # (felix_ro) topology.slice.sdn
    def store_slice_sdn(self, slice_urn, groups_info, matches_info):
        table = self.__get_table("topology.slice.sdn")
        try:
            self.__mutex.acquire()
            row = table.find_one({"slice_urn": slice_urn})
            if not row:
                value = {"slice_urn": slice_urn,
                         "groups": groups_info,
                         "matches": matches_info}
                return table.insert(value)

            logger.warning("The slice '%s' already exists!" % slice_urn)
            return None
        finally:
            self.__mutex.release()

    def get_slice_sdn(self, slice_urn):
        table = self.__get_table("topology.slice.sdn")
        gs, ms = [], []
        try:
            self.__mutex.acquire()
            for r in table.find({"slice_urn": slice_urn}):
                if r.get("groups"):
                    gs.extend(r.get("groups"))
                if r.get("matches"):
                    ms.extend(r.get("matches"))

            return gs, ms
        finally:
            self.__mutex.release()

    def delete_slice_sdn(self, slice_urn):
        table = self.__get_table("topology.slice.sdn")
        try:
            self.__mutex.acquire()
            table.remove({"slice_urn": slice_urn})

        finally:
            self.__mutex.release()

    # utilities
    def __update_list(self, tname, table, entry, key, values):
        logger.debug("(%s) %s already stored!" % (tname, entry.get("_id"),))
        modif = {key: []}
        for v in values:
            if v not in entry.get(key):
                modif.get(key).append(v)

        if len(modif.get(key)) > 0:
            modif.get(key).extend(entry.get(key))
            logger.debug("(%s) extend slivers info %s" % (tname, modif,))
            table.update({"_id": entry.get("_id")},
                         {"$set": modif})
        else:
            logger.debug("(%s) not needed to update %s" % (tname, key,))

    def __delete_sliver_urn(self, table, slice_urn, slivers, elem):
        logger.debug("(slice-table) %s remove %s from %s" %
                     (slice_urn, elem, slivers))
        slivers.remove(elem)
        modif = {"slivers": slivers}
        table.update({"slice_urn": slice_urn},
                     {"$set": modif})

    def __get_regexp_for_query(self, search_term):
        terms_to_replace = ["+"]
        for term in terms_to_replace:
            search_term = search_term.replace(term, "\%s" % term)
        return re.compile(search_term)
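
# --- Illustrative usage sketch (not part of the class above) ---
# PyMongo accepts a compiled regex as a query value, which is how the
# *_by_domain helpers above match every resource whose component_id
# contains the domain authority. The collection and URN handling below
# are assumptions made up for the example.
import re

import pymongo


def find_resources_for_authority(domain_urn):
    # Strip the "+authority+..." suffix, as __get_domain_authority does
    prefix = domain_urn.split("+authority+")[0]
    # re.escape covers "+" and any other regexp metacharacters in URNs
    authority_re = re.compile(re.escape(prefix))
    table = pymongo.MongoClient().felix_ro["resource.of.node"]
    return [row for row in table.find({"component_id": authority_re})]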
Example #35
0
File: base.py Project: HalasNet/felix
    def __init__(self):
        super(GENIv3DelegateBase, self).__init__()
        self.config = ConfParser("geniv3.conf")
        self.general_section = self.config.get("general")
        self.certificates_section = self.config.get("certificates")
예제 #36
0
class BaseMonitoring(object):
    """
    Base class for both physical and slice topology sent to the MS.
    """

    def __init__(self):
        super(BaseMonitoring, self).__init__()
        self.peers = [p for p in db_sync_manager.get_configured_peers()]
        self.peers_info = [p for p in db_sync_manager.get_info_peers()]
        self.domain_urn = ""
        self.domain_last_update = ""
        self.topology_list = etree.Element("topology_list")
        self.topology = etree.SubElement(self.topology_list, "topology")
        # Operation mode
        self.config = ConfParser("ro.conf")
        master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))
        self.software_stacks = {
            "ofelia": "ocf",
            "felix": "fms",
        }
        self.urn_type_resources = {
            "crm": "vtam",
            "sdnrm": "openflow",
            "serm": "se",
            "tnrm": "tn",
        }
        self.urn_type_resources_variations = {
            "crm": ["vtam"],
            "sdnrm": ["openflow", "ofam"],
            "serm": ["se"],
            "tnrm": ["tn", "NSI"],
        }
        self.management_type_resources = {
            "crm": "server",
            "sdnrm": "switch",
            "serm": "se",
        }
        self.monitoring_expected_nodes = {
            "crm": "",
            "sdnrm": "switch",
            "serm": "se",
            "tnrm": "tn",
        }
        self.peers_by_domain = {}
        self.__group_peers_by_domain()
        # CRM config
        self.config_crm = core.config.JSONParser("crm.json")
        self.crm_mgmt_info = self.config_crm.get("device_management_info")
        # SDNRM config
        self.config_sdnrm = core.config.JSONParser("sdnrm.json")
        self.sdnrm_mgmt_info = self.config_sdnrm.get("device_management_info")
        # SERM config
        self.config_serm = core.config.JSONParser("serm.json")
        self.serm_mgmt_info = self.config_serm.get("device_management_info")

    def _get_timestamp(self):
        # Return integer part as a string
        return str(int(time.time()))

    def __group_peers_by_domain(self):
        for peer in self.peers_info:
            filter_params = {"_id": peer.get("_id")}
            domain_peer = db_sync_manager.get_domain_info(filter_params)
            peer_domain_urn = domain_peer.get("domain_urn")
            authority = URNUtils.get_felix_authority_from_urn(peer_domain_urn)
            # If authority (domain name) does not exist yet, create
            if not self.peers_by_domain.get(authority):
                self.peers_by_domain[authority] = []
            # Extend list of peers with new one
            self.peers_by_domain[authority].append(peer_domain_urn)

            # Stores last_update_time for the physical topology
            # on a given domain
            try:
                last_update_time = self._get_timestamp()
                db_sync_manager.store_physical_info(peer_domain_urn,
                                                    last_update_time)
            except Exception as e:
                logger.error("Error storing last_update_time in phy-topology.")
                logger.error("Exception: %s" % e)

        # XXX: (M)MS assumes one TNRM per island
        # With this, (M)MS receives at least one TNRM per island
        type_resource_peer_tnrm = self.urn_type_resources_variations.get("tnrm")
        for peer in self.peers:
            filter_params = {"_ref_peer": peer.get("_id")}
            domain_peer = db_sync_manager.get_domain_info(filter_params)
            peer_domain_urn = domain_peer.get("domain_urn")
            peer_is_tnrm = any([rt in peer_domain_urn
                                for rt in type_resource_peer_tnrm])
            # MRO: TNRM added at this level. Use information from peer to add
            # it as a TNRM per domain
            if peer_is_tnrm:
                # Add the TNRM peer to each authority that does not have it yet
                for authority in self.peers_by_domain:
                    if peer_domain_urn not in \
                            self.peers_by_domain.get(authority):
                        self.peers_by_domain[authority].append(
                            peer_domain_urn)

    def _send(self, xml_data, peer=None):
        # Initialised outside the try so the except block can log it
        url = None
        try:
            if not peer:
                peer = self.monitoring_system

            url = "%s:%s/%s" % (peer.get("address"),
                                     peer.get("port"), peer.get("endpoint"))
            # Post-process URL to remove N slashes in a row
            url = re.sub("/{2,}", "/", url)
            # And add protocol (with 2 slashes)
            url = "%s://%s" % (peer.get("protocol"), url)
            logger.info("url=%s" % (url,))
            logger.info("data=%s" % (xml_data,))

            # NOTE This may require certificates or BasicAuth at some point
            reply = requests.post(url=url,
                                  headers={"Content-Type": "application/xml"},
                                  data=xml_data).text
            logger.info("Reply=%s" % (reply,))

        except Exception as e:
            logger.error("Could not connect to %s. Exception: %s" % (url, e,))

    ##########################
    # Set and return topology
    ##########################

    def set_topology_tree(self, topology_list_tree):
        # Replace the whole list of topologies, but only with a valid tree
        try:
            # If the following line works, 'topology_list_tree' is a
            # proper XML tree
            etree.tostring(topology_list_tree)
            self.topology_list = topology_list_tree
        except Exception:
            # Not serialisable as XML: keep the current topology list
            pass

    def get_topology_tree(self):
        # Return whole list of topologies
        return self.topology_list

    def get_topology(self):
        if self.get_topology_tree() is not None:
            return etree.tostring(self.get_topology_tree())

    def get_topology_pretty(self):
        # The serialised XML is unformatted: parse it back into lxml and
        # pretty-print the result
        return etree.tostring(etree.fromstring(self.get_topology()),
                              pretty_print=True)

    def flush_topology(self):
        # Create a new sub-element of the list of topologies
        self.topology = etree.SubElement(self.topology_list, "topology")

    def remove_empty_nodes(self):
        """Remove empty 'topology' nodes from the list/tree of topologies"""
        for node in self.topology_list.findall(".//topology"):
            if len(node.getchildren()) == 0:
                node.getparent().remove(node)

    ##########
    # Helpers
    ##########

    def _update_topology_name(self):
        filter_string = "[@last_update_time='%s']" % str(self.domain_last_update)
        filtered_nodes = self.get_topology_tree().xpath("//topology%s" % filter_string)
        # There should only be one matching topology
        filtered_nodes[0].set("name", str(self.domain_urn))

    def _remove_empty_topologies(self, filter_name=None,
                                 filter_update_time=None):
        filter_string = ""
        if filter_name:
            filter_string += "[@name='%s']" % str(filter_name)
        if filter_update_time:
            filter_string += "[@last_update_time='%s']" %\
                str(filter_update_time)
        # NOTE: this operates on a freshly parsed copy of the
        # serialised topology
        topology_tree = etree.fromstring(self.get_topology())
        filtered_nodes = topology_tree.xpath("//topology%s" % filter_string)
        for filtered_node in filtered_nodes:
            # Remove any node without content inside
            if len(filtered_node) == 0:
                filtered_node.getparent().remove(filtered_node)

    def _get_management_data_devices(self, parent_node):
        configuration_data = {}
        if self.urn_type_resources.get("crm") in parent_node.get("id"):
            configuration_data = self.crm_mgmt_info
        elif self.urn_type_resources.get("sdnrm") in parent_node.get("id"):
            configuration_data = self.sdnrm_mgmt_info
        elif self.urn_type_resources.get("serm") in parent_node.get("id"):
            configuration_data = self.serm_mgmt_info
        return configuration_data

    def _add_management_section(self, parent_node):
        management = etree.Element("management")
#        resource_management_info = db_sync_manager.get_management_info(
#                                        component_id=parent_node.get("component_id"))
        management.set("type", "snmp")
        address = etree.SubElement(management, "address")
        address.text = ""
        port = etree.SubElement(management, "port")
        port.text = ""
        auth_id = etree.SubElement(management, "auth_id")
        auth_id.text = "public"
        auth_pass = etree.SubElement(management, "auth_pass")
        auth_pass.text = ""
        try:
            conf_data = self._get_management_data_devices(parent_node)
            if conf_data is not None:
                # Possible mismatch between URN of *RM that is configured
                # in the *rm.json config file and the URN directly received
                # from the RM. Issue a comprehensive warning here
                if not parent_node.get("id") in conf_data.keys():
                    raise Exception("Mismatch between configuration device \
                    URN and received URN for URN='%s'. Please check the \
                    settings of your RMs under RO's configuration folder"
                                    % parent_node.get("id"))
                address.text = conf_data.get(parent_node.get("id")).get("ip")
                port.text = conf_data.get(parent_node.get("id")).get("port")
                auth_id.text = conf_data.get(parent_node.get("id")).get("snmp").get("id")
                auth_pass.text = conf_data.get(parent_node.get("id")).get("snmp").get("password")
        except Exception as e:
            logger.warning("Physical monitoring. Cannot add management "
                           "data on '%s'. Details: %s" %
                           (etree.tostring(parent_node), e))
        return management

    def _add_generic_node(self, parent_tag, node, node_type):
        n = etree.SubElement(parent_tag, "node")
        n.set("id", node.get("component_id"))
        n.set("type", node_type)
        # Generate management section for node
        # This is only active for normal RO operation (MRO should
        # probably not send this information to MMS)
        # XXX In case it should, MRO would store the full topology_list
        # from each RO and send them to MMS
        if not self.mro_enabled:
            if node_type in self.management_type_resources.values():
                try:
                    management = self._add_management_section(n)
                    n.append(management)
                except Exception as e:
                    logger.warning("Physical topology - Cannot add "
                                   "management section. Details: %s" % e)
        return n

    #################
    # C-RM resources
    #################

    def _add_com_info(self, parent_node=None):
        # If no parent node passed, COM info is attached to root topology node
        if parent_node is None:
            parent_node = self.topology
        # 1. Nodes
        nodes = [n for n in db_sync_manager.get_com_nodes_by_domain(self.domain_urn)]
        for node in nodes:
            # Skip nodes already present in the topology
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "node", "server", node.get("component_id")):
                continue
            logger.debug("com-node=%s" % (node,))
            n = self._add_generic_node(parent_node, node, "server")
            # Output interfaces (avoid "*") per server
            node_ifaces = filter(lambda x: x != "*", node.get("interfaces"))
            logger.debug("com-node-interfaces=%s" % node_ifaces)
            for iface in node_ifaces:
                interface = etree.SubElement(n, "interface")
                # NOTE this is extending the "interface" URN
                interface.set("id", "%s+interface+%s" % (n.get("id"), iface))
        # 2. Links
        links = [l for l in db_sync_manager.get_com_links_by_domain(self.domain_urn)]
        logger.debug("com-links=%s" % (links,))
        for link in links:
            self._add_com_link(link, parent_node)

    def _add_com_link(self, link, parent_node=None):
        if parent_node is None:
            parent_node = self.topology
        logger.debug("com-links=%s" % (link,))
        # l = etree.SubElement(parent_node, "link")
        l = etree.Element("link")
        # NOTE that this cannot be empty
        l.set("type", MonitoringUtilsLinks._translate_link_type(link))
        link_id = ""
        links = link.get("links")
        link_exists = False
        for link_i in links:
            if MonitoringUtils.check_existing_tag_in_topology(parent_node, "link", "lan", [link_i.get("source_id"), link_i.get("dest_id")]):
                link_exists = True
                break
            # Modify link on-the-fly to add the DPID port as needed
            link_i = MonitoringUtilsLinks._set_dpid_port_from_link(link.get("component_id"), link_i)
            # Source
            iface_source = etree.SubElement(l, "interface_ref")
            iface_source.set("client_id", link_i.get("source_id"))
            # Destination
            iface_dest = etree.SubElement(l, "interface_ref")
            iface_dest.set("client_id", link_i.get("dest_id"))
            # - Prepare link ID for CRM-SDNRM link
            link_id = MonitoringUtilsLinks.get_id_for_link_crm_sdnrm(link_i)
        # Finally, add it as subelement
        if not link_exists:
            l.set("id", link_id)
            parent_node.append(l)

    ###################
    # SDN-RM resources
    ###################

    def _add_sdn_info(self, parent_node=None):
        # If no parent node passed, SDN info is attached to root topology node
        if parent_node is None:
            parent_node = self.topology
        # 1. Nodes
        datapaths = [d for d in db_sync_manager.get_sdn_datapaths_by_domain(self.domain_urn)]
        for dp in datapaths:
            # Skip datapaths already present in the topology
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "node", "switch", dp.get("component_id")):
                continue
            logger.debug("sdn-datapath=%s" % (dp,))
            switch = self._add_generic_node(parent_node, dp, "switch")
            for p in dp.get("ports"):
                iface = etree.SubElement(switch, "interface")
                iface.set("id", "%s_%s" % (switch.get("id"), p.get("num")))
                port = etree.SubElement(iface, "port")
                port.set("num", p.get("num"))
        # 2. Links
        (sdn_links, fed_links) = db_sync_manager.\
            get_sdn_links_by_domain(self.domain_urn)
        for sdn_link in sdn_links:
            logger.debug("sdn-link=%s" % (sdn_link,))
            self._add_sdn_link(sdn_link, parent_node)
        for sdn_fed_link in fed_links:
            logger.debug("fed-sdn-link=%s" % (sdn_fed_link,))

    def _add_sdn_link(self, link, parent_node=None):
        if parent_node is None:
            parent_node = self.topology
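        # Each endpoint id takes the form
        # "<authority>+datapath+<dpid>_<port>", e.g.
        # "urn:publicid:IDN+fms:example+datapath+00:10:00:00:00:00:00:01_3"
        # (example values illustrative)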
        auth1 = link.get("dpids")[0].get("component_manager_id").replace("authority+cm", "datapath")
        dpid1 = auth1 + "+" + link.get("dpids")[0].get("dpid") + "_" + link.get("ports")[0].get("port_num")
        auth2 = link.get("dpids")[1].get("component_manager_id").replace("authority+cm", "datapath")
        dpid2 = auth2 + "+" + link.get("dpids")[1].get("dpid") + "_" + link.get("ports")[1].get("port_num")
        if MonitoringUtils.check_existing_tag_in_topology(
                parent_node, "link", "lan", [dpid1, dpid2]):
            return
        l = etree.SubElement(parent_node, "link")
        # NOTE that this cannot be empty
        l.set("type", MonitoringUtilsLinks._translate_link_type(link))
        link_id = ""
        ports = link.get("ports")
        dpids = link.get("dpids")
        try:
            for dpid_port in zip(dpids, ports):
                iface = etree.SubElement(l, "interface_ref")
                dpid = dpid_port[0]["component_id"]
                port = dpid_port[1]["port_num"]
                iface.set("client_id", "%s_%s" % (dpid, port))
        except Exception as e:
            logger.warning("Physical topology - Cannot add SDN interface "
                           "%s. Details: %s" %
                           (link.get("component_id", "(unknown)"), e))
        try:
            # - Prepare link ID for SDNRM-SDNRM link
            link_id = MonitoringUtilsLinks.get_id_for_link_sdnrm_sdnrm(zip(dpids, ports))
            l.set("id", link_id)
        except Exception as e:
            logger.warning("Physical topology - Cannot add SDN link ID "
                           "%s. Details: %s" %
                           (link.get("component_id", "(unknown)"), e))

    ##################
    # TN-RM resources
    ##################

    def _add_tn_info(self, parent_node=None):
        # If no parent node passed, TN info is attached to root topology node
        if parent_node is None:
            parent_node = self.topology
        # 1. Nodes
        # XXX: (M)MS assumes one TNRM per island
        # This retrieves TN information from AIST instance
        # (providing MRO has TNRM as peer, or its information in its DB)
        felix_tn_urn = "urn:publicid:IDN+fms:aist:tnrm"
        nodes = [d for d in db_sync_manager.get_tn_nodes_by_domain(felix_tn_urn)]
#        nodes = [d for d in db_sync_manager.get_tn_nodes_by_domain(self.domain_urn)]
#        nodes = [d for d in db_sync_manager.get_tn_nodes()]
        for node in nodes:
            # Skip nodes already present in the topology
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "node", "tn", node.get("component_id"),
                    self.domain_urn):
                continue
            logger.debug("tn-node=%s" % (node,))
            n = self._add_generic_node(parent_node, node, "tn")
            # Output interfaces per node
            logger.debug("tn-node-interfaces=%s" % node.get("interfaces"))
            for iface in node.get("interfaces"):
                interface = etree.SubElement(n, "interface")
                interface.set("id", iface.get("component_id"))
#        # 2. Links
#        links = [l for l in db_sync_manager.get_tn_links_by_domain(self.domain_urn)]

    ##################
    # SE-RM resources
    ##################

    def _add_se_info(self, parent_node=None):
        # If no parent node passed, SE info is attached to root topology node
        if parent_node is None:
            parent_node = self.topology
        # 1. Nodes
        nodes = [d for d in db_sync_manager.get_se_nodes_by_domain(self.domain_urn)]
        for node in nodes:
            # Skip nodes already present in the topology
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "node", "se", node.get("component_id")):
                continue
            logger.debug("se-node=%s" % (node,))
            n = self._add_generic_node(parent_node, node, "se")
            # Output interfaces per node
            logger.debug("se-node-interfaces=%s" % node.get("interfaces"))
            for iface in node.get("interfaces"):
                interface = etree.SubElement(n, "interface")
                # Parse the component_id to get URN of SE and the port per interface
                component_id = iface.get("component_id")
                try:
                    interface.set("id", component_id)
#                    interface.attrib["id"] = component_id.split("_")[0]
                    port = etree.SubElement(interface, "port")
                    port.set("num", component_id.split("_")[1])
                except Exception as e:
                    logger.warning("Physical topology - Cannot add SE "
                                   "interface %s. Details: %s" %
                                   (component_id, e))
        # 2. Links
        links = [l for l in db_sync_manager.get_se_links_by_domain(self.domain_urn)]
        logger.debug("se-links=%s" % (links))
        for link in links:
            dpid1 = link.get("interface_ref")[0].get("component_id")
            dpid2 = link.get("interface_ref")[1].get("component_id")
            # Skip links already present in the topology
            if MonitoringUtils.check_existing_tag_in_topology(
                    parent_node, "link", "lan", [dpid1, dpid2]):
                continue
            logger.debug("se-link=%s" % (link,))
            self._add_se_link(link)

    def _add_se_link(self, link):
        # Special case: links to be filtered in POST {(M)RO -> (M)MS}
        SE_FILTERED_LINKS = ["*"]
        interfaces_cid = [i.get("component_id") for i in link.get("interface_ref")]
        interface_cid_in_filter = [f for f in SE_FILTERED_LINKS if f in interfaces_cid]
        # Avoid reinserting existing link tags in the topology
        if MonitoringUtils.check_existing_tag_in_topology(
                self.topology, "link", "lan", link.get("component_id")):
            return
        if not interface_cid_in_filter:
            l = etree.SubElement(self.topology, "link")
            # NOTE that this cannot be empty
            l.set("type", MonitoringUtilsLinks._translate_link_type(link))
            link_id = ""
            links = link.get("interface_ref")
            for link in links:
                # SE link
                iface = etree.SubElement(l, "interface_ref")
                iface.set("client_id", link.get("component_id"))
            # - Prepare link ID for SERM-SDNRM link
            link_id = MonitoringUtilsLinks.get_id_for_link_serm_sdnrm_tnrm(links)
            l.set("id", link_id)
Example #37
0
class DBManager(object):
    """
    This object is a wrapper for MongoClient to communicate to the RO
    (local) mongo-db
    """
    def __init__(self):
        self.__mutex = threading.Lock()
        self.config = ConfParser("ro.conf")
        master_ro = self.config.get("master_ro")
        self.mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))

    def __get_table(self, table_name):
        db_name = "felix_ro"
        if self.mro_enabled:
            db_name = "felix_mro"
        return getattr(getattr(pymongo.MongoClient(), db_name), table_name)

    def __check_return_rows(self, rows, custom_table, filter_params={}):
        if rows is None:
            raise Exception("%s -- could not find entry with params=%s!" %
                            (custom_table.full_name, filter_params))
        return rows

    def __get_one(self, custom_table, filter_params={}):
        table = custom_table
        try:
            self.__mutex.acquire()
            row = table.find_one(filter_params)
            return self.__check_return_rows(row, custom_table, filter_params)
        finally:
            self.__mutex.release()

    def __get_all(self, custom_table, filter_params={}):
        table = custom_table
        try:
            self.__mutex.acquire()
            rows = table.find(filter_params)
            return self.__check_return_rows(rows, custom_table, filter_params)
        finally:
            self.__mutex.release()

    def __set_update(self, custom_table, object_id, fields_dict={}):
        table = custom_table
        try:
            self.__mutex.acquire()
            table.update({"_id": object_id}, {"$set": fields_dict})
        finally:
            self.__mutex.release()

    # (felix_ro) domain.routing
    def get_configured_peers(self):
        """
        Collection that stores peers (either RMs or ROs)
        """
        table = self.__get_table("domain.routing")
        return self.__get_all(table)

    def get_configured_peer(self, filter_params):
        table = self.__get_table("domain.routing")
        return self.__get_one(table, filter_params)

    def get_configured_peer_by_routing_key(self, key):
        filter_params = {"_id": key}
        return self.get_configured_peer(filter_params)

    def get_configured_peer_by_urn(self, domain_urn):
        filter_params = {"domain_urn": domain_urn}
        peer_domain_info = self.get_domain_info(filter_params)
        peer_domain_ref = peer_domain_info.get("_ref_peer")
        return self.get_configured_peer_by_routing_key(peer_domain_ref)

    def get_configured_peer_by_uri(self, rm_url, extra_filter={}):
        # Parse the URL to filter the entry in the domain.routing collection
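        # e.g. "https://user:pass@10.0.0.1:8440/ro/xmlrpc/" splits into
        # protocol="https", address="10.0.0.1", port="8440" and
        # endpoint="ro/xmlrpc" (values illustrative)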
        rm_url = urlparse.urlparse(rm_url)
        rm_endpoint = rm_url.path
        if rm_endpoint and len(rm_endpoint):
            # Remove internal slashes
            if rm_endpoint[0] == "/":
                rm_endpoint = rm_endpoint[1:]
            if rm_endpoint[-1] == "/":
                rm_endpoint = rm_endpoint[:-1]
        # Prepare "rm_endpoint" for "like" query (as regexp)
        # rm_endpoint = rm_endpoint.replace("/","\/")
        rm_endpoint_re = self.__get_regexp_for_query(rm_endpoint)
        url_port = rm_url.netloc
        # NOTE: The following is a corner-case for non-standard RMs
        # using basic-auth (user:password)
        if "@" in rm_url.netloc:
            auth, url_port = rm_url.netloc.split("@")
        rm_address, rm_port = url_port.split(":")
        rm_protocol = rm_url.scheme
        filter_params = extra_filter
        filter_params.update({
            "protocol": rm_protocol,
            "address": rm_address,
            # NOTE: port may be stored as an integer, while this query
            # matches its string form
            "port": rm_port,
            "endpoint": {
                "$regex": rm_endpoint_re
            },
        })
        peer = self.get_configured_peer(filter_params)
        return peer

    # TODO Consider making this more flexible by passing
    # a dictionary with any parameter
    def update_peer_info(self, object_id, am_type, am_version):
        table = self.__get_table("domain.routing")
        fields_dict = {"am_type": am_type, "am_version": am_version}
        self.__set_update(table, object_id, fields_dict)

    def set_peer_urn(self, object_id, domain_urn):
        table = self.__get_table("domain.routing")
        fields_dict = {"domain_urn": domain_urn}
        self.__set_update(table, object_id, fields_dict)

    # (felix_ro) domain.info
    def get_info_peers(self, filter_params={}):
        table = self.__get_table("domain.info")
        return self.__get_all(table, filter_params)

    def get_domains_info(self, filter_params):
        table = self.__get_table("domain.info")
        return self.__get_all(table, filter_params)

    def get_domain_info(self, filter_params):
        table = self.__get_table("domain.info")
        return self.__get_one(table, filter_params)

    def store_domain_info(self, rm_url, domain_urn):
        table = self.__get_table("domain.info")
        # Search for entry in domain.routing first
        peer = self.get_configured_peer_by_uri(rm_url)
        # Search in domain.routing for any RM matching the filtering parameters
        try:
            self.__mutex.acquire()
            # There may be: 1 domain URN per RO, N domain URNs per MRO
            row = table.find_one({
                "_ref_peer": peer.get("_id"),
                "domain_urn": domain_urn
            })
            if not row:
                entry = {
                    "domain_urn": domain_urn,
                    "_ref_peer": peer.get("_id")
                }
                return table.insert(entry)
        finally:
            self.__mutex.release()

    def get_domain_urn(self, filter_params):
        return self.get_domain_info(filter_params).get("domain_urn")

    def get_domain_urn_from_uri(self, url, filter_params={}):
        peer = self.get_configured_peer_by_uri(url, filter_params)
        peer_id = peer.get("_id")
        return self.get_domain_info({"_ref_peer": peer_id}).get("domain_urn")

    def __get_domain_authority(self, domain_urn):
        # Domain URN == domain authority:
        # drop everything from "+authority+" onwards, then build a
        # regexp that matches URNs starting that way
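        # e.g. "urn:publicid:IDN+fms:psnc+authority+cm" yields a regexp
        # matching any component_id that contains
        # "urn:publicid:IDN+fms:psnc" (authority name illustrative)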
        domain_urn = domain_urn.split("+authority+")[0]
        domain_authority = self.__get_regexp_for_query(domain_urn)
        return domain_authority

    # (felix_ro) topology.physical
    def store_physical_info(self, domain_urn, last_update):
        """
        Keep track of last update time for physical topology within a domain.
        """
        table = self.__get_table("topology.physical")
        # Get ID of domain related to physical topology
        domain = self.get_domain_info({"domain_urn": domain_urn})
        try:
            self.__mutex.acquire()
            row = table.find_one({"_ref_domain": domain.get("_id")})
            if row is None:
                entry = {
                    "last_update": last_update,
                    "_ref_domain": domain.get("_id"),
                }
                return table.insert(entry)
        except Exception as e:
            msg = "Cannot store physical information for domain with " \
                  "URN: %s. Details: %s" % (str(domain_urn), e)
            raise Exception(msg)
        finally:
            self.__mutex.release()

    def get_physical_info_from_domain(self, domain_id):
        """
        Retrieve physical topology information through domain.info's ID.
        """
        table = self.__get_table("topology.physical")
        filter_params = {"_ref_domain": domain_id}
        return self.__get_one(table, filter_params)

    # (felix_ro) topology.slice
    def store_slice_info(self, urn, slivers):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            row = table.find_one({"slice_urn": urn})
            if row is None:
                entry = {"slice_urn": urn, "slivers": slivers}
                return table.insert(entry)
            # update the slivers list (if needed)
            self.__update_list("slice-table", table, row, "slivers", slivers)
        finally:
            self.__mutex.release()

    def get_slice_routing_keys(self, urns):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            ret = {}
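            # Result maps routing key -> URNs served by that peer, e.g.
            # {"<peer_id>": ["urn:...+slice+myslice"]} (shape illustrative)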
            for u in urns:
                for r in table.find():
                    for s in r.get("slivers"):
                        if (r.get("slice_urn") == u) or\
                           (s.get("geni_sliver_urn") == u):
                            if (s.get("routing_key") in ret) and\
                               (u not in ret[s.get("routing_key")]):
                                ret[s.get("routing_key")].append(u)
                            else:
                                ret[s.get("routing_key")] = [u]
            return ret
        finally:
            self.__mutex.release()

    def get_slice_urn(self, urns):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            for u in urns:
                for r in table.find():
                    if r.get("slice_urn") == u:
                        return u
                    for s in r.get("slivers"):
                        if s.get("geni_sliver_urn") == u:
                            return r.get("slice_urn")
            return None
        finally:
            self.__mutex.release()

    def delete_slice_urns(self, urns):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            # Keep track of the slice containing any of the slivers passed
            # This is done in order to remove the slice once it is empty
            containing_slice = {}
            containing_slice_urn = None
            if len(urns) > 0:
                containing_slice = table.find_one(
                    {"slivers.geni_sliver_urn": urns[0]}) or {}
                if "slice_urn" in containing_slice:
                    containing_slice_urn = containing_slice.get(
                        "slice_urn", None)
            for u in urns:
                for r in table.find():
                    # Passed URN belongs to a slice
                    if r.get("slice_urn") == u:
                        table.remove({"slice_urn": u})
                        logger.info("Removed slice entry: %s" % (u, ))
                    else:
                        # Passed URN belongs to a sliver (inside slice)
                        for s in r.get("slivers"):
                            if s.get("geni_sliver_urn") == u:
                                # remove the element from the list
                                self.__delete_sliver_urn(
                                    table, r.get("slice_urn"),
                                    r.get("slivers"), s)
                                logger.info(
                                    "Removed sliver from slice entry: %s" %
                                    (u, ))
                                break
            # Remove slice when it does not contain slivers anymore
            containing_slice = table.find_one(
                {"slice_urn": containing_slice_urn}) or {}
            if (len(containing_slice.get("slivers", [])) == 0) and\
               (containing_slice_urn is not None):
                table.remove({"slice_urn": containing_slice_urn})
        finally:
            self.__mutex.release()

    def store_slice_monitoring_info(self, slice_urn, monitoring_info):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            row = table.find_one({"slice_urn": slice_urn})
            if row is not None:
                table.update({"slice_urn": slice_urn}, {
                    "slice_urn": slice_urn,
                    "slivers": row.get("slivers"),
                    "slice_monitoring": monitoring_info
                })
        finally:
            self.__mutex.release()

    def get_slice_monitoring_info(self):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            return [
                i.get('slice_monitoring') for i in table.find()
                if i.get('slice_monitoring') is not None
            ]

        finally:
            self.__mutex.release()

    def get_slice_monitoring_from_urn(self, slice_urn):
        table = self.__get_table("topology.slice")
        try:
            self.__mutex.acquire()
            row = table.find_one({"slice_urn": slice_urn})
            return row.get("slice_monitoring") if row is not None else None

        finally:
            self.__mutex.release()

    # (felix_ro) resource.com.node
    # TODO Ensure correctness
    def store_com_nodes(self, routingKey, values):
        table = self.__get_table("resource.com.node")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_id": v.get("component_manager_id"),
                    # "sliver_type_name": v.get("sliver_type_name"),
                })
                if not row:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                    continue
                # update the object (if needed)
                self.__update_list("comnodes-table", table, row, "interfaces",
                                   v.get("interfaces"))
            return ids
        finally:
            self.__mutex.release()

    def get_com_nodes(self, filter_params={}):
        table = self.__get_table("resource.com.node")
        return self.__get_all(table, filter_params)

    def get_com_nodes_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {
            "component_id": domain_authority,
        }
        nodes = self.get_com_nodes(filter_params)
        return nodes

    def get_com_node_routing_key(self, cid):
        table = self.__get_table("resource.com.node")
        filter_params = {"component_id": cid}
        return self.__get_one(table, filter_params).get("routing_key")

    # (felix_ro) resource.com.link
    def store_com_links(self, routingKey, values):
        table = self.__get_table("resource.com.link")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id")
                })
                if not row:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                else:
                    logger.debug("(link-table) %s already stored!" %
                                 (row.get("_id")))
            return ids
        finally:
            self.__mutex.release()

    def get_com_links(self, filter_params={}):
        table = self.__get_table("resource.com.link")
        return self.__get_all(table, filter_params)

    def get_com_links_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {
            "component_id": domain_authority,
        }
        links = self.get_com_links(filter_params)
        return links

    def get_com_link_by_sdnkey(self, sdn_link_key):
        table = self.__get_table("resource.com.link")
        for l in self.__get_all(table, {}):
            if l.get("component_id").endswith(sdn_link_key):
                return l
        return None

    def get_com_interface_by_nodekey(self, com_node_key):
        table = self.__get_table("resource.com.link")
        ret = []
        try:
            self.__mutex.acquire()
            for row in table.find():
                if row.get("component_id").startswith(com_node_key):
                    for l in row.get("links"):
                        ret.append(l.get("source_id"))
            return ret
        finally:
            self.__mutex.release()

    # (felix_ro) resource.of.node
    def store_sdn_datapaths(self, routingKey, values):
        table = self.__get_table("resource.of.node")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_id": v.get("component_manager_id"),
                    "dpid": v.get("dpid")})
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                    continue
                # update the object (if needed)
                self.__update_list("datapth-table", table, row, "ports",
                                   v.get("ports"))
            return ids
        finally:
            self.__mutex.release()

    def get_sdn_datapaths(self, filter_params={}):
        table = self.__get_table("resource.of.node")
        return self.__get_all(table, filter_params)

    def get_sdn_datapaths_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {
            "component_id": domain_authority,
        }
        nodes = self.get_sdn_datapaths(filter_params)
        return nodes

    def get_sdn_datapath_routing_key(self, dpid):
        table = self.__get_table("resource.of.node")
        filter_params =\
            {"component_id": dpid.get("component_id"),
             "component_manager_id": dpid.get("component_manager_id"),
             "dpid": dpid.get("dpid")}
        return self.__get_one(table, filter_params).get("routing_key")

    def get_sdn_datapath_by_componentid(self, cid):
        table = self.__get_table("resource.of.node")
        filter_params = {"component_id": cid}
        return self.__get_one(table, filter_params)

    # (felix_ro) resource.of.link
    def store_sdn_links(self, routingKey, values):
        table = self.__get_table("resource.of.link")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id")
                })
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                else:
                    logger.debug("(link-table) %s already stored!" %
                                 (row.get("_id")))
            return ids
        finally:
            self.__mutex.release()

    def get_sdn_links(self, filter_params={}):
        table = self.__get_table("resource.of.link")
        (of, fed) = ([], [])
        try:
            self.__mutex.acquire()
            for row in table.find(filter_params):
                if row.get("dpids") is not None:
                    of.append(row)
                elif row.get("interface_ref_id") is not None:
                    fed.append(row)
            return (of, fed)
        finally:
            self.__mutex.release()

    def get_sdn_links_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look for all those resources that start with a given URN
        filter_params = {
            "component_id": domain_authority,
        }
        links = self.get_sdn_links(filter_params)
        return links

    def get_sdn_link_by_sdnkey(self, sdn_link_key):
        table = self.__get_table("resource.of.link")
        for l in self.__get_all(table, {}):
            if l.get('component_id').endswith(sdn_link_key):
                return l
        return None

    # (felix_ro) resource.se.node
    def store_se_nodes(self, routingKey, values):
        table = self.__get_table("resource.se.node")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_id": v.get("component_manager_id"),
                    "sliver_type_name": v.get("sliver_type_name")})
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                    continue
                # update the object (if needed)
                self.__update_list("senodes-table", table, row, "interfaces",
                                   v.get("interfaces"))
            return ids
        finally:
            self.__mutex.release()

    def get_se_node_info(self, routingKey, node_id):
        table = self.__get_table("resource.se.node")
        filter_params = {'routing_key': routingKey, 'component_id': node_id}
        row = self.__get_one(table, filter_params)
        # If execution reaches this point, a matching row was found
        return {
            'component_id': row.get('component_id'),
            'component_manager_id': row.get('component_manager_id')
        }

    def get_se_nodes(self, filter_params={}):
        table = self.__get_table("resource.se.node")
        return self.__get_all(table, filter_params)

    def get_se_nodes_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look up resources whose component_id starts with the authority URN
        filter_params = {
            "component_id": domain_authority,
        }
        nodes = self.get_se_nodes(filter_params)
        return nodes

    def get_se_node_routing_key(self, cid):
        table = self.__get_table("resource.se.node")
        filter_params = {"component_id": cid}
        return self.__get_one(table, filter_params).get("routing_key")

    # (felix_ro) resource.se.link
    def store_se_links(self, routingKey, values):
        table = self.__get_table("resource.se.link")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_name": v.get("component_manager_name"),
                    "link_type": v.get("link_type")
                })
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                else:
                    logger.debug("(selink-table) %s already stored!" %
                                 (row.get("_id")))
            return ids
        finally:
            self.__mutex.release()

    def get_se_link_routing_key(self, values):
        table = self.__get_table("resource.se.link")
        try:
            key, ifs = None, []
            self.__mutex.acquire()
            for r in table.find():
                ifrefs = r.get('interface_ref')
                # Iterate over a copy: removing from the list while
                # iterating it would silently skip the next element
                for i in list(ifrefs):
                    if i.get('component_id') in values:
                        key = r.get('routing_key')
                        ifrefs.remove(i)
                        # each SE link is assumed to have two endpoints, so
                        # after removing the matched one, ifrefs[0] is the
                        # opposite end
                        ifs.append(ifrefs[0])
            return key, ifs
        finally:
            self.__mutex.release()

    def get_se_link_info(self, node_port_id):
        table = self.__get_table("resource.se.link")
        try:
            self.__mutex.acquire()
            for r in table.find():
                for ifref in r.get("interface_ref"):
                    if ifref.get("component_id") == node_port_id:
                        return r.get('link_type'), r.get(
                            'component_manager_name')

            return None, None
        finally:
            self.__mutex.release()

    def get_se_links(self, filter_params={}):
        table = self.__get_table("resource.se.link")
        return self.__get_all(table, filter_params)

    def get_se_links_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look up resources whose component_id starts with the authority URN
        filter_params = {
            "component_id": domain_authority,
        }
        links = self.get_se_links(filter_params)
        return links

    def get_direct_se_link_routing_key(self, cid, ifrefs):
        try:
            self.__mutex.acquire()
            table = self.__get_table("resource.se.link")
            row = table.find_one({"component_id": cid})
            if row is not None:
                return row.get("routing_key")

            table = self.__get_table("resource.se.node")
            for row in table.find():
                for i in row.get("interfaces"):
                    if i.get("component_id") in ifrefs:
                        return row.get("routing_key")

            raise Exception("Link (%s,%s) owner is not found into RO-DB!" %
                            (cid, ifrefs))
        finally:
            self.__mutex.release()

    def get_interface_ref_by_sekey(self, se_if):
        table = self.__get_table("resource.se.link")
        try:
            self.__mutex.acquire()
            ret = []
            for row in table.find():
                for i in row.get("interface_ref"):
                    if i.get("component_id") == se_if:
                        ret = row.get("interface_ref")
                        break
                if ret:
                    # stop at the first link that contains the interface
                    break

            for i in ret:
                if i.get("component_id") != se_if:
                    return i.get("component_id")

            return None
        finally:
            self.__mutex.release()

    # (felix_ro) resource.tn.node
    def store_tn_nodes(self, routingKey, values):
        table = self.__get_table("resource.tn.node")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_id": v.get("component_manager_id"),
                    "sliver_type_name": v.get("sliver_type_name")
                })
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                    continue
                # update the object (if needed)
                self.__update_list("tnnodes-table", table, row, "interfaces",
                                   v.get("interfaces"))
            return ids
        finally:
            self.__mutex.release()

    def update_tn_nodes(self, routingKey, values):
        table = self.__get_table("resource.tn.node")
        try:
            ids = []
            self.__mutex.acquire()
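            # Unlike store_tn_nodes (which merges into existing rows), this
            # method replaces the whole snapshot for a peer: drop every row
            # with this routing_key, then re-insert the fresh values.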
            ret = table.remove({"routing_key": routingKey})
            logger.debug("Remove tn-nodes: %s" % (ret))
            for v in values:
                v["routing_key"] = routingKey
                ids.append(table.insert(v))
            return ids
        finally:
            self.__mutex.release()

    def get_tn_nodes(self, filter_params={}):
        table = self.__get_table("resource.tn.node")
        return self.__get_all(table, filter_params)

    def get_tn_nodes_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look up resources whose component_id starts with the authority URN
        filter_params = {
            "component_id": domain_authority,
        }
        nodes = self.get_tn_nodes(filter_params)
        return nodes

    def get_tn_node_routing_key(self, cid):
        table = self.__get_table("resource.tn.node")
        filter_params = {"component_id": cid}
        return [
            r.get("routing_key") for r in self.__get_all(table, filter_params)
        ]

    def get_tn_node_interface(self, filter_params={}):
        found_interfaces = []
        tn_nodes = [n for n in self.get_tn_nodes()]
        if not tn_nodes:
            return found_interfaces
        # Only the first TN node is examined
        interfaces = tn_nodes[0].get("interfaces", [])
        for interface in interfaces:
            # Keep interfaces whose fields contain every filter value
            # (containment, not equality; missing keys never match)
            if all(v in interface.get(k, "")
                   for k, v in filter_params.iteritems()):
                found_interfaces.append(interface)
        return found_interfaces
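
    # Illustrative call (hypothetical URN fragment): filtering is by
    # containment, so a substring of an interface's component_id suffices:
    #   get_tn_node_interface({"component_id": "+stp+"})
    # returns every interface whose component_id contains "+stp+".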

    # (felix_ro) resource.tn.link
    def store_tn_links(self, routingKey, values):
        table = self.__get_table("resource.tn.link")
        try:
            ids = []
            self.__mutex.acquire()
            for v in values:
                row = table.find_one({
                    "routing_key": routingKey,
                    "component_id": v.get("component_id"),
                    "component_manager_name": v.get("component_manager_name")
                })
                if row is None:
                    v["routing_key"] = routingKey
                    ids.append(table.insert(v))
                else:
                    logger.debug("(tnlink-table) %s already stored!" %
                                 (row.get("_id")))
            return ids
        finally:
            self.__mutex.release()

    def get_tn_links(self, filter_params={}):
        table = self.__get_table("resource.tn.link")
        return self.__get_all(table, filter_params)

    def get_tn_links_by_domain(self, domain_urn):
        domain_authority = self.__get_domain_authority(domain_urn)
        # Look up resources whose component_id starts with the authority URN
        filter_params = {
            "component_id": domain_authority,
        }
        links = self.get_tn_links(filter_params)
        return links

    def get_tn_link_routing_key(self, cid, cmid, ifrefs):
        try:
            self.__mutex.acquire()
            table = self.__get_table("resource.tn.link")
            ret = [
                r.get("routing_key") for r in table.find({"component_id": cid})
            ]
            if ret:
                return ret

            table = self.__get_table("resource.tn.node")
            ret = [
                r.get("routing_key")
                for r in table.find({"component_manager_id": cmid})
            ]
            if ret:
                return ret

            ret = set()
            for row in table.find():
                for i in row.get("interfaces"):
                    if i.get("component_id") in ifrefs:
                        ret.add(row.get("routing_key"))

            if ret:
                return ret
            raise Exception("Link (%s,%s,%s) owner is not found into RO-DB!" %
                            (cid, cmid, ifrefs))
        finally:
            self.__mutex.release()

    # (felix_ro) topology.slice.sdn
    def store_slice_sdn(self, slice_urn, groups_info, matches_info):
        table = self.__get_table("topology.slice.sdn")
        try:
            self.__mutex.acquire()
            row = table.find_one({"slice_urn": slice_urn})
            if not row:
                value = {
                    "slice_urn": slice_urn,
                    "groups": groups_info,
                    "matches": matches_info
                }
                return table.insert(value)

            logger.warning("The slice '%s' already exists!" % slice_urn)
            return None
        finally:
            self.__mutex.release()

    def get_slice_sdn(self, slice_urn):
        table = self.__get_table("topology.slice.sdn")
        gs, ms = [], []
        try:
            self.__mutex.acquire()
            for r in table.find({"slice_urn": slice_urn}):
                if r.get("groups"):
                    gs.extend(r.get("groups"))
                if r.get("matches"):
                    ms.extend(r.get("matches"))

            return gs, ms
        finally:
            self.__mutex.release()

    def delete_slice_sdn(self, slice_urn):
        table = self.__get_table("topology.slice.sdn")
        try:
            self.__mutex.acquire()
            table.remove({"slice_urn": slice_urn})

        finally:
            self.__mutex.release()

    # utilities
    def __update_list(self, tname, table, entry, key, values):
        logger.debug("(%s) %s already stored!" % (
            tname,
            entry.get("_id"),
        ))
        modif = {key: []}
        for v in values:
            if v not in entry.get(key):
                modif.get(key).append(v)

        if len(modif.get(key)) > 0:
            modif.get(key).extend(entry.get(key))
            logger.debug("(%s) extend slivers info %s" % (
                tname,
                modif,
            ))
            table.update({"_id": entry.get("_id")}, {"$set": modif})
        else:
            logger.debug("(%s) not needed to update %s" % (
                tname,
                key,
            ))

    def __delete_sliver_urn(self, table, slice_urn, slivers, elem):
        logger.debug("(slice-table) %s remove %s from %s" %
                     (slice_urn, elem, slivers))
        slivers.remove(elem)
        modif = {"slivers": slivers}
        table.update({"slice_urn": slice_urn}, {"$set": modif})

    def __get_regexp_for_query(self, search_term):
        # Escape characters that are regex metacharacters but appear
        # literally in GENI URNs (currently just "+")
        terms_to_replace = ["+"]
        for term in terms_to_replace:
            search_term = search_term.replace(term, "\\%s" % term)
        return re.compile(search_term)
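
A minimal usage sketch of the regex helper above (assuming db_manager is an
instance of this class; the authority URN is a made-up example). PyMongo
accepts a compiled regular expression as a filter value, which is how the
*_by_domain getters can match every component_id under one authority:

import re

# "+" is a regex metacharacter but occurs literally in GENI URNs, so it is
# escaped before compiling -- the same thing __get_regexp_for_query does
authority = "urn:publicid:IDN+ofelia:eict:gcf"
prefix_re = re.compile(authority.replace("+", "\\+"))
filter_params = {"component_id": prefix_re}
# nodes = db_manager.get_tn_nodes(filter_params)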
Example #38
class ROSchedulerService(Service):

    def __init__(self):
        """
        Constructor of the service.
        """
        self.config = ConfParser("ro.conf")
        self.scheduler = self.config.get("scheduler")
        interval = int(self.scheduler.get("frequency"))
        master_ro = self.config.get("master_ro")
        mro_enabled = ast.literal_eval(master_ro.get("mro_enabled"))
        self.tnrm_refresh = self.config.get("tnrm").get("refresh_timeout")
        db_name = "felix_ro"
        if mro_enabled:
            db_name = "felix_mro"
        global ro_scheduler
        ro_scheduler = BackgroundScheduler()
        ro_scheduler.add_jobstore(
            MongoDBJobStore(database=db_name, collection="scheduler.jobs"))
        ro_scheduler.start()

        super(ROSchedulerService, self).__init__(
            "ROSchedulerService", interval)
        self.first_time = True

    @staticmethod
    def get_scheduler():
        return ro_scheduler

    def do_action(self):
        jobs = ro_scheduler.get_jobs()
        logger.debug("Scheduled Jobs=%s" % (jobs,))

        if self.first_time:
            self.__oneshot_jobs()
            self.__cron_jobs()
            self.first_time = False

            if self.tnrm_refresh:
                self.__add_interval_tn_refresh(self.tnrm_refresh)

    def stop(self):
        # remove the stored jobs!
        self.__remove(ro_scheduler.get_jobs())
        ro_scheduler.shutdown()
        logger.info("ro_scheduler shutdown")
        super(ROSchedulerService, self).stop()

    def __remove(self, jobs):
        for job in jobs:
            job.remove()

    def __add_interval_tn_refresh(self, minutes):
        try:
            ro_scheduler.add_job(
                tn_resource_refresh,
                trigger=IntervalTrigger(minutes=int(minutes)),
                id="interval_tn_refresh",
                replace_existing=True)
        except Exception as e:
            logger.warning("interval_jobs failure: %s" % (e,))

    def __add_oneshot(self, secs_, func_, id_):
        try:
            run_time = datetime.now() + timedelta(seconds=secs_)
            ro_scheduler.add_job(
                func_, trigger=DateTrigger(run_date=run_time), id=id_)

        except Exception as e:
            logger.warning("oneshot_jobs failure: %s" % (e,))

    def __oneshot_jobs(self):
        self.__add_oneshot(int(self.scheduler.get("oneshot_ro")),
                           ro_resource_detector, "oneshot_ro_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_com")),
                           com_resource_detector, "oneshot_com_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_sdn")),
                           sdn_resource_detector, "oneshot_sdn_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_se")),
                           se_resource_detector, "oneshot_se_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_tn")),
                           tn_resource_detector, "oneshot_tn_rd")
        self.__add_oneshot(int(self.scheduler.get("oneshot_phy-monit")),
                           physical_monitoring, "oneshot_physical_monitoring")
        self.__add_oneshot(int(self.scheduler.get("oneshot_slice-monit")),
                           slice_monitoring, "oneshot_slice_monitoring")

    def __add_cron(self, func_, id_, hour_, min_, sec_):
        try:
            tr_ = CronTrigger(hour=hour_, minute=min_, second=sec_)
            ro_scheduler.add_job(func_, trigger=tr_, id=id_)
        except Exception as e:
            logger.warning("cron_jobs failure: %s" % (e,))

    def __cron_jobs(self):
        self.__add_cron(ro_resource_detector, "cron_ro_rd", 0, 1, 0)
        self.__add_cron(com_resource_detector, "cron_com_rd", 0, 11, 0)
        self.__add_cron(sdn_resource_detector, "cron_sdn_rd", 0, 21, 0)
        self.__add_cron(se_resource_detector, "cron_se_rd", 0, 31, 0)
        self.__add_cron(tn_resource_detector, "cron_tn_rd", 0, 41, 0)
        self.__add_cron(physical_monitoring, "cron_physical_monitoring",
                        0, 51, 0)
        self.__add_cron(slice_monitoring, "cron_slice_monitoring",
                        1, 1, 0)
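
A minimal sketch of the scheduling pattern used above (assuming only the
apscheduler package is installed; detect_resources stands in for the real
detector callbacks such as ro_resource_detector):

from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger


def detect_resources():
    print("detecting resources")

scheduler = BackgroundScheduler()
scheduler.start()
# One-shot job a few seconds from now (mirrors __add_oneshot)
scheduler.add_job(detect_resources,
                  trigger=DateTrigger(run_date=datetime.now() +
                                      timedelta(seconds=10)),
                  id="oneshot_demo")
# Daily job at 00:01:00; __cron_jobs staggers the detectors at ten-minute
# offsets so their runs do not pile up
scheduler.add_job(detect_resources,
                  trigger=CronTrigger(hour=0, minute=1, second=0),
                  id="cron_demo", replace_existing=True)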
Example #39
File: base.py Project: HalasNet/felix
class GENIv3DelegateBase(object):
    """
    Please find more information about the concept of Handlers and Delegates via the wiki (e.g. https://github.com/motine/AMsoil/wiki/GENI).
    
    The GENIv3 handler (see above) assumes that this class uses RSpec version 3 when interacting with the client.
    For creating a new RSpec type/extension, please see the wiki via https://github.com/motine/AMsoil/wiki/RSpec.
    
    General parameters for all following methods:
    {client_cert} The client's certificate. See [flaskrpcs]XMLRPCDispatcher.requestCertificate(). Also see http://groups.geni.net/geni/wiki/GeniApiCertificates
    {credentials} A list of credentials in the format specified at http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#credentials

    Dates are converted to UTC and then made timezone-unaware (see http://docs.python.org/2/library/datetime.html#datetime.datetime.astimezone).
    """
    
    ALLOCATION_STATE_UNALLOCATED = 'geni_unallocated'
    """The sliver does not exist. (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#SliverAllocationStates)"""
    ALLOCATION_STATE_ALLOCATED = 'geni_allocated'
    """The sliver is offered/promissed, but it does not consume actual resources. This state shall time out at some point in time."""
    ALLOCATION_STATE_PROVISIONED = 'geni_provisioned'
    """The sliver is/has been instanciated. Operational states apply here."""

    OPERATIONAL_STATE_PENDING_ALLOCATION = 'geni_pending_allocation'
    """Required for aggregates to support. A transient state."""
    OPERATIONAL_STATE_NOTREADY           = 'geni_notready'
    """Optional. A stable state."""
    OPERATIONAL_STATE_CONFIGURING        = 'geni_configuring'
    """Optional. A transient state."""
    OPERATIONAL_STATE_STOPPING           = 'geni_stopping'
    """Optional. A transient state."""
    OPERATIONAL_STATE_READY              = 'geni_ready'
    """Optional. A stable state."""
    OPERATIONAL_STATE_READY_BUSY         = 'geni_ready_busy'
    """Optional. A transient state."""
    OPERATIONAL_STATE_FAILED             = 'geni_failed'
    """Optional. A stable state."""

    OPERATIONAL_ACTION_START   = 'geni_start'
    """Sliver shall become geni_ready. The AM developer may define more states (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#SliverOperationalActions)"""
    OPERATIONAL_ACTION_RESTART = 'geni_restart'
    """Sliver shall become geni_ready again."""
    OPERATIONAL_ACTION_STOP    = 'geni_stop'
    """Sliver shall become geni_notready."""

    def __init__(self):
        super(GENIv3DelegateBase, self).__init__()
        self.config = ConfParser("geniv3.conf")
        self.general_section = self.config.get("general")
        self.certificates_section = self.config.get("certificates")
    
    def get_request_extensions_list(self):
        """Not to overwrite by AM developer. Should return a list of request extensions (XSD schemas) to be sent back by GetVersion."""
        return [uri for prefix, uri in self.get_request_extensions_mapping().items()]

    def get_request_extensions_mapping(self):
        """Overwrite by AM developer. Should return a dict of namespace names and request extensions (XSD schema's URLs as string).
        Format: {xml_namespace_prefix : namespace_uri, ...}
        """
        return {}

    def get_manifest_extensions_mapping(self):
        """Overwrite by AM developer. Should return a dict of namespace names and manifest extensions (XSD schema's URLs as string).
        Format: {xml_namespace_prefix : namespace_uri, ...}
        """
        return {}
        
    def get_ad_extensions_list(self):
        """Not to overwrite by AM developer. Should return a list of advertisement extensions (XSD schemas) to be sent back by GetVersion."""
        return [uri for prefix, uri in self.get_ad_extensions_mapping().items()]

    def get_ad_extensions_mapping(self):
        """Overwrite by AM developer. Should return a dict of namespace names and advertisement extensions (XSD schema URLs as string) to be sent back by GetVersion.
        Format: {xml_namespace_prefix : namespace_uri, ...}
        """
        return {}
    
    def is_single_allocation(self):
        """Overwrite by AM developer. Shall return a True or False. When True (not default), and performing one of (Describe, Allocate, Renew, Provision, Delete), such an AM requires you to include either the slice urn or the urn of all the slivers in the same state.
        see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#OperationsonIndividualSlivers"""
        return False

    def get_allocation_mode(self):
        """Overwrite by AM developer. Shall return a either 'geni_single', 'geni_disjoint', 'geni_many'.
        It defines whether this AM allows adding slivers to slices at an AM (i.e. calling Allocate multiple times, without first deleting the allocated slivers).
        For description of the options see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#OperationsonIndividualSlivers"""
        return 'geni_single'

    def list_resources(self, client_cert, credentials, geni_available):
        """Overwrite by AM developer. Shall return an RSpec version 3 (advertisement) or raise an GENIv3...Error.
        If {geni_available} is set, only return availabe resources.
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#ListResources"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def describe(self, urns, client_cert, credentials):
        """Overwrite by AM developer. Shall return an RSpec version 3 (manifest) or raise an GENIv3...Error.
        {urns} contains a list of slice identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).

        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Describe"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def allocate(self, slice_urn, client_cert, credentials, rspec, end_time=None):
        """Overwrite by AM developer. 
        Shall return the two following values or raise an GENIv3...Error.
        - a RSpec version 3 (manifest) of newly allocated slivers 
        - a list of slivers of the format:
            [{'geni_sliver_urn' : String,
              'exceptionspires'    : Python-Date,
              'geni_allocation_status' : one of the ALLOCATION_STATE_xxx}, 
             ...]
        Please return like so: "return respecs, slivers"
        {slice_urn} contains a slice identifier (e.g. 'urn:publicid:IDN+ofelia:eict:gcf+slice+myslice').
        {end_time} Optional. A python datetime object which determines the desired expiry date of this allocation (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#geni_end_time).
        >>> This is the first part of what CreateSliver used to do in previous versions of the AM API. The second part is now done by Provision, and the final part is done by PerformOperationalAction.
        
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Allocate"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def renew(self, urns, client_cert, credentials, expiration_time, best_effort):
        """Overwrite by AM developer. 
        Shall return a list of slivers of the following format or raise an GENIv3...Error:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]
        
        {urns} contains a list of slice identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {expiration_time} is a python datetime object
        {best_effort} determines if the method shall fail in case that not all of the urns can be renewed (best_effort=False).

        If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Renew"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def provision(self, urns, client_cert, credentials, best_effort, end_time, geni_users):
        """Overwrite by AM developer. 
        Shall return the two following values or raise an GENIv3...Error.
        - a RSpec version 3 (manifest) of slivers 
        - a list of slivers of the format:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]
        Please return like so: "return respecs, slivers"

        {urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {best_effort} determines if the method shall fail in case that not all of the urns can be provisioned (best_effort=False)
        {end_time} Optional. A python datetime object which determines the desired expiry date of this provision (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#geni_end_time).
        {geni_users} is a list of the format: [ { 'urn' : ..., 'keys' : [sshkey, ...]}, ...]
        
        If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Provision"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def status(self, urns, client_cert, credentials):
        """Overwrite by AM developer. 
        Shall return the two following values or raise an GENIv3...Error.
        - a slice urn
        - a list of slivers of the format:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]
        Please return like so: "return slice_urn, slivers"

        {urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Status"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def perform_operational_action(self, urns, client_cert, credentials, action, best_effort):
        """Overwrite by AM developer. 
        Shall return a list of slivers of the following format or raise an GENIv3...Error:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]

        {urns} contains a list of slice or sliver identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {action} an arbitrary string, but the following should be possible: "geni_start", "geni_stop", "geni_restart"
        {best_effort} determines if the method shall fail in case that not all of the urns can be changed (best_effort=False)

        If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#PerformOperationalAction"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def delete(self, urns, client_cert, credentials, best_effort):
        """Overwrite by AM developer. 
        Shall return a list of slivers of the following format or raise an GENIv3...Error:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'exceptionspires'            : Python-Date,
              'geni_error'              : optional String}, 
             ...]

        {urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {best_effort} determines if the method shall fail in case that not all of the urns can be deleted (best_effort=False)
        
        If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Delete"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")

    def shutdown(self, slice_urn, client_cert, credentials):
        """Overwrite by AM developer. 
        Shall return True or False or raise a GENIv3...Error.

        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Shutdown"""
        raise exceptions.GENIv3GeneralError("Method not implemented yet")
    
    def auth(self, client_cert, credentials, slice_urn=None, privileges=()):
        """
        This method authenticates and authorizes.
        It returns the client's urn, uuid, email (extracted from the {client_cert}). Example call: "urn, uuid, email = self.auth(...)"
        Be aware, the email is not required in the certificate, hence it might be empty.
        If the validation fails, an GENIv3ForbiddenError is thrown.
        
        The credentials are checked so the user has all the required privileges (success if any credential fits all privileges).
        The client certificate is not checked: this is usually done via the webserver configuration.
        This method only treats certificates of type 'geni_sfa'.
        
        Here is a list of possible privileges (format: right_in_credential: [privilege1, privilege2, ...]):
            "authority" : ["register", "remove", "update", "resolve", "list", "getcredential", "*"],
            "refresh"   : ["remove", "update"],
            "resolve"   : ["resolve", "list", "getcredential"],
            "sa"        : ["getticket", "redeemslice", "redeemticket", "createslice", "createsliver", "deleteslice", "deletesliver", "updateslice",
                           "getsliceresources", "getticket", "loanresources", "stopslice", "startslice", "renewsliver",
                            "deleteslice", "deletesliver", "resetslice", "listslices", "listnodes", "getpolicy", "sliverstatus"],
            "embed"     : ["getticket", "redeemslice", "redeemticket", "createslice", "createsliver", "renewsliver", "deleteslice", 
                           "deletesliver", "updateslice", "sliverstatus", "getsliceresources", "shutdown"],
            "bind"      : ["getticket", "loanresources", "redeemticket"],
            "control"   : ["updateslice", "createslice", "createsliver", "renewsliver", "sliverstatus", "stopslice", "startslice", 
                           "deleteslice", "deletesliver", "resetslice", "getsliceresources", "getgids"],
            "info"      : ["listslices", "listnodes", "getpolicy"],
            "ma"        : ["setbootstate", "getbootstate", "reboot", "getgids", "gettrustedcerts"],
            "operator"  : ["gettrustedcerts", "getgids"],                   
            "*"         : ["createsliver", "deletesliver", "sliverstatus", "renewsliver", "shutdown"]
            
        When using the gcf clearinghouse implementation the credentials will have the rights:
        - user: "******", "resolve", "info" (which resolves to the privileges: "remove", "update", "resolve", "list", "getcredential", "listslices", "listnodes", "getpolicy").
        - slice: "refresh", "embed", "bind", "control", "info" (well, do the resolving yourself...)        
        """
        # check variables
        if not isinstance(privileges, tuple):
            raise TypeError("Privileges need to be a tuple.")
        # collect credentials (only GENI certs, version ignored)
        geni_credentials = []
        for c in credentials:
            if c['geni_type'] == 'geni_sfa':
                geni_credentials.append(c['geni_value'])

        # Get the cert_root from the configuration settings
        root_path = os.path.normpath(os.path.join(os.path.dirname(__file__), "../../../../"))
        cert_root = os.path.join(root_path, self.certificates_section.get("cert_root"))
        logger.debug("client_certificate trusted, present at: %s" % str(cert_root))
        logger.debug("client_certificate:\n%s" % str(client_cert))

        if client_cert is None:
            raise exceptions.GENIv3ForbiddenError("Could not determine the client SSL certificate")
        # test the credential
        try:
            cred_verifier = extensions.geni.util.cred_util.CredentialVerifier(cert_root)
            cred_verifier.verify_from_strings(client_cert, geni_credentials, slice_urn, privileges)
        except Exception as e:
            raise exceptions.GENIv3ForbiddenError(str(e))
        
        user_gid = extensions.sfa.trust.gid.GID(string=client_cert)
        user_urn = user_gid.get_urn()
        user_uuid = user_gid.get_uuid()
        user_email = user_gid.get_email()
        return user_urn, user_uuid, user_email # TODO document return

    def urn_type(self, urn):
        """Returns the type of the urn (e.g. slice, sliver).
        For the possible types see: http://groups.geni.net/geni/wiki/GeniApiIdentifiers#ExamplesandUsage"""
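        # e.g. 'urn:publicid:IDN+ofelia:eict:gcf+slice+myslice' -> 'slice'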
        return urn.split('+')[2].strip()

    def lxml_ad_root(self):
        """Returns a xml root node with the namespace extensions specified by self.get_ad_extensions_mapping."""
        return etree.Element('rspec', self.get_ad_extensions_mapping(), type='advertisement')

    def lxml_manifest_root(self):
        """Returns a xml root node with the namespace extensions specified by self.get_manifest_extensions_mapping."""
        return etree.Element('rspec', self.get_manifest_extensions_mapping(), type='manifest')

    def lxml_to_string(self, rspec):
        """Converts a lxml root node to string (for returning to the client)."""
        return etree.tostring(rspec, pretty_print=True)
        
    def lxml_ad_element_maker(self, prefix):
        """Returns a lxml.builder.ElementMaker configured for avertisements and the namespace given by {prefix}."""
        ext = self.get_ad_extensions_mapping()
        return ElementMaker(namespace=ext[prefix], nsmap=ext)

    def lxml_manifest_element_maker(self, prefix):
        """Returns a lxml.builder.ElementMaker configured for manifests and the namespace given by {prefix}."""
        ext = self.get_manifest_extensions_mapping()
        return ElementMaker(namespace=ext[prefix], nsmap=ext)
    
    def lxml_parse_rspec(self, rspec_string):
        """Returns a the root element of the given {rspec_string} as lxml.Element.
        If the config key is set, the rspec is validated with the schemas found at the URLs specified in schemaLocation of the the given RSpec."""
        # parse
        rspec_root = etree.fromstring(rspec_string)
        # validate RSpec against specified schemaLocations
        should_validate = ast.literal_eval(self.general_section.get("rspec_validation"))
        
        if should_validate:
            schema_locations = rspec_root.get("{http://www.w3.org/2001/XMLSchema-instance}schemaLocation")
            if schema_locations:
                schema_location_list = schema_locations.split(" ")
                schema_location_list = map(lambda x: x.strip(), schema_location_list) # strip whitespaces
                for sl in schema_location_list:
                    try:
                        xmlschema_contents = urllib2.urlopen(sl) # try to download the schema
                        xmlschema_doc = etree.parse(xmlschema_contents)
                        xmlschema = etree.XMLSchema(xmlschema_doc)
                        xmlschema.validate(rspec_root)
                    except Exception as e:
                        logger.warning("RSpec validation failed failed (%s: %s)" % (sl, str(e),))
            else:
                logger.warning("RSpec does not specify any schema locations")
        return rspec_root

    def lxml_elm_has_request_prefix(self, lxml_elm, ns_name):
        return str(lxml_elm.tag).startswith("{%s}" % (self.get_request_extensions_mapping()[ns_name],))
        
    def lxml_elm_equals_request_tag(self, lxml_elm, ns_name, tagname):
        """Determines if the given tag by {ns_name} and {tagname} equals lxml_tag. The namespace URI is looked up via get_request_extensions_mapping()['ns_name']"""
        return ("{%s}%s" % (self.get_request_extensions_mapping()[ns_name], tagname)) == str(lxml_elm.tag)