Ejemplo n.º 1
0
    def __init__(self, layer_API):
        """
        Initialize main Resource Orchestration Layer components.

        :param layer_API: layer API instance
        :type layer_API: :any:`ResourceOrchestrationAPI`
        :return: None
        """
        super(ResourceOrchestrator, self).__init__(layer_API=layer_API)
        log.debug("Init %s" % self.__class__.__name__)
        # Manager storing the incoming NF-FG requests
        self.nffgManager = NFFGManager()
        # Init virtualizer manager
        # Listeners must be weak references so the layer API can be garbage
        # collected
        self.virtualizerManager = VirtualizerManager()
        self.virtualizerManager.addListeners(layer_API, weak=True)
        # Init NFIB manager
        # Keep a reference to the manager instance itself instead of the return
        # value of initialize(): if initialize() does not return self, the
        # original one-liner left self.nfibManager as None and any later
        # self.nfibManager.<method>() call would fail.
        self.nfibManager = NFIBManager()
        self.nfibManager.initialize()
Ejemplo n.º 2
0
class ResourceOrchestrator(AbstractOrchestrator):
  """
  Main class for the handling of the ROS-level mapping functions.
  """
  # Default Mapper class as a fallback mapper
  DEFAULT_MAPPER = ResourceOrchestrationMapper

  def __init__ (self, layer_API):
    """
    Initialize main Resource Orchestration Layer components.

    :param layer_API: layer API instance
    :type layer_API: :any:`ResourceOrchestrationAPI`
    :return: None
    """
    super(ResourceOrchestrator, self).__init__(layer_API=layer_API)
    log.debug("Init %s" % self.__class__.__name__)
    # Manager storing the incoming NF-FG requests
    self.nffgManager = NFFGManager()
    # Init virtualizer manager
    # Listeners must be weak references so that the layer API can be garbage
    # collected
    self.virtualizerManager = VirtualizerManager()
    self.virtualizerManager.addListeners(layer_API, weak=True)
    # Init NFIB manager (two steps: keep the manager instance itself, then
    # initialize it -- the attribute must hold the manager, not the return
    # value of initialize())
    self.nfibManager = NFIBManager()
    self.nfibManager.initialize()

  def preprocess_nffg(self, nffg):
    """
    Preprocess the request graph before mapping.

    Steps performed in-place on the given NFFG:

    1. Recreate SG hops from flowrule TAG values if none are present.
    2. Create one helper NF per DPDK SAP (id prefix "dpdk").
    3. Reroute dpdk->dpdk SG hops through the helper NFs.
    4. Split every non-DPDK NF into per-port helper NFs (-in/-out or -inout)
       and per-CPU-core NF copies (-coreN), rewiring the affected SG hops and
       duplicating the affected end-to-end requirement links accordingly.

    :param nffg: service request graph (modified in-place)
    :type nffg: :any:`NFFG`
    :return: the preprocessed NFFG
    :rtype: :any:`NFFG`
    """
    try:
      # if there is at least ONE SGHop in the graph, we don't do SGHop retrieval.
      next(nffg.sg_hops)
    except StopIteration:
      # retrieve the SGHops from the TAG values of the flow rules, in case they
      # cannot be found in the request graph and can only be deduced from the
      # flows
      log.warning("No SGHops were given in the Service Graph, retrieving them"
                      " based on the flowrules...")
      sg_hops_given = False
      sg_hop_info = NFFGToolBox.retrieve_all_SGHops(nffg)
      if len(sg_hop_info) == 0:
        raise uet.BadInputException("If SGHops are not given, flowrules should be"
                                    " in the NFFG",
                                    "No SGHop could be retrieved based on the "
                                    "flowrules of the NFFG.")
      for k, v in sg_hop_info.iteritems():
        # VNF ports are given to the function
        nffg.add_sglink(v[0], v[1], flowclass=v[2], bandwidth=v[3], delay=v[4],
                           id=k[2])

    # CPU cost charged for the generated helper NFs (currently free)
    DPDK_COST=0
    VHOST_COST=0
    KNI_COST=0

    # Create one helper NF for every DPDK SAP (SAP id prefix "dpdk")
    for sap in nffg.saps:
      if sap.id.startswith("dpdk"):
        dpdk=sap.id.split('-')[0]
        dpdk_nf=nffg.add_nf(id=dpdk)
        dpdk_nf.resources.cpu=DPDK_COST
        dpdk_nf.resources.mem=0
        dpdk_nf.resources.storage=0
        dpdk_nf.functional_type=dpdk

    # Reroute every dpdk->dpdk SG hop through the two helper NFs created above
    # (iterate over a snapshot since the hop set is modified inside the loop)
    hops= [hop for hop in nffg.sg_hops]
    for hop in hops:
      if hop.src.node.id.startswith("dpdk") and hop.dst.node.id.startswith("dpdk"):
        dpdk_in_id=hop.src.node.id.split('-')[0]
        dpdk_out_id=hop.dst.node.id.split('-')[0]
        dpdk_in=nffg.network.node[dpdk_in_id]
        dpdk_out=nffg.network.node[dpdk_out_id]
        link_in=nffg.add_sglink(hop.src,dpdk_in.add_port())
        link_out=nffg.add_sglink(dpdk_out.add_port(), hop.dst)
        # Replace the original hop with a copy running between the helper NFs
        link=hop.copy()
        link.src=dpdk_in.add_port()
        link.dst=dpdk_out.add_port()
        nffg.del_edge(hop.src,hop.dst,hop.id)
        nffg.add_sglink(link.src,link.dst,hop=link)

        # Extend the affected end-to-end requirement paths with the new links
        for req in nffg.reqs:
          if hop.id in req.sg_path:
            req.sg_path.insert(0,link_in.id)
            req.sg_path.insert(len(req.sg_path),link_out.id)


    nfs = [nf for nf in nffg.nfs if not nf.functional_type.startswith("dpdk")]
    for nf in nfs:

      # NOTE(review): if functional_type ends with neither "KNI" nor "VHOST",
      # NF_TYPE/PORT_RES keep their values from a previous iteration (or are
      # unbound on the first one) -- confirm every non-dpdk NF matches one of
      # the two suffixes.
      if nf.functional_type.endswith("KNI"):
        NF_TYPE="KNI"
        PORT_RES=KNI_COST
      elif nf.functional_type.endswith("VHOST"):
        NF_TYPE="VHOST"
        PORT_RES=VHOST_COST

      # Split the NF into separate ingress/egress helper NFs if it has
      # multiple ports, or a single combined "-inout" helper otherwise
      if len(nf.ports) > 1:
        in_nf = nf.copy()
        in_nf.ports.clear()
        in_nf.resources.cpu=PORT_RES
        in_nf.resources.mem=0
        in_nf.id=nf.id + "-in"
        in_nf.functional_type=NF_TYPE
        nffg.add_nf(nf=in_nf)
        out_nf = nf.copy()
        out_nf.ports.clear()
        out_nf.resources.cpu=PORT_RES
        out_nf.resources.mem=0
        out_nf.functional_type=NF_TYPE
        out_nf.id=nf.id + "-out"
        nffg.add_nf(nf=out_nf)
      else:
        in_nf = nf.copy()
        in_nf.ports.clear()
        in_nf.resources.cpu=PORT_RES
        in_nf.resources.mem=0
        in_nf.id=nf.id + "-inout"
        in_nf.functional_type=NF_TYPE
        nffg.add_nf(nf=in_nf)
        out_nf=in_nf

      # Hop ids entering/leaving the original NF (used to patch req paths)
      in_tag=None
      out_tag=None

      # SG links toward/from DPDK helper NFs, if any are created below
      dpdk_in=None
      dpdk_out=None

      # Rewire every hop touching the original NF to the helper NFs
      # (snapshot again: hops are added/deleted inside the loop)
      hops= [hop for hop in nffg.sg_hops]
      for hop in hops:
        if hop.dst.node.id == nf.id:
          in_tag=hop.id
          # Reuse the helper NF port with the same id if it already exists
          try:
            in_port = nffg.network.node[in_nf.id].ports[hop.dst.id]
          except KeyError:
            in_port = nffg.network.node[in_nf.id].add_port(id=hop.dst.id)
          old_hop=hop.copy()
          old_hop.dst=in_port
          nffg.del_edge(hop.src,hop.dst,hop.id)
          if hop.src.node.id.startswith("dpdk"):
            # Route the hop through the DPDK helper NF as well
            dpdk_nf_id=hop.src.node.id.split('-')[0]
            dpdk_nf = nffg.network.node[dpdk_nf_id]
            port1=dpdk_nf.add_port()
            old_hop.src=port1
            nffg.add_sglink(port1,in_port,hop=old_hop)
            port2=dpdk_nf.add_port()
            dpdk_in=nffg.add_sglink(hop.src,port2)
          else:
            nffg.add_sglink(hop.src,in_port,hop=old_hop)

        if hop.src.node.id == nf.id:
          out_tag=hop.id
          try:
            out_port = nffg.network.node[out_nf.id].ports[hop.src.id]
          except KeyError:
            out_port = nffg.network.node[out_nf.id].add_port(id=hop.src.id)
          old_hop=hop.copy()
          old_hop.src=out_port
          nffg.del_edge(hop.src,hop.dst,hop.id)
          if hop.dst.node.id.startswith("dpdk"):
            dpdk_nf_id= hop.dst.node.id.split('-')[0]
            dpdk_nf = nffg.network.node[dpdk_nf_id]
            port1=dpdk_nf.add_port()
            old_hop.dst=port1
            nffg.add_sglink(out_port,port1,hop=old_hop)
            port2=dpdk_nf.add_port()
            dpdk_out=nffg.add_sglink(port2,hop.dst)
          else:
            nffg.add_sglink(out_port,hop.dst,hop=old_hop)

      # Internal ports connecting the helper NFs to the per-core NF copies
      vport_in=in_nf.add_port()
      vport_out=out_nf.add_port()

      # Collect the requirement links whose path traverses this NF
      # (i.e. contains the in-hop immediately followed by the out-hop)
      aff_reqs=[]
      prev=None

      for req in nffg.reqs:
        for elem in req.sg_path:
          if prev == in_tag and elem == out_tag:
            aff_reqs.append(req)
          prev=elem

      # Replicate the NF once per requested CPU core (at least once),
      # distributing cpu/mem resources evenly across the copies
      cpu_req=int(nf.resources.cpu)
      if cpu_req==0:
        num_cpu=1
      else:
        num_cpu=cpu_req

      for i in range(num_cpu):
        new_nf=nf.copy()
        new_nf.ports.clear()
        new_nf.resources.cpu=cpu_req / num_cpu
        new_nf.resources.mem=nf.resources.mem / num_cpu
        new_nf.id=nf.id + "-core" + str(i)
        nffg.add_nf(nf=new_nf)
        new_port1=new_nf.add_port()
        sg1=nffg.add_sglink(vport_in,new_port1)
        new_port2=new_nf.add_port()
        sg2=nffg.add_sglink(new_port2,vport_out)
        # Duplicate every affected requirement for this core's path variant
        for req in aff_reqs:
          new_req=req.copy()
          new_req.regenerate_id()
          poz=new_req.sg_path.index(in_tag)
          new_req.sg_path.insert(poz+1, sg1.id)
          new_req.sg_path.insert(poz+2, sg2.id)
          if dpdk_in is not None:
            new_req.sg_path.insert(0, dpdk_in.id)
          if dpdk_out is not None:
            new_req.sg_path.insert(len(new_req.sg_path), dpdk_out.id)
          nffg.add_req(req.src,req.dst,req=new_req)

      # Remove the original NF and the superseded requirement links
      nffg.del_node(nf.id)
      for req in aff_reqs:
        nffg.del_edge(req.src,req.dst,req.id)

    # Debug dump of the transformed graph (Python 2 print statement)
    print nffg.dump()
    return nffg

  def instantiate_nffg (self, nffg):
    """
    Main API function for NF-FG instantiation.

    :param nffg: NFFG instance
    :type nffg: :any:`NFFG`
    :return: mapped NFFG instance
    :rtype: :any:`NFFG`
    """

    log.debug("Invoke %s to instantiate given NF-FG" % self.__class__.__name__)
    # Store newly created NF-FG
    self.nffgManager.save(nffg)
    # Get Domain Virtualizer to acquire global domain view
    global_view = self.virtualizerManager.dov
    # Notify remote visualizer about resource view of this layer if it's needed
    # NOTE(review): global_view is dereferenced here BEFORE the None check
    # below -- if dov is not acquired this raises AttributeError on None.
    notify_remote_visualizer(data=global_view.get_resource_info(),
                             id=LAYER_NAME)
    # Log verbose mapping request
    log.log(VERBOSE, "Orchestration Layer request graph:\n%s" % nffg.dump())
    # Start Orchestrator layer mapping
    # Debug dump of the request (Python 2 print statement)
    print nffg.dump()
    if global_view is not None:
      if isinstance(global_view, AbstractVirtualizer):
        # If the request is a bare NFFG, it is probably an empty topo for domain
        # deletion --> skip mapping to avoid BadInputException and forward
        # topo to adaptation layer
        if nffg.is_bare():
          log.warning("No valid service request (VNFs/Flowrules/SGhops) has "
                      "been detected in SG request! Skip orchestration in "
                      "layer: %s and proceed with the bare %s..." %
                      (LAYER_NAME, nffg))
          if nffg.is_virtualized():
            if nffg.is_SBB():
              log.debug("Request is a bare SingleBiSBiS representation!")
            else:
              # NOTE(review): "nut fully" in the message below is a typo for
              # "not fully" (runtime string -- left unchanged here)
              log.warning(
                "Detected virtualized representation with multiple BiSBiS "
                "nodes! Currently this type of virtualization is nut fully"
                "supported!")
          else:
            log.debug("Detected full view representation!")
          # Return with the original request
          return nffg
        else:
          log.info("Request check: detected valid content!")
        try:
          # Run Nf-FG mapping orchestration
          # Millisecond timestamps around preprocessing/mapping for profiling
          log.debug("Starting request preprocession...")
          log.info(int(round(time.time() * 1000)))
          self.preprocess_nffg(nffg)
          log.debug("Preprocession ended, start mapping")
          log.info(int(round(time.time() * 1000)))
          mapped_nffg = self.mapper.orchestrate(nffg, global_view)
          log.debug("NF-FG instantiation is finished by %s" %
                    self.__class__.__name__)
          log.info(int(round(time.time() * 1000)))
          return mapped_nffg
        except ProcessorError as e:
          # Falls through to the error log below; returns None implicitly
          log.warning("Mapping pre/post processing was unsuccessful! "
                      "Cause: %s" % e)
      else:
        log.warning("Global view is not subclass of AbstractVirtualizer!")
    else:
      log.warning("Global view is not acquired correctly!")
    log.error("Abort orchestration process!")
Ejemplo n.º 3
0
def testNFIB ():
  NFIB = NFIBManager()

  # start clean - all the existing info is removed from the DB
  NFIB.removeGraphDB()

  # add new high-level NF to the DB, all the information related to the NF
  # should be given as a dict

  NFIB.addNode({'label': 'NF', 'node_id': 'forwarder', 'type': 'NA'})
  NFIB.addNode({'label': 'NF', 'node_id': 'compressor', 'type': 'NA'})
  NFIB.addNode({'label': 'NF', 'node_id': 'decompressor', 'type': 'NA'})

  print "high-level  NFs were added to the DB"

  # generate a  decomposition for a high-level forwarder NF (in form of
  # networkx)
  G1 = networkx.DiGraph()
  G1.add_path(['SAP1', 'simpleForwarder', 'SAP2'])

  # create node properties
  for n in G1.nodes():
    properties = {}
    properties['node_id'] = n

    if 'SAP' in n:
      properties['label'] = 'SAP'
      properties['type'] = 'NA'
    else:
      properties['label'] = 'NF'
      properties['type'] = 'click'
      properties['cpu'] = 10
      properties['mem'] = 100
      properties['storage'] = 100
    G1.node[n]['properties'] = properties

  # create edge properties
  properties = {}
  properties['BW'] = 100
  properties['src_port'] = 1
  properties['dst_port'] = 1
  G1.edge['SAP1']['simpleForwarder']['properties'] = properties

  properties1 = {}
  properties1['BW'] = 100
  properties1['src_port'] = 2
  properties1['dst_port'] = 2
  G1.edge['simpleForwarder']['SAP2']['properties'] = properties1

  # generate a decomposition for a high-level compressor NF (in form of
  # networkx)
  G2 = networkx.DiGraph()
  G2.add_path(['SAP3', 'headerCompressor', 'SAP4'])

  # create node properties
  for n in G2.nodes():
    properties = {}
    properties['node_id'] = n
    if 'SAP' in n:
      properties['label'] = 'SAP'
      properties['type'] = 'NA'
    else:
      properties['label'] = 'NF'
      properties['type'] = 'click'
      properties['cpu'] = 20
      properties['mem'] = 200
      properties['storage'] = 200
    G2.node[n]['properties'] = properties

  # create edge properties 
  properties3 = {}
  properties3['BW'] = 200
  properties3['src_port'] = 1
  properties3['dst_port'] = 1
  G2.edge['SAP3']['headerCompressor']['properties'] = properties3

  properties4 = {}
  properties4['BW'] = 200
  properties4['src_port'] = 2
  properties4['dst_port'] = 2
  G2.edge['headerCompressor']['SAP4']['properties'] = properties4

  # generate a decomposition for a high-level decompressor NF (in form of
  # networkx)
  G3 = networkx.DiGraph()
  G3.add_path(['SAP5', 'headerDecompressor', 'SAP6'])

  # create node properties
  for n in G3.nodes():
    properties = {}
    properties['node_id'] = n
    if 'SAP' in n:
      properties['label'] = 'SAP'
      properties['type'] = 'NA'
    else:
      properties['label'] = 'NF'
      properties['type'] = 'click'
      properties['cpu'] = 30
      properties['mem'] = 300
      properties['storage'] = 300
    G3.node[n]['properties'] = properties

  # create edge properties
  properties5 = {}
  properties5['BW'] = 300
  properties5['src_port'] = 1
  properties5['dst_port'] = 1
  G3.edge['SAP5']['headerDecompressor']['properties'] = properties5

  properties6 = {}
  properties6['BW'] = 300
  properties6['src_port'] = 2
  properties6['dst_port'] = 2
  G3.edge['headerDecompressor']['SAP6']['properties'] = properties6

  # required elementary NFs should be added first to the DB
  NFIB.addClickNF({'label': 'NF', 'node_id': 'Queue', 'type:': 'click'})
  NFIB.addClickNF({'label': 'NF', 'node_id': 'Classifier', 'type': 'click'})
  NFIB.addClickNF({'label': 'NF', 'node_id': 'Counter', 'type': 'click'})
  NFIB.addClickNF({'label': 'NF', 'node_id': 'RFC2507Comp', 'type': 'click'})
  NFIB.addClickNF({'label': 'NF', 'node_id': 'RFC2507Decomp', 'type': 'click'})

  # the NF decompositions are added to the DB
  NFIB.addDecomp('forwarder', 'G1', G1)
  NFIB.addDecomp('compressor', 'G2', G2)
  NFIB.addDecomp('decompressor', 'G3', G3)

  print "NF decompositions were added to the DB"

  # create an NFFG with high-level NFs
  nffg = NFFG(id="iMinds-001")
  infra = nffg.add_infra(id="node0", name="INFRA0")
  sap0 = nffg.add_sap(id="SG_SAP1")
  sap1 = nffg.add_sap(id="SG_SAP2")
  nf1 = nffg.add_nf(id="compressor")
  nf2 = nffg.add_nf(id="forwarder")
  nf3 = nffg.add_nf(id="decompressor")
  nffg.add_link(sap0.add_port(1), infra.add_port(0), id="infra_in")
  nffg.add_link(sap1.add_port(1), infra.add_port(1), id="infra_out")
  nffg.add_link(infra.add_port(2), nf1.add_port(1), id="nf1_in", dynamic=True)
  nffg.add_link(nf1.add_port(2), infra.add_port(3), id="nf1_out", dynamic=True)
  nffg.add_link(infra.add_port(4), nf2.add_port(1), id="nf2_in", dynamic=True)
  nffg.add_link(nf2.add_port(2), infra.add_port(5), id="nf2_out", dynamic=True)
  nffg.add_link(infra.add_port(6), nf3.add_port(1), id="nf3_in", dynamic=True)
  nffg.add_link(nf3.add_port(2), infra.add_port(7), id="nf3_out", dynamic=True)

  nffg.add_sglink(sap0.ports[1], nf1.ports[1], id="hop1")
  nffg.add_sglink(nf1.ports[2], nf2.ports[1], id="hop2")
  nffg.add_sglink(nf2.ports[2], nf3.ports[1], id="hop3")
  nffg.add_sglink(nf3.ports[2], sap1.ports[1], id="hop4")
  nffg.add_sglink(sap1.ports[1], sap0.ports[1], id="hop_back")

  nffg.add_req(sap0.ports[1], sap1.ports[1], id="req", delay=10, bandwidth=100)

  # retrieve all possible decompositions for the generated nffg (a dict of nffg)
  decomps = NFIB.getDecomps(nffg)
  print "All possible decompositions were retrieved form the DB"

  for n in decomps['D0'].nfs:
    print NFIB.getNF(n.id)
Ejemplo n.º 4
0
class ResourceOrchestrator(AbstractOrchestrator):
    """
  Main class for the handling of the ROS-level mapping functions.
  """
    # Default Mapper class as a fallback mapper
    DEFAULT_MAPPER = ResourceOrchestrationMapper
    """Default Mapper class as a fallback mapper"""
    def __init__(self, layer_API):
        """
        Initialize main Resource Orchestration Layer components.

        :param layer_API: layer API instance
        :type layer_API: :any:`ResourceOrchestrationAPI`
        :return: None
        """
        super(ResourceOrchestrator, self).__init__(layer_API=layer_API)
        log.debug("Init %s" % self.__class__.__name__)
        # Manager storing the incoming NF-FG requests
        self.nffgManager = NFFGManager()
        # Init virtualizer manager
        # Listeners must be weak references so the layer API can be garbage
        # collected
        self.virtualizerManager = VirtualizerManager()
        self.virtualizerManager.addListeners(layer_API, weak=True)
        # Init NFIB manager
        # Keep a reference to the manager instance itself instead of the return
        # value of initialize(): if initialize() does not return self, the
        # original one-liner left self.nfibManager as None and the later
        # self.nfibManager.finalize() call in finalize() would fail.
        self.nfibManager = NFIBManager()
        self.nfibManager.initialize()

    def finalize(self):
        """
    Finalize the Resource Orchestration Layer components.

    :return: None
    """
        # Shut down the NFIB manager (the only component needing teardown here)
        # NOTE(review): relies on self.nfibManager holding the manager
        # instance -- verify that __init__ assigns the manager itself and not
        # the return value of initialize().
        self.nfibManager.finalize()

    def instantiate_nffg(self, nffg, continued_request_id=False):
        """
    Main API function for NF-FG instantiation.

    :param nffg: NFFG instance
    :type nffg: :class:`NFFG`
    :param continued_request_id: remap the stored request with this ID instead
                                 of the given ``nffg`` (default: False)
    :type continued_request_id: str or bool
    :return: mapped NFFG instance
    :rtype: :class:`NFFG`
    """
        log.debug("Invoke %s to instantiate given NF-FG" %
                  self.__class__.__name__)
        if not continued_request_id:
            # Store newly created NF-FG
            self.nffgManager.save(nffg)
        else:
            # Use the original NFFG requested for getting the original request
            nffg = self.nffgManager.get(nffg_id=continued_request_id)
            log.info("Using original request for remapping: %s" % nffg)
        # Get Domain Virtualizer to acquire global domain view
        global_view = self.virtualizerManager.dov
        # Notify remote visualizer about resource view of this layer if it's needed
        # notify_remote_visualizer(data=global_view.get_resource_info(),
        #                          id=LAYER_NAME)
        # Log verbose mapping request
        log.log(VERBOSE,
                "Orchestration Layer request graph:\n%s" % nffg.dump())
        # Start Orchestrator layer mapping
        if global_view is not None:
            # If the request is a bare NFFG, it is probably an empty topo for domain
            # deletion --> skip mapping to avoid BadInputException and forward
            # topo to adaptation layer
            # The bare-NFFG shortcut only applies to fresh requests, not to
            # continued (re-mapped) ones
            if not continued_request_id:
                if nffg.is_bare():
                    log.warning(
                        "No valid service request (VNFs/Flowrules/SGhops) has "
                        "been detected in SG request! Skip orchestration in "
                        "layer: %s and proceed with the bare %s..." %
                        (LAYER_NAME, nffg))
                    if nffg.is_virtualized():
                        if nffg.is_SBB():
                            log.debug(
                                "Request is a bare SingleBiSBiS representation!"
                            )
                        else:
                            # NOTE(review): "nut fully" below is a typo for
                            # "not fully" (runtime string -- left unchanged)
                            log.warning(
                                "Detected virtualized representation with multiple BiSBiS "
                                "nodes! Currently this type of virtualization is nut fully"
                                "supported!")
                    else:
                        log.debug("Detected full view representation!")
                    # Return with the original request
                    return nffg
                else:
                    log.info("Request check: detected valid NFFG content!")
            try:
                # Run NF-FG mapping orchestration
                mapped_nffg = self.mapper.orchestrate(
                    input_graph=nffg,
                    resource_view=global_view,
                    continued=bool(continued_request_id))
                log.debug("NF-FG instantiation is finished by %s" %
                          self.__class__.__name__)
                return mapped_nffg
            except ProcessorError as e:
                log.warning("Mapping pre/post processing was unsuccessful! "
                            "Cause: %s" % e)
                # Propagate the ProcessError to API layer
                raise
        else:
            log.warning("Global view is not acquired correctly!")
        # Reached only when no global view is available; returns None
        log.error("Abort orchestration process!")

    def collect_mapping_info(self, service_id):
        """
    Collect mapping information of the service given by its request ID.

    :param service_id: service request ID
    :type service_id: str
    :return: mapping info (or an error string if the request is unknown)
    :rtype: dict
    """
        # Look up the stored service request belonging to the given ID
        service_request = self.nffgManager.get(service_id)
        if service_request is None:
            log.error("Service request(id: %s) is not found!" % service_id)
            return "Service request is not found!"
        # Acquire the overall view a.k.a. DoV
        global_topo = self.virtualizerManager.dov.get_resource_info()
        # IDs of the NFs contained in the stored request
        nf_ids = [nf.id for nf in service_request.nfs]
        log.debug("Collected NFs: %s" % nf_ids)
        return self.__collect_binding(dov=global_topo, nfs=nf_ids)

    @staticmethod
    def __collect_binding(dov, nfs):
        """
    Collect mapping of given NFs on the global view(DoV) with the structure:

    .. code-block:: json

      [
        {
          "bisbis": {
            "domain": null,
            "id": "EE2"
          },
          "nf": {
            "id": "fwd",
            "ports": [
              {
                "id": 1,
                "management": {
                  "22/tcp": [
                    "0.0.0.0",
                    20000
                  ]
                }
              }
            ]
          }
        }
      ]

    :param dov: global topology
    :type dov: :class:`NFFG`
    :param nfs: list of NFs
    :type nfs: list
    :return: mapping
    :rtype: list of dict
    """
        mappings = []
        # Process NFs
        for nf_id in nfs:
            mapping = {}
            # Get the connected infra node
            if nf_id not in dov:
                log.warning(
                    "NF: %s is not found in the global topology(DoV)!" % nf_id)
                continue
            bisbis = [n.id for n in dov.infra_neighbors(nf_id)]
            # Fixed: the format string was missing the "%s" placeholder, so the
            # %-formatting raised TypeError ("not all arguments converted")
            log.log(VERBOSE, "Detected mapped BiSBiS node: %s" % bisbis)
            # An NF must be mapped to exactly one BiSBiS node
            if len(bisbis) != 1:
                log.warning("Detected unexpected number of BiSBiS node: %s!" %
                            bisbis)
                continue
            bisbis = bisbis.pop()
            # Add NF id
            nf = {"id": nf_id, "ports": []}
            for dyn_link in dov.network[nf_id][bisbis].itervalues():
                port = OrderedDict(id=dyn_link.src.id)
                if dyn_link.src.l4 is not None:
                    try:
                        # L4 info is stored as the string repr of a Python
                        # structure -- parse it back safely
                        port['management'] = ast.literal_eval(dyn_link.src.l4)
                    except SyntaxError:
                        log.warning(
                            "L4 address entry: %s is not valid Python expression! "
                            "Add the original string..." % dyn_link.src.l4)
                        port['management'] = dyn_link.src.l4
                nf['ports'].append(port)
            mapping['nf'] = nf
            # Add infra node ID and domain name (id format: "<id>@<domain>")
            bisbis = bisbis.split('@')
            bb_mapping = {
                "id": bisbis[0],
                "domain": bisbis[1] if len(bisbis) > 1 else None
            }
            mapping['bisbis'] = bb_mapping
            mappings.append(mapping)
        return mappings

    def collect_mappings(self, mappings, slor_topo):
        """
    Resolve the requested NF mappings against the global view (DoV).

    :param mappings: mapping request structure (presumably a Virtualizer-based
                     object with iterable mapping entries -- confirm)
    :param slor_topo: topology view of the Sl-Or interface
    :return: copy of ``mappings`` with target object/domain values filled in
    """
        # Acquire the overall view a.k.a. DoV
        dov = self.virtualizerManager.dov.get_resource_info()
        # Work on a full copy so the original request stays untouched
        response = mappings.full_copy()
        log.debug("Start checking mappings...")
        for mapping in response:
            # Detect the BiSBiS node and NF id from the requested object path
            bb, nf = detect_bb_nf_from_path(path=mapping.object.get_value(),
                                            topo=slor_topo)
            if not nf:
                mapping.target.object.set_value("NOT_FOUND")
                mapping.target.domain.set_value("N/A")
                continue
            # Resolve the binding of this single NF on the global view
            m_result = self.__collect_binding(dov=dov, nfs=[nf])
            if not m_result:
                log.warning("Mapping is not found for NF: %s!" % nf)
                mapping.target.object.set_value("NOT_FOUND")
                mapping.target.domain.set_value("N/A")
                continue
            # Extract node id / domain defensively from the binding result
            try:
                node = m_result[0]['bisbis']['id']
            except KeyError:
                log.warning("Missing mapping node element from: %s" % m_result)
                node = "NOT_FOUND"
            try:
                domain = m_result[0]['bisbis']['domain']
            except KeyError:
                log.warning("Missing mapping domain element from: %s" %
                            m_result)
                domain = "N/A"
            log.debug("Found mapping: %s@%s (domain: %s)" % (nf, node, domain))
            mapping.target.object.set_value(NF_PATH_TEMPLATE % (node, nf))
            mapping.target.domain.set_value(domain)
        return response

    def filter_info_request(self, info, slor_topo):
        """
    Filter out info request elements referring to NFs that are not present in
    this layer's topology view.

    :param info: info request structure (presumably a Virtualizer-based
                 object -- confirm)
    :param slor_topo: topology view of the Sl-Or interface
    :return: filtered copy of the info request
    """
        log.debug("Filter info request based on layer view: %s..." %
                  slor_topo.id)
        # Work on a full copy so the original request stays untouched
        info = info.full_copy()
        for attr in (getattr(info, e) for e in info._sorted_children):
            deletable = []
            for element in attr:
                if hasattr(element, "object"):
                    bb, nf = detect_bb_nf_from_path(element.object.get_value(),
                                                    slor_topo)
                    if not nf:
                        log.debug("Remove element: %s from request..." %
                                  element._tag)
                        deletable.append(element)
            # Delete after iteration to avoid mutating the container while
            # looping over it
            for d in deletable:
                attr.remove(d)
        return info