def _read_ovs_bridge_mappings():
    """Read the 'bridge_mappings' property from openvswitch_agent.ini

    This is done for Redhat environments, to allow an improved
    learning/programming of interface groups based on ports and the network
    to which the ports belong to.

    :return: bridge_mappings dictionary {'physnet_name': 'bridge_name', ...}
                {} empty dictionary when not found
    """
    mapping = {}
    mapping_str = None
    # read openvswitch_agent.ini for bridge_mapping info
    if not os.path.isfile(OVS_AGENT_INI_FILEPATH):
        # if openvswitch_agent.ini doesn't exist, return empty mapping
        LOG.warning(
            _LW("Unable to read OVS bridge_mappings, "
                "openvswitch_agent.ini file not present."))
        return mapping

    with open(OVS_AGENT_INI_FILEPATH) as f:
        for line in f:
            if ('#' not in line
                    and ('=' in line and 'bridge_mappings' in line)):
                # typical config line looks like the following:
                # bridge_mappings = datacentre:br-ex,dpdk:br-link
                key, value = line.split('=', 1)
                mapping_str = value.strip()

    # parse comma separated physnet list into individual mappings
    if not mapping_str:
        # if file did not have bridge_mappings, return empty mapping
    LOG.warning(
        _LW("Unable to read OVS bridge_mappings, either the line is "
            "commented or not present in openvswitch_agent.ini."))
        return mapping

    phy_map_list = mapping_str.split(',')
    for phy_map in phy_map_list:
        phy, bridge = phy_map.split(':')
        mapping[phy.strip()] = bridge.strip()

    LOG.info(_LI("OVS bridge_mappings are: %(br_map)s"), {'br_map': mapping})
    return mapping
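
# A minimal sketch (not part of the module above): the parsing that
# _read_ovs_bridge_mappings() performs on a typical openvswitch_agent.ini
# value; the physnet and bridge names here are made up for illustration.
_sample_value = "datacentre:br-ex,dpdk:br-link"
_sample_mapping = {}
for _phy_map in _sample_value.split(','):
    _phy, _bridge = _phy_map.split(':')
    _sample_mapping[_phy.strip()] = _bridge.strip()
# expected result: {'datacentre': 'br-ex', 'dpdk': 'br-link'}
assert _sample_mapping == {'datacentre': 'br-ex', 'dpdk': 'br-link'}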
    def bsn_delete_sg_rule(self, sg_rule, context):
        LOG.debug("Deleting security group rule from BCF: %s", sg_rule)
        if not context:
            LOG.error(
                _LE("Context missing when trying to delete security group "
                    "rule. Please force-bcf-sync to ensure consistency "
                    "with BCF."))
        sg_id = sg_rule['security_group_id']
        # we overwrite the security group on the BCF controller instead of
        # deleting the individual rule
        try:
            self.bsn_create_security_group(sg_id, context=context)
        except ext_sg.SecurityGroupNotFound:
            # the DB query raises SecurityGroupNotFound while the security
            # group itself is being deleted; the delete_security_group_rule
            # callback would otherwise try to update BCF with the new set
            # of rules.
            LOG.warning(
                _LW("Security group with ID %(sg_id)s not found "
                    "when trying to update."), {'sg_id': sg_id})
    def bsn_delete_sg_rule_callback(self, resource, event, trigger, **kwargs):
        context = kwargs.get('context')
        if context:
            LOG.debug("Callback delete sg_rule belongs to tenant: %s",
                      context.tenant_id)
            sgs = self.get_security_groups(context, filters={}) or []
            for sg in sgs:
                if sg.get('tenant_id') != context.tenant_id:
                    continue
                sg_id = sg.get('id')
                LOG.debug("Callback delete rule in sg_id: %s", sg_id)
                # we overwrite the security group on the BCF controller
                # instead of deleting the individual rule
                try:
                    self.bsn_create_security_group(sg_id=sg_id,
                                                   context=context)
                except ext_sg.SecurityGroupNotFound:
                    # the DB query raises SecurityGroupNotFound while the
                    # security group itself is being deleted; the
                    # delete_security_group_rule callback would otherwise
                    # try to update BCF with the new set of rules.
                    LOG.warning(
                        _LW("Security group with ID %(sg_id)s not found "
                            "when trying to update."), {'sg_id': sg_id})
    def force_topo_sync(self, check_ts=True):
        """Execute a topology_sync between OSP and BCF.

        Topology sync collects all data from OpenStack and pushes it to BCF
        in a single REST call. This is a heavy operation and is not executed
        automatically.

        Conditions when this would be called:
        (1) during ServerPool initialization
            (a) must check previous timestamp
        (2) if periodic keystone tenant_cache finds a diff in tenant list
            (a) should not check previous timestamp
        (3) externally triggered by neutron force-bcf-sync command
            (a) should not check previous timestamp
        (4) a rest_call to BCF fails on both servers and failure_code is not
            part of the ignore_codes list
            (a) must check previous timestamp

        :param check_ts: boolean flag to check previous
                         timestamp < TOPO_SYNC_EXPIRED_SECS
        :return: (sync_executed, response)
                 sync_executed - Boolean - returns True if we were able to
                                acquire a lock to perform topo_sync,
                                False otherwise
                 response - tuple of the typical HTTP response from REST call
                        (response.status, response.reason, respstr, respdata)
        """
        LOG.info(_LI('TOPO_SYNC requested with check_ts %s'), check_ts)

        if not self.get_topo_function:
            raise cfg.Error(_('Server requires synchronization, '
                              'but no topology function was defined.'))

        # get current timestamp
        curr_ts = str(time.time())
        hash_handler = cdb.HashHandler(timestamp_ms=curr_ts)

        if not hash_handler.lock(check_ts):
            LOG.info(_LI("TOPO_SYNC: lock() returned False. Skipping."))
            return False, TOPO_RESPONSE_OK

        # else, perform topo_sync
        try:
            LOG.debug("TOPO_SYNC: requested at %(request_ts)s started at "
                      "%(start_ts)s",
                      {'request_ts': cdb.convert_ts_to_datetime(curr_ts),
                       'start_ts': cdb.convert_ts_to_datetime(time.time())})
            data = self.get_topo_function(
                **self.get_topo_function_args)
            if not data:
                # when the keystone sync fails, it fails silently with
                # data = None; that is wrong, so raise an exception instead
                raise Exception(_("TOPO_SYNC: failed to retrieve data."))
            LOG.debug("TOPO_SYNC: data received from OSP, sending "
                      "request to BCF.")
            errstr = _("Unable to perform forced topology_sync: %s")
            return True, self.rest_action('POST', TOPOLOGY_PATH, data, errstr)
        except Exception as e:
            # if encountered an exception, set to previous timestamp
            LOG.warning(_LW("TOPO_SYNC: Exception during topology sync. "
                            "Consistency DB timestamp will not be updated."))
            hash_handler.unlock(set_prev_ts=True)
            raise e
        finally:
            hash_handler.unlock()
            diff = time.time() - float(hash_handler.lock_ts)
            LOG.info(_LI("TOPO_SYNC: took %s seconds to execute topo_sync. "
                         "consistency_db unlocked."),
                     str(diff))
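
    # A hedged sketch (not part of the original ServerPool): condition (4)
    # from the docstring above, where a rest_call that failed on all servers
    # and whose status is not in ignore_codes triggers a timestamp-checked
    # topo_sync. The helper name and the 'resp' layout
    # (status, reason, respstr, respdata) are assumptions for illustration.
    def _handle_failed_rest_call(self, resp, ignore_codes):
        status = resp[0]
        if status in ignore_codes:
            # the failure is expected; keep the original response
            return resp
        # check_ts=True so that repeated failures do not trigger
        # back-to-back heavy topo_sync operations within
        # TOPO_SYNC_EXPIRED_SECS
        synced, sync_resp = self.force_topo_sync(check_ts=True)
        return sync_resp if synced else resp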