Example #1
    def _parse_flow_classifier(self, flow_classifier):
        dl_type, nw_proto, source_port_masks, destination_port_masks = (
            (None, ) * 4)

        if (
            not flow_classifier['source_port_range_min'] and
            not flow_classifier['source_port_range_max']
        ):
            # wildcard
            source_port_masks = ['0/0x0']
        elif not flow_classifier['source_port_range_min']:
            source_port_masks = ovs_ext_lib.get_port_mask(
                1,
                flow_classifier['source_port_range_max'])
        elif not flow_classifier['source_port_range_max']:
            source_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['source_port_range_min'],
                65535)
        else:
            source_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['source_port_range_min'],
                flow_classifier['source_port_range_max'])

        if (
            not flow_classifier['destination_port_range_min'] and
            not flow_classifier['destination_port_range_max']
        ):
            # wildcard
            destination_port_masks = ['0/0x0']
        elif not flow_classifier['destination_port_range_min']:
            destination_port_masks = ovs_ext_lib.get_port_mask(
                1,
                flow_classifier['destination_port_range_max'])
        elif not flow_classifier['destination_port_range_max']:
            destination_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['destination_port_range_min'],
                65535)
        else:
            destination_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['destination_port_range_min'],
                flow_classifier['destination_port_range_max'])

        if "IPv4" == flow_classifier['ethertype']:
            dl_type = 0x0800
            if n_const.PROTO_NAME_TCP == flow_classifier['protocol']:
                nw_proto = n_const.PROTO_NUM_TCP
            elif n_const.PROTO_NAME_UDP == flow_classifier['protocol']:
                nw_proto = n_const.PROTO_NUM_UDP
            elif n_const.PROTO_NAME_ICMP == flow_classifier['protocol']:
                nw_proto = n_const.PROTO_NUM_ICMP
            else:
                nw_proto = None
        elif "IPv6" == flow_classifier['ethertype']:
            LOG.error(_LE("Current portchain agent don't support Ipv6"))
        else:
            LOG.error(_LE("invalid protocol input"))
        return (dl_type, nw_proto,
                source_port_masks, destination_port_masks
                )
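
A note on the helper used above: Example #1 builds its port matches with ovs_ext_lib.get_port_mask, which is not shown on this page. The sketch below is a hypothetical illustration of that kind of range-to-mask decomposition (it is not the actual ovs_ext_lib code): it covers an inclusive port range with power-of-two aligned value/mask entries, the form OVS accepts in tp_src/tp_dst matches, and it assumes 1 <= port_min <= port_max <= 65535, since the wildcard case is handled separately with '0/0x0'.

def get_port_mask(port_min, port_max):
    """Hypothetical sketch: cover [port_min, port_max] with value/mask pairs."""
    masks = []
    while port_min <= port_max:
        # Largest power-of-two block aligned at port_min...
        size = port_min & -port_min
        # ...that still fits inside the remaining range.
        while size > port_max - port_min + 1:
            size //= 2
        masks.append('%d/0x%x' % (port_min, 0xffff & ~(size - 1)))
        port_min += size
    return masks

For instance, get_port_mask(80, 80) yields ['80/0xffff'], while get_port_mask(1, 65535) yields sixteen entries that together cover every non-zero port.
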
Example #2
    def _parse_flow_classifier(self, flow_classifier):
        dl_type, nw_proto, source_port_masks, destination_port_masks = (
            (None, ) * 4)

        if (
            not flow_classifier['source_port_range_min'] and
            not flow_classifier['source_port_range_max']
        ):
            # wildcard
            source_port_masks = ['0/0x0']
        elif not flow_classifier['source_port_range_min']:
            source_port_masks = ovs_ext_lib.get_port_mask(
                1,
                flow_classifier['source_port_range_max'])
        elif not flow_classifier['source_port_range_max']:
            source_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['source_port_range_min'],
                65535)
        else:
            source_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['source_port_range_min'],
                flow_classifier['source_port_range_max'])

        if (
            not flow_classifier['destination_port_range_min'] and
            not flow_classifier['destination_port_range_max']
        ):
            # wildcard
            destination_port_masks = ['0/0x0']
        elif not flow_classifier['destination_port_range_min']:
            destination_port_masks = ovs_ext_lib.get_port_mask(
                1,
                flow_classifier['destination_port_range_max'])
        elif not flow_classifier['destination_port_range_max']:
            destination_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['destination_port_range_min'],
                65535)
        else:
            destination_port_masks = ovs_ext_lib.get_port_mask(
                flow_classifier['destination_port_range_min'],
                flow_classifier['destination_port_range_max'])

        if "IPv4" == flow_classifier['ethertype']:
            dl_type = 0x0800
            if n_consts.PROTO_NAME_TCP == flow_classifier['protocol']:
                nw_proto = n_consts.PROTO_NUM_TCP
            elif n_consts.PROTO_NAME_UDP == flow_classifier['protocol']:
                nw_proto = n_consts.PROTO_NUM_UDP
            elif n_consts.PROTO_NAME_ICMP == flow_classifier['protocol']:
                nw_proto = n_consts.PROTO_NUM_ICMP
            else:
                nw_proto = None
        elif "IPv6" == flow_classifier['ethertype']:
            LOG.error(_LE("Current portchain agent don't support Ipv6"))
        else:
            LOG.error(_LE("invalid protocol input"))
        return (dl_type, nw_proto,
                source_port_masks, destination_port_masks
                )
Example #3
def main():
    common_config.init(sys.argv[1:])
    common_config.setup_logging()
    q_utils.log_opt_values(LOG)
    bridge_classes = {
        'br_int': br_int.OVSIntegrationBridge,
        'br_phys': br_phys.OVSPhysicalBridge,
        'br_tun': br_tun.OVSTunnelBridge,
    }

    ovs_neutron_agent.prepare_xen_compute()
    ovs_neutron_agent.validate_tunnel_config(
        cfg.CONF.AGENT.tunnel_types,
        cfg.CONF.OVS.local_ip
    )

    try:
        agent = OVSSfcAgent(bridge_classes, cfg.CONF)
    except (RuntimeError, ValueError) as e:
        LOG.exception(e)
        LOG.error(_LE('Agent terminated!'))
        sys.exit(1)

    LOG.info(_LI("Agent initialized successfully, now running... "))
    agent.daemon_loop()
Example #4
    def get_flowrules_by_host_portid(self, context, host, port_id):
        port_chain_flowrules = []
        sfc_plugin = directory.get_plugin(sfc.SFC_EXT)
        if not sfc_plugin:
            return port_chain_flowrules
        try:
            port_detail_list = []
            # a port may appear as an ingress or an egress port only once at a time.
            ingress_port = self.get_port_detail_by_filter(
                dict(ingress=port_id))
            egress_port = self.get_port_detail_by_filter(
                dict(egress=port_id))
            if not ingress_port and not egress_port:
                return None
            # the SF may have migrated to another host
            if ingress_port:
                port_detail_list.append(ingress_port)
                if ingress_port['host_id'] != host:
                    ingress_port.update(dict(host_id=host))

            if egress_port:
                port_detail_list.append(egress_port)
                if egress_port['host_id'] != host:
                    egress_port.update(dict(host_id=host))

            # this is an SF if there are both egress and ingress ports.
            for i, ports in enumerate(port_detail_list):
                nodes_assocs = ports['path_nodes']
                for assoc in nodes_assocs:
                    # update current path flow rule
                    node = self.get_path_node(assoc['pathnode_id'])
                    port_chain = sfc_plugin.get_port_chain(
                        context,
                        node['portchain_id'])
                    flow_rule = self._build_portchain_flowrule_body(
                        node,
                        ports,
                        add_fc_ids=port_chain['flow_classifiers']
                    )
                    port_chain_flowrules.append(flow_rule)

                    # update the pre-path node flow rule
                    # if node['node_type'] != ovs_const.SRC_NODE:
                    #    node_filter = dict(nsp=node['nsp'],
                    #                       nsi=node['nsi'] + 1
                    #                       )
                    #    pre_node_list = self.get_path_nodes_by_filter(
                    #        node_filter)
                    #    if not pre_node_list:
                    #        continue
                    #    for pre_node in pre_node_list:
                    #        self._update_path_node_flowrules(
                    #            pre_node,
                    #            add_fc_ids=port_chain['flow_classifiers'])

            return port_chain_flowrules

        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("get_flowrules_by_host_portid failed"))
Example #5
 def update_flowrule_status(self, context, id, status):
     try:
         flowrule_status = dict(status=status)
         self.update_path_node(id, flowrule_status)
     except Exception as e:
         LOG.exception(e)
         LOG.error(_LE("update_flowrule_status failed"))
Example #6
    def update_port_pair_group(
        self, context, portpairgroup_id, port_pair_group
    ):
        with context.session.begin(subtransactions=True):
            original_portpairgroup = self.get_port_pair_group(
                context, portpairgroup_id)
            updated_portpairgroup = super(
                SfcPlugin, self).update_port_pair_group(
                context, portpairgroup_id, port_pair_group)
            portpairgroup_context = sfc_ctx.PortPairGroupContext(
                self, context, updated_portpairgroup,
                original_portpairgroup=original_portpairgroup)
            self.driver_manager.update_port_pair_group_precommit(
                portpairgroup_context)
        try:
            self.driver_manager.update_port_pair_group_postcommit(
                portpairgroup_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Update port pair group failed, "
                              "port_pair_group '%s'"),
                          updated_portpairgroup['id'])

        return updated_portpairgroup
Example #7
    def _call_drivers(self, method_name, context, raise_orig_exc=False):
        """Helper method for calling a method across all drivers.

        :param method_name: name of the method to call
        :param context: context parameter to pass to each method call
        :param raise_orig_exc: whether or not to raise the original
        driver exception, or use a general one
        """
        for driver in self.ordered_drivers:
            try:
                getattr(driver.obj, method_name)(context)
            except Exception as e:
                # This is an internal failure.
                LOG.exception(e)
                LOG.error(
                    _LE("Flow Classifier driver '%(name)s' "
                        "failed in %(method)s"),
                    {'name': driver.name, 'method': method_name}
                )
                if raise_orig_exc:
                    raise
                else:
                    raise fc_exc.FlowClassifierDriverError(
                        method=method_name
                    )
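
The _call_drivers helper above iterates self.ordered_drivers, which these snippets never show being built. As a minimal sketch only (assuming stevedore, which OpenStack driver managers commonly use; the namespace and driver name below are illustrative, not taken from these examples), the wiring might look like this:

from stevedore.named import NamedExtensionManager


class FlowClassifierDriverManager(object):
    """Sketch of a manager that fans calls out to loaded drivers."""

    def __init__(self, names=('ovs',)):
        # Each loaded extension exposes .name and .obj, which is exactly
        # what _call_drivers reads when dispatching.
        self._mgr = NamedExtensionManager(
            'networking_sfc.flowclassifier.drivers', names,
            invoke_on_load=True, name_order=True)
        self.ordered_drivers = list(self._mgr)

    def create_flow_classifier(self, context):
        # Fan the call out to every driver, as _call_drivers does above.
        for driver in self.ordered_drivers:
            getattr(driver.obj, 'create_flow_classifier')(context)
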
Example #8
    def get_all_src_node_flowrules(self, context):
        sfc_plugin = (
            manager.NeutronManager.get_service_plugins().get(
                sfc.SFC_EXT
            )
        )
        if not sfc_plugin:
            return []
        try:
            frs = []
            port_chains = sfc_plugin.get_port_chains(context)

            for port_chain in port_chains:
                # get the first node of this chain
                node_filters = dict(portchain_id=port_chain['id'], nsi=0xff)
                portchain_node = self.get_path_node_by_filter(node_filters)
                if not portchain_node:
                    continue
                flow_rule = self._get_portchain_src_node_flowrule(
                    portchain_node,
                    port_chain['flow_classifiers']
                )
                if not flow_rule:
                    continue
                frs.append(flow_rule)
            return frs
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("get_all_src_node_flowrules failed"))
Example #9
def main():
    common_config.init(sys.argv[1:])
    common_config.setup_logging()
    q_utils.log_opt_values(LOG)
    bridge_classes = {
        'br_int': br_int.OVSIntegrationBridge,
        'br_phys': br_phys.OVSPhysicalBridge,
        'br_tun': br_tun.OVSTunnelBridge,
    }

    ovs_neutron_agent.prepare_xen_compute()
    ovs_neutron_agent.validate_tunnel_config(
        cfg.CONF.AGENT.tunnel_types,
        cfg.CONF.OVS.local_ip
    )

    try:
        agent = OVSSfcAgent(bridge_classes, cfg.CONF)
    except (RuntimeError, ValueError) as e:
        LOG.exception(e)
        LOG.error(_LE('Agent terminated!'))
        sys.exit(1)

    LOG.info(_LI("Agent initialized successfully, now running... "))
    agent.daemon_loop()
Example #10
    def update_port_pair_group(self, context, portpairgroup_id,
                               port_pair_group):
        with context.session.begin(subtransactions=True):
            original_portpairgroup = self.get_port_pair_group(
                context, portpairgroup_id)
            updated_portpairgroup = super(SfcPlugin,
                                          self).update_port_pair_group(
                                              context, portpairgroup_id,
                                              port_pair_group)
            portpairgroup_context = sfc_ctx.PortPairGroupContext(
                self,
                context,
                updated_portpairgroup,
                original_portpairgroup=original_portpairgroup)
            self.driver_manager.update_port_pair_group_precommit(
                portpairgroup_context)
        try:
            self.driver_manager.update_port_pair_group_postcommit(
                portpairgroup_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Update port pair group failed, "
                        "port_pair_group '%s'"), updated_portpairgroup['id'])

        return updated_portpairgroup
Example #11
 def update_flowrule_status(self, context, id, status):
     try:
         flowrule_status = dict(status=status)
         self.update_path_node(id, flowrule_status)
     except Exception as e:
         LOG.exception(e)
         LOG.error(_LE("update_flowrule_status failed"))
Example #12
    def handle_port(self, context, port):
        """Handle agent SFC extension port add/update."""
        port_id = port['port_id']
        resync = False
        flowrule_status = []
        try:
            LOG.debug("a new device %s is found", port_id)
            flows_list = (
                self.sfc_plugin_rpc.get_flowrules_by_host_portid(
                    context, port_id
                )
            )
            if flows_list:
                for flow in flows_list:
                    self.sfc_driver.update_flow_rules(
                        flow, flowrule_status)
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("SFC L2 extension handle_port failed"))
            resync = True

        if flowrule_status:
            self.sfc_plugin_rpc.update_flowrules_status(
                context, flowrule_status)

        return resync
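
For reference, the flowrule_status entries that handle_port forwards to update_flowrules_status are plain dicts the driver fills in (see update_flow_rules further down this page); a representative, purely illustrative value is:

# Illustrative only: one entry per processed flow rule, carrying the rule id
# and either constants.STATUS_ACTIVE or constants.STATUS_ERROR.
flowrule_status = [
    {'id': '<flow-rule-uuid>', 'status': 'ACTIVE'},
]
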
Example #13
    def _get_portchain_src_node_flowrule(self,
                                         node,
                                         add_fc_ids=None,
                                         del_fc_ids=None):
        try:
            add_fc_rt = []
            del_fc_rt = []

            if add_fc_ids:
                for fc in self._get_fcs_by_ids(add_fc_ids):
                    if not fc.get('logical_source_port', None):
                        add_fc_rt.append(fc)

            if del_fc_ids:
                for fc in self._get_fcs_by_ids(del_fc_ids):
                    if not fc.get('logical_source_port', None):
                        del_fc_rt.append(fc)

            if not add_fc_rt and not del_fc_rt:
                return None

            return self._build_portchain_flowrule_body_without_port(
                node, add_fc_rt, del_fc_rt)
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("_get_portchain_src_node_flowrule failed"))
Example #14
    def _delete_flow_rule_with_mpls_enc(self, flowrule, flowrule_status):
        try:
            LOG.debug("_delete_flow_rule_with_mpls_enc, flowrule = %s",
                      flowrule)
            group_id = flowrule.get('next_group_id', None)

            # delete tunnel table flow rule on br-int(egress match)
            if flowrule['egress'] is not None:
                self._setup_local_switch_flows_on_int_br(
                    flowrule,
                    flowrule['del_fcs'],
                    None,
                    add_flow=False,
                    match_inport=True
                )

            # delete table INGRESS_TABLE ingress match flow rule
            # on br-int(ingress match)
            network_id = self._get_network_by_port(flowrule['ingress'])
            if network_id:
                # delete the br-int flow rule on table INGRESS_TABLE
                # for ingress traffic
                lvm = self.local_vlan_map[network_id]
                vif_port = lvm.vif_ports[flowrule['ingress']]
                self.int_br.delete_flows(
                    table=INGRESS_TABLE,
                    dl_type=0x8847,
                    dl_dst=vif_port.vif_mac,
                    mpls_label=flowrule['nsp'] << 8 | (flowrule['nsi'] + 1)
                )

            # delete group table, need to check again
            if group_id and flowrule.get('group_refcnt', None) <= 1:
                self.int_br.delete_group(group_id=group_id)
                for item in flowrule['next_hops']:
                    self.int_br.delete_flows(
                        table=ACROSS_SUBNET_TABLE,
                        dl_dst=item['mac_address'])
            elif (not group_id and
                  flowrule['egress'] is not None):
                # to delete last hop flow rule
                for each in flowrule['del_fcs']:
                    if each.get('logical_destination_port', None):
                        ldp = self._get_flow_classifier_dest_port_info(
                            each['logical_destination_port'],
                            flowrule
                        )
                        if ldp:
                            self.int_br.delete_flows(
                                table=ACROSS_SUBNET_TABLE,
                                dl_dst=ldp['mac_address'])

        except Exception as e:
            flowrule_status_temp = {}
            flowrule_status_temp['id'] = flowrule['id']
            flowrule_status_temp['status'] = constants.STATUS_ERROR
            flowrule_status.append(flowrule_status_temp)
            LOG.exception(e)
            LOG.error(_LE("_delete_flow_rule_with_mpls_enc failed"))
Example #15
    def get_flowrules_by_host_portid(self, context, host, port_id):
        port_chain_flowrules = []
        sfc_plugin = (manager.NeutronManager.get_service_plugins().get(
            sfc.SFC_EXT))
        if not sfc_plugin:
            return port_chain_flowrules
        try:
            port_detail_list = []
            # a port may appear as an ingress or an egress port only once at a time.
            ingress_port = self.get_port_detail_by_filter(
                dict(ingress=port_id))
            egress_port = self.get_port_detail_by_filter(dict(egress=port_id))
            if not ingress_port and not egress_port:
                return None
            # the SF may have migrated to another host
            if ingress_port:
                port_detail_list.append(ingress_port)
                if ingress_port['host_id'] != host:
                    ingress_port.update(dict(host_id=host))

            if egress_port:
                port_detail_list.append(egress_port)
                if egress_port['host_id'] != host:
                    egress_port.update(dict(host_id=host))

            # this is an SF if there are both egress and ingress ports.
            for i, ports in enumerate(port_detail_list):
                nodes_assocs = ports['path_nodes']
                for assoc in nodes_assocs:
                    # update current path flow rule
                    node = self.get_path_node(assoc['pathnode_id'])
                    port_chain = sfc_plugin.get_port_chain(
                        context, node['portchain_id'])
                    flow_rule = self._build_portchain_flowrule_body(
                        node, ports, add_fc_ids=port_chain['flow_classifiers'])
                    port_chain_flowrules.append(flow_rule)

                    # update the pre-path node flow rule
                    # if node['node_type'] != ovs_const.SRC_NODE:
                    #    node_filter = dict(nsp=node['nsp'],
                    #                       nsi=node['nsi'] + 1
                    #                       )
                    #    pre_node_list = self.get_path_nodes_by_filter(
                    #        node_filter)
                    #    if not pre_node_list:
                    #        continue
                    #    for pre_node in pre_node_list:
                    #        self._update_path_node_flowrules(
                    #            pre_node,
                    #            add_fc_ids=port_chain['flow_classifiers'])

            return port_chain_flowrules

        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("get_flowrules_by_host_portid failed"))
Example #16
    def _get_flow_infos_from_flow_classifier(self, flow_classifier, flowrule):
        flow_infos = []
        nw_src, nw_dst, tp_src, tp_dst = ((None, ) * 4)

        if "IPv4" != flow_classifier['ethertype']:
            LOG.error(_LE("Current portchain agent only supports IPv4"))
            return flow_infos

        # parse and transfer flow info to match field info
        dl_type, nw_proto, source_port_masks, destination_port_masks = (
            self._parse_flow_classifier(flow_classifier))

        if flowrule['fwd_path']:
            if flow_classifier['source_ip_prefix']:
                nw_src = flow_classifier['source_ip_prefix']
            else:
                nw_src = '0.0.0.0/0.0.0.0'
            if flow_classifier['destination_ip_prefix']:
                nw_dst = flow_classifier['destination_ip_prefix']
            else:
                nw_dst = '0.0.0.0/0.0.0.0'
        else:
            if flow_classifier['source_ip_prefix']:
                nw_src = flow_classifier['destination_ip_prefix']
            else:
                nw_src = '0.0.0.0/0.0.0.0'
            if flow_classifier['destination_ip_prefix']:
                nw_dst = flow_classifier['source_ip_prefix']
            else:
                nw_dst = '0.0.0.0/0.0.0.0'

        if source_port_masks and destination_port_masks:
            for destination_port in destination_port_masks:
                for source_port in source_port_masks:
                    if flowrule['fwd_path']:
                        tp_src = '%s' % source_port
                        tp_dst = '%s' % destination_port
                    else:
                        tp_dst = '%s' % source_port
                        tp_src = '%s' % destination_port
                    if nw_proto is None:
                        flow_infos.append({'dl_type': dl_type,
                                           'nw_src': nw_src,
                                           'nw_dst': nw_dst,
                                           'tp_src': tp_src,
                                           'tp_dst': tp_dst})
                    else:
                        flow_infos.append({'dl_type': dl_type,
                                           'nw_proto': nw_proto,
                                           'nw_src': nw_src,
                                           'nw_dst': nw_dst,
                                           'tp_src': tp_src,
                                           'tp_dst': tp_dst})

        return flow_infos
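
To make the nested loop over the masks concrete, here is a hypothetical flow classifier and the kind of match dict Example #16 would emit for it; the values are illustrative and the tp_src/tp_dst strings depend on what get_port_mask actually returns:

flow_classifier = {
    'ethertype': 'IPv4',
    'protocol': 'tcp',
    'source_ip_prefix': '10.0.0.0/24',
    'destination_ip_prefix': None,      # falls back to '0.0.0.0/0.0.0.0'
    'source_port_range_min': None,      # wildcard source -> ['0/0x0']
    'source_port_range_max': None,
    'destination_port_range_min': 80,
    'destination_port_range_max': 80,
}

# With flowrule['fwd_path'] True this would yield a single entry roughly like:
# {'dl_type': 0x0800, 'nw_proto': 6, 'nw_src': '10.0.0.0/24',
#  'nw_dst': '0.0.0.0/0.0.0.0', 'tp_src': '0/0x0', 'tp_dst': '80/0xffff'}
# and on the reverse path tp_src and tp_dst are swapped.
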
Example #17
 def run_ofctl(self, cmd, args, process_input=None):
     # We need to dump-groups according to group Id,
     # which is a feature of OpenFlow1.5
     full_args = ["ovs-ofctl", "-O openflow13", cmd, self.br_name] + args
     try:
         return utils.execute(full_args,
                              run_as_root=True,
                              process_input=process_input)
     except Exception as e:
         LOG.exception(e)
         LOG.error(_LE("Unable to execute %(args)s."), {'args': full_args})
Example #18
 def delete_port_chain(self, context, portchain_id):
     pc = self.get_port_chain(context, portchain_id)
     pc_context = sfc_ctx.PortChainContext(self, context, pc)
     try:
         self.driver_manager.delete_port_chain(pc_context)
     except sfc_exc.SfcDriverError as e:
         LOG.exception(e)
         with excutils.save_and_reraise_exception():
             LOG.error(_LE("Delete port chain failed, portchain '%s'"),
                       portchain_id)
     # TODO(qijing): unsync in case deleted in driver but fail in database
     super(SfcPlugin, self).delete_port_chain(context, portchain_id)
Example #19
 def delete_port_chain(self, context, portchain_id):
     pc = self.get_port_chain(context, portchain_id)
     pc_context = sfc_ctx.PortChainContext(self, context, pc)
     try:
         self.driver_manager.delete_port_chain(pc_context)
     except sfc_exc.SfcDriverError as e:
         LOG.exception(e)
         with excutils.save_and_reraise_exception():
             LOG.error(_LE("Delete port chain failed, portchain '%s'"),
                       portchain_id)
     # TODO(qijing): unsync in case deleted in driver but fail in database
     super(SfcPlugin, self).delete_port_chain(context, portchain_id)
Example #20
    def delete_port_pair(self, context, portpair_id):
        portpair = self.get_port_pair(context, portpair_id)
        portpair_context = sfc_ctx.PortPairContext(self, context, portpair)
        try:
            self.driver_manager.delete_port_pair(portpair_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Delete port pair failed, port_pair '%s'"),
                          portpair_id)

        super(SfcPlugin, self).delete_port_pair(context, portpair_id)
Example #21
    def delete_flow_rule(self, flowrule, flowrule_status):
        if (flowrule['fwd_path'] is False and
                flowrule['node_type'] == 'sf_node'):
            flowrule['ingress'], flowrule['egress'] = (
                flowrule['egress'], flowrule['ingress'])
        try:
            LOG.debug("delete_flow_rule, flowrule = %s", flowrule)

            # delete tunnel table flow rule on br-int(egress match)
            if flowrule['egress'] is not None:
                self._setup_local_switch_flows_on_int_br(flowrule,
                                                         flowrule['del_fcs'],
                                                         None,
                                                         add_flow=False,
                                                         match_inport=True)
                # delete group table, need to check again
                group_id = flowrule.get('next_group_id', None)
                if group_id and flowrule.get('group_refcnt', None) <= 1:
                    if flowrule['fwd_path']:
                        self.br_int.delete_group(group_id=group_id)
                    else:
                        self.br_int.delete_group(group_id=group_id +
                                                 REVERSE_GROUP_NUMBER_OFFSET)
                    for item in flowrule['next_hops']:
                        if flowrule['fwd_path']:
                            self.br_int.delete_flows(
                                table=ACROSS_SUBNET_TABLE,
                                dl_dst=item['in_mac_address'])
                        else:
                            self.br_int.delete_flows(
                                table=ACROSS_SUBNET_TABLE,
                                dl_dst=item['mac_address'])

            if flowrule['ingress'] is not None:
                # delete table INGRESS_TABLE ingress match flow rule
                # on br-int(ingress match)
                vif_port = self.br_int.get_vif_port_by_id(flowrule['ingress'])
                if vif_port:
                    # delete the br-int flow rule on table INGRESS_TABLE
                    # for ingress traffic
                    self.br_int.delete_flows(
                        table=INGRESS_TABLE,
                        dl_type=0x8847,
                        dl_dst=vif_port.vif_mac,
                        mpls_label=flowrule['nsp'] << 8 | (flowrule['nsi'] + 1)
                    )
        except Exception as e:
            flowrule_status_temp = {'id': flowrule['id'],
                                    'status': constants.STATUS_ERROR}
            flowrule_status.append(flowrule_status_temp)
            LOG.exception(e)
            LOG.error(_LE("delete_flow_rule failed"))
Example #22
    def delete_port_pair(self, context, portpair_id):
        portpair = self.get_port_pair(context, portpair_id)
        portpair_context = sfc_ctx.PortPairContext(
            self, context, portpair)
        try:
            self.driver_manager.delete_port_pair(portpair_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Delete port pair failed, port_pair '%s'"),
                          portpair_id)

        super(SfcPlugin, self).delete_port_pair(context, portpair_id)
Example #23
 def run_ofctl(self, cmd, args, process_input=None):
     # We need to dump-groups according to group Id,
     # which is a feature of OpenFlow1.5
     full_args = [
         "ovs-ofctl", "-O openflow13", cmd, self.br_name
     ] + args
     try:
         return utils.execute(full_args, run_as_root=True,
                              process_input=process_input)
     except Exception as e:
         LOG.exception(e)
         LOG.error(_LE("Unable to execute %(args)s."),
                   {'args': full_args})
Example #24
    def sfc_treat_devices_removed(self, port_ids):
        resync = False
        for port_id in port_ids:
            LOG.info(_LI("a device %s is removed"), port_id)
            try:
                self._delete_ports_flowrules_by_id(port_id)
            except Exception as e:
                LOG.exception(e)
                LOG.error(_LE("delete port flow rule failed for %(port_id)s"),
                          {'port_id': port_id})
                resync = True

        return resync
Example #25
    def delete_flow_classifier(self, context, fc_id):
        fc = self.get_flow_classifier(context, fc_id)
        fc_context = fc_ctx.FlowClassifierContext(self, context, fc)
        try:
            self.driver_manager.delete_flow_classifier(fc_context)
        except fc_exc.FlowClassfierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Delete port pair group failed, "
                        "flow_classifier '%s'"), fc_id)

        super(FlowClassifierPlugin,
              self).delete_flow_classifier(context, fc_id)
Example #26
    def delete_flow_classifier(self, context, fc_id):
        fc = self.get_flow_classifier(context, fc_id)
        fc_context = fc_ctx.FlowClassifierContext(self, context, fc)
        try:
            self.driver_manager.delete_flow_classifier(fc_context)
        except fc_exc.FlowClassfierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Delete port pair group failed, "
                              "flow_classifier '%s'"),
                          fc_id)

        super(FlowClassifierPlugin, self).delete_flow_classifier(
            context, fc_id)
Example #27
    def delete_port(self, context, port):
        """Handle agent SFC extension port delete."""
        port_id = port['port_id']
        resync = False
        LOG.info(_LI("a device %s is removed"), port_id)
        try:
            self._delete_ports_flowrules_by_id(context, port_id)
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("delete port flow rule failed for %(port_id)s"),
                      {'port_id': port_id})
            resync = True

        return resync
Example #28
    def delete_flow_rules(self, context, **kwargs):
        try:
            flowrule_status = []
            flowrules = kwargs['flowrule_entries']
            LOG.debug("delete_flow_rules received,  flowrules= %s", flowrules)
            if flowrules:
                self._treat_delete_flow_rules(flowrules, flowrule_status)
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("delete_flow_rules failed"))

        if flowrule_status:
            self.sfc_plugin_rpc.update_flowrules_status(
                self.context, flowrule_status)
Example #29
    def get_flow_classifier_by_portchain_id(self, context, portchain_id):
        try:
            flow_classifier_list = []
            sfc_plugin = (manager.NeutronManager.get_service_plugins().get(
                sfc.SFC_EXT))
            if not sfc_plugin:
                return []

            port_chain = sfc_plugin.get_port_chain(context, portchain_id)
            flow_classifier_list = self._get_portchain_fcs(port_chain)
            return flow_classifier_list
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("get_flow_classifier_by_portchain_id failed"))
Example #30
    def sfc_treat_devices_removed(self, port_ids):
        resync = False
        for port_id in port_ids:
            LOG.info(_LI("a device %s is removed"), port_id)
            try:
                self._delete_ports_flowrules_by_id(port_id)
            except Exception as e:
                LOG.exception(e)
                LOG.error(
                    _LE("delete port flow rule failed for %(port_id)s"),
                    {'port_id': port_id}
                )
                resync = True

        return resync
Example #31
    def create_port_pair(self, context, port_pair):
        portpair_db = super(SfcPlugin,
                            self).create_port_pair(context, port_pair)
        portpair_context = sfc_ctx.PortPairContext(self, context, portpair_db)
        try:
            self.driver_manager.create_port_pair(portpair_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Create port pair failed, "
                        "deleting port_pair '%s'"), portpair_db['id'])
                self.delete_port_pair(context, portpair_db['id'])

        return portpair_db
Example #32
    def delete_flow_rules(self, context, **kwargs):
        flowrule_status = []
        try:
            flowrules = kwargs['flowrule_entries']
            LOG.debug("delete_flow_rules received,  flowrules= %s", flowrules)
            if flowrules:
                self.sfc_driver.delete_flow_rule(
                    flowrules, flowrule_status)
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("delete_flow_rules failed"))

        if flowrule_status:
            self.sfc_plugin_rpc.update_flowrules_status(
                context, flowrule_status)
Example #33
    def _call_drivers(self, method_name, context):
        """Helper method for calling a method across all SFC drivers.

        :param method_name: name of the method to call
        :param context: context parameter to pass to each method call
        """
        for driver in self.ordered_drivers:
            try:
                getattr(driver.obj, method_name)(context)
            except Exception as e:
                # This is an internal failure.
                LOG.exception(e)
                LOG.error(
                    _LE("SFC driver '%(name)s' failed in %(method)s"), {"name": driver.name, "method": method_name}
                )
                raise sfc_exc.SfcDriverError(method=method_name)
Example #34
    def update_port_pair(self, context, portpair_id, port_pair):
        original_portpair = self.get_port_pair(context, portpair_id)
        updated_portpair = super(SfcPlugin, self).update_port_pair(
            context, portpair_id, port_pair)
        portpair_context = sfc_ctx.PortPairContext(
            self, context, updated_portpair,
            original_portpair=original_portpair)
        try:
            self.driver_manager.update_port_pair(portpair_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Update port pair failed, port_pair '%s'"),
                          updated_portpair['id'])

        return updated_portpair
Example #35
    def create_port_pair(self, context, port_pair):
        portpair_db = super(SfcPlugin, self).create_port_pair(
            context, port_pair)
        portpair_context = sfc_ctx.PortPairContext(
            self, context, portpair_db)
        try:
            self.driver_manager.create_port_pair(portpair_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Create port pair failed, "
                              "deleting port_pair '%s'"),
                          portpair_db['id'])
                self.delete_port_pair(context, portpair_db['id'])

        return portpair_db
Example #36
    def delete_port(self, context, port):
        """Handle agent SFC extension port delete."""
        port_id = port['port_id']
        resync = False
        LOG.info(_LI("a device %s is removed"), port_id)
        try:
            self._delete_ports_flowrules_by_id(context, port_id)
        except Exception as e:
            LOG.exception(e)
            LOG.error(
                _LE("delete port flow rule failed for %(port_id)s"),
                {'port_id': port_id}
            )
            resync = True

        return resync
Example #37
    def get_flowrules_by_host_portid(self, context, host, port_id):
        port_chain_flowrules = []
        sfc_plugin = directory.get_plugin(sfc.SFC_EXT)
        if not sfc_plugin:
            return port_chain_flowrules
        try:
            port_detail_list = []
            # a port may appear as an ingress or an egress port only once at a time.
            ingress_port = self.get_port_detail_by_filter(
                dict(ingress=port_id))
            egress_port = self.get_port_detail_by_filter(
                dict(egress=port_id))
            if not ingress_port and not egress_port:
                return None
            # the SF may have migrated to another host
            if ingress_port:
                port_detail_list.append(ingress_port)
                if ingress_port['host_id'] != host:
                    ingress_port.update(dict(host_id=host))

            if egress_port:
                port_detail_list.append(egress_port)
                if egress_port['host_id'] != host:
                    egress_port.update(dict(host_id=host))

            # this is an SF if there are both egress and ingress ports.
            for i, ports in enumerate(port_detail_list):
                nodes_assocs = ports['path_nodes']
                for assoc in nodes_assocs:
                    # update current path flow rule
                    node = self.get_path_node(assoc['pathnode_id'])
                    port_chain = sfc_plugin.get_port_chain(
                        context,
                        node['portchain_id'])
                    flow_rule = self._build_portchain_flowrule_body(
                        node,
                        ports,
                        add_fc_ids=port_chain['flow_classifiers']
                    )
                    port_chain_flowrules.append(flow_rule)

            return port_chain_flowrules

        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("get_flowrules_by_host_portid failed"))
Example #38
    def _call_drivers(self, method_name, context):
        """Helper method for calling a method across all SFC drivers.

        :param method_name: name of the method to call
        :param context: context parameter to pass to each method call
        """
        for driver in self.ordered_drivers:
            try:
                getattr(driver.obj, method_name)(context)
            except Exception as e:
                # This is an internal failure.
                LOG.exception(e)
                LOG.error(_LE("SFC driver '%(name)s' failed in %(method)s"), {
                    'name': driver.name,
                    'method': method_name
                })
                raise sfc_exc.SfcDriverError(method=method_name)
Example #39
    def update_port_pair(self, context, portpair_id, port_pair):
        original_portpair = self.get_port_pair(context, portpair_id)
        updated_portpair = super(SfcPlugin, self).update_port_pair(
            context, portpair_id, port_pair)
        portpair_context = sfc_ctx.PortPairContext(
            self,
            context,
            updated_portpair,
            original_portpair=original_portpair)
        try:
            self.driver_manager.update_port_pair(portpair_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Update port pair failed, port_pair '%s'"),
                          updated_portpair['id'])

        return updated_portpair
Example #40
    def update_flow_rules(self, flowrule, flowrule_status):
        try:
            if flowrule.get('egress', None):
                self._setup_egress_flow_rules(flowrule)
            if flowrule.get('ingress', None):
                self._setup_ingress_flow_rules_with_mpls(flowrule)

            flowrule_status_temp = {}
            flowrule_status_temp['id'] = flowrule['id']
            flowrule_status_temp['status'] = constants.STATUS_ACTIVE
            flowrule_status.append(flowrule_status_temp)
        except Exception as e:
            flowrule_status_temp = {}
            flowrule_status_temp['id'] = flowrule['id']
            flowrule_status_temp['status'] = constants.STATUS_ERROR
            flowrule_status.append(flowrule_status_temp)
            LOG.exception(e)
            LOG.error(_LE("update_flow_rules failed"))
Example #41
    def create_flow_classifier(self, context, flow_classifier):
        with context.session.begin(subtransactions=True):
            fc_db = super(FlowClassifierPlugin, self).create_flow_classifier(
                context, flow_classifier)
            fc_db_context = fc_ctx.FlowClassifierContext(self, context, fc_db)
            self.driver_manager.create_flow_classifier_precommit(
                fc_db_context)

        try:
            self.driver_manager.create_flow_classifier(fc_db_context)
        except fc_exc.FlowClassifierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Create flow classifier failed, "
                              "deleting flow_classifier '%s'"),
                          fc_db['id'])
                self.delete_flow_classifier(context, fc_db['id'])
        return fc_db
Example #42
    def update_port_chain(self, context, portchain_id, port_chain):
        original_portchain = self.get_port_chain(context, portchain_id)
        updated_portchain = super(SfcPlugin, self).update_port_chain(
            context, portchain_id, port_chain)
        portchain_db_context = sfc_ctx.PortChainContext(
            self, context, updated_portchain,
            original_portchain=original_portchain)

        try:
            self.driver_manager.update_port_chain(portchain_db_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Update port chain failed, port_chain '%s'"),
                          updated_portchain['id'])

        # TODO(qijing): should we rollback the database update here?
        return updated_portchain
Example #43
    def _update_flow_rules_with_mpls_enc(self, flowrule, flowrule_status):
        try:
            if flowrule.get('egress', None):
                self._setup_egress_flow_rules_with_mpls(flowrule)
            if flowrule.get('ingress', None):
                self._setup_ingress_flow_rules_with_mpls(flowrule)

            flowrule_status_temp = {}
            flowrule_status_temp['id'] = flowrule['id']
            flowrule_status_temp['status'] = constants.STATUS_ACTIVE
            flowrule_status.append(flowrule_status_temp)
        except Exception as e:
            flowrule_status_temp = {}
            flowrule_status_temp['id'] = flowrule['id']
            flowrule_status_temp['status'] = constants.STATUS_ERROR
            flowrule_status.append(flowrule_status_temp)
            LOG.exception(e)
            LOG.error(_LE("_update_flow_rules_with_mpls_enc failed"))
Example #44
    def create_flow_classifier(self, context, flow_classifier):
        with context.session.begin(subtransactions=True):
            fc_db = super(FlowClassifierPlugin,
                          self).create_flow_classifier(context,
                                                       flow_classifier)
            fc_db_context = fc_ctx.FlowClassifierContext(self, context, fc_db)
            self.driver_manager.create_flow_classifier_precommit(fc_db_context)

        try:
            self.driver_manager.create_flow_classifier(fc_db_context)
        except fc_exc.FlowClassifierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Create flow classifier failed, "
                        "deleting flow_classifier '%s'"), fc_db['id'])
                self.delete_flow_classifier(context, fc_db['id'])
        return fc_db
Example #45
    def delete_flow_classifier(self, context, fc_id):
        fc = self.get_flow_classifier(context, fc_id)
        fc_context = fc_ctx.FlowClassifierContext(self, context, fc)
        try:
            self.driver_manager.delete_flow_classifier(fc_context)
        except fc_exc.FlowClassfierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Delete flow classifier failed, "
                              "flow_classifier '%s'"),
                          fc_id)

        with context.session.begin(subtransactions=True):
            fc = self.get_flow_classifier(context, fc_id)
            fc_context = fc_ctx.FlowClassifierContext(self, context, fc)
            super(FlowClassifierPlugin, self).delete_flow_classifier(
                context, fc_id)
            self.driver_manager.delete_flow_classifier_precommit(fc_context)
        self.driver_manager.delete_flow_classifier_postcommit(fc_context)
Example #46
    def update_flow_classifier(self, context, id, flow_classifier):
        original_flowclassifier = self.get_flow_classifier(context, id)
        updated_fc = super(
            FlowClassifierPlugin, self
        ).update_flow_classifier(
            context, id, flow_classifier)
        fc_db_context = fc_ctx.FlowClassifierContext(
            self, context, updated_fc,
            original_flowclassifier=original_flowclassifier)
        try:
            self.driver_manager.update_flow_classifier(fc_db_context)
        except fc_exc.FlowClassifierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Update flow classifier failed, "
                              "flow_classifier '%s'"),
                          updated_fc['id'])

        return updated_fc
Example #47
    def update_flow_classifier(self, context, id, flow_classifier):
        original_flowclassifier = self.get_flow_classifier(context, id)
        updated_fc = super(FlowClassifierPlugin, self).update_flow_classifier(
            context, id, flow_classifier)
        fc_db_context = fc_ctx.FlowClassifierContext(
            self,
            context,
            updated_fc,
            original_flowclassifier=original_flowclassifier)
        try:
            self.driver_manager.update_flow_classifier(fc_db_context)
        except fc_exc.FlowClassifierDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Update flow classifier failed, "
                        "flow_classifier '%s'"), updated_fc['id'])

        return updated_fc
Example #48
    def _delete_flow_rule_with_mpls_enc(self, flowrule, flowrule_status):
        try:
            LOG.debug("_delete_flow_rule_with_mpls_enc, flowrule = %s",
                      flowrule)

            # delete tunnel table flow rule on br-int(egress match)
            if flowrule['egress'] is not None:
                self._setup_local_switch_flows_on_int_br(
                    flowrule,
                    flowrule['del_fcs'],
                    None,
                    add_flow=False,
                    match_inport=True
                )
                # delete group table, need to check again
                group_id = flowrule.get('next_group_id', None)
                if group_id and flowrule.get('group_refcnt', None) <= 1:
                    self.int_br.delete_group(group_id=group_id)
                    for item in flowrule['next_hops']:
                        self.int_br.delete_flows(
                            table=ACROSS_SUBNET_TABLE,
                            dl_dst=item['mac_address'])

            if flowrule['ingress'] is not None:
                # delete table INGRESS_TABLE ingress match flow rule
                # on br-int(ingress match)
                vif_port = self.int_br.get_vif_port_by_id(flowrule['ingress'])
                if vif_port:
                    # delete the br-int flow rule on table INGRESS_TABLE
                    # for ingress traffic
                    self.int_br.delete_flows(
                        table=INGRESS_TABLE,
                        dl_type=0x8847,
                        dl_dst=vif_port.vif_mac,
                        mpls_label=flowrule['nsp'] << 8 | (flowrule['nsi'] + 1)
                    )
        except Exception as e:
            flowrule_status_temp = {}
            flowrule_status_temp['id'] = flowrule['id']
            flowrule_status_temp['status'] = constants.STATUS_ERROR
            flowrule_status.append(flowrule_status_temp)
            LOG.exception(e)
            LOG.error(_LE("_delete_flow_rule_with_mpls_enc failed"))
Example #49
    def _get_flow_infos_from_flow_classifier(self, flow_classifier):
        flow_infos = []
        nw_src, nw_dst = ((None, ) * 2)

        if "IPv4" != flow_classifier['ethertype']:
            LOG.error(_LE("Current portchain agent don't support Ipv6"))
            return flow_infos

        # parse and transfer flow info to match field info
        dl_type, nw_proto, source_port_masks, destination_port_masks = (
            self._parse_flow_classifier(flow_classifier))

        if flow_classifier['source_ip_prefix']:
            nw_src = flow_classifier['source_ip_prefix']
        else:
            nw_src = '0.0.0.0/0.0.0.0'
        if flow_classifier['destination_ip_prefix']:
            nw_dst = flow_classifier['destination_ip_prefix']
        else:
            nw_dst = '0.0.0.0/0.0.0.0'

        if source_port_masks and destination_port_masks:
            for destination_port in destination_port_masks:
                for source_port in source_port_masks:
                    if nw_proto is None:
                        flow_infos.append(dict(
                            dl_type=dl_type,
                            nw_src=nw_src,
                            nw_dst=nw_dst,
                            tp_src='%s' % source_port,
                            tp_dst='%s' % destination_port
                        ))
                    else:
                        flow_infos.append(dict(
                            dl_type=dl_type,
                            nw_proto=nw_proto,
                            nw_src=nw_src,
                            nw_dst=nw_dst,
                            tp_src='%s' % source_port,
                            tp_dst='%s' % destination_port
                        ))

        return flow_infos
Example #50
    def get_flow_classifier_by_portchain_id(self, context, portchain_id):
        try:
            flow_classifier_list = []
            sfc_plugin = (
                manager.NeutronManager.get_service_plugins().get(
                    sfc.SFC_EXT
                )
            )
            if not sfc_plugin:
                return []

            port_chain = sfc_plugin.get_port_chain(
                context,
                portchain_id)
            flow_classifier_list = self._get_portchain_fcs(port_chain)
            return flow_classifier_list
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("get_flow_classifier_by_portchain_id failed"))
Example #51
    def _get_flow_infos_from_flow_classifier(self, flow_classifier):
        flow_infos = []
        nw_src, nw_dst = ((None, ) * 2)

        if "IPv4" != flow_classifier['ethertype']:
            LOG.error(_LE("Current portchain agent don't support Ipv6"))
            return flow_infos

        # parse and transfer flow info to match field info
        dl_type, nw_proto, source_port_masks, destination_port_masks = (
            self._parse_flow_classifier(flow_classifier))

        if flow_classifier['source_ip_prefix']:
            nw_src = flow_classifier['source_ip_prefix']
        else:
            nw_src = '0.0.0.0/0.0.0.0'
        if flow_classifier['destination_ip_prefix']:
            nw_dst = flow_classifier['destination_ip_prefix']
        else:
            nw_dst = '0.0.0.0/0.0.0.0'

        if source_port_masks and destination_port_masks:
            for destination_port in destination_port_masks:
                for source_port in source_port_masks:
                    if nw_proto is None:
                        flow_infos.append(dict(
                            dl_type=dl_type,
                            nw_src=nw_src,
                            nw_dst=nw_dst,
                            tp_src='%s' % source_port,
                            tp_dst='%s' % destination_port
                        ))
                    else:
                        flow_infos.append(dict(
                            dl_type=dl_type,
                            nw_proto=nw_proto,
                            nw_src=nw_src,
                            nw_dst=nw_dst,
                            tp_src='%s' % source_port,
                            tp_dst='%s' % destination_port
                        ))

        return flow_infos
Example #52
    def delete_flow_rule(self, flowrule, flowrule_status):
        try:
            LOG.debug("delete_flow_rule, flowrule = %s",
                      flowrule)

            # delete tunnel table flow rule on br-int(egress match)
            if flowrule['egress'] is not None:
                self._setup_local_switch_flows_on_int_br(
                    flowrule,
                    flowrule['del_fcs'],
                    None,
                    add_flow=False,
                    match_inport=True
                )
                # delete group table, need to check again
                group_id = flowrule.get('next_group_id', None)
                if group_id and flowrule.get('group_refcnt', None) <= 1:
                    self.br_int.delete_group(group_id=group_id)
                    for item in flowrule['next_hops']:
                        self.br_int.delete_flows(
                            table=ACROSS_SUBNET_TABLE,
                            dl_dst=item['mac_address'])

            if flowrule['ingress'] is not None:
                # delete table INGRESS_TABLE ingress match flow rule
                # on br-int(ingress match)
                vif_port = self.br_int.get_vif_port_by_id(flowrule['ingress'])
                if vif_port:
                    # delete the br-int flow rule on table INGRESS_TABLE
                    # that was installed for ingress traffic
                    self.br_int.delete_flows(
                        table=INGRESS_TABLE,
                        dl_type=0x8847,
                        dl_dst=vif_port.vif_mac,
                        mpls_label=flowrule['nsp'] << 8 | (flowrule['nsi'] + 1)
                    )
        except Exception as e:
            flowrule_status_temp = {}
            flowrule_status_temp['id'] = flowrule['id']
            flowrule_status_temp['status'] = constants.STATUS_ERROR
            flowrule_status.append(flowrule_status_temp)
            LOG.exception(e)
            LOG.error(_LE("delete_flow_rule failed"))
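For reference, the mpls_label matched above packs the service path id (nsp) and the incremented service index (nsi + 1) into a single label. A minimal sketch of that arithmetic, with made-up values:
# Illustrative values only; real nsp/nsi come from the flow rule.
nsp = 5                                # service path id
nsi = 254                              # service index at this hop
mpls_label = nsp << 8 | (nsi + 1)
assert mpls_label == 1535
assert mpls_label >> 8 == nsp          # upper bits recover the path id
assert mpls_label & 0xff == nsi + 1    # low byte recovers the next index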
Example #53
0
    def create_port_chain(self, context, port_chain):
        with context.session.begin(subtransactions=True):
            port_chain_db = super(SfcPlugin,
                                  self).create_port_chain(context, port_chain)
            portchain_db_context = sfc_ctx.PortChainContext(
                self, context, port_chain_db)
            self.driver_manager.create_port_chain_precommit(
                portchain_db_context)
        try:
            self.driver_manager.create_port_chain_postcommit(
                portchain_db_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Create port chain failed, "
                        "deleting port_chain '%s'"), port_chain_db['id'])
                self.delete_port_chain(context, port_chain_db['id'])

        return port_chain_db
Example #54
0
    def update_port_chain(self, context, portchain_id, port_chain):
        original_portchain = self.get_port_chain(context, portchain_id)
        updated_portchain = super(SfcPlugin, self).update_port_chain(
            context, portchain_id, port_chain)
        portchain_db_context = sfc_ctx.PortChainContext(
            self,
            context,
            updated_portchain,
            original_portchain=original_portchain)

        try:
            self.driver_manager.update_port_chain(portchain_db_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Update port chain failed, port_chain '%s'"),
                          updated_portchain['id'])

        # TODO(qijing): should we rollback the database update here?
        return updated_portchain
Example #55
0
    def create_port_chain(self, context, port_chain):
        with context.session.begin(subtransactions=True):
            port_chain_db = super(SfcPlugin, self).create_port_chain(
                context, port_chain)
            portchain_db_context = sfc_ctx.PortChainContext(
                self, context, port_chain_db)
            self.driver_manager.create_port_chain_precommit(
                portchain_db_context)
        try:
            self.driver_manager.create_port_chain_postcommit(
                portchain_db_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Create port chain failed, "
                              "deleting port_chain '%s'"),
                          port_chain_db['id'])
                self.delete_port_chain(context, port_chain_db['id'])

        return port_chain_db
Example #56
0
    def sfc_treat_devices_added_updated(self, port_id):
        resync = False
        flowrule_status = []
        try:
            LOG.debug("a new device %s is found", port_id)
            flows_list = (self.sfc_plugin_rpc.get_flowrules_by_host_portid(
                self.context, port_id))
            if flows_list:
                for flow in flows_list:
                    self._treat_update_flow_rules(flow, flowrule_status)
        except Exception as e:
            LOG.exception(e)
            LOG.error(_LE("sfc_treat_devices_added_updated failed"))
            resync = True

        if flowrule_status:
            self.sfc_plugin_rpc.update_flowrules_status(
                self.context, flowrule_status)

        return resync
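A hypothetical caller sketch, assuming an agent loop that collects ports to retry; agent and added_or_updated_ports are assumptions for illustration, not names from the original code:
# Hypothetical: retry handling driven by the resync flag returned above.
devices_to_retry = set()
for port_id in added_or_updated_ports:
    if agent.sfc_treat_devices_added_updated(port_id):
        # True means the RPC lookup or flow update failed; retry this port later.
        devices_to_retry.add(port_id)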
Example #57
0
 def _delete_ports_flowrules_by_id(self, ports_id):
     flowrule_status = []
     try:
         LOG.debug("delete_port_id_flows received, ports_id= %s", ports_id)
         count = 0
         if ports_id:
             for port_id in ports_id:
                 flowrule = (
                     self.sfc_plugin_rpc.get_flowrules_by_host_portid(
                         self.context, port_id))
                 if flowrule:
                     self._treat_delete_flow_rules(flowrule,
                                                   flowrule_status)
                     count += 1
         LOG.debug("_delete_ports_flowrules_by_id received, count= %s",
                   count)
     except Exception as e:
         LOG.exception(e)
         LOG.error(_LE("delete_port_id_flows failed"))
     if flowrule_status:
         self.sfc_plugin_rpc.update_flowrules_status(
             self.context, flowrule_status)
Example #58
0
 def add_nsh_tunnel_port(self, port_name, remote_ip, local_ip,
                         tunnel_type=constants.TYPE_GRE,
                         vxlan_udp_port=constants.VXLAN_UDP_PORT,
                         dont_fragment=True,
                         in_nsp=None,
                         in_nsi=None):
     attrs = [('type', tunnel_type)]
     # This is an OrderedDict solely to make a test happy
     options = collections.OrderedDict()
     vxlan_uses_custom_udp_port = (
         tunnel_type == constants.TYPE_VXLAN and
         vxlan_udp_port != constants.VXLAN_UDP_PORT
     )
     if vxlan_uses_custom_udp_port:
         options['dst_port'] = vxlan_udp_port
     options['df_default'] = str(dont_fragment).lower()
     options['remote_ip'] = 'flow'
     options['local_ip'] = local_ip
     options['in_key'] = 'flow'
     options['out_key'] = 'flow'
     if in_nsp is not None and in_nsi is not None:
         options['nsp'] = str(in_nsp)
         options['nsi'] = str(in_nsi)
     elif in_nsp is None and in_nsi is None:
         options['nsp'] = 'flow'
         options['nsi'] = 'flow'
     attrs.append(('options', options))
     ofport = self.add_port(port_name, *attrs)
     if (
         tunnel_type == constants.TYPE_VXLAN and
         ofport == INVALID_OFPORT
     ):
         LOG.error(
             _LE('Unable to create VXLAN tunnel port for service chain. '
                 'Please ensure that an openvswitch version that supports '
                 'VXLAN for service chain is installed.')
         )
     return ofport
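A rough usage sketch, assuming a bridge wrapper object br_tun that exposes this method; the port name and IPs are made up. With in_nsp and in_nsi both left as None, the nsp/nsi options become 'flow', and the remote_ip argument is effectively unused because the option is always set to 'flow':
# Hypothetical call; constants.TYPE_VXLAN and INVALID_OFPORT as in the snippet above.
ofport = br_tun.add_nsh_tunnel_port(
    port_name='vxlan-0a000001',
    remote_ip='10.0.0.1',
    local_ip='10.0.0.2',
    tunnel_type=constants.TYPE_VXLAN,
    in_nsp=None,     # both None -> options['nsp'] = options['nsi'] = 'flow'
    in_nsi=None)
if ofport == INVALID_OFPORT:
    LOG.error("VXLAN tunnel port for the service chain could not be created")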
Example #59
0
 def _delete_ports_flowrules_by_id(self, context, ports_id):
     flowrule_status = []
     try:
         LOG.debug("delete_port_id_flows received, ports_id= %s", ports_id)
         count = 0
         if ports_id:
             for port_id in ports_id:
                 flowrule = (
                     self.sfc_plugin_rpc.get_flowrules_by_host_portid(
                         context, port_id
                     )
                 )
                 if flowrule:
                     self.sfc.driver.delete_flow_rule(
                         flowrule, flowrule_status)
                     count += 1
         LOG.debug(
             "_delete_ports_flowrules_by_id received, count= %s", count)
     except Exception as e:
         LOG.exception(e)
         LOG.error(_LE("delete_port_id_flows failed"))
     if flowrule_status:
         self.sfc_plugin_rpc.update_flowrules_status(
             context, flowrule_status)
Example #60
0
    def delete_port_pair_group(self, context, portpairgroup_id):
        portpairgroup = self.get_port_pair_group(context, portpairgroup_id)
        portpairgroup_context = sfc_ctx.PortPairGroupContext(
            self, context, portpairgroup)
        try:
            self.driver_manager.delete_port_pair_group(portpairgroup_context)
        except sfc_exc.SfcDriverError as e:
            LOG.exception(e)
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Delete port pair group failed, "
                              "port_pair_group '%s'"),
                          portpairgroup_id)

        with context.session.begin(subtransactions=True):
            portpairgroup = self.get_port_pair_group(context, portpairgroup_id)
            portpairgroup_context = sfc_ctx.PortPairGroupContext(
                self, context, portpairgroup)
            super(SfcPlugin, self).delete_port_pair_group(context,
                                                          portpairgroup_id)
            self.driver_manager.delete_port_pair_group_precommit(
                portpairgroup_context)
        self.driver_manager.delete_port_pair_group_postcommit(
            portpairgroup_context)