Example #1
    def is_fw_drvr_create_needed(self):
        """This API returns True if a driver init needs to be performed.

        This returns True if a FW is created with an active policy that has
        at least one rule associated with it and if a driver init is NOT
        done.
        """
        LOG.info(
            _LI("In Drvr create needed %(fw_created)s "
                "%(active_policy_id)s"
                " %(is_fw_drvr_created)s %(pol_present)s %(fw_type)s"), {
                    'fw_created': self.fw_created,
                    'active_policy_id': self.active_pol_id,
                    'is_fw_drvr_created': self.is_fw_drvr_created(),
                    'pol_present': self.active_pol_id in self.policies,
                    'fw_type': self.fw_type
                })
        if self.active_pol_id is not None and (self.active_pol_id
                                               in self.policies):
            LOG.info(
                _LI("In Drvr create needed %(len_policy)s %(one_rule)s"), {
                    'len_policy':
                    len(self.policies[self.active_pol_id]['rule_dict']),
                    'one_rule':
                    self.one_rule_present(self.active_pol_id)
                })
        return self.fw_created and self.active_pol_id and (
            not self.is_fw_drvr_created()) and self.fw_type and (
                self.active_pol_id in self.policies) and (len(
                    self.policies[self.active_pol_id]['rule_dict'])) > 0 and (
                        self.one_rule_present(self.active_pol_id))
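
The chained return expression packs seven checks into one statement. As a readability aid, here is a minimal standalone sketch (not the driver's code) of the same predicate with named intermediates; the parameters stand in for the attributes and helpers the method reads off self:

def drvr_create_needed(fw_created, fw_type, drvr_created, active_pol_id,
                       policies, one_rule_present):
    # The active policy must exist in the local policy cache.
    pol_known = active_pol_id is not None and active_pol_id in policies
    # The policy must carry at least one rule.
    has_rules = (pol_known and
                 len(policies[active_pol_id]['rule_dict']) > 0 and
                 one_rule_present(active_pol_id))
    # Driver init is needed when the FW exists with a type, has rules,
    # and the driver has not been initialized yet.
    return bool(fw_created and fw_type and not drvr_created and has_rules)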
Example #2
    def clean_vrfs(self, conn, router_id_dict, parsed_cfg):
        ostk_router_ids = self.get_ostk_router_ids(router_id_dict)
        rconf_ids = self.get_running_config_router_ids(parsed_cfg)

        source_set = set(ostk_router_ids)
        dest_set = set(rconf_ids)

        # add_set = source_set.difference(dest_set)
        del_set = dest_set.difference(source_set)

        LOG.info(_LI("VRF DB set: %s"), (source_set))
        LOG.info(_LI("VRFs to delete: %s"), (del_set))
        # LOG.info("VRFs to add: %s" % (add_set))

        is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
        invalid_vrfs = []
        for router_id in del_set:
            if (is_multi_region_enabled):
                my_region_id = cfg.CONF.multi_region.region_id
                invalid_vrfs.append("nrouter-%s-%s" %
                                    (router_id, my_region_id))
            else:
                invalid_vrfs.append("nrouter-%s" % (router_id))

        if not self.test_mode:
            for vrf_name in invalid_vrfs:
                confstr = asr_snippets.REMOVE_VRF_DEFN % vrf_name
                conn.edit_config(target='running', config=confstr)

        return invalid_vrfs
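
The deletion list comes from plain set arithmetic: any VRF present in the device's running config but absent from the OpenStack DB is stale. A standalone sketch with made-up router IDs:

# Made-up IDs; in the method above these come from the DB and the
# parsed running config.
ostk_router_ids = ['r1', 'r2']
rconf_ids = ['r2', 'r3']
del_set = set(rconf_ids).difference(set(ostk_router_ids))
invalid_vrfs = ['nrouter-%s' % rid for rid in sorted(del_set)]
assert invalid_vrfs == ['nrouter-r3']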
Example #3
 def dfa_uplink_restart(self, uplink_dict):
     LOG.info(_LI("Obtained uplink after restart %s "), uplink_dict)
     # This shouldn't happen
     if self.phy_uplink is not None:
         LOG.error(_LE("Uplink detection already done %s"), self.phy_uplink)
         return
     uplink = uplink_dict.get('uplink')
     veth_intf = uplink_dict.get('veth_intf')
     # Logic is as follows:
     # If DB didn't have any uplink it means it's not yet detected or down
     # if DB has uplink and veth, then no need to scan all ports we can
     # start with this veth.
     # If uplink has been removed or modified during restart, then a
     # down will be returned by uplink detection code and it will be
     # removed then.
     # If DB has uplink, but no veth, it's an error condition and in
     # which case remove the uplink port from bridge and start fresh
     if uplink is None or len(uplink) == 0:
         LOG.info(_LI("uplink not discovered yet"))
         self.restart_uplink_called = True
         return
     if veth_intf is not None and len(veth_intf) != 0:
         LOG.info(_LI("veth interface is already added, %(ul)s %(veth)s"), {
             'ul': uplink,
             'veth': veth_intf
         })
         self.phy_uplink = uplink
         self.veth_intf = veth_intf
         self.restart_uplink_called = True
         return
     LOG.info(_LI("Error case removing the uplink %s from bridge"), uplink)
     ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex, uplink)
     self.restart_uplink_called = True
Example #4
    def release_segmentation_id(self, seg_id):

        inside = any(lo <= seg_id <= hi for lo, hi in self.seg_id_ranges)
        session = db.get_session()
        with session.begin(subtransactions=True):
            query = session.query(self.model).filter_by(segmentation_id=seg_id)
            if inside:
                del_time = utils.utc_time(time.ctime())
                count = query.update({
                    "allocated": False,
                    "network_id": None,
                    "source": None,
                    "delete_time": del_time
                })
                if count:
                    LOG.info(
                        _LI("Releasing segmentation id %s to pool") % seg_id)
            else:
                count = query.delete()
                if count:
                    LOG.info(
                        _LI("Releasing segmentation_id %s outside pool") %
                        seg_id)

        if not count:
            LOG.info(_LI("segmentation_id %s not found") % seg_id)
Example #5
    def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
                                        **kwargs):
        attempts = 1
        port_ids = set(p['id'] for p in kwargs['ports'])

        while mgmt_port is not None or port_ids:
            if attempts == DELETION_ATTEMPTS:
                LOG.warning(_LW('Aborting resource deletion after %d '
                                'unsuccessful attempts'), DELETION_ATTEMPTS)
                return
            else:
                if attempts > 1:
                    eventlet.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS)
                LOG.info(_LI('Resource deletion attempt %d starting'),
                         attempts)
            # Remove anything created.
            if mgmt_port is not None:
                ml = {mgmt_port['id']}
                self._delete_resources(context, "management port",
                                       self._core_plugin.delete_port,
                                       n_exc.PortNotFound, ml)
                if not ml:
                    mgmt_port = None
            self._delete_resources(context, "trunk port",
                                   self._core_plugin.delete_port,
                                   n_exc.PortNotFound, port_ids)
            attempts += 1
        self._safe_delete_t1_network(context, tenant_id)
        self._safe_delete_t2_network(context, tenant_id)
        LOG.info(_LI('Resource deletion succeeded'))
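
The loop above is a bounded-retry pattern: attempt the deletions, sleep, retry, and abort after DELETION_ATTEMPTS tries. A generic sketch of the same shape, with time.sleep standing in for eventlet.sleep and a hypothetical delete_one callback:

import time

DELETION_ATTEMPTS = 5
SECONDS_BETWEEN_DELETION_ATTEMPTS = 2

def retry_until_empty(pending, delete_one):
    attempts = 1
    while pending:
        if attempts == DELETION_ATTEMPTS:
            return False          # abort, like the LOG.warning branch
        if attempts > 1:
            time.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS)
        # Retry every still-pending item; successful deletions shrink
        # the set, much as _delete_resources removes the IDs it deleted.
        for item in list(pending):
            if delete_one(item):
                pending.discard(item)
        attempts += 1
    return True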
Example #6
 def process_vm_event(self, msg, phy_uplink):
     LOG.info(_LI("In processing VM Event status %(status)s for MAC "
                  "%(mac)s UUID %(uuid)s oui %(oui)s"),
              {'status': msg.get_status(), 'mac': msg.get_mac(),
               'uuid': msg.get_port_uuid(), 'oui': msg.get_oui()})
     time.sleep(10)
     if msg.get_status() == 'up':
         res_fail = constants.CREATE_FAIL
     else:
         res_fail = constants.DELETE_FAIL
     if (not self.uplink_det_compl or
             phy_uplink not in self.ovs_vdp_obj_dict):
         LOG.error(_LE("Uplink Port Event not received yet"))
         self.update_vm_result(msg.get_port_uuid(), res_fail)
         return
     ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink]
     port_event_reply = ovs_vdp_obj.send_vdp_port_event(
         msg.get_port_uuid(), msg.get_mac(), msg.get_net_uuid(),
         msg.get_segmentation_id(), msg.get_status(), msg.get_oui())
     if not port_event_reply.get('result'):
         LOG.error(_LE("Error in VDP port event, Err Queue enq"))
         self.update_vm_result(
             msg.get_port_uuid(), res_fail,
             fail_reason=port_event_reply.get('fail_reason'))
     else:
         LOG.info(_LI("Success in VDP port event"))
         lvid, vdp_vlan = ovs_vdp_obj.get_lvid_vdp_vlan(msg.get_net_uuid(),
                                                        msg.get_port_uuid())
         self.update_vm_result(
             msg.get_port_uuid(), constants.RESULT_SUCCESS,
             lvid=lvid, vdp_vlan=vdp_vlan,
             fail_reason=port_event_reply.get('fail_reason'))
Example #7
    def populate_local_cache(self):
        """This populates the local cache after reading the Database.

        It calls the appropriate rule create, fw create routines.
        It doesn't actually call the routine to prepare the fabric or cfg the
        device since it will be handled by retry module.
        """
        fw_dict = self.get_all_fw_db()
        LOG.info(_LI("Populating FW Mgr Local Cache"))
        for fw_id in fw_dict:
            fw_data = fw_dict.get(fw_id)
            tenant_id = fw_data.get('tenant_id')
            rule_dict = fw_data.get('rules').get('rules')
            policy_id = fw_data.get('rules').get('firewall_policy_id')
            for rule in rule_dict:
                fw_evt_data = self.convert_fwdb_event_msg(
                    rule_dict.get(rule), tenant_id, rule, policy_id)
                LOG.info(_LI("Populating Rules for tenant %s"), tenant_id)
                self.fw_rule_create(fw_evt_data, cache=True)
            fw_os_data = self.os_helper.get_fw(fw_id)
            # If enabler is stopped and FW is deleted, then the above routine
            # will fail.
            if fw_os_data is None:
                fw_os_data = self.convert_fwdb(tenant_id, fw_data.get('name'),
                                               policy_id, fw_id)
            LOG.info(_LI("Populating FW for tenant %s"), tenant_id)
            self.fw_create(fw_os_data, cache=True)
            if fw_data.get('device_status') == 'SUCCESS':
                self.fwid_attr[tenant_id].fw_drvr_created(True)
            else:
                self.fwid_attr[tenant_id].fw_drvr_created(False)
        return fw_dict
Example #8
    def check_periodic_bulk_vm_notif_rcvd(self):
        """Bulk VM check handler called from periodic uplink detection.

        This gets called by the 'normal' stage of uplink detection.
        The bulk VM event sends all the VM's running in this agent.
        Sometimes during upgrades, it was found that due to some race
        condition, the server does not send the Bulk VM event.
        Whenever, a save_uplink is done by the agent, the server sends
        the Bulk VM event.
        If Bulk VM event is not received after few attempts, save_uplink is
        done to request the Bulk VM list.
        It's not protected with a mutex, since worst case,
        Bulk VM event will be sent twice, which is not that bad. When
        uplink is detected for the first time, it will hit the below
        else case and there a save_uplink is anyways done.
        """
        if not self.bulk_vm_rcvd_flag:
            if self.bulk_vm_check_cnt >= 1:
                self.bulk_vm_check_cnt = 0
                self.save_uplink(uplink=self.phy_uplink,
                                 veth_intf=self.veth_intf)
                LOG.info(_LI("Doing save_uplink again to request "
                             "Bulk VM's"))
            else:
                LOG.info(_LI("Bulk VM not received, incrementing count"))
                self.bulk_vm_check_cnt += 1
Example #9
    def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
                                        **kwargs):
        attempts = 1
        port_ids = set(p['id'] for p in kwargs['ports'])

        while mgmt_port is not None or port_ids:
            if attempts == DELETION_ATTEMPTS:
                LOG.warning(_LW('Aborting resource deletion after %d '
                                'unsuccessful attempts'), DELETION_ATTEMPTS)
                return
            else:
                if attempts > 1:
                    eventlet.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS)
                LOG.info(_LI('Resource deletion attempt %d starting'),
                         attempts)
            # Remove anything created.
            if mgmt_port is not None:
                ml = {mgmt_port['id']}
                self._delete_resources(context, "management port",
                                       self._core_plugin.delete_port,
                                       n_exc.PortNotFound, ml)
                if not ml:
                    mgmt_port = None
            self._delete_resources(context, "trunk port",
                                   self._core_plugin.delete_port,
                                   n_exc.PortNotFound, port_ids)
            attempts += 1
        self._safe_delete_t1_network(context, tenant_id)
        self._safe_delete_t2_network(context, tenant_id)
        LOG.info(_LI('Resource deletion succeeded'))
Example #10
    def populate_local_cache(self):
        """This populates the local cache after reading the Database.

        It calls the appropriate rule create, fw create routines.
        It doesn't actually call the routine to prepare the fabric or cfg the
        device since it will be handled by retry module.
        """
        fw_dict = self.get_all_fw_db()
        LOG.info(_LI("Populating FW Mgr Local Cache"))
        for fw_id in fw_dict:
            fw_data = fw_dict.get(fw_id)
            tenant_id = fw_data.get('tenant_id')
            rule_dict = fw_data.get('rules').get('rules')
            policy_id = fw_data.get('rules').get('firewall_policy_id')
            for rule in rule_dict:
                fw_evt_data = self.convert_fwdb_event_msg(rule_dict.get(rule),
                                                          tenant_id, rule,
                                                          policy_id)
                LOG.info(_LI("Populating Rules for tenant %s"), tenant_id)
                self.fw_rule_create(fw_evt_data, cache=True)
            fw_os_data = self.os_helper.get_fw(fw_id)
            # If enabler is stopped and FW is deleted, then the above routine
            # will fail.
            if fw_os_data is None:
                fw_os_data = self.convert_fwdb(tenant_id, fw_data.get('name'),
                                               policy_id, fw_id)
            LOG.info(_LI("Populating FW for tenant %s"), tenant_id)
            self.fw_create(fw_os_data, cache=True)
            if fw_data.get('device_status') == 'SUCCESS':
                self.fwid_attr[tenant_id].fw_drvr_created(True)
            else:
                self.fwid_attr[tenant_id].fw_drvr_created(False)
        return fw_dict
Example #11
 def process_uplink_event(self, msg, phy_uplink):
     LOG.info(_LI("Received New uplink Msg %(msg)s for uplink %(uplink)s"),
              {'msg': msg.get_status(), 'uplink': phy_uplink})
     if msg.get_status() == 'up':
         ovs_exc_raised = False
         try:
             self.ovs_vdp_obj_dict[phy_uplink] = ovs_vdp.OVSNeutronVdp(
                 phy_uplink, msg.get_integ_br(), msg.get_ext_br(),
                 msg.get_root_helper())
         except Exception as exc:
             LOG.error(_LE("OVS VDP Object creation failed %s"), str(exc))
             ovs_exc_raised = True
         if (ovs_exc_raised or not self.ovs_vdp_obj_dict[phy_uplink].
                 is_lldpad_setup_done()):
             # Is there a way to delete the object??
             LOG.error(_LE("UP Event Processing NOT Complete"))
             self.err_que.enqueue(constants.Q_UPL_PRIO, msg)
         else:
             self.uplink_det_compl = True
             veth_intf = (self.ovs_vdp_obj_dict[self.phy_uplink].
                          get_lldp_bridge_port())
             LOG.info(_LI("UP Event Processing Complete Saving uplink "
                          "%(ul)s and veth %(veth)s"),
                      {'ul': self.phy_uplink, 'veth': veth_intf})
             self.save_uplink(uplink=self.phy_uplink, veth_intf=veth_intf)
     elif msg.get_status() == 'down':
         # Free the object fixme(padkrish)
         if phy_uplink in self.ovs_vdp_obj_dict:
             self.ovs_vdp_obj_dict[phy_uplink].clear_obj_params()
         else:
             ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex,
                                             phy_uplink)
         self.save_uplink()
Example #12
    def clean_vrfs(self, conn, router_id_dict, parsed_cfg):
        ostk_router_ids = self.get_ostk_router_ids(router_id_dict)
        rconf_ids = self.get_running_config_router_ids(parsed_cfg)

        source_set = set(ostk_router_ids)
        dest_set = set(rconf_ids)

        # add_set = source_set.difference(dest_set)
        del_set = dest_set.difference(source_set)

        LOG.info(_LI("VRF DB set: %s"), (source_set))
        LOG.info(_LI("VRFs to delete: %s"), (del_set))
        # LOG.info("VRFs to add: %s" % (add_set))

        is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
        invalid_vrfs = []
        for router_id in del_set:
            if (is_multi_region_enabled):
                my_region_id = cfg.CONF.multi_region.region_id
                invalid_vrfs.append("nrouter-%s-%s" % (router_id,
                                                       my_region_id))
            else:
                invalid_vrfs.append("nrouter-%s" % (router_id))

        if not self.test_mode:
            for vrf_name in invalid_vrfs:
                confstr = asr_snippets.REMOVE_VRF_DEFN % vrf_name
                conn.edit_config(target='running', config=confstr)

        return invalid_vrfs
Example #13
    def get_running_config_router_ids(self, parsed_cfg):
        rconf_ids = []
        is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region

        if (is_multi_region_enabled):
            vrf_regex_new = VRF_MULTI_REGION_REGEX_NEW
        else:
            vrf_regex_new = VRF_REGEX_NEW

        for parsed_obj in parsed_cfg.find_objects(vrf_regex_new):
            LOG.info(_LI("VRF object: %s"), (str(parsed_obj)))
            match_obj = re.match(vrf_regex_new, parsed_obj.text)
            router_id = match_obj.group(1)
            LOG.info(_LI("    First 6 digits of router ID: %s\n"),
                        (router_id))
            if (is_multi_region_enabled):
                region_id = match_obj.group(2)
                LOG.info(_LI("    region ID: %s\n"),
                            (region_id))
                my_region_id = cfg.CONF.multi_region.region_id
                if (my_region_id == region_id):
                    rconf_ids.append(router_id)
            else:
                rconf_ids.append(router_id)

        return rconf_ids
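
Router IDs are recovered from VRF names via regex capture groups. VRF_REGEX_NEW and VRF_MULTI_REGION_REGEX_NEW are module constants defined elsewhere, so this sketch substitutes plausible stand-ins for them:

import re

# Hypothetical stand-ins for the real regex constants.
VRF_REGEX_NEW = r'vrf definition nrouter-(\w{6})'
VRF_MULTI_REGION_REGEX_NEW = r'vrf definition nrouter-(\w{6})-(\w+)'

line = 'vrf definition nrouter-abc123-region2'
m = re.match(VRF_MULTI_REGION_REGEX_NEW, line)
assert (m.group(1), m.group(2)) == ('abc123', 'region2')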
Example #14
    def is_fw_drvr_create_needed(self):
        """This API returns True if a driver init needs to be performed.

        This returns True if a FW is created with an active policy that has
        at least one rule associated with it and if a driver init is NOT
        done.
        """
        LOG.info(_LI("In Drvr create needed %(fw_created)s "
                     "%(active_policy_id)s"
                     " %(is_fw_drvr_created)s %(pol_present)s %(fw_type)s"),
                 {'fw_created': self.fw_created,
                  'active_policy_id': self.active_pol_id,
                  'is_fw_drvr_created': self.is_fw_drvr_created(),
                  'pol_present': self.active_pol_id in self.policies,
                  'fw_type': self.fw_type})
        if self.active_pol_id is not None and (
           self.active_pol_id in self.policies):
            LOG.info(_LI("In Drvr create needed %(len_policy)s %(one_rule)s"),
                     {'len_policy':
                      len(self.policies[self.active_pol_id]['rule_dict']),
                      'one_rule':
                      self.one_rule_present(self.active_pol_id)})
        return self.fw_created and self.active_pol_id and (
            not self.is_fw_drvr_created()) and self.fw_type and (
            self.active_pol_id in self.policies) and (
            len(self.policies[self.active_pol_id]['rule_dict'])) > 0 and (
            self.one_rule_present(self.active_pol_id))
Example #15
    def is_fw_complete(self):
        """This API returns the completion status of FW.

        This returns True if a FW is created with an active policy that has
        at least one rule associated with it and if a driver init is done
        successfully.
        """
        LOG.info(_LI("In fw_complete needed %(fw_created)s "
                     "%(active_policy_id)s %(is_fw_drvr_created)s "
                     "%(pol_present)s %(fw_type)s"),
                 {'fw_created': self.fw_created,
                  'active_policy_id': self.active_pol_id,
                  'is_fw_drvr_created': self.is_fw_drvr_created(),
                  'pol_present': self.active_pol_id in self.policies,
                  'fw_type': self.fw_type})
        if self.active_pol_id is not None:
            LOG.info(_LI("In Drvr create needed %(len_policy)s %(one_rule)s"),
                     {'len_policy':
                      len(self.policies[self.active_pol_id]['rule_dict']),
                      'one_rule':
                      self.one_rule_present(self.active_pol_id)})
        return self.fw_created and self.active_pol_id and (
            self.is_fw_drvr_created()) and self.fw_type and (
            self.active_pol_id in self.policies) and (
            len(self.policies[self.active_pol_id]['rule_dict'])) > 0 and (
            self.one_rule_present(self.active_pol_id))
Example #16
    def release_subnet(self, subnet_address):

        subnet_addr_int = int(netaddr.IPAddress(subnet_address))
        inside = any(lo <= subnet_addr_int <= hi
                     for lo, hi in self.subnet_ranges)
        session = db.get_session()
        with session.begin(subtransactions=True):
            query = session.query(
                self.model).filter_by(subnet_address=subnet_address)
            if inside:
                count = query.update({
                    "allocated": False,
                    "network_id": None,
                    "subnet_id": None
                })
                if count:
                    LOG.info(
                        _LI("Releasing subnet id %s to pool") % subnet_address)
            else:
                count = query.delete()
                if count:
                    LOG.info(
                        _LI("Releasing subnet %s outside pool") %
                        subnet_address)

        if not count:
            LOG.info(_LI("subnet %s not found") % subnet_address)
Example #17
 def dfa_uplink_restart(self, uplink_dict):
     LOG.info(_LI("Obtained uplink after restart %s "), uplink_dict)
     # This shouldn't happen
     if self.phy_uplink is not None:
         LOG.error(_LE("Uplink detection already done %s"), self.phy_uplink)
         return
     uplink = uplink_dict.get('uplink')
     veth_intf = uplink_dict.get('veth_intf')
     # Logic is as follows:
     # If DB didn't have any uplink it means it's not yet detected or down
     # if DB has uplink and veth, then no need to scan all ports we can
     # start with this veth.
     # If uplink has been removed or modified during restart, then a
     # down will be returned by uplink detection code and it will be
     # removed then.
     # If DB has uplink, but no veth, it's an error condition and in
     # which case remove the uplink port from bridge and start fresh
     if uplink is None or len(uplink) == 0:
         LOG.info(_LI("uplink not discovered yet"))
         self.restart_uplink_called = True
         return
     if veth_intf is not None and len(veth_intf) != 0:
         LOG.info(_LI("veth interface is already added, %(ul)s %(veth)s"),
                  {'ul': uplink, 'veth': veth_intf})
         self.phy_uplink = uplink
         self.veth_intf = veth_intf
         self.restart_uplink_called = True
         return
     LOG.info(_LI("Error case removing the uplink %s from bridge"), uplink)
     ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex, uplink)
     self.restart_uplink_called = True
Example #18
    def is_fw_complete(self):
        """This API returns the completion status of FW.

        This returns True if a FW is created with an active policy that has
        at least one rule associated with it and if a driver init is done
        successfully.
        """
        LOG.info(
            _LI("In fw_complete needed %(fw_created)s "
                "%(active_policy_id)s %(is_fw_drvr_created)s "
                "%(pol_present)s %(fw_type)s"), {
                    'fw_created': self.fw_created,
                    'active_policy_id': self.active_pol_id,
                    'is_fw_drvr_created': self.is_fw_drvr_created(),
                    'pol_present': self.active_pol_id in self.policies,
                    'fw_type': self.fw_type
                })
        if self.active_pol_id is not None:
            LOG.info(
                _LI("In Drvr create needed %(len_policy)s %(one_rule)s"), {
                    'len_policy':
                    len(self.policies[self.active_pol_id]['rule_dict']),
                    'one_rule':
                    self.one_rule_present(self.active_pol_id)
                })
        return self.fw_created and self.active_pol_id and (
            self.is_fw_drvr_created()) and self.fw_type and (
                self.active_pol_id in self.policies) and (len(
                    self.policies[self.active_pol_id]['rule_dict'])) > 0 and (
                        self.one_rule_present(self.active_pol_id))
Example #19
    def _set_default_mobility_domain(self):
        settings = self._get_settings()
        LOG.info(_LI("settings is %s") % settings)

        if ('globalMobilityDomain' in settings.keys()):
            global_md = settings.get('globalMobilityDomain')
            self._default_md = global_md.get('name')
            LOG.info(_LI("setting default md to be %s") % self._default_md)
        else:
            self._default_md = "md0"
Example #20
    def _set_default_mobility_domain(self):
        settings = self._get_settings()
        LOG.info(_LI("settings is %s") % settings)

        if ('globalMobilityDomain' in settings.keys()):
            global_md = settings.get('globalMobilityDomain')
            self._default_md = global_md.get('name')
            LOG.info(_LI("setting default md to be %s") % self._default_md)
        else:
            self._default_md = "md0"
Example #21
 def process_err_queue(self):
     LOG.info(_LI("Entered Err process_q"))
     try:
         while self.err_que.is_not_empty():
             prio, msg = self.err_que.dequeue_nonblock()
             msg_type = msg.msg_type
             LOG.info(_LI("Msg dequeued from err queue type is %d"), msg_type)
             if msg_type == constants.UPLINK_MSG_TYPE:
                 self.que.enqueue(constants.Q_UPL_PRIO, msg)
     except Exception as e:
          LOG.exception(_LE("Exception caught in proc_err_que %s "), str(e))
Example #22
 def process_err_queue(self):
     LOG.info(_LI("Entered Err process_q"))
     try:
         while self.err_que.is_not_empty():
             prio, msg = self.err_que.dequeue_nonblock()
             msg_type = msg.msg_type
             LOG.info(_LI("Msg dequeued from err queue type is %d"),
                      msg_type)
             if msg_type == constants.UPLINK_MSG_TYPE:
                 self.que.enqueue(constants.Q_UPL_PRIO, msg)
     except Exception as e:
          LOG.exception(_LE("Exception caught in proc_err_que %s "), str(e))
Example #23
    def clean_acls(self, conn, intf_segment_dict,
                   segment_nat_dict, parsed_cfg):

        delete_acl_list = []
        is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
        if (is_multi_region_enabled):
            acl_regex = ACL_MULTI_REGION_REGEX
        else:
            acl_regex = ACL_REGEX

        acls = parsed_cfg.find_objects(acl_regex)
        for acl in acls:
            LOG.info(_LI("\nacl: %(acl)s") % {'acl': acl})
            match_obj = re.match(acl_regex, acl.text)

            if (is_multi_region_enabled):
                region_id = match_obj.group(1)
                segment_id = match_obj.group(2)
                if region_id != cfg.CONF.multi_region.region_id:
                    if region_id not in cfg.CONF.multi_region.other_region_ids:
                        delete_acl_list.append(acl.text)
                    else:
                        # skip because some other deployment owns
                        # this configuration
                        continue
            else:
                segment_id = match_obj.group(1)
            segment_id = int(segment_id)
            LOG.info(_LI("   segment_id: %(seg_id)s") % {'seg_id': segment_id})

            # Check that segment_id exists in openstack DB info
            if segment_id not in intf_segment_dict:
                LOG.info(_LI("Segment ID not found, deleting acl"))
                delete_acl_list.append(acl.text)
                continue

            # Check that permit rules match subnets defined on openstack intfs
            if self.check_acl_permit_rules_valid(segment_id,
                                                 acl,
                                                 intf_segment_dict) is False:
                delete_acl_list.append(acl.text)
                continue

            self.existing_cfg_dict['acls'][segment_id] = acl

        if not self.test_mode:
            for acl_cfg in delete_acl_list:
                del_cmd = XML_CMD_TAG % ("no %s" % (acl_cfg))
                confstr = XML_FREEFORM_SNIPPET % (del_cmd)
                LOG.info(_LI("Delete ACL: %(del_cmd)s") % {'del_cmd': del_cmd})
                conn.edit_config(target='running', config=confstr)

        return delete_acl_list
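
In the multi-region branch, ownership is decided from the region ID embedded in the ACL name: this region's ACLs are kept for validation, a known other region's ACLs are skipped, and anything else is deleted. ACL_MULTI_REGION_REGEX is defined elsewhere, so this sketch uses a hypothetical pattern of the same shape:

import re

# Hypothetical stand-in for ACL_MULTI_REGION_REGEX.
ACL_MULTI_REGION_REGEX = r'ip access-list standard neutron_acl_(\w+)_(\d+)'

acl_line = 'ip access-list standard neutron_acl_region2_3001'
m = re.match(ACL_MULTI_REGION_REGEX, acl_line)
region_id, segment_id = m.group(1), int(m.group(2))
assert (region_id, segment_id) == ('region2', 3001)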
Example #24
    def clean_acls(self, conn, intf_segment_dict, segment_nat_dict,
                   parsed_cfg):

        delete_acl_list = []
        is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
        if (is_multi_region_enabled):
            acl_regex = ACL_MULTI_REGION_REGEX
        else:
            acl_regex = ACL_REGEX

        acls = parsed_cfg.find_objects(acl_regex)
        for acl in acls:
            LOG.info(_LI("\nacl: %(acl)s") % {'acl': acl})
            match_obj = re.match(acl_regex, acl.text)

            if (is_multi_region_enabled):
                region_id = match_obj.group(1)
                segment_id = match_obj.group(2)
                if region_id != cfg.CONF.multi_region.region_id:
                    if region_id not in cfg.CONF.multi_region.other_region_ids:
                        delete_acl_list.append(acl.text)
                    else:
                        # skip because some other deployment owns
                        # this configuration
                        continue
            else:
                segment_id = match_obj.group(1)
            segment_id = int(segment_id)
            LOG.info(_LI("   segment_id: %(seg_id)s") % {'seg_id': segment_id})

            # Check that segment_id exists in openstack DB info
            if segment_id not in intf_segment_dict:
                LOG.info(_LI("Segment ID not found, deleting acl"))
                delete_acl_list.append(acl.text)
                continue

            # Check that permit rules match subnets defined on openstack intfs
            if self.check_acl_permit_rules_valid(segment_id, acl,
                                                 intf_segment_dict) is False:
                delete_acl_list.append(acl.text)
                continue

            self.existing_cfg_dict['acls'][segment_id] = acl

        if not self.test_mode:
            for acl_cfg in delete_acl_list:
                del_cmd = XML_CMD_TAG % ("no %s" % (acl_cfg))
                confstr = XML_FREEFORM_SNIPPET % (del_cmd)
                LOG.info(_LI("Delete ACL: %(del_cmd)s") % {'del_cmd': del_cmd})
                conn.edit_config(target='running', config=confstr)

        return delete_acl_list
Example #25
    def subintf_hsrp_ip_check(self, intf_list, is_external, ip_addr):
        for target_intf in intf_list:
            ha_intf = target_intf['ha_info']['ha_port']
            target_ip = ha_intf['fixed_ips'][0]['ip_address']
            LOG.info(_LI("target_ip: %(target_ip)s, actual_ip: %(ip_addr)s") %
                     {'target_ip': target_ip,
                      'ip_addr': ip_addr})
            if ip_addr != target_ip:
                LOG.info(_LI("HSRP VIP mismatch, deleting"))
                return False

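            # Note: returning inside the loop means only the first
            # interface in intf_list is actually checked here.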
            return True

        return False
Example #26
 def process_uplink_event(self, msg, phy_uplink):
     LOG.info(_LI("Received New uplink Msg %(msg)s for uplink %(uplink)s"),
              {'msg': msg.get_status(), 'uplink': phy_uplink})
     if msg.get_status() == 'up':
         ovs_exc_raised = False
         ovs_exc_reason = ""
         try:
             self.ovs_vdp_obj_dict[phy_uplink] = ovs_vdp.OVSNeutronVdp(
                 phy_uplink, msg.get_integ_br(), msg.get_ext_br(),
                 msg.get_root_helper(), self.vdp_vlan_change_cb)
         except Exception as exc:
             ovs_exc_reason = str(exc)
             LOG.error(_LE("OVS VDP Object creation failed %s"),
                       str(ovs_exc_reason))
             ovs_exc_raised = True
         if (ovs_exc_raised or not self.ovs_vdp_obj_dict[phy_uplink].
                 is_lldpad_setup_done()):
             # Is there a way to delete the object??
             if not ovs_exc_reason:
                 uplink_fail_reason = (self.ovs_vdp_obj_dict[phy_uplink].
                                       get_uplink_fail_reason())
             else:
                 uplink_fail_reason = ovs_exc_reason
             LOG.error(_LE("UP Event Processing NOT Complete"))
             self.err_que.enqueue(constants.Q_UPL_PRIO, msg)
             self.save_uplink(uplink=self.phy_uplink,
                              fail_reason=uplink_fail_reason)
         else:
             self.uplink_det_compl = True
             veth_intf = (self.ovs_vdp_obj_dict[self.phy_uplink].
                          get_lldp_local_bridge_port())
             LOG.info(_LI("UP Event Processing Complete Saving uplink "
                          "%(ul)s and veth %(veth)s"),
                      {'ul': self.phy_uplink, 'veth': veth_intf})
             self.save_uplink(uplink=self.phy_uplink, veth_intf=veth_intf)
             self.topo_disc.uncfg_intf(self.phy_uplink)
             self.topo_disc.cfg_intf(veth_intf,
                                     phy_interface=self.phy_uplink)
     elif msg.get_status() == 'down':
         # Free the object fixme(padkrish)
         if phy_uplink in self.ovs_vdp_obj_dict:
             self.ovs_vdp_obj_dict[phy_uplink].clear_obj_params()
         else:
             ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex,
                                             phy_uplink)
         self.save_uplink()
         self.topo_disc.uncfg_intf(self.veth_intf)
         self.topo_disc.cfg_intf(phy_uplink)
Example #27
    def apply_policy(self, policy):
        """Apply a firewall policy. """
        tenant_name = policy['tenant_name']
        fw_id = policy['fw_id']
        fw_name = policy['fw_name']
        LOG.info(
            _LI("asa_apply_policy: tenant=%(tenant)s fw_id=%(fw_id)s "
                "fw_name=%(fw_name)s"), {
                    'tenant': tenant_name,
                    'fw_id': fw_id,
                    'fw_name': fw_name
                })
        cmds = ["conf t", "changeto context " + tenant_name]

        for rule_id, rule in policy['rules'].items():
            acl = self.build_acl(tenant_name, rule)

            LOG.info(
                _LI("rule[%(rule_id)s]: name=%(name)s enabled=%(enabled)s"
                    " protocol=%(protocol)s dport=%(dport)s "
                    "sport=%(sport)s dip=%(dport)s "
                    "sip=%(sip)s action=%(dip)s"), {
                        'rule_id': rule_id,
                        'name': rule.get('name'),
                        'enabled': rule.get('enabled'),
                        'protocol': rule.get('protocol'),
                        'dport': rule.get('dst_port'),
                        'sport': rule.get('src_port'),
                        'dip': rule.get('destination_ip_address'),
                        'sip': rule.get('source_ip_address'),
                        'action': rule.get('action')
                    })

            # remove the old ace for this rule
            if rule_id in self.rule_tbl:
                cmds.append('no ' + self.rule_tbl[rule_id])

            self.rule_tbl[rule_id] = acl
            if tenant_name in self.tenant_rule:
                if rule_id not in self.tenant_rule[tenant_name]['rule_lst']:
                    self.tenant_rule[tenant_name]['rule_lst'].append(rule_id)
            cmds.append(acl)
        cmds.append("access-group " + tenant_name + " global")
        cmds.append("write memory")

        LOG.info(_LI("cmds sent is %s"), cmds)
        data = {"commands": cmds}
        return self.rest_send_cli(data)
Example #28
    def cleanup(self, **kwargs):
        """cleanup ASA context for an edge tenant pair. """
        params = kwargs.get('params')
        LOG.info(_LI("asa_cleanup: tenant %(tenant)s %(in_vlan)d %(out_vlan)d"
                     " %(in_ip)s %(in_mask)s %(out_ip)s %(out_mask)s"),
                 {'tenant': params.get('tenant_name'),
                  'in_vlan': params.get('in_vlan'),
                  'out_vlan': params.get('out_vlan'),
                  'in_ip': params.get('in_ip'),
                  'in_mask': params.get('in_mask'),
                  'out_ip': params.get('out_ip'),
                  'out_mask': params.get('out_mask')})
        inside_vlan = str(params.get('in_vlan'))
        outside_vlan = str(params.get('out_vlan'))
        context = params.get('tenant_name')
        cmds = ["conf t", "changeto system"]
        cmds.append("no context " + context + " noconfirm")
        inside_int = params.get('intf_in') + '.' + inside_vlan
        outside_int = params.get('intf_out') + '.' + outside_vlan
        cmds.append("no interface " + inside_int)
        cmds.append("no interface " + outside_int)
        cmds.append("write memory")
        cmds.append("del /noconfirm disk0:/" + context + ".cfg")

        if context in self.tenant_rule:
            for rule in self.tenant_rule[context].get('rule_lst'):
                del self.rule_tbl[rule]
            del self.tenant_rule[context]
        data = {"commands": cmds}
        return self.rest_send_cli(data)
Example #29
    def _send_request(self, operation, url, payload, desc):
        """Send request to DCNM."""

        res = None
        try:
            payload_json = None
            if payload and payload != '':
                payload_json = jsonutils.dumps(payload)
            self._login()
            desc_lookup = {'POST': ' creation', 'PUT': ' update',
                           'DELETE': ' deletion', 'GET': ' get'}

            res = requests.request(operation, url, data=payload_json,
                                   headers=self._req_headers,
                                   timeout=self.timeout_resp)
            desc += desc_lookup.get(operation, operation.lower())
            LOG.info(_LI("DCNM-send_request: %(desc)s %(url)s %(pld)s"),
                     {'desc': desc, 'url': url, 'pld': payload})

            self._logout()
        except (requests.HTTPError, requests.Timeout,
                requests.ConnectionError) as exc:
            LOG.exception(_LE('Error during request: %s'), exc)
            raise dexc.DfaClientRequestFailed(reason=exc)

        return res
Example #30
 def update_port_postcommit(self, context):
     """Send port parameters to the VSM."""
     port = context.current
     old_port = context.original
     # Perform port update on VSM only if a router or DHCP port is bound.
     if (not old_port['binding:host_id'] and
             (port['device_owner'] in [
                 bc.constants.DEVICE_OWNER_ROUTER_INTF,
                 bc.constants.DEVICE_OWNER_DHCP])):
         session = context._plugin_context.session
         binding = n1kv_db.get_policy_binding(port['id'], session)
         policy_profile = n1kv_db.get_policy_profile_by_uuid(
             session, binding.profile_id)
         if policy_profile is None:
             raise ml2_exc.MechanismDriverError()
         vmnetwork_name = "%s%s_%s" % (n1kv_const.VM_NETWORK_PREFIX,
                                       binding.profile_id,
                                       port['network_id'])
         try:
             # Today an update is just a create, so we call create port
             self.n1kvclient.create_n1kv_port(port,
                                              vmnetwork_name,
                                              policy_profile)
         except(n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
             raise ml2_exc.MechanismDriverError()
         LOG.info(_LI("Update port(postcommit) succeeded for port: "
                      "%(id)s on network: %(network_id)s with policy "
                      "profile ID: %(profile_id)s"),
                  {"network_id": port['network_id'],
                   "id": port['id'],
                   "profile_id": policy_profile.id})
Example #31
    def _set_nat_pool(self, ri, gw_port, is_delete):
        vrf_name = self._get_vrf_name(ri)
        if ri.router.get(ROUTER_ROLE_ATTR) == ROUTER_ROLE_HA_REDUNDANCY:
            the_port = gw_port[ha.HA_INFO]['ha_port']
        else:
            the_port = gw_port
        subnet_id = gw_port['ip_info']['subnet_id']
        fixed_ip = self._get_item(the_port['fixed_ips'], subnet_id)
        pool_ip = fixed_ip['ip_address']
        pool_ip_prefix_len = fixed_ip['prefixlen']
        #TODO(ebobmel) We need to modify the pool name generation if we
        #              will have multiple NAT pools per VRF
        pool_name = "%s_nat_pool" % (vrf_name)
        pool_net = "%s/%s" % (pool_ip, pool_ip_prefix_len)
        pool_net = netaddr.IPNetwork(pool_net)
        if self._fullsync and pool_ip in self._existing_cfg_dict['pools']:
            LOG.info(_LI("Pool already exists, skipping"))
            return

        try:
            if is_delete:
                conf_str = asr1k_snippets.DELETE_NAT_POOL % (
                    pool_name, pool_ip, pool_ip, pool_net.netmask)
                # TODO(update so that hosting device name is passed down)
                self._edit_running_config(conf_str, 'DELETE_NAT_POOL')
            else:
                conf_str = asr1k_snippets.CREATE_NAT_POOL % (
                    pool_name, pool_ip, pool_ip, pool_net.netmask)
                # TODO(update so that hosting device name is passed down)
                self._edit_running_config(conf_str, 'CREATE_NAT_POOL')
        #except cfg_exc.CSR1kvConfigException as cse:
        except Exception as cse:
            LOG.error(
                _LE("Temporary disable NAT_POOL exception handling: "
                    "%s"), cse)
Example #32
    def _create_svc_vm_hosting_devices(self, context, num, template):
        """Creates <num> or less service VM instances based on <template>.

        These hosting devices can be bound to a certain tenant or for shared
        use. A list with the created hosting device VMs is returned.
        """
        hosting_devices = []
        template_id = template['id']
        credentials_id = template['default_credentials_id']
        plugging_drv = self.get_hosting_device_plugging_driver(context,
                                                               template_id)
        hosting_device_drv = self.get_hosting_device_driver(context,
                                                            template_id)
        if plugging_drv is None or hosting_device_drv is None or num <= 0:
            return hosting_devices
        #TODO(bobmel): Determine value for max_hosted properly
        max_hosted = 1  # template['slot_capacity']
        dev_data, mgmt_context = self._get_resources_properties_for_hd(
            template, credentials_id)
        credentials_info = self._credentials.get(credentials_id)
        if credentials_info is None:
            LOG.error(_LE('Could not find credentials for hosting device '
                          'template %s. Aborting VM hosting device creation.'),
                      template_id)
            return hosting_devices
        connectivity_info = self._get_mgmt_connectivity_info(
            context, self.mgmt_subnet_id())
        for i in range(num):
            complementary_id = uuidutils.generate_uuid()
            res = plugging_drv.create_hosting_device_resources(
                context, complementary_id, self.l3_tenant_id(), mgmt_context,
                max_hosted)
            if res.get('mgmt_port') is None:
                # Required ports could not be created
                return hosting_devices
            connectivity_info['mgmt_port'] = res['mgmt_port']
            vm_instance = self.svc_vm_mgr.dispatch_service_vm(
                context, template['name'] + '_nrouter', template['image'],
                template['flavor'], hosting_device_drv, credentials_info,
                connectivity_info, res.get('ports'))
            if vm_instance is not None:
                dev_data.update(
                    {'id': vm_instance['id'],
                     'complementary_id': complementary_id,
                     'management_ip_address': res['mgmt_port'][
                         'fixed_ips'][0]['ip_address'],
                     'management_port_id': res['mgmt_port']['id']})
                self.create_hosting_device(context,
                                           {'hosting_device': dev_data})
                hosting_devices.append(vm_instance)
            else:
                # Fundamental error like could not contact Nova
                # Cleanup anything we created
                plugging_drv.delete_hosting_device_resources(
                    context, self.l3_tenant_id(), **res)
                break
        LOG.info(_LI('Created %(num)d hosting device VMs based on template '
                     '%(t_id)s'), {'num': len(hosting_devices),
                                   't_id': template_id})
        return hosting_devices
Example #33
    def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict):
        """Prepares the Fabric and configures the device.

        This routine calls the fabric class to prepare the fabric when
        a firewall is created. It also calls the device manager to
        configure the device. It updates the database with the final
        result.
        """
        is_fw_virt = self.is_device_virtual()
        ret = self.fabric.prepare_fabric_fw(tenant_id, fw_dict, is_fw_virt,
                                            fw_constants.RESULT_FW_CREATE_INIT)

        if not ret:
            LOG.error(_LE("Prepare Fabric failed"))
            return
        else:
            self.update_fw_db_final_result(fw_dict.get('fw_id'), (
                fw_constants.RESULT_FW_CREATE_DONE))
        ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'),
                                    fw_dict)
        if ret:
            self.fwid_attr[tenant_id].fw_drvr_created(True)
            self.update_fw_db_dev_status(fw_dict.get('fw_id'), 'SUCCESS')
            LOG.info(_LI("FW device create returned success for tenant %s"),
                     tenant_id)
        else:
            LOG.error(_LE("FW device create returned failure for tenant %s"),
                      tenant_id)
Example #34
 def _edit_running_config(self, conf_str, snippet):
     conn = self._get_connection()
     LOG.info(
         _LI("Config generated for [%(device)s] %(snip)s is:%(conf)s " "caller:%(caller)s"),
         {"device": self.hosting_device["id"], "snip": snippet, "conf": conf_str, "caller": self.caller_name()},
     )
     try:
         rpc_obj = conn.edit_config(target="running", config=conf_str)
         self._check_response(rpc_obj, snippet, conf_str=conf_str)
     except Exception as e:
         # Here we catch all exceptions caused by REMOVE_/DELETE_ configs
         # to avoid config agent to get stuck once it hits this condition.
         # This is needed since the current ncclient version (0.4.2)
         # generates an exception when an attempt to configure the device
         # fails by the device (ASR1K router) but it doesn't provide any
         # details about the error message that the device reported.
         # With ncclient 0.4.4 version and onwards the exception returns
         # also the proper error. Hence this code can be changed when the
         # ncclient version is increased.
         if re.search(r"REMOVE_|DELETE_", snippet):
             LOG.warning(_LW("Pass exception for %s"), snippet)
             pass
         elif isinstance(e, ncclient.operations.rpc.RPCError):
             e_tag = e.tag
             e_type = e.type
             params = {
                 "snippet": snippet,
                 "type": e_type,
                 "tag": e_tag,
                 "dev_id": self.hosting_device["id"],
                 "ip": self._host_ip,
                 "confstr": conf_str,
             }
             raise cfg_exc.CSR1kvConfigException(**params)
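
The tolerated-failure test hinges on the snippet naming convention: anything named with a REMOVE_ or DELETE_ prefix is allowed to fail. The check is a plain regex search:

import re

# The naming-convention check used above, exercised standalone.
assert re.search(r"REMOVE_|DELETE_", "DELETE_NAT_POOL")
assert not re.search(r"REMOVE_|DELETE_", "CREATE_NAT_POOL")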
Example #35
    def _fw_policy_create(self, drvr_name, data, cache):
        """Firewall Policy create routine.

        This function updates its local cache with policy parameters.
        It checks if local cache has information about the rules
        associated with the policy. If not, it means a restart has
        happened. It retrieves the rules associated with the policy by
        calling Openstack API's and calls the rule create internal routine.
        """
        policy = {}
        fw_policy = data.get('firewall_policy')
        tenant_id = fw_policy.get('tenant_id')
        LOG.info(_LI("Creating policy for tenant %s"), tenant_id)
        policy_id = fw_policy.get('id')
        policy_name = fw_policy.get('name')
        pol_rule_dict = fw_policy.get('firewall_rules')
        if tenant_id not in self.fwid_attr:
            self.fwid_attr[tenant_id] = FwMapAttr(tenant_id)
        policy['name'] = policy_name
        policy['rule_dict'] = pol_rule_dict
        self.fwid_attr[tenant_id].store_policy(policy_id, policy)
        if not cache:
            self._check_create_fw(tenant_id, drvr_name)
        self.tenant_db.store_policy_tenant(policy_id, tenant_id)
        for rule in pol_rule_dict:
            rule_id = rule
            if not self.fwid_attr[tenant_id].is_rule_present(rule_id):
                rule_data = self.os_helper.get_fw_rule(rule_id)
                if rule_data is not None:
                    self.fw_rule_create(rule_data, cache=cache)
Example #36
    def create_portprofile(self, profile_name, vlan_id, vnic_type, host_id):
        """Top level method to create Port Profiles on the UCS Manager.

        Calls all the methods responsible for the individual tasks that
        ultimately result in the creation of the Port Profile on the UCS
        Manager.
        """
        ucsm_ip = self.ucsm_host_dict.get(host_id)
        if not ucsm_ip:
            LOG.info(_LI('UCS Manager network driver does not support Host_id '
                         '%s'), str(host_id))
            return False

        # Create Vlan Profile
        if not self._create_vlanprofile(vlan_id, ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to create '
                          'Vlan Profile for vlan %s'), str(vlan_id))
            return False

        # Create Port Profile
        if not self._create_port_profile(profile_name, vlan_id, vnic_type,
            ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to create '
                          'Port Profile %s'), profile_name)
            return False

        return True
Example #37
    def _enable_intfs(self, conn):
        """Enable the interfaces of a CSR1kv Virtual Router.

        When the virtual router first boots up, all interfaces except
        management are down. This method will enable all data interfaces.

        Note: In CSR1kv, GigabitEthernet 0 is not present. GigabitEthernet 1
        is used as management and GigabitEthernet 2 and up are used for data.
        This might change in future releases.

        Currently only the second and third Gig interfaces corresponding to a
        single (T1,T2) pair and configured as trunk for VLAN and VXLAN
        is enabled.

        :param conn: Connection object
        :return: True or False
        """

        #ToDo(Hareesh): Interfaces are hard coded for now. Make it dynamic.
        interfaces = ['GigabitEthernet 2', 'GigabitEthernet 3']
        try:
            for i in interfaces:
                confstr = snippets.ENABLE_INTF % i
                rpc_obj = conn.edit_config(target='running', config=confstr)
                if self._check_response(rpc_obj, 'ENABLE_INTF'):
                    LOG.info(_LI("Enabled interface %s "), i)
                    time.sleep(1)
        except Exception:
            return False
        return True
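
Each interface name is substituted into a %s-style config snippet before being pushed via edit_config. The real ENABLE_INTF text lives in the snippets module; the sketch below assumes a plausible CLI-wrapping form:

# Hypothetical stand-in for snippets.ENABLE_INTF.
ENABLE_INTF = ("<config><cli-config-data><cmd>interface %s</cmd>"
               "<cmd>no shutdown</cmd></cli-config-data></config>")

for i in ['GigabitEthernet 2', 'GigabitEthernet 3']:
    confstr = ENABLE_INTF % i   # one edit_config payload per interface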
Example #38
    def _create_token(self):
        """Create new token for using novaclient API."""
        ks = keyc.Client(username=self._user_name,
                         password=self._admin_password,
                         tenant_name=self._tenant_name,
                         auth_url=self._url)
        result = ks.authenticate()
        if result:
            access = ks.auth_ref
            token = access.get('token')
            self._token_id = token['id']
            self._project_id = token['tenant'].get('id')
            service_catalog = access.get('serviceCatalog')
            for sc in service_catalog:
                if sc['type'] == "compute" and sc['name'] == 'nova':
                    endpoints = sc['endpoints']
                    for endp in endpoints:
                        self._auth_url = endp['adminURL']
            LOG.info(_LI('_create_token: token = %s'), token)

            # Create nova client.
            self._novaclnt = self._create_nova_client()

            return token

        else:
            # Failed request.
            LOG.error(_LE('Failed to send token create request.'))
            return
Example #39
    def update_serviceprofile(self, host_id, vlan_id):
        """Top level method to update Service Profiles on UCS Manager.

        Calls all the methods responsible for the individual tasks that
        ultimately result in a vlan_id getting programed on a server's
        ethernet ports and the Fabric Interconnect's network ports.
        """
        ucsm_ip = self.ucsm_host_dict.get(host_id)
        service_profile = self.ucsm_sp_dict.get(ucsm_ip, host_id)
        if service_profile:
            LOG.debug("UCS Manager network driver Service Profile : %s",
                service_profile)
        else:
            LOG.info(_LI('UCS Manager network driver does not support Host_id '
                         '%s'), str(host_id))
            return False

        # Create Vlan Profile
        if not self._create_vlanprofile(vlan_id, ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to create '
                          'Vlan Profile for vlan %s'), str(vlan_id))
            return False

        # Update Service Profile
        if not self._update_service_profile(service_profile, vlan_id, ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to update '
                          'Service Profile %s'), service_profile)
            return False

        return True
Example #40
    def _send_request(self, operation, url, payload, desc):
        """Send request to DCNM."""

        res = None
        try:
            payload_json = None
            if payload and payload != '':
                payload_json = jsonutils.dumps(payload)
            self._login()
            desc_lookup = {'POST': ' creation', 'PUT': ' update',
                           'DELETE': ' deletion', 'GET': ' get'}

            res = requests.request(operation, url, data=payload_json,
                                   headers=self._req_headers,
                                   timeout=self.timeout_resp)
            desc += desc_lookup.get(operation, operation.lower())
            LOG.info(_LI("DCNM-send_request: %(desc)s %(url)s %(pld)s"),
                     {'desc': desc, 'url': url, 'pld': payload})

            self._logout()
        except (requests.HTTPError, requests.Timeout,
                requests.ConnectionError) as exc:
            LOG.exception(_LE('Error during request: %s'), exc)
            raise dexc.DfaClientRequestFailed(reason=exc)

        return res
Example #41
    def _fw_policy_create(self, drvr_name, data, cache):
        """Firewall Policy create routine.

        This function updates its local cache with policy parameters.
        It checks if local cache has information about the rules
        associated with the policy. If not, it means a restart has
        happened. It retrieves the rules associated with the policy by
        calling Openstack API's and calls the rule create internal routine.
        """
        policy = {}
        fw_policy = data.get('firewall_policy')
        tenant_id = fw_policy.get('tenant_id')
        LOG.info(_LI("Creating policy for tenant %s"), tenant_id)
        policy_id = fw_policy.get('id')
        policy_name = fw_policy.get('name')
        pol_rule_dict = fw_policy.get('firewall_rules')
        if tenant_id not in self.fwid_attr:
            self.fwid_attr[tenant_id] = FwMapAttr(tenant_id)
        policy['name'] = policy_name
        policy['rule_dict'] = pol_rule_dict
        self.fwid_attr[tenant_id].store_policy(policy_id, policy)
        if not cache:
            self._check_create_fw(tenant_id, drvr_name)
        self.tenant_db.store_policy_tenant(policy_id, tenant_id)
        for rule in pol_rule_dict:
            rule_id = rule
            if not self.fwid_attr[tenant_id].is_rule_present(rule_id):
                rule_data = self.os_helper.get_fw_rule(rule_id)
                if rule_data is not None:
                    self.fw_rule_create(rule_data, cache=cache)
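A hedged sketch of the payload shape this handler appears to expect; the field names mirror the .get() calls above, while the values are illustrative only:

# Illustrative payload; only the keys read by _fw_policy_create are shown.
data = {
    'firewall_policy': {
        'id': 'pol-0001',
        'name': 'allow-web',
        'tenant_id': 'tenant-0001',
        'firewall_rules': ['rule-0001', 'rule-0002'],  # rule IDs
    },
}
fw_policy = data.get('firewall_policy')
print(fw_policy['tenant_id'], fw_policy['firewall_rules'])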
Example #42
    def retry_failure_fab_dev_create(self, tenant_id, fw_data, fw_dict):
        """This module calls routine in fabric to retry the failure cases.

        If device is not successfully cfg/uncfg, it calls the device manager
        routine to cfg/uncfg the device.
        """
        result = fw_data.get('result').split('(')[0]
        is_fw_virt = self.is_device_virtual()
        # Fabric portion
        if result == fw_constants.RESULT_FW_CREATE_INIT:
            name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
            ret = self.fabric.retry_failure(tenant_id, name, fw_dict,
                                            is_fw_virt, result)
            if not ret:
                LOG.error(_LE("Retry failure returned fail for tenant %s"),
                          tenant_id)
                return
            else:
                result = fw_constants.RESULT_FW_CREATE_DONE
                self.update_fw_db_final_result(fw_dict.get('fw_id'), result)
        # Device portion
        if result == fw_constants.RESULT_FW_CREATE_DONE:
            if fw_data.get('device_status') != 'SUCCESS':
                ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'),
                                            fw_dict)
                if ret:
                    self.fwid_attr[tenant_id].fw_drvr_created(True)
                    self.update_fw_db_dev_status(fw_dict.get('fw_id'),
                                                 'SUCCESS')
                    LOG.info(
                        _LI("Retry failure returned success for create"
                            " tenant %s"), tenant_id)
Example #43
    def send_vdp_port_event(self, port_uuid, mac, net_uuid,
                            segmentation_id, status, oui):
        '''Send vNIC UP/Down event to VDP.

        :param port_uuid: the UUID of the vNIC port
        :param mac: MAC address of the vNIC
        :param net_uuid: the net_uuid this port is to be associated with.
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        :param status: type of port event, 'up' or 'down'
        :param oui: OUI parameters
        '''
        lldpad_port = self.lldpad_info
        if not lldpad_port:
            LOG.error(_LE("There is no LLDPad port available."))
            return False

        ret = False
        if status == 'up':
            if self.vdp_mode == constants.VDP_SEGMENT_MODE:
                port_name = self.ext_br_obj.get_ofport_name(port_uuid)
                if port_name is None:
                    LOG.error(_LE("Unknown portname for uuid %s"), port_uuid)
                    return False
                LOG.info(_LI('portname for uuid %(uuid)s is %(name)s'),
                         {'uuid': port_uuid, 'name': port_name})
                ret = self.port_up_segment_mode(lldpad_port, port_name,
                                                port_uuid, mac, net_uuid,
                                                segmentation_id, oui)
        else:
            if self.vdp_mode == constants.VDP_SEGMENT_MODE:
                ret = self.port_down_segment_mode(lldpad_port, port_uuid,
                                                  mac, net_uuid,
                                                  segmentation_id, oui)
        return ret
Example #44
    def _enable_itfcs(self, conn):
        """Enable the interfaces of a CSR1kv Virtual Router.

        When the virtual router first boots up, all interfaces except
        management are down. This method will enable all data interfaces.

        Note: In CSR1kv, GigabitEthernet 0 is not present. GigabitEthernet 1
        is used as management and GigabitEthernet 2 and up are used for data.
        This might change in future releases.

        Currently only the second and third Gig interfaces corresponding to a
        single (T1,T2) pair and configured as trunk for VLAN and VXLAN
        is enabled.

        :param conn: Connection object
        :return: True or False
        """

        # ToDo(Hareesh): Interfaces are hard coded for now. Make it dynamic.
        interfaces = ["GigabitEthernet 2", "GigabitEthernet 3"]
        try:
            for i in interfaces:
                conf_str = snippets.ENABLE_INTF % i
                rpc_obj = conn.edit_config(target="running", config=conf_str)
                if self._check_response(rpc_obj, "ENABLE_INTF"):
                    LOG.info(_LI("Enabled interface %s "), i)
                    time.sleep(1)
        except Exception:
            return False
        return True
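snippets.ENABLE_INTF is not included in this listing; a hypothetical stand-in in the same spirit (an IOS-XE 'no shutdown' wrapped for edit_config) would look roughly like the sketch below. The real constant lives in the driver's snippets module and may differ.

# Hypothetical stand-in for snippets.ENABLE_INTF.
ENABLE_INTF = """
<config>
        <cli-config-data>
            <cmd>interface %s</cmd>
            <cmd>no shutdown</cmd>
        </cli-config-data>
</config>
"""

for intf in ("GigabitEthernet 2", "GigabitEthernet 3"):
    print(ENABLE_INTF % intf)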
Example #45
 def mgmt_nw_id(cls):
     """Returns id of the management network."""
     if cls._mgmt_nw_uuid is None:
         tenant_id = cls.l3_tenant_id()
         if not tenant_id:
             return
         net = manager.NeutronManager.get_plugin().get_networks(
             neutron_context.get_admin_context(),
             {'tenant_id': [tenant_id],
              'name': [cfg.CONF.general.management_network]},
             ['id', 'subnets'])
         if len(net) == 1:
             num_subnets = len(net[0]['subnets'])
             if num_subnets == 0:
                 LOG.error(_LE('The management network has no subnet. '
                               'Please assign one.'))
                 return
             elif num_subnets > 1:
                 LOG.info(_LI('The management network has %d subnets. The '
                              'first one will be used.'), num_subnets)
             cls._mgmt_nw_uuid = net[0].get('id')
             cls._mgmt_subnet_uuid = net[0]['subnets'][0]
         elif len(net) > 1:
             # Management network must have a unique name.
             LOG.error(_LE('The management network does not have a '
                           'unique name. Please ensure that it does.'))
         else:
             # Management network has not been created.
             LOG.error(_LE('There is no virtual management network. Please '
                           'create one.'))
     return cls._mgmt_nw_uuid
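The branching on the query result can be shown in isolation; the sketch below mirrors it on plain dicts (error logging elided, names hypothetical):

def pick_mgmt_network(nets):
    """Mirror mgmt_nw_id's branching on a plain list of network dicts."""
    if len(nets) == 1:
        if not nets[0]['subnets']:
            return None            # no subnet assigned: treated as an error
        return nets[0]['id']       # first subnet wins if there are several
    return None                    # zero matches or a non-unique name

print(pick_mgmt_network([{'id': 'net-1', 'subnets': ['sub-1', 'sub-2']}]))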
Example #46
 def process_vm_event(self, msg, phy_uplink):
     LOG.info(
         _LI("In processing VM Event status %(status)s for MAC "
             "%(mac)s UUID %(uuid)s oui %(oui)s"), {
                 'status': msg.get_status(),
                 'mac': msg.get_mac(),
                 'uuid': msg.get_port_uuid(),
                 'oui': msg.get_oui()
             })
     time.sleep(10)
     if (not self.uplink_det_compl
             or phy_uplink not in self.ovs_vdp_obj_dict):
         LOG.error(_LE("Uplink Port Event not received yet"))
         self.update_vm_result(msg.get_port_uuid(), constants.CREATE_FAIL)
         return
     ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink]
     ret = ovs_vdp_obj.send_vdp_port_event(msg.get_port_uuid(),
                                           msg.get_mac(),
                                           msg.get_net_uuid(),
                                           msg.get_segmentation_id(),
                                           msg.get_status(), msg.get_oui())
     if not ret:
         LOG.error(_LE("Error in VDP port event, Err Queue enq"))
         self.update_vm_result(msg.get_port_uuid(), constants.CREATE_FAIL)
     else:
         self.update_vm_result(msg.get_port_uuid(),
                               constants.RESULT_SUCCESS)
Example #47
 def _create_sub_interface(self, ri, port, is_external=False, gw_ip=""):
     vlan = self._get_interface_vlan_from_hosting_port(port)
     if (self._fullsync
             and int(vlan) in self._existing_cfg_dict['interfaces']):
         LOG.info(_LI("Sub-interface already exists, skipping"))
         return
     vrf_name = self._get_vrf_name(ri)
     net_mask = netaddr.IPNetwork(port['ip_info']['ip_cidr']).netmask
     # get port's ip address for the subnet we're processing
     hsrp_ip = self._get_item(port['fixed_ips'],
                              port['ip_info']['subnet_id'])['ip_address']
     sub_interface = self._get_interface_name_from_hosting_port(port)
     if port['ip_info']['is_primary'] is True:
         self._do_create_sub_interface(sub_interface, vlan, vrf_name,
                                       hsrp_ip, net_mask, is_external)
     else:
         # this will only happen for global routers
         self._do_set_secondary(sub_interface, hsrp_ip, net_mask)
     # Always do HSRP
     if ri.router.get(ha.ENABLED, False):
         if port.get(ha.HA_INFO) is not None:
             self._add_ha_hsrp(ri, port)
         else:
             # We are missing HA data, candidate for retrying
             params = {
                 'r_id': ri.router_id,
                 'p_id': port['id'],
                 'port': port
             }
             raise cfg_exc.HAParamsMissingException(**params)
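The netmask above comes straight from netaddr, the library the snippet already uses; a one-liner shows the derivation (the CIDR is illustrative):

import netaddr

# port['ip_info']['ip_cidr'] would carry something like '10.0.3.5/24'.
print(netaddr.IPNetwork('10.0.3.5/24').netmask)   # 255.255.255.0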
Example #49
    def _agent_registration(self):
        """Register this agent with the server.

        This method registers the cfg agent with the neutron server so hosting
        devices can be assigned to it. In case the server is not ready to
        accept registration (it sends a False) then we retry registration
        for `MAX_REGISTRATION_ATTEMPTS` with a delay of
        `REGISTRATION_RETRY_DELAY`. If there is no server response or a
        failure to register after the required number of attempts,
        the agent stops itself.
        """
        for attempts in range(MAX_REGISTRATION_ATTEMPTS):
            context = n_context.get_admin_context_without_session()
            self.send_agent_report(self.agent_state, context)
            res = self.devmgr_rpc.register_for_duty(context)
            if res is True:
                LOG.info(_LI("[Agent registration] Agent successfully "
                             "registered"))
                return
            elif res is False:
                LOG.warning(_LW("[Agent registration] Neutron server said "
                                "that device manager was not ready. Retrying "
                                "in %0.2f seconds "), REGISTRATION_RETRY_DELAY)
                time.sleep(REGISTRATION_RETRY_DELAY)
            elif res is None:
                LOG.error(_LE("[Agent registration] Neutron server said that "
                              "no device manager was found. Cannot continue. "
                              "Exiting!"))
                raise SystemExit(_("Cfg Agent exiting"))
        LOG.error(_LE("[Agent registration] %d unsuccessful registration "
                      "attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
        raise SystemExit(_("Cfg Agent exiting"))
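The loop is bounded by two module-level constants that this listing does not include; with hypothetical values, the worst-case wait before the agent gives up is easy to see:

# Hypothetical values; the agent module defines the real ones.
MAX_REGISTRATION_ATTEMPTS = 30
REGISTRATION_RETRY_DELAY = 2.0

print('worst case: %0.2f seconds' %
      (MAX_REGISTRATION_ATTEMPTS * REGISTRATION_RETRY_DELAY))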
Example #50
 def set_port_vlan(self, vdp_vlan):
     if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
         LOG.info(_LI("Passed Invalid vlan in set_port_vlan"))
         return
     if vdp_vlan not in self.port_vdp_vlan_dict:
         self.port_vdp_vlan_dict[vdp_vlan] = 0
     self.port_vdp_vlan_dict[vdp_vlan] += 1
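set_port_vlan reference-counts how many ports ride each VDP VLAN. The matching decrement is not part of this listing; a hypothetical companion (the name and the flow cleanup are assumptions) might look like:

def unset_port_vlan(port_vdp_vlan_dict, vdp_vlan):
    """Hypothetical companion: decrement and drop the entry at zero."""
    if vdp_vlan not in port_vdp_vlan_dict:
        return
    port_vdp_vlan_dict[vdp_vlan] -= 1
    if port_vdp_vlan_dict[vdp_vlan] <= 0:
        # the real driver would presumably also remove flows for this VLAN
        del port_vdp_vlan_dict[vdp_vlan]

counts = {}
for _ in range(2):                      # two ports come up on VLAN 100
    counts[100] = counts.get(100, 0) + 1
unset_port_vlan(counts, 100)            # one goes down
print(counts)                           # {100: 1}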
Example #51
 def process_vm_event(self, msg, phy_uplink):
     LOG.info(
         _LI("In processing VM Event status %(status)s for MAC " "%(mac)s UUID %(uuid)s oui %(oui)s"),
         {"status": msg.get_status(), "mac": msg.get_mac(), "uuid": msg.get_port_uuid(), "oui": msg.get_oui()},
     )
     time.sleep(10)
     if msg.get_status() == "up":
         res_fail = constants.CREATE_FAIL
     else:
         res_fail = constants.DELETE_FAIL
     if not self.uplink_det_compl or phy_uplink not in self.ovs_vdp_obj_dict:
         LOG.error(_LE("Uplink Port Event not received yet"))
         self.update_vm_result(msg.get_port_uuid(), res_fail)
         return
     ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink]
     ret = ovs_vdp_obj.send_vdp_port_event(
         msg.get_port_uuid(),
         msg.get_mac(),
         msg.get_net_uuid(),
         msg.get_segmentation_id(),
         msg.get_status(),
         msg.get_oui(),
     )
     if not ret:
         LOG.error(_LE("Error in VDP port event, Err Queue enq"))
         self.update_vm_result(msg.get_port_uuid(), res_fail)
     else:
         self.update_vm_result(msg.get_port_uuid(), constants.RESULT_SUCCESS)
Example #52
    def create_portprofile(self, profile_name, vlan_id, vnic_type, host_id):
        """Top level method to create Port Profiles on the UCS Manager.

        Calls all the methods responsible for the individual tasks that
        ultimately result in the creation of the Port Profile on the UCS
        Manager.
        """
        ucsm_ip = self.get_ucsm_ip_for_host(host_id)
        if not ucsm_ip:
            LOG.info(
                _LI('UCS Manager network driver does not support Host_id '
                    '%s'), str(host_id))
            return False

        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            # Create Vlan Profile
            if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
                LOG.error(
                    _LE('UCS Manager network driver failed to create '
                        'Vlan Profile for vlan %s'), str(vlan_id))
                return False

            # Create Port Profile
            if not self._create_port_profile(handle, profile_name, vlan_id,
                                             vnic_type, ucsm_ip):
                LOG.error(
                    _LE('UCS Manager network driver failed to create '
                        'Port Profile %s'), profile_name)
                return False

        return True
Example #53
    def create_fw(self, tenant_id, data):
        LOG.info(_LI("In creating phy ASA FW data is %s"), data)
        tenant_name = data.get('tenant_name')
        in_ip_dict = self.get_in_ip_addr(tenant_id)
        in_gw = in_ip_dict.get('gateway')
        in_sec_gw = in_ip_dict.get('sec_gateway')
        in_serv_node = self.get_in_srvc_node_ip_addr(tenant_id)
        out_ip_dict = self.get_out_ip_addr(tenant_id)
        out_ip_gw = out_ip_dict.get('gateway')
        out_sec_gw = out_ip_dict.get('sec_gateway')
        out_serv_node = self.get_out_srvc_node_ip_addr(tenant_id)
        in_seg, in_vlan = self.get_in_seg_vlan(tenant_id)
        out_seg, out_vlan = self.get_out_seg_vlan(tenant_id)

        kw = {'params': {'tenant_name': tenant_name,
                         'in_vlan': in_vlan, 'out_vlan': out_vlan,
                         'in_ip': in_serv_node, 'in_mask': '255.255.255.0',
                         'in_gw': in_gw, 'in_sec_gw': in_sec_gw,
                         'out_ip': out_serv_node, 'out_mask': '255.255.255.0',
                         'out_gw': out_ip_gw, 'out_sec_gw': out_sec_gw,
                         'intf_in': self.interface_in,
                         'intf_out': self.interface_out}}
        status = self.asa5585.setup(**kw)
        if status is False:
            LOG.error(_LE("Physical FW instance creation failure for "
                      "tenant %s"), tenant_name)
            return False

        status = self.asa5585.apply_policy(data)
        if status is False:
            LOG.error(_LE("Applying FW policy failure for tenant %s"),
                      tenant_name)

        return status
Example #56
    def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict):
        """Prepares the Fabric and configures the device.

        This routine calls the fabric class to prepare the fabric when
        a firewall is created. It also calls the device manager to
        configure the device. It updates the database with the final
        result.
        """
        is_fw_virt = self.is_device_virtual()
        ret = self.fabric.prepare_fabric_fw(tenant_id, fw_dict, is_fw_virt,
                                            fw_constants.RESULT_FW_CREATE_INIT)

        if not ret:
            LOG.error(_LE("Prepare Fabric failed"))
            return
        else:
            self.update_fw_db_final_result(
                fw_dict.get('fw_id'), (fw_constants.RESULT_FW_CREATE_DONE))
        ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict)
        if ret:
            self.fwid_attr[tenant_id].fw_drvr_created(True)
            self.update_fw_db_dev_status(fw_dict.get('fw_id'), 'SUCCESS')
            LOG.info(_LI("FW device create returned success for tenant %s"),
                     tenant_id)
        else:
            LOG.error(_LE("FW device create returned failure for tenant %s"),
                      tenant_id)
Example #57
 def update_port_postcommit(self, context):
     """Send port parameters to the VSM."""
     port = context.current
     old_port = context.original
     # Perform port update on VSM only if a router or DHCP port is bound.
     if (not old_port['binding:host_id'] and
             (port['device_owner'] in [n_const.DEVICE_OWNER_ROUTER_INTF,
                                       n_const.DEVICE_OWNER_DHCP])):
         session = context._plugin_context.session
         binding = n1kv_db.get_policy_binding(port['id'], session)
         policy_profile = n1kv_db.get_policy_profile_by_uuid(
             session, binding.profile_id)
         if policy_profile is None:
             raise ml2_exc.MechanismDriverError()
         vmnetwork_name = "%s%s_%s" % (n1kv_const.VM_NETWORK_PREFIX,
                                       binding.profile_id,
                                       port['network_id'])
         try:
             # Today an update is just a create, so we call create port
             self.n1kvclient.create_n1kv_port(port,
                                              vmnetwork_name,
                                              policy_profile)
          except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
             raise ml2_exc.MechanismDriverError()
         LOG.info(_LI("Update port(postcommit) succeeded for port: "
                      "%(id)s on network: %(network_id)s with policy "
                      "profile ID: %(profile_id)s"),
                  {"network_id": port['network_id'],
                   "id": port['id'],
                   "profile_id": policy_profile.id})
 def _check_config_agents(self):
     e_context = n_context.get_admin_context()
     if not self._cfg_agent_statuses:
         self._sync_config_agent_monitoring(e_context)
     to_remove = []
     LOG.debug('In _check_config_agents: Monitored config agents %s:' %
               self._cfg_agent_statuses.keys())
     for cfg_agent_id, info in six.iteritems(self._cfg_agent_statuses):
         if self.should_check_agent(info['timestamp']):
             # agent has not been checked recently so do it now
             LOG.debug('Must check status of config agent %s' %
                       cfg_agent_id)
             try:
                 agent = self.get_agent(e_context, cfg_agent_id)
             except ext_agent.AgentNotFound:
                 LOG.debug('Config agent %s does not exist anymore. Will '
                           'remove it from monitored config agents' %
                           cfg_agent_id)
                 to_remove.append(cfg_agent_id)
                 continue
             info['timestamp'] = timeutils.utcnow()
             if self.is_agent_down(agent['heartbeat_timestamp']):
                 LOG.info(_LI('Config agent %s is not alive. Un-assigning '
                              'hosting devices managed by it.'),
                          cfg_agent_id)
                 self._reschedule_hosting_devices(e_context, cfg_agent_id)
     for cfg_agent_id in to_remove:
         LOG.debug('Removing config agent %s from monitored config agents' %
                   cfg_agent_id)
         del self._cfg_agent_statuses[cfg_agent_id]
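should_check_agent and is_agent_down are defined elsewhere; both are presumably simple timestamp comparisons. A hedged sketch of the gating, with an assumed interval:

import datetime

MONITOR_INTERVAL = datetime.timedelta(seconds=20)   # assumed value

def should_check_agent(last_checked, now=None):
    """Re-check an agent only when its last check is older than the interval."""
    now = now or datetime.datetime.utcnow()
    return now - last_checked >= MONITOR_INTERVAL

stale = datetime.datetime.utcnow() - datetime.timedelta(seconds=30)
print(should_check_agent(stale))   # True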