Example #1
0
    def _create_ha_network(self, context, tenant_id):
        admin_ctx = context.elevated()

        args = {'network':
                {'name': constants.HA_NETWORK_NAME % tenant_id,
                 'tenant_id': '',
                 'shared': False,
                 'admin_state_up': True,
                 'status': constants.NET_STATUS_ACTIVE}}
        network = self._core_plugin.create_network(admin_ctx, args)
        try:
            ha_network = self._create_ha_network_tenant_binding(admin_ctx,
                                                                tenant_id,
                                                                network['id'])
        except Exception:
            with excutils.save_and_reraise_exception():
                self._core_plugin.delete_network(admin_ctx, network['id'])

        try:
            self._create_ha_subnet(admin_ctx, network['id'], tenant_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._core_plugin.delete_network(admin_ctx, network['id'])

        return ha_network

    def create_vip(self, context, edge_id, vip):
        app_profile = self._convert_app_profile(
            vip['name'], vip.get('session_persistence'))
        try:
            header, response = self.vcns.create_app_profile(
                edge_id, app_profile)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create app profile on edge: %s"),
                              edge_id)
        objuri = header['location']
        app_profileid = objuri[objuri.rfind("/") + 1:]

        vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid)
        try:
            header, response = self.vcns.create_vip(
                edge_id, vip_new)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create vip on vshield edge: %s"),
                              edge_id)
        objuri = header['location']
        vip_vseid = objuri[objuri.rfind("/") + 1:]

        # Add the vip mapping
        map_info = {
            "vip_id": vip['id'],
            "vip_vseid": vip_vseid,
            "edge_id": edge_id,
            "app_profileid": app_profileid
        }
        vcns_db.add_vcns_edge_vip_binding(context.session, map_info)
Example #3
0
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #4
0
def add_neutron_nsx_port_mapping(session, neutron_id,
                                 nsx_switch_id, nsx_port_id):
    session.begin(subtransactions=True)
    try:
        mapping = models.NeutronNsxPortMapping(
            neutron_id, nsx_switch_id, nsx_port_id)
        session.add(mapping)
        session.commit()
    except db_exc.DBDuplicateEntry:
        with excutils.save_and_reraise_exception() as ctxt:
            session.rollback()
            # do not complain if the same exact mapping is being added,
            # otherwise re-raise because even though it is possible for the
            # same neutron port to map to different back-end ports over time,
            # this should not occur whilst a mapping already exists
            current = get_nsx_switch_and_port_id(session, neutron_id)
            if current[1] == nsx_port_id:
                LOG.debug(_("Port mapping for %s already available"),
                          neutron_id)
                ctxt.reraise = False
    except db_exc.DBError:
        with excutils.save_and_reraise_exception():
            # rollback for any other db error
            session.rollback()
    return mapping

    def create_loadbalancer(self, context, loadbalancer):
        with context.session.begin(subtransactions=True):
            self._load_id_and_tenant_id(context, loadbalancer)
            vip_address = loadbalancer.pop('vip_address')
            securitygroup_id = loadbalancer.get('securitygroup_id')
            loadbalancer['status'] = constants.PENDING_CREATE
            loadbalancer['created_at'] = timeutils.utcnow()
            lb_db = models.LoadBalancer(**loadbalancer)
            context.session.add(lb_db)
            context.session.flush()
            lb_db.stats = self._create_loadbalancer_stats(
                context, lb_db.id)
            context.session.add(lb_db)

        # create port outside of lb create transaction since it can sometimes
        # cause lock wait timeouts
        try:
            self._create_port_for_load_balancer(
                context, lb_db, vip_address, securitygroup_id)
        except ext_sg.SecurityGroupNotFound:
            LOG.error('Security group not found while creating port for '
                      'load balancer %s', lb_db.id)
            with excutils.save_and_reraise_exception():
                context.session.delete(lb_db)
                context.session.flush()
                raise loadbalancerv2.SecurityGroupNotFound(id=lb_db.id)
        except Exception:
            LOG.error('Failed to create port for load balancer %s', lb_db.id)
            with excutils.save_and_reraise_exception():
                context.session.delete(lb_db)
                context.session.flush()
        return data_models.LoadBalancer.from_sqlalchemy_model(lb_db)
Example #6
0
    def _add_rule_below(self, context, ref_rule_id, edge_id, firewall_rule):
        rule_map = vcns_db.get_vcns_edge_firewallrule_binding(context.session, ref_rule_id, edge_id)
        ref_vcns_rule_id = rule_map.rule_vseid
        fwr_vse_next = self._get_firewall_rule_next(context, edge_id, ref_vcns_rule_id)
        fwr_req = self._convert_firewall_rule(context, firewall_rule)
        if fwr_vse_next:
            ref_vcns_rule_id = fwr_vse_next["ruleId"]
            try:
                header = self.vcns.add_firewall_rule_above(edge_id, int(ref_vcns_rule_id), fwr_req)[0]
            except vcns_exc.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    LOG.exception(
                        _("Failed to add firewall rule above: " "%(rule_id)s with edge_id: %(edge_id)s"),
                        {"rule_id": ref_vcns_rule_id, "edge_id": edge_id},
                    )
        else:
            # append the rule at the bottom
            try:
                header = self.vcns.add_firewall_rule(edge_id, fwr_req)[0]
            except vcns_exc.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_("Failed to append a firewall rule" "with edge_id: %s"), edge_id)

        objuri = header["location"]
        fwr_vseid = objuri[objuri.rfind("/") + 1 :]
        map_info = {"rule_id": firewall_rule["id"], "rule_vseid": fwr_vseid, "edge_id": edge_id}
        vcns_db.add_vcns_edge_firewallrule_binding(context.session, map_info)
Example #7
0
    def create_router(self, host, username, password, rbridge_id, router_id):
        """create vrf and associate vrf."""
        router_id = router_id[0:11]
        vrf_name = template.OS_VRF_NAME.format(id=router_id)
        rd = router_id + ":" + router_id
        try:
            mgr = self.connect(host, username, password)
            self.create_vrf(mgr, rbridge_id, vrf_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("NETCONF error"))
                self.close_session()
        try:
            # For Nos5.0.0
            self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
            self.configure_address_family_for_vrf(mgr, rbridge_id, vrf_name)
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                try:
                    # This is done because on 4.0.0 the rd doesn't accept
                    # alphabetic characters or hyphens
                    rd = "".join(i for i in router_id if i in "0123456789")
                    rd = rd[:4] + ":" + rd[:4]
                    self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
                    self.configure_address_family_for_vrf_v1(mgr,
                                                             rbridge_id,
                                                             vrf_name)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("NETCONF error"))
                        self.close_session()

                ctxt.reraise = False
Example #8
0
    def set_driver(self, resource):
        """Set the driver for a neutron resource.

        :param resource: Neutron resource in dict format. Expected keys:
                        { 'id': <value>
                          'hosting_device': { 'id': <value>, }
                          'router_type': {'cfg_agent_driver': <value>,  }
                        }
        :return driver : driver object
        """
        try:
            resource_id = resource['id']
            hosting_device = resource['hosting_device']
            hd_id = hosting_device['id']
            if hd_id in self._hosting_device_routing_drivers_binding:
                driver = self._hosting_device_routing_drivers_binding[hd_id]
                self._drivers[resource_id] = driver
            else:
                driver_class = resource['router_type']['cfg_agent_driver']
                driver = importutils.import_object(driver_class,
                                                   **hosting_device)
                self._hosting_device_routing_drivers_binding[hd_id] = driver
                self._drivers[resource_id] = driver
            return driver
        except ImportError:
            LOG.exception(_("Error loading cfg agent driver %(driver)s for "
                            "hosting device template %(t_name)s(%(t_id)s)"),
                          {'driver': driver_class, 't_id': hd_id,
                           't_name': hosting_device['name']})
            with excutils.save_and_reraise_exception(reraise=False):
                raise cfg_exceptions.DriverNotExist(driver=driver_class)
        except KeyError as e:
            with excutils.save_and_reraise_exception(reraise=False):
                raise cfg_exceptions.DriverNotSetForMissingParameter(e)

    def create_router(self, context, router):
        is_ha = self._is_ha(router['router'])

        if is_ha and l3_dvr_db.is_distributed_router(router['router']):
            raise l3_ha.DistributedHARouterNotSupported()

        router['router']['ha'] = is_ha
        router_dict = super(L3_HA_NAT_db_mixin,
                            self).create_router(context, router)

        if is_ha:
            if cfg.CONF.mulity_ha_network_one_tenant:
                router_db = self._get_router(context, router_dict['id'])
                ha_network_list = self.get_ha_network_list(
                    context, router_db.tenant_id)
                if not ha_network_list:
                    ha_network_list = [self._create_ha_network(
                        context, router_db.tenant_id)]

                def get_vr_id():
                    allocated = False
                    for ha_network in ha_network_list:
                        try:
                            self._set_vr_id(context, router_db, ha_network)
                            allocated = True
                            LOG.debug("Got a vr_id from HA network %s",
                                      ha_network.network['id'])
                            break
                        except l3_ha.NoVRIDAvailable:
                            LOG.debug("HA network %s has no vr_id available",
                                      ha_network.network['id'])

                    if not allocated:
                        LOG.debug("Creating a new HA network")
                        ha_network = self._create_ha_network(
                            context, router_db.tenant_id)
                        self._set_vr_id(context, router_db, ha_network)
                    self._create_router_ha_network_info(
                        context, router_db.id, ha_network.network['id'])
                    self._create_ha_interfaces(context, router_db, ha_network)
                    self._notify_ha_interfaces_updated(context, router_db.id)
                    router_dict['ha_vr_id'] = (
                        router_db.extra_attributes.ha_vr_id)

                try:
                    get_vr_id()
                except Exception:
                    with excutils.save_and_reraise_exception():
                        self.delete_router(context, router_dict['id'])
            else:
                try:
                    router_db = self._get_router(context, router_dict['id'])
                    ha_network = self.get_ha_network(
                        context, router_db.tenant_id)
                    if not ha_network:
                        ha_network = self._create_ha_network(
                            context, router_db.tenant_id)

                    self._set_vr_id(context, router_db, ha_network)
                    self._create_ha_interfaces(context, router_db, ha_network)
                    self._notify_ha_interfaces_updated(context, router_db.id)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        self.delete_router(context, router_dict['id'])
                router_dict['ha_vr_id'] = router_db.extra_attributes.ha_vr_id
        return router_dict
Example #10
0
 def delete_router(self, context, router_id):
     """ delete a vrf on NOS device."""
     router = super(BrocadeSVIPlugin, self).get_router(context, router_id)
     super(BrocadeSVIPlugin, self).delete_router(context, router_id)
     try:
         switch = self._switch
         self._driver.delete_router(switch['address'],
                                    switch['username'],
                                    switch['password'],
                                    switch['rbridge_id'],
                                    str(router['id']))
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.exception("Failed to delete router %s on the NOS device",
                           router_id)

    def __init__(self, host, conf=None):
        super(L3NATAgent, self).__init__()
        if conf:
            self.conf = conf
        else:
            self.conf = cfg.CONF
        self.router_info = {}
        self.virtual_ip = ''
        self.virtual_mac = ''
        self.context = context.get_admin_context_without_session()
        self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
        self.dvr_base_mac = self.plugin_rpc.get_dvr_base_mac(self.context)
        self._setup_dataenigine_rpc()
        self.dataengine_rpc.delete_all_flows()
        self.fullsync = True

        # Get the list of service plugins from Neutron Server
        # This is the first place where we contact neutron-server on startup
        # so retry in case its not ready to respond.
        retry_count = 5
        while True:
            retry_count = retry_count - 1
            try:
                self.neutron_service_plugins = (
                    self.plugin_rpc.get_service_plugin_list(self.context))
            except n_rpc.RemoteError as e:
                with excutils.save_and_reraise_exception() as ctx:
                    ctx.reraise = False
                    LOG.warning(_LW('l3-agent cannot check service plugins '
                                    'enabled at the neutron server when '
                                    'startup due to RPC error. It happens '
                                    'when the server does not support this '
                                    'RPC API. If the error is '
                                    'UnsupportedVersion you can ignore this '
                                    'warning. Detail message: %s'), e)
                self.neutron_service_plugins = None
            except messaging.MessagingTimeout as e:
                with excutils.save_and_reraise_exception() as ctx:
                    if retry_count > 0:
                        ctx.reraise = False
                        LOG.warning(_LW('l3-agent cannot check service '
                                        'plugins enabled on the neutron '
                                        'server. Retrying. '
                                        'Detail message: %s'), e)
                        continue
            break

        self._queue = RouterProcessingQueue()
Example #12
0
 def get_bridge_name_for_port_name(self, port_name):
     try:
         return self.run_vsctl(['port-to-br', port_name], check_error=True)
     except RuntimeError as e:
         with excutils.save_and_reraise_exception() as ctxt:
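             # 'ovs-vsctl port-to-br' exits with code 1 when the port is not
             # attached to any bridge; suppress re-raising so the caller just
             # gets None in that case.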
             if 'Exit code: 1\n' in str(e):
                 ctxt.reraise = False
Example #13
0
    def create_network(self, host, username, password, net_id):
        """Creates a new virtual network."""

        domain_name = "default"
        name = template.OS_PORT_PROFILE_NAME.format(id=net_id)
        try:
            mgr = self.connect(host, username, password)
            self.create_vlan_interface(mgr, net_id)
            self.create_port_profile(mgr, name)

            if self._pp_domains_supported and self._virtual_fabric_enabled:
                self.configure_port_profile_in_domain(mgr, domain_name, name)

            self.create_vlan_profile_for_port_profile(mgr, name)

            if self._pp_domains_supported:
                self.configure_l2_mode_for_vlan_profile_with_domains(mgr, name)
            else:
                self.configure_l2_mode_for_vlan_profile(mgr, name)

            self.configure_trunk_mode_for_vlan_profile(mgr, name)
            self.configure_allowed_vlans_for_vlan_profile(mgr, name, net_id)
            self.activate_port_profile(mgr, name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("NETCONF error"))
                self.close_session()
Example #14
0
    def create_router(self, context, router):
        is_ha = self._is_ha(router['router'])

        if is_ha and l3_dvr_db.is_distributed_router(router['router']):
            raise l3_ha.DistributedHARouterNotSupported()

        router['router']['ha'] = is_ha
        router_dict = super(L3_HA_NAT_db_mixin,
                            self).create_router(context, router)

        if is_ha:
            try:
                router_db = self._get_router(context, router_dict['id'])
                ha_network = self.get_ha_network(context,
                                                 router_db.tenant_id)
                if not ha_network:
                    ha_network = self._create_ha_network(context,
                                                         router_db.tenant_id)

                self._set_vr_id(context, router_db, ha_network)
                self._create_ha_interfaces(context, router_db, ha_network)
                self._notify_ha_interfaces_updated(context, router_db.id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.delete_router(context, router_dict['id'])
            router_dict['ha_vr_id'] = router_db.extra_attributes.ha_vr_id
        return router_dict
Example #15
0
    def sync_single_resource(self, operation, object_type, obj_id,
                             context, attr_filter_create, attr_filter_update):
        """Sync over a single resource from Neutron to OpenDaylight.

        Handle syncing a single operation over to OpenDaylight, and correctly
        filter attributes out which are not required for the requisite
        operation (create or update) being handled.
        """
        dbcontext = context._plugin_context
        if operation == 'create':
            urlpath = object_type
            method = 'post'
        else:
            urlpath = object_type + '/' + obj_id
            method = 'put'

        try:
            obj_getter = getattr(context._plugin, 'get_%s' % object_type[:-1])
            resource = obj_getter(dbcontext, obj_id)
        except not_found_exception_map[object_type]:
            LOG.debug(_('%(object_type)s not found (%(obj_id)s)'),
                      {'object_type': object_type.capitalize(),
                       'obj_id': obj_id})
        else:
            if operation == 'create':
                attr_filter_create(self, resource, context, dbcontext)
            elif operation == 'update':
                attr_filter_update(self, resource, context, dbcontext)
            try:
                # 400 errors are returned if an object exists, which we ignore.
                self.sendjson(method, urlpath, {object_type[:-1]: resource},
                              [400])
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.out_of_sync = True

def serve_rpc():
    plugin = manager.NeutronManager.get_plugin()

    # If 0 < rpc_workers then start_rpc_listeners would be called in a
    # subprocess and we cannot simply catch the NotImplementedError.  It is
    # simpler to check this up front by testing whether the plugin supports
    # multiple RPC workers.
    if not plugin.rpc_workers_supported():
        LOG.debug(_("Active plugin doesn't implement start_rpc_listeners"))
        if 0 < cfg.CONF.rpc_workers:
            msg = _("'rpc_workers = %d' ignored because start_rpc_listeners "
                    "is not implemented.")
            LOG.error(msg, cfg.CONF.rpc_workers)
        raise NotImplementedError()

    try:
        rpc = RpcWorker(plugin)

        if cfg.CONF.rpc_workers < 1:
            rpc.start()
            return rpc
        else:
            launcher = common_service.ProcessLauncher(wait_interval=1.0)
            launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers)
            return launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_('Unrecoverable error: please check log '
                            'for details.'))

 def _status_edge(self, task):
     edge_id = task.userdata['edge_id']
     try:
         response = self.vcns.get_edge_deploy_status(edge_id)[1]
         task.userdata['retries'] = 0
         system_status = response.get('systemStatus', None)
         if system_status is None:
             status = constants.TaskStatus.PENDING
         elif system_status == 'good':
             status = constants.TaskStatus.COMPLETED
         else:
             status = constants.TaskStatus.ERROR
     except exceptions.VcnsApiException:
         with excutils.save_and_reraise_exception():
             LOG.exception(_("VCNS: Edge %s status query failed."), edge_id)
     except Exception:
         retries = task.userdata.get('retries', 0) + 1
         if retries < 3:
             task.userdata['retries'] = retries
             msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. "
                     "Retry %(retries)d.") % {
                         'edge_id': edge_id,
                         'retries': retries}
             LOG.exception(msg)
             status = constants.TaskStatus.PENDING
         else:
             msg = _("VCNS: Unable to retrieve edge %s status. "
                     "Abort.") % edge_id
             LOG.exception(msg)
             status = constants.TaskStatus.ERROR
     LOG.debug(_("VCNS: Edge %s status"), edge_id)
     return status
Example #18
0
 def _from_xml(self, datastring):
     if datastring is None:
         return None
     plurals = set(self.metadata.get('plurals', {}))
     try:
         node = self._parseXML(datastring)
         root_tag = self._get_key(node.tag)
         # Deserialize link node was needed by unit test for verifying
         # the request's response
         links = self._get_links(root_tag, node)
         result = self._from_xml_node(node, plurals)
         # root_tag = constants.VIRTUAL_ROOT_KEY and links is not None
         # is not possible because of the way data are serialized.
         if root_tag == constants.VIRTUAL_ROOT_KEY:
             return result
         return dict({root_tag: result}, **links)
     except Exception as e:
         with excutils.save_and_reraise_exception():
             parseError = False
             # Python2.7
             if (hasattr(etree, 'ParseError') and
                 isinstance(e, getattr(etree, 'ParseError'))):
                 parseError = True
             # Python2.6
             elif isinstance(e, expat.ExpatError):
                 parseError = True
             if parseError:
                 msg = _("Cannot understand XML")
                 raise exception.MalformedRequestBody(reason=msg)
Example #19
0
 def _port_action(self, plugin, context, port, action):
     """Perform port operations taking care of concurrency issues."""
     try:
         if action == 'create_port':
             return plugin.create_port(context, port)
         elif action == 'update_port':
             return plugin.update_port(context, port['id'], port['port'])
         else:
             msg = _('Unrecognized action')
             raise n_exc.Invalid(message=msg)
     except (db_exc.DBError, n_exc.NetworkNotFound,
             n_exc.SubnetNotFound, n_exc.IpAddressGenerationFailure) as e:
         with excutils.save_and_reraise_exception(reraise=False) as ctxt:
             if isinstance(e, n_exc.IpAddressGenerationFailure):
                 # Check if the subnet still exists and if it does not,
                 # this is the reason why the ip address generation failed.
                 # In any other unlikely event re-raise
                 try:
                     subnet_id = port['port']['fixed_ips'][0]['subnet_id']
                     plugin.get_subnet(context, subnet_id)
                 except n_exc.SubnetNotFound:
                     pass
                 else:
                     ctxt.reraise = True
             network_id = port['port']['network_id']
             LOG.warn(_("Port for network %(net_id)s could not be created: "
                        "%(reason)s") % {"net_id": network_id, 'reason': e})
Example #20
0
    def create_port(self, context, port):
        attrs = port['port']
        attrs['status'] = const.PORT_STATUS_DOWN

        session = context.session
        with session.begin(subtransactions=True):
            self._ensure_default_security_group_on_port(context, port)
            sgids = self._get_security_groups_on_port(context, port)
            result = super(Ml2Plugin, self).create_port(context, port)
            self._process_portbindings_create_and_update(context, attrs,
                                                         result)
            self._process_port_create_security_group(context, result, sgids)
            self._extend_port_dict_binding(context, result)
            mech_context = driver_context.PortContext(self, context, result)
            self.mechanism_manager.create_port_precommit(mech_context)

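        # The postcommit call runs outside the DB transaction; if the
        # mechanism driver fails, the port is deleted again before the
        # exception is re-raised.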
        try:
            self.mechanism_manager.create_port_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            with excutils.save_and_reraise_exception():
                LOG.error(_("mechanism_manager.create_port failed, "
                            "deleting port '%s'"), result['id'])
                self.delete_port(context, result['id'])
        self.notify_security_groups_member_updated(context, result)
        return result

 def get_nat_config(self, edge_id):
     try:
         return self.vcns.get_nat_config(edge_id)[1]
     except exceptions.VcnsApiException as e:
         with excutils.save_and_reraise_exception():
             LOG.exception(_("VCNS: Failed to get nat config:\n%s"),
                           e.response)
Example #22
0
    def _fetch_external_net_id(self, force=False):
        """Find UUID of single external network for this agent."""
        if self.conf.gateway_external_network_id:
            return self.conf.gateway_external_network_id

        # L3 agent doesn't use external_network_bridge to handle external
        # networks, so bridge_mappings with provider networks will be used
        # and the L3 agent is able to handle any external networks.
        if not self.conf.external_network_bridge:
            return

        if not force and self.target_ex_net_id:
            return self.target_ex_net_id

        try:
            self.target_ex_net_id = self.plugin_rpc.get_external_network_id(
                self.context)
            return self.target_ex_net_id
        except rpc_common.RemoteError as e:
            with excutils.save_and_reraise_exception() as ctx:
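                # When the server reports TooManyExternalNetworks, drop the
                # RemoteError and raise a clearer local error instead.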
                if e.exc_type == 'TooManyExternalNetworks':
                    ctx.reraise = False
                    msg = _(
                        "The 'gateway_external_network_id' option must be "
                        "configured for this agent as Neutron has more than "
                        "one external network.")
                    raise Exception(msg)
Example #23
0
    def create_port(self, context, port):
        attrs = port['port']
        attrs['status'] = const.PORT_STATUS_DOWN

        session = context.session
        with session.begin(subtransactions=True):
            self._ensure_default_security_group_on_port(context, port)
            sgids = self._get_security_groups_on_port(context, port)
            dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
            result = super(Ml2Plugin, self).create_port(context, port)
            self._process_port_create_security_group(context, result, sgids)
            network = self.get_network(context, result['network_id'])
            mech_context = driver_context.PortContext(self, context, result,
                                                      network)
            self._process_port_binding(mech_context, attrs)
            result[addr_pair.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, result,
                    attrs.get(addr_pair.ADDRESS_PAIRS)))
            self._process_port_create_extra_dhcp_opts(context, result,
                                                      dhcp_opts)
            self.mechanism_manager.create_port_precommit(mech_context)

        try:
            self.mechanism_manager.create_port_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            with excutils.save_and_reraise_exception():
                LOG.error(_("mechanism_manager.create_port_postcommit "
                            "failed, deleting port '%s'"), result['id'])
                self.delete_port(context, result['id'])
        self.notify_security_groups_member_updated(context, result)
        return result
Example #24
0
    def create_network(self, context, network):
        net_data = network['network']
        segments = self._process_provider_create(net_data)
        tenant_id = self._get_tenant_id_for_create(context, net_data)

        session = context.session
        with session.begin(subtransactions=True):
            self._ensure_default_security_group(context, tenant_id)
            result = super(Ml2Plugin, self).create_network(context, network)
            network_id = result['id']
            self._process_l3_create(context, result, net_data)
            # REVISIT(rkukura): Consider moving all segment management
            # to TypeManager.
            if segments:
                for segment in segments:
                    self.type_manager.reserve_provider_segment(session,
                                                               segment)
                    db.add_network_segment(session, network_id, segment)
            else:
                segment = self.type_manager.allocate_tenant_segment(session)
                db.add_network_segment(session, network_id, segment)
            self._extend_network_dict_provider(context, result)
            mech_context = driver_context.NetworkContext(self, context,
                                                         result)
            self.mechanism_manager.create_network_precommit(mech_context)

        try:
            self.mechanism_manager.create_network_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            with excutils.save_and_reraise_exception():
                LOG.error(_("mechanism_manager.create_network_postcommit "
                            "failed, deleting network '%s'"), result['id'])
                self.delete_network(context, result['id'])
        return result
Example #25
0
    def remove_router_interface(self, context, router_id, interface_info):
        """Detach a subnet from a router."""
        tenant_id = context.tenant_id
        subnet_id = interface_info["subnet_id"]
        LOG.debug(
            "Detaching subnet %(subnet_id)s from "
            "router %(router_id)s" % {"subnet_id": subnet_id, "router_id": router_id}
        )

        # Get network for this subnet
        subnet = self.get_subnet(context, subnet_id)
        network_id = subnet["network_id"]
        network = self.get_network(context, network_id)

        contract = self.manager.create_tenant_contract(tenant_id)

        epg = self.manager.ensure_epg_created_for_network(tenant_id, network_id, network["name"])
        # Delete contract for this epg
        self.manager.delete_contract_for_epg(tenant_id, epg.epg_id, contract.contract_id, provider=epg.provider)

        try:
            return super(ApicL3ServicePlugin, self).remove_router_interface(context, router_id, interface_info)
        except Exception:
            LOG.error(
                _("Error detaching subnet %(subnet_id)s from " "router %(router_id)s")
                % {"subnet_id": subnet_id, "router_id": router_id}
            )
            with excutils.save_and_reraise_exception():
                self._add_epg_to_contract(tenant_id, epg, contract)
Example #26
0
    def add_router_interface(self, context, router_id, interface_info):
        """Attach a subnet to a router."""
        tenant_id = context.tenant_id
        subnet_id = interface_info["subnet_id"]
        LOG.debug(
            "Attaching subnet %(subnet_id)s to "
            "router %(router_id)s" % {"subnet_id": subnet_id, "router_id": router_id}
        )

        # Get network for this subnet
        subnet = self.get_subnet(context, subnet_id)
        network_id = subnet["network_id"]
        net_name = self.get_network(context, network_id)["name"]

        # Setup tenant filters and contracts
        contract = self.manager.create_tenant_contract(tenant_id)

        # Check for a provider EPG
        epg = self.manager.ensure_epg_created_for_network(tenant_id, network_id, net_name)
        self._add_epg_to_contract(tenant_id, epg, contract)

        # Create DB port
        try:
            return super(ApicL3ServicePlugin, self).add_router_interface(context, router_id, interface_info)
        except Exception:
            LOG.error(
                _("Error attaching subnet %(subnet_id)s to " "router %(router_id)s")
                % {"subnet_id": subnet_id, "router_id": router_id}
            )
            with excutils.save_and_reraise_exception():
                self.manager.delete_contract_for_epg(tenant_id, epg.epg_id, contract.contract_id, provider=epg.provider)
Example #27
0
def handle_port_metadata_access(plugin, context, port, is_delete=False):
    if is_user_port(port, check_dev_id=True):
        network_id = port["network_id"]
        network = plugin.get_network(context, network_id)
        if network[external_net.EXTERNAL]:
            LOG.info(_("Network %s is external: nothing to do"), network_id)
            return
        subnet_id = port["fixed_ips"][0]['subnet_id']
        host_data = {
            "instance_id": port["device_id"],
            "tenant_id": port["tenant_id"],
            "ip_address": port["fixed_ips"][0]['ip_address']
        }
        LOG.info(_("Configuring metadata entry for port %s"), port)
        if not is_delete:
            handler = plugin.lsn_manager.lsn_port_meta_host_add
        else:
            handler = plugin.lsn_manager.lsn_port_meta_host_remove
        try:
            handler(context, network_id, subnet_id, host_data)
        except p_exc.PortConfigurationError:
            with excutils.save_and_reraise_exception():
                if not is_delete:
                    db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                        plugin, context, port['id'])
        LOG.info(_("Metadata for port %s configured successfully"), port['id'])
Example #28
0
    def connect(self, host, username, password):
        """Connect via SSH and initialize the NETCONF session."""

        # Use the persisted NETCONF connection
        if self.mgr and self.mgr.connected:
            return self.mgr

        # check if someone forgot to edit the conf file with real values
        if host == '':
            raise Exception(_("Brocade Switch IP address is not set, "
                              "check config ml2_conf_brocade.ini file"))

        # Open new NETCONF connection
        try:
            self.mgr = manager.connect(host=host, port=SSH_PORT,
                                       username=username, password=password,
                                       unknown_host_cb=nos_unknown_host_cb)

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Connect failed to switch"))

        LOG.debug(_("Connect success to host %(host)s:%(ssh_port)d"),
                  dict(host=host, ssh_port=SSH_PORT))
        return self.mgr
Example #29
0
    def sync_resources(self, collection_name, context):
        """Sync objects from Neutron over to OpenDaylight.

        This will handle syncing networks, subnets, and ports from Neutron to
        OpenDaylight. It also filters out the requisite items which are not
        valid for create API operations.
        """
        to_be_synced = []
        dbcontext = context._plugin_context
        obj_getter = getattr(context._plugin, 'get_%s' % collection_name)
        resources = obj_getter(dbcontext)
        for resource in resources:
            try:
                urlpath = collection_name + '/' + resource['id']
                self.sendjson('get', urlpath, None)
            except requests.exceptions.HTTPError as e:
                with excutils.save_and_reraise_exception() as ctx:
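                    # A 404 means the object does not exist in OpenDaylight
                    # yet, so queue it for creation and suppress the error.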
                    if e.response.status_code == requests.codes.not_found:
                        attr_filter = self.create_object_map[collection_name]
                        attr_filter(resource, context)
                        to_be_synced.append(resource)
                        ctx.reraise = False
        key = collection_name[:-1] if len(to_be_synced) == 1 else (
            collection_name)
        self.sendjson('post', collection_name, {key: to_be_synced})
Example #30
0
    def sync_single_resource(self, operation, object_type, context):
        """Sync over a single resource from Neutron to OpenDaylight.

        Handle syncing a single operation over to OpenDaylight, and correctly
        filter attributes out which are not required for the requisite
        operation (create or update) being handled.
        """
        try:
            obj_id = context.current['id']
            if operation == 'delete':
                self.sendjson('delete', object_type + '/' + obj_id, None)
            else:
                if operation == 'create':
                    urlpath = object_type
                    method = 'post'
                    attr_filter = self.create_object_map[object_type]
                elif operation == 'update':
                    urlpath = object_type + '/' + obj_id
                    method = 'put'
                    attr_filter = self.update_object_map[object_type]
                resource = context.current.copy()
                attr_filter(resource, context)
                self.sendjson(method, urlpath, {object_type[:-1]: resource})
        except Exception:
            with excutils.save_and_reraise_exception():
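                # Flag the driver as out of sync so a full resync runs on a
                # later operation, then re-raise the original error.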
                self.out_of_sync = True
Example #31
0
def handle_router_metadata_access(plugin, context, router_id, interface=None):
    LOG.info(
        _("Handle metadata access via router: %(r)s and "
          "interface %(i)s") % {
              'r': router_id,
              'i': interface
          })
    if interface:
        try:
            plugin.get_port(context, interface['port_id'])
            is_enabled = True
        except n_exc.NotFound:
            is_enabled = False
        subnet_id = interface['subnet_id']
        try:
            plugin.lsn_manager.lsn_metadata_configure(context, subnet_id,
                                                      is_enabled)
        except p_exc.NsxPluginException:
            with excutils.save_and_reraise_exception():
                if is_enabled:
                    l3_db.L3_NAT_db_mixin.remove_router_interface(
                        plugin, context, router_id, interface)
    LOG.info(_("Metadata for router %s handled successfully"), router_id)
Example #32
0
    def set_contract_for_epg(self,
                             tenant_id,
                             epg_id,
                             contract_id,
                             provider=False):
        """Set the contract for an EPG.

        By default EPGs are consumers to a contract. Set provider flag
        for a single EPG to act as a contract provider.
        """
        if provider:
            try:
                self.apic.fvRsProv.create(tenant_id, AP_NAME, epg_id,
                                          contract_id)
                self.db.set_provider_contract(epg_id)
                self.make_tenant_contract_global(tenant_id)
            except (cexc.ApicResponseNotOk, KeyError):
                with excutils.save_and_reraise_exception():
                    self.make_tenant_contract_local(tenant_id)
                    self.apic.fvRsProv.delete(tenant_id, AP_NAME, epg_id,
                                              contract_id)
        else:
            self.apic.fvRsCons.create(tenant_id, AP_NAME, epg_id, contract_id)
Example #33
0
    def create_router(self, context, router):
        """ creates a vrf on NOS device."""
        LOG.debug("BrocadeSVIPlugin.create_router called: ")
        with context.session.begin(subtransactions=True):
            new_router = super(BrocadeSVIPlugin,
                               self).create_router(context, router)
            # Router on VDX
            try:
                switch = self._switch
                self._driver.create_router(switch['address'],
                                           switch['username'],
                                           switch['password'],
                                           switch['rbridge_id'],
                                           str(new_router['id']))
            except Exception:
                with excutils.save_and_reraise_exception():
                    with context.session.begin(subtransactions=True):
                        super(BrocadeSVIPlugin,
                              self).delete_router(context, new_router['id'])

        LOG.debug("BrocadeSVIPlugin.create_router: "
                  "router created on VDX switch")
        return new_router
Example #34
0
    def deserialize_body(self, request, action):
        try:
            content_type = request.best_match_content_type()
        except exception.InvalidContentType:
            LOG.debug(_("Unrecognized Content-Type provided in request"))
            return {}

        if content_type is None:
            LOG.debug(_("No Content-Type provided in request"))
            return {}

        if not len(request.body) > 0:
            LOG.debug(_("Empty body provided in request"))
            return {}

        try:
            deserializer = self.get_body_deserializer(content_type)
        except exception.InvalidContentType:
            with excutils.save_and_reraise_exception():
                LOG.debug(_("Unable to deserialize body as provided "
                            "Content-Type"))

        return deserializer.deserialize(request.body, action)
Example #35
0
 def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True):
     try:
         obj = lsn_db.lsn_port_get_for_subnet(context,
                                              subnet_id,
                                              raise_on_err=raise_on_err)
         return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None)
     except p_exc.LsnPortNotFound:
         with excutils.save_and_reraise_exception() as ctxt:
             ctxt.reraise = False
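              # The mapping is missing from the local DB; optionally rebuild
              # it from the NSX backend, otherwise only re-raise when the
              # caller requested it via raise_on_err.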
             if self.sync_on_missing:
                 lsn_id, lsn_port_id = (super(
                     PersistentLsnManager,
                     self).lsn_port_get(context,
                                        network_id,
                                        subnet_id,
                                        raise_on_err=raise_on_err))
                 mac_addr = lsn_api.lsn_port_info_get(
                     self.cluster, lsn_id, lsn_port_id)['mac_address']
                 self.lsn_port_save(context, lsn_port_id, subnet_id,
                                    mac_addr, lsn_id)
                 return (lsn_id, lsn_port_id)
             if raise_on_err:
                 ctxt.reraise = True

    def create_servicechain_instance(self, context, servicechain_instance):
        session = context.session
        with session.begin(subtransactions=True):
            result = super(ServiceChainPlugin,
                           self).create_servicechain_instance(
                               context, servicechain_instance)
            sc_context = servicechain_context.ServiceChainInstanceContext(
                self, context, result)
            self.driver_manager.create_servicechain_instance_precommit(
                sc_context)

        try:
            self.driver_manager.create_servicechain_instance_postcommit(
                sc_context)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _("driver_manager.create_servicechain_instance_postcommit "
                      "failed, deleting servicechain_instance %s"),
                    result['id'])
                self.delete_servicechain_instance(context, result['id'])

        return result
Example #37
0
    def create_port(self, context, port):

        network = {}

        network_id = port['port']['network_id']

        with context.session.begin(subtransactions=True):

            # Invoke the Neutron API to create the port
            neutron_port = super(OneConvergencePluginV2,
                                 self).create_port(context, port)

            self._process_portbindings_create_and_update(context,
                                                         port['port'],
                                                         neutron_port)

            if port['port']['device_owner'] in ('network:router_gateway',
                                                'network:floatingip'):
                # for l3 requests, tenant_id will be None/''
                network = self._get_network(context, network_id)

                tenant_id = network['tenant_id']
            else:
                tenant_id = port['port']['tenant_id']

        port_id = neutron_port['id']

        try:
            self.nvsdlib.create_port(tenant_id, neutron_port)
        except nvsdexception.NVSDAPIException:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Deleting newly created "
                          "neutron port %s"), port_id)
                super(OneConvergencePluginV2, self).delete_port(context,
                                                                port_id)

        return neutron_port
Example #38
0
    def deactivate_port(self, context, port, raise_exc=True):
        """Deactivate port by deleting port from OFC if exists."""
        if not self.ofc.exists_ofc_port(context, port['id']):
            LOG.debug(_("deactivate_port(): skip, ofc_port for port=%s "
                        "does not exist."), port['id'])
            return port

        try:
            self.ofc.delete_ofc_port(context, port['id'], port)
            self._update_resource_status_if_changed(
                context, "port", port, const.PORT_STATUS_DOWN)
            return port
        except (nexc.OFCResourceNotFound, nexc.OFCMappingNotFound):
            # There is a case where multiple delete_port operation are
            # running concurrently. For example, delete_port from
            # release_dhcp_port and deletion of network owned ports in
            # delete_network. In such cases delete_ofc_port may receive
            # 404 error from OFC.
            # Also there is a case where neutron port is deleted
            # between exists_ofc_port and get_ofc_id in delete_ofc_port.
            # In this case OFCMappingNotFound is raised.
            # These two cases are valid situations.
            LOG.info(_("deactivate_port(): OFC port for port=%s is "
                       "already removed."), port['id'])
            # The port is already removed, so there is no need
            # to update status in the database.
            port['status'] = const.PORT_STATUS_DOWN
            return port
        except nexc.OFCException as exc:
            with excutils.save_and_reraise_exception() as ctxt:
                LOG.error(_("Failed to delete port=%(port)s from OFC: "
                            "%(exc)s"), {'port': port['id'], 'exc': exc})
                self._update_resource_status_if_changed(
                    context, "port", port, const.PORT_STATUS_ERROR)
                if not raise_exc:
                    ctxt.reraise = False
                    return port
Example #39
0
    def connect(self, host, username, password):
        """Connect via SSH and initialize the NETCONF session."""

        # Use the persisted NETCONF connection
        if self.mgr and self.mgr.connected:
            return self.mgr

        # check if someone forgot to edit the conf file with real values
        if host == '':
            raise Exception(_("Brocade Switch IP address is not set, "
                              "check config ml2_conf_brocade.ini file"))

        # Open new NETCONF connection
        try:
            self.mgr = manager.connect(host=host, port=SSH_PORT,
                                       username=username, password=password,
                                       unknown_host_cb=nos_unknown_host_cb)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Connect failed to switch"))

        LOG.debug(_("Connect success to host %(host)s:%(ssh_port)d"),
                  dict(host=host, ssh_port=SSH_PORT))
        return self.mgr
Example #40
0
    def sync_single_resource(self, operation, object_type, obj_id, context,
                             attr_filter_create, attr_filter_update):
        """Sync over a single resource from Neutron to OpenDaylight.

        Handle syncing a single operation over to OpenDaylight, and correctly
        filter attributes out which are not required for the requisite
        operation (create or update) being handled.
        """
        dbcontext = context._plugin_context
        if operation == 'create':
            urlpath = object_type
            method = 'post'
        else:
            urlpath = object_type + '/' + obj_id
            method = 'put'

        try:
            obj_getter = getattr(context._plugin, 'get_%s' % object_type[:-1])
            resource = obj_getter(dbcontext, obj_id)
        except not_found_exception_map[object_type]:
            LOG.debug(_('%(object_type)s not found (%(obj_id)s)'), {
                'object_type': object_type.capitalize(),
                'obj_id': obj_id
            })
        else:
            if operation == 'create':
                attr_filter_create(self, resource, context, dbcontext)
            elif operation == 'update':
                attr_filter_update(self, resource, context, dbcontext)
            try:
                # 400 errors are returned if an object exists, which we ignore.
                self.sendjson(method, urlpath, {object_type[:-1]: resource},
                              [400])
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.out_of_sync = True
Example #41
0
    def __init__(self, conf, plugin_rpc, context):
        super(HaproxyNSDriver, self).__init__()
        self.conf = conf
        self.context = context

        self.ovs_plugin_rpc = OVSPluginApi(topics.PLUGIN)
        self.root_helper = config.get_root_helper(conf)
        # The config file, the pid file and the sock file will be stored
        # under the specified state_path.
        self.state_path = '/'.join(
            [self.conf.haproxy.loadbalancer_state_path, STATE_PATH_V2_APPEND])
        try:
            vif_driver = importutils.import_object(conf.interface_driver, conf)
        except ImportError:
            with excutils.save_and_reraise_exception():
                msg = (_('Error importing interface driver: %s') %
                       conf.interface_driver)
                LOG.error(msg)

        self.vif_driver = vif_driver
        self.plugin_rpc = plugin_rpc
        self.loadbalancer_to_port_id = {}

        self._setup_sg_rpc()
Example #42
0
 def _update_packet_filter(self, context, new_pf, pf_data):
     pf_id = new_pf['id']
     prev_status = new_pf['status']
     try:
         # If previous status is ERROR, try to sync all attributes.
         pf = new_pf if prev_status == pf_db.PF_STATUS_ERROR else pf_data
         self.ofc.update_ofc_packet_filter(context, pf_id, pf)
         new_status = pf_db.PF_STATUS_ACTIVE
         if new_status != prev_status:
             self._update_resource_status(context, "packet_filter",
                                          pf_id, new_status)
             new_pf['status'] = new_status
         return new_pf
     except Exception as exc:
         with excutils.save_and_reraise_exception():
             if (isinstance(exc, nexc.OFCException) or
                 isinstance(exc, nexc.OFCConsistencyBroken)):
                 LOG.error(_("Failed to create packet_filter id=%(id)s on "
                             "OFC: %(exc)s"),
                           {'id': pf_id, 'exc': exc})
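              # Record the ERROR status in the DB before the exception
              # propagates to the caller.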
             new_status = pf_db.PF_STATUS_ERROR
             if new_status != prev_status:
                 self._update_resource_status(context, "packet_filter",
                                              pf_id, new_status)
Example #43
0
    def sync_resources(self, resource_name, collection_name, resources,
                       context, dbcontext, attr_filter):
        """Sync objects from Neutron over to OpenDaylight.

        This will handle syncing networks, subnets, and ports from Neutron to
        OpenDaylight. It also filters out the requisite items which are not
        valid for create API operations.
        """
        to_be_synced = []
        for resource in resources:
            try:
                urlpath = collection_name + '/' + resource['id']
                self.sendjson('get', urlpath, None)
            except requests.exceptions.HTTPError as e:
                with excutils.save_and_reraise_exception() as ctx:
                    if e.response.status_code == 404:
                        attr_filter(resource, context, dbcontext)
                        to_be_synced.append(resource)
                        ctx.reraise = False

        key = resource_name if len(to_be_synced) == 1 else collection_name

        # 400 errors are returned if an object exists, which we ignore.
        self.sendjson('post', collection_name, {key: to_be_synced}, [400])
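
In the sync loop above, ctx.reraise is flipped to False only when the GET comes back 404, so the missing resource is queued for creation while any other HTTP failure still propagates. A minimal standalone sketch of that pattern, assuming only requests and oslo.utils are installed (the fetch_or_none helper and its URL handling are hypothetical, not part of the OpenDaylight driver):

import requests
from oslo_utils import excutils


def fetch_or_none(url):
    """Return the decoded body, or None when the server reports 404."""
    try:
        resp = requests.get(url)
        resp.raise_for_status()
        return resp.json()
    except requests.exceptions.HTTPError as e:
        with excutils.save_and_reraise_exception() as ctx:
            if e.response is not None and e.response.status_code == 404:
                # A missing object is expected here; suppress the error.
                ctx.reraise = False
        return None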
Example #44
    def create_network(self, context, network):

        tenant_id = self._get_tenant_id_for_create(context, network['network'])
        self._ensure_default_security_group(context, tenant_id)

        net = self.nvsdlib.create_network(network['network'])

        network['network']['id'] = net['id']

        with context.session.begin(subtransactions=True):
            try:
                neutron_net = super(OneConvergencePluginV2,
                                    self).create_network(context, network)

                # The following call checks whether the network is external
                # and, if it is, adds it to the externalnetworks table of
                # the Neutron DB.
                self._process_l3_create(context, neutron_net,
                                        network['network'])
            except nvsdexception.NVSDAPIException:
                with excutils.save_and_reraise_exception():
                    self.nvsdlib.delete_network(net)

        return neutron_net
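
Examples #44 and #45 follow the same create-then-roll-back shape: the backend object is created first, and if the follow-up Neutron DB work fails, the backend object is deleted inside save_and_reraise_exception() so the original error still reaches the caller. A reduced sketch of that shape, assuming oslo.utils is available; backend and db here are hypothetical stand-ins for the controller client and the plugin DB layer:

from oslo_utils import excutils


def create_network_with_rollback(backend, db, net_spec):
    """Create on the backend first, then persist; undo the backend on failure."""
    net = backend.create_network(net_spec)
    try:
        record = db.add_network(net['id'], net_spec)
    except Exception:
        with excutils.save_and_reraise_exception():
            # The DB write failed: remove the orphaned backend object,
            # then let the original exception propagate unchanged.
            backend.delete_network(net['id'])
    return record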
Example #45
    def create_router(self, context, router):
        net_partition = self._get_net_partition_for_router(context, router)
        neutron_router = super(NuagePlugin,
                               self).create_router(context, router)
        params = {
            'net_partition': net_partition,
            'tenant_id': neutron_router['tenant_id']
        }
        try:
            nuage_router = self.nuageclient.create_router(
                neutron_router, router['router'], params)
        except Exception:
            with excutils.save_and_reraise_exception():
                super(NuagePlugin, self).delete_router(context,
                                                       neutron_router['id'])

        if nuage_router:
            with context.session.begin(subtransactions=True):
                nuagedb.add_entrouter_mapping(context.session,
                                              net_partition['id'],
                                              neutron_router['id'],
                                              nuage_router['nuage_domain_id'])

        return neutron_router
Example #46
    def create_vlan(self, nexus_host, vlanid, vlanname):
        """Create a VLAN on Nexus Switch given the VLAN ID and Name."""
        confstr = self.create_xml_snippet(
            snipp.CMD_VLAN_CONF_SNIPPET % (vlanid, vlanname))
        self._edit_config(nexus_host, target='running', config=confstr)

        # Enable VLAN active and no-shutdown states. Some versions of
        # Nexus switch do not allow state changes for the extended VLAN
        # range (1006-4094), but these errors can be ignored (default
        # values are appropriate).
        state_config = [snipp.CMD_VLAN_ACTIVE_SNIPPET,
                        snipp.CMD_VLAN_NO_SHUTDOWN_SNIPPET]
        for snippet in state_config:
            try:
                confstr = self.create_xml_snippet(snippet % vlanid)
                self._edit_config(
                    nexus_host,
                    target='running',
                    config=confstr,
                    allowed_exc_strs=["Can't modify state for extended",
                                      "Command is only allowed on VLAN"])
            except cexc.NexusConfigFailed:
                with excutils.save_and_reraise_exception():
                    self.delete_vlan(nexus_host, vlanid)
Example #47
 def call(*args, **kwargs):
     try:
         return func(*args, **kwargs)
     except Exception as e:
         with excutils.save_and_reraise_exception():
             self.logger(e)
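
Example #47 is the body of a generic wrapper: the call is attempted and any exception is logged before save_and_reraise_exception() re-raises it untouched. Wrapped up as a self-contained decorator it might look like the sketch below (oslo.utils assumed; the decorator name is made up here):

import functools
import logging

from oslo_utils import excutils

LOG = logging.getLogger(__name__)


def log_and_reraise(func):
    """Log any exception raised by func, then re-raise it unchanged."""
    @functools.wraps(func)
    def call(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.exception("%s failed: %s", func.__name__, e)
    return call

Applying @log_and_reraise to a function gives the same behaviour as the snippet above without repeating the try/except at every call site.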
Example #48
    def _get_socket(self, host, port, backlog):
        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0], bind_addr[1],
                                      socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            LOG.exception(_("Unable to listen on %(host)s:%(port)s"), {
                'host': host,
                'port': port
            })
            sys.exit(1)

        if CONF.use_ssl:
            if not os.path.exists(CONF.ssl_cert_file):
                raise RuntimeError(
                    _("Unable to find ssl_cert_file "
                      ": %s") % CONF.ssl_cert_file)

            if not os.path.exists(CONF.ssl_key_file):
                raise RuntimeError(
                    _("Unable to find "
                      "ssl_key_file : %s") % CONF.ssl_key_file)

            # ssl_ca_file is optional
            if CONF.ssl_ca_file and not os.path.exists(CONF.ssl_ca_file):
                raise RuntimeError(
                    _("Unable to find ssl_ca_file "
                      ": %s") % CONF.ssl_ca_file)

        def wrap_ssl(sock):
            ssl_kwargs = {
                'server_side': True,
                'certfile': CONF.ssl_cert_file,
                'keyfile': CONF.ssl_key_file,
                'cert_reqs': ssl.CERT_NONE,
            }

            if CONF.ssl_ca_file:
                ssl_kwargs['ca_certs'] = CONF.ssl_ca_file
                ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

            return ssl.wrap_socket(sock, **ssl_kwargs)

        sock = None
        retry_until = time.time() + CONF.retry_until_window
        while not sock and time.time() < retry_until:
            try:
                sock = eventlet.listen(bind_addr,
                                       backlog=backlog,
                                       family=family)
                if CONF.use_ssl:
                    sock = wrap_ssl(sock)

            except socket.error as err:
                with excutils.save_and_reraise_exception() as ctxt:
                    if err.errno == errno.EADDRINUSE:
                        ctxt.reraise = False
                        eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(
                _("Could not bind to %(host)s:%(port)s "
                  "after trying for %(time)d seconds") % {
                      'host': host,
                      'port': port,
                      'time': CONF.retry_until_window
                  })
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock
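
The bind loop above suppresses only EADDRINUSE (via ctxt.reraise = False) so the listener can be retried until the retry window closes; any other socket error aborts immediately. The same idea without eventlet or SSL, as a rough sketch (oslo.utils assumed; bind_with_retry is a hypothetical helper):

import errno
import socket
import time

from oslo_utils import excutils


def bind_with_retry(host, port, retry_window=30):
    """Retry binding while the address is in use; fail on anything else."""
    deadline = time.time() + retry_window
    sock = None
    while not sock and time.time() < deadline:
        candidate = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            candidate.bind((host, port))
            candidate.listen(128)
            sock = candidate
        except socket.error as err:
            candidate.close()
            with excutils.save_and_reraise_exception() as ctxt:
                if err.errno == errno.EADDRINUSE:
                    # The port is still held by someone else; back off
                    # briefly and try again instead of re-raising.
                    ctxt.reraise = False
                    time.sleep(0.1)
    if not sock:
        raise RuntimeError("Could not bind to %s:%s after %s seconds"
                           % (host, port, retry_window))
    return sock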
Example #49
    def create_network(self, network, attachment):
        """Create or update a network when an attachment is changed.

        This method is not invoked at the usual plugin create_network() time.
        Instead, it is invoked on create/update port.

        :param network: Network on which the port operation is happening
        :param attachment: Details about the owner of the port

        Create a VLAN in the appropriate switch/port, and configure the
        appropriate interfaces for this VLAN.
        """
        LOG.debug(_("NexusPlugin:create_network() called"))
        # Grab the switch IP and port for this host
        host = str(attachment[const.HOST_NAME])
        switch_ip, port_id = self._client.get_switch_and_port_id(host)
        if not switch_ip and not port_id:
            raise cisco_exc.NexusComputeHostNotConfigured(host=host)

        vlan_id = network[const.NET_VLAN_ID]
        vlan_name = network[const.NET_VLAN_NAME]
        auto_create = True
        auto_trunk = True
        if cdb.is_provider_vlan(vlan_id):
            vlan_name = ''.join(
                [conf.CISCO.provider_vlan_name_prefix,
                 str(vlan_id)])
            auto_create = conf.CISCO.provider_vlan_auto_create
            auto_trunk = conf.CISCO.provider_vlan_auto_trunk

        # Check if this network is already in the DB
        vlan_created = False
        vlan_trunked = False

        try:
            nxos_db.get_port_vlan_switch_binding(port_id, vlan_id, switch_ip)
        except cisco_exc.NexusPortBindingNotFound:
            # Check for vlan/switch binding
            try:
                nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
            except cisco_exc.NexusPortBindingNotFound:
                if auto_create and auto_trunk:
                    # Create vlan and trunk vlan on the port
                    LOG.debug("Nexus: create & trunk vlan %s" % vlan_name)
                    self._client.create_and_trunk_vlan(switch_ip, vlan_id,
                                                       vlan_name, port_id)
                    vlan_created = True
                    vlan_trunked = True
                elif auto_create:
                    # Create vlan but do not trunk it on the port
                    LOG.debug("Nexus: create vlan %s" % vlan_name)
                    self._client.create_vlan(switch_ip, vlan_id, vlan_name)
                    vlan_created = True
            else:
                if auto_trunk:
                    # Only trunk vlan on the port
                    LOG.debug("Nexus: trunk vlan %s" % vlan_name)
                    self._client.enable_vlan_on_trunk_int(
                        switch_ip, vlan_id, port_id)
                    vlan_trunked = True

        try:
            instance = attachment[const.INSTANCE_ID]
            nxos_db.add_nexusport_binding(port_id, str(vlan_id), switch_ip,
                                          instance)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Add binding failed, roll back any vlan creation/enabling
                if vlan_created and vlan_trunked:
                    LOG.debug("Nexus: delete & untrunk vlan %s" % vlan_name)
                    self._client.delete_and_untrunk_vlan(
                        switch_ip, vlan_id, port_id)
                elif vlan_created:
                    LOG.debug("Nexus: delete vlan %s" % vlan_name)
                    self._client.delete_vlan(switch_ip, vlan_id)
                elif vlan_trunked:
                    LOG.debug("Nexus: untrunk vlan %s" % vlan_name)
                    self._client.disable_vlan_on_trunk_int(
                        switch_ip, vlan_id, port_id)

        net_id = network[const.NET_ID]
        new_net_dict = {
            const.NET_ID: net_id,
            const.NET_NAME: network[const.NET_NAME],
            const.NET_PORTS: {},
            const.NET_VLAN_NAME: vlan_name,
            const.NET_VLAN_ID: vlan_id
        }
        self._networks[net_id] = new_net_dict
        return new_net_dict
Example #50
 def _get_edges(self):
     try:
         return self.vcns.get_edges()[1]
     except exceptions.VcnsApiException as e:
         with excutils.save_and_reraise_exception():
             LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response)
Example #51
    def add_router_interface(self, context, router_id, interface_info):
        session = context.session
        with session.begin(subtransactions=True):
            rtr_if_info = super(NuagePlugin, self).add_router_interface(
                context, router_id, interface_info)
            subnet_id = rtr_if_info['subnet_id']
            subn = self.get_subnet(context, subnet_id)

            rtr_zone_mapping = nuagedb.get_rtr_zone_mapping(session, router_id)
            ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(
                session, router_id)
            subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, subnet_id)
            if not rtr_zone_mapping or not ent_rtr_mapping:
                super(NuagePlugin,
                      self).remove_router_interface(context, router_id,
                                                    interface_info)
                msg = (_("Router %s does not hold default zone OR "
                         "net_partition mapping. Router-IF add failed") %
                       router_id)
                raise q_exc.BadRequest(resource='router', msg=msg)

            if not subnet_l2dom:
                super(NuagePlugin,
                      self).remove_router_interface(context, router_id,
                                                    interface_info)
                msg = (_("Subnet %s does not hold Nuage VSD reference. "
                         "Router-IF add failed") % subnet_id)
                raise q_exc.BadRequest(resource='subnet', msg=msg)

            if (subnet_l2dom['net_partition_id'] !=
                    ent_rtr_mapping['net_partition_id']):
                super(NuagePlugin,
                      self).remove_router_interface(context, router_id,
                                                    interface_info)
                msg = (_("Subnet %(subnet)s and Router %(router)s belong to "
                         "different net_partition Router-IF add "
                         "not permitted") % {
                             'subnet': subnet_id,
                             'router': router_id
                         })
                raise q_exc.BadRequest(resource='subnet', msg=msg)
            nuage_subnet_id = subnet_l2dom['nuage_subnet_id']
            nuage_l2dom_tmplt_id = subnet_l2dom['nuage_l2dom_tmplt_id']
            if self.nuageclient.vms_on_l2domain(nuage_subnet_id):
                super(NuagePlugin,
                      self).remove_router_interface(context, router_id,
                                                    interface_info)
                msg = (_("Subnet %s has one or more active VMs "
                         "Router-IF add not permitted") % subnet_id)
                raise q_exc.BadRequest(resource='subnet', msg=msg)
            self.nuageclient.delete_subnet(nuage_subnet_id,
                                           nuage_l2dom_tmplt_id)
            net = netaddr.IPNetwork(subn['cidr'])
            params = {'net': net, 'zone_id': rtr_zone_mapping['nuage_zone_id']}
            if not attributes.is_attr_set(subn['gateway_ip']):
                subn['gateway_ip'] = str(netaddr.IPAddress(net.first + 1))
            try:
                nuage_subnet = self.nuageclient.create_domain_subnet(
                    subn, params)
            except Exception:
                with excutils.save_and_reraise_exception():
                    super(NuagePlugin, self).remove_router_interface(
                        context, router_id, interface_info)
            if nuage_subnet:
                ns_dict = {}
                ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_subnetid']
                ns_dict['nuage_l2dom_tmplt_id'] = None
                nuagedb.update_subnetl2dom_mapping(subnet_l2dom, ns_dict)
        return rtr_if_info
Example #52
    def _issue_request(self):
        '''Issue a request to a provider.'''
        conn = (self._client_conn or self._api_client.acquire_connection(
            True, copy.copy(self._headers), rid=self._rid()))
        if conn is None:
            error = Exception(_("No API connections available"))
            self._request_error = error
            return error

        url = self._url
        LOG.debug(_("[%(rid)d] Issuing - request %(conn)s"), {
            'rid': self._rid(),
            'conn': self._request_str(conn, url)
        })
        issued_time = time.time()
        is_conn_error = False
        is_conn_service_unavail = False
        response = None
        try:
            redirects = 0
            while (redirects <= self._redirects):
                # Update connection with user specified request timeout,
                # the connect timeout is usually smaller so we only set
                # the request timeout after a connection is established
                if conn.sock is None:
                    conn.connect()
                    conn.sock.settimeout(self._http_timeout)
                elif conn.sock.gettimeout() != self._http_timeout:
                    conn.sock.settimeout(self._http_timeout)

                headers = copy.copy(self._headers)
                cookie = self._api_client.auth_cookie(conn)
                if cookie:
                    headers["Cookie"] = cookie

                gen = self._api_client.config_gen
                if gen:
                    headers["X-Nvp-Wait-For-Config-Generation"] = gen
                    LOG.debug(
                        _("Setting X-Nvp-Wait-For-Config-Generation "
                          "request header: '%s'"), gen)
                try:
                    conn.request(self._method, url, self._body, headers)
                except Exception as e:
                    with excutils.save_and_reraise_exception():
                        LOG.warn(
                            _("[%(rid)d] Exception issuing request: "
                              "%(e)s"), {
                                  'rid': self._rid(),
                                  'e': e
                              })

                response = conn.getresponse()
                response.body = response.read()
                response.headers = response.getheaders()
                elapsed_time = time.time() - issued_time
                LOG.debug(
                    _("[%(rid)d] Completed request '%(conn)s': "
                      "%(status)s (%(elapsed)s seconds)"), {
                          'rid': self._rid(),
                          'conn': self._request_str(conn, url),
                          'status': response.status,
                          'elapsed': elapsed_time
                      })

                new_gen = response.getheader('X-Nvp-Config-Generation', None)
                if new_gen:
                    LOG.debug(
                        _("Reading X-Nvp-config-Generation response "
                          "header: '%s'"), new_gen)
                    if (self._api_client.config_gen is None
                            or self._api_client.config_gen < int(new_gen)):
                        self._api_client.config_gen = int(new_gen)

                if response.status == httplib.UNAUTHORIZED:

                    if cookie is None and self._url != "/ws.v1/login":
                        # The connection still has no valid cookie despite
                        # attempts to authenticate and the request has failed
                        # with unauthorized status code. If this isn't
                        # a request to authenticate, we should abort the
                        # request since there is no point in retrying.
                        self._abort = True
                    else:
                        # If request is unauthorized, clear the session cookie
                        # for the current provider so that subsequent requests
                        # to the same provider triggers re-authentication.
                        self._api_client.set_auth_cookie(conn, None)

                    self._api_client.set_auth_cookie(conn, None)
                elif response.status == httplib.SERVICE_UNAVAILABLE:
                    is_conn_service_unavail = True

                if response.status not in [
                        httplib.MOVED_PERMANENTLY, httplib.TEMPORARY_REDIRECT
                ]:
                    break
                elif redirects >= self._redirects:
                    LOG.info(
                        _("[%d] Maximum redirects exceeded, aborting "
                          "request"), self._rid())
                    break
                redirects += 1

                conn, url = self._redirect_params(conn, response.headers,
                                                  self._client_conn is None)
                if url is None:
                    response.status = httplib.INTERNAL_SERVER_ERROR
                    break
                LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"), {
                    'rid': self._rid(),
                    'conn': self._request_str(conn, url)
                })
                # yield here, just in case we are not out of the loop yet
                eventlet.greenthread.sleep(0)
            # If we receive any of these responses, then
            # our server did not process our request and may be in an
            # errored state. Raise an exception, which will cause
            # the conn to be released with is_conn_error == True
            # which puts the conn on the back of the client's priority
            # queue.
            if (response.status == httplib.INTERNAL_SERVER_ERROR
                    and response.status > httplib.NOT_IMPLEMENTED):
                LOG.warn(
                    _("[%(rid)d] Request '%(method)s %(url)s' "
                      "received: %(status)s"), {
                          'rid': self._rid(),
                          'method': self._method,
                          'url': self._url,
                          'status': response.status
                      })
                raise Exception(_('Server error return: %s') % response.status)
            return response
        except Exception as e:
            if isinstance(e, httplib.BadStatusLine):
                msg = (_("Invalid server response"))
            else:
                msg = unicode(e)
            if response is None:
                elapsed_time = time.time() - issued_time
            LOG.warn(
                _("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
                  "(%(elapsed)s seconds)"), {
                      'rid': self._rid(),
                      'conn': self._request_str(conn, url),
                      'msg': msg,
                      'elapsed': elapsed_time
                  })
            self._request_error = e
            is_conn_error = True
            return e
        finally:
            # Make sure we release the original connection provided by the
            # acquire_connection() call above.
            if self._client_conn is None:
                self._api_client.release_connection(conn,
                                                    is_conn_error,
                                                    is_conn_service_unavail,
                                                    rid=self._rid())
Example #53
 def get_driver(self, resource_id):
     try:
         return self._drivers[resource_id]
     except KeyError:
         with excutils.save_and_reraise_exception(reraise=False):
             raise cfg_exceptions.DriverNotFound(id=resource_id)
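
Example #53 uses reraise=False for a different purpose: the original KeyError is deliberately dropped, and a domain-specific exception raised inside the block propagates instead. A self-contained sketch of that translation (oslo.utils assumed; DriverNotFound below is a hypothetical stand-in for the plugin exception):

from oslo_utils import excutils


class DriverNotFound(Exception):
    """Hypothetical stand-in for cfg_exceptions.DriverNotFound."""


class DriverRegistry(object):
    def __init__(self):
        self._drivers = {}

    def get_driver(self, resource_id):
        try:
            return self._drivers[resource_id]
        except KeyError:
            # The KeyError is suppressed (reraise=False); the exception
            # raised inside the block is what the caller actually sees.
            with excutils.save_and_reraise_exception(reraise=False):
                raise DriverNotFound("no driver for %r" % resource_id)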
Example #54
    def rest_call(self,
                  action,
                  resource,
                  data='',
                  headers=None,
                  timeout=False,
                  reconnect=False,
                  hash_handler=None):
        uri = self.base_uri + resource
        body = jsonutils.dumps(data)
        headers = headers or {}
        headers['Content-type'] = 'application/json'
        headers['Accept'] = 'application/json'
        headers['NeutronProxy-Agent'] = self.name
        headers['Instance-ID'] = self.neutron_id
        headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID
        if hash_handler:
            # this will be excluded on calls that don't need hashes
            # (e.g. topology sync, capability checks)
            headers[HASH_MATCH_HEADER] = hash_handler.read_for_update()
        else:
            hash_handler = cdb.HashHandler()
        if 'keep-alive' in self.capabilities:
            headers['Connection'] = 'keep-alive'
        else:
            reconnect = True
        if self.auth:
            headers['Authorization'] = self.auth

        LOG.debug(
            _("ServerProxy: server=%(server)s, port=%(port)d, "
              "ssl=%(ssl)r"), {
                  'server': self.server,
                  'port': self.port,
                  'ssl': self.ssl
              })
        LOG.debug(
            _("ServerProxy: resource=%(resource)s, data=%(data)r, "
              "headers=%(headers)r, action=%(action)s"), {
                  'resource': resource,
                  'data': data,
                  'headers': headers,
                  'action': action
              })

        # unspecified timeout is False because a timeout can be specified as
        # None to indicate no timeout.
        if timeout is False:
            timeout = self.timeout

        if timeout != self.timeout:
            # need a new connection if timeout has changed
            reconnect = True

        if not self.currentconn or reconnect:
            if self.currentconn:
                self.currentconn.close()
            if self.ssl:
                self.currentconn = HTTPSConnectionWithValidation(
                    self.server, self.port, timeout=timeout)
                if self.currentconn is None:
                    LOG.error(
                        _('ServerProxy: Could not establish HTTPS '
                          'connection'))
                    return 0, None, None, None
                self.currentconn.combined_cert = self.combined_cert
            else:
                self.currentconn = httplib.HTTPConnection(self.server,
                                                          self.port,
                                                          timeout=timeout)
                if self.currentconn is None:
                    LOG.error(
                        _('ServerProxy: Could not establish HTTP '
                          'connection'))
                    return 0, None, None, None

        try:
            self.currentconn.request(action, uri, body, headers)
            response = self.currentconn.getresponse()
            respstr = response.read()
            respdata = respstr
            if response.status in self.success_codes:
                hash_value = response.getheader(HASH_MATCH_HEADER)
                # don't clear hash from DB if a hash header wasn't present
                if hash_value is not None:
                    hash_handler.put_hash(hash_value)
                else:
                    hash_handler.clear_lock()
                try:
                    respdata = jsonutils.loads(respstr)
                except ValueError:
                    # response was not JSON, ignore the exception
                    pass
            else:
                # release lock so others don't have to wait for timeout
                hash_handler.clear_lock()

            ret = (response.status, response.reason, respstr, respdata)
        except httplib.HTTPException:
            # If we were using a cached connection, try again with a new one.
            with excutils.save_and_reraise_exception() as ctxt:
                self.currentconn.close()
                if reconnect:
                    # if reconnect is true, this was on a fresh connection so
                    # reraise since this server seems to be broken
                    ctxt.reraise = True
                else:
                    # if reconnect is false, it was a cached connection so
                    # try one more time before re-raising
                    ctxt.reraise = False
            return self.rest_call(action,
                                  resource,
                                  data,
                                  headers,
                                  timeout=timeout,
                                  reconnect=True)
        except (socket.timeout, socket.error) as e:
            self.currentconn.close()
            LOG.error(_('ServerProxy: %(action)s failure, %(e)r'), {
                'action': action,
                'e': e
            })
            ret = 0, None, None, None
        LOG.debug(
            _("ServerProxy: status=%(status)d, reason=%(reason)r, "
              "ret=%(ret)s, data=%(data)r"), {
                  'status': ret[0],
                  'reason': ret[1],
                  'ret': ret[2],
                  'data': ret[3]
              })
        return ret
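
The httplib.HTTPException branch above encodes a one-shot retry: if the failure happened on a cached connection, ctxt.reraise is set to False and rest_call() is invoked again with reconnect=True; if the connection was already fresh, the exception is re-raised. Stripped to its essentials it looks roughly like the sketch below, written against Python 3's http.client (oslo.utils assumed; the connection-pool helpers are hypothetical):

import http.client as httplib

from oslo_utils import excutils


def rest_call(pool, request, reconnect=False):
    """Retry once on a fresh connection when a cached connection fails."""
    conn = pool.fresh_connection() if reconnect else pool.cached_connection()
    try:
        return conn.send(request)
    except httplib.HTTPException:
        with excutils.save_and_reraise_exception() as ctxt:
            conn.close()
            # A cached connection may simply be stale: suppress the error
            # and retry once on a brand-new connection.  A failure on a
            # fresh connection is re-raised as-is.
            ctxt.reraise = reconnect
        return rest_call(pool, request, reconnect=True)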
Example #55
    def create_vip(self, context, vip):
        v = vip['vip']
        tenant_id = self._get_tenant_id_for_create(context, v)

        with context.session.begin(subtransactions=True):
            if v['pool_id']:
                pool = self._get_resource(context, Pool, v['pool_id'])
                # validate that the pool has same tenant
                if pool['tenant_id'] != tenant_id:
                    raise n_exc.NotAuthorized()
                # validate that the pool has same protocol
                if pool['protocol'] != v['protocol']:
                    raise loadbalancer.ProtocolMismatch(
                        vip_proto=v['protocol'], pool_proto=pool['protocol'])
                if pool['status'] == constants.PENDING_DELETE:
                    raise loadbalancer.StateInvalid(state=pool['status'],
                                                    id=pool['id'])
            vip_db = Vip(id=uuidutils.generate_uuid(),
                         tenant_id=tenant_id,
                         name=v['name'],
                         description=v['description'],
                         port_id=None,
                         protocol_port=v['protocol_port'],
                         protocol=v['protocol'],
                         pool_id=v['pool_id'],
                         connection_limit=v['connection_limit'],
                         admin_state_up=v['admin_state_up'],
                         status=constants.PENDING_CREATE)

            session_info = v['session_persistence']

            if session_info:
                s_p = self._create_session_persistence_db(
                    session_info, vip_db['id'])
                vip_db.session_persistence = s_p

            try:
                context.session.add(vip_db)
                context.session.flush()
            except exception.DBDuplicateEntry:
                raise loadbalancer.VipExists(pool_id=v['pool_id'])
        if cfg.CONF.lbaas_vip_create_port:
            try:
                # create a port to reserve address for IPAM
                # do it outside the transaction to avoid rpc calls
                self._create_port_for_vip(context, vip_db, v['subnet_id'],
                                          v.get('address'))
            except Exception:
                # catch any kind of exceptions
                with excutils.save_and_reraise_exception():
                    context.session.delete(vip_db)
                    context.session.flush()
        else:
            try:
                if v.get('address') == attributes.ATTR_NOT_SPECIFIED:
                    raise loadbalancer.VipAddressNotAssigned()

                filters = {
                    'fixed_ips': {
                        'subnet_id': [v['subnet_id']],
                        'ip_address': [v.get('address')]
                    }
                }
                vip_ports = self._core_plugin.get_ports(context,
                                                        filters=filters)
                if not vip_ports:
                    raise loadbalancer.VipPortNotFound(
                        address=v.get('address'), subnet_id=v['subnet_id'])
                vip_port = vip_ports[0]

                filters = {
                    'port_id': [vip_port['id']],
                    'protocol_port': [v['protocol_port']]
                }
                vips = self.get_vips(context, filters=filters)
                if vips:
                    raise loadbalancer.VipProtocolPortInUse(
                        address=v.get('address'),
                        protocol_port=v['protocol_port'])
                vip_db.port_id = vip_port['id']
                # explicitly sync session with db
                context.session.flush()
            except Exception:
                # catch any kind of exceptions
                with excutils.save_and_reraise_exception():
                    context.session.delete(vip_db)
                    context.session.flush()

        if v['pool_id']:
            # fetching pool again
            pool = self._get_resource(context, Pool, v['pool_id'])
            # NOTE: we rely on the fact that the pool didn't change between
            # the block above and here
            vip_db['pool_id'] = v['pool_id']
            pool['vip_id'] = vip_db['id']
            # explicitly flush changes as we're outside any transaction
            context.session.flush()

        return self._make_vip_dict(vip_db)
Example #56
    def _issue_request(self):
        '''Issue a request to a provider.'''
        conn = (self._client_conn or self._api_client.acquire_connection(
            True, copy.copy(self._headers), rid=self._rid()))
        if conn is None:
            error = Exception(_("No API connections available"))
            self._request_error = error
            return error

        url = self._url
        LOG.debug(
            _("[%(rid)d] Issuing - request url: %(conn)s, "
              "body: %(body)s"), {
                  'rid': self._rid(),
                  'conn': self._request_str(conn, url),
                  'body': self._body
              })
        issued_time = time.time()
        is_conn_error = False
        is_conn_service_unavail = False
        response = None
        try:
            redirects = 0
            while (redirects <= self._redirects):
                # Update connection with user specified request timeout,
                # the connect timeout is usually smaller so we only set
                # the request timeout after a connection is established
                if conn.sock is None:
                    conn.connect()
                    conn.sock.settimeout(self._http_timeout)
                elif conn.sock.gettimeout() != self._http_timeout:
                    conn.sock.settimeout(self._http_timeout)
                headers = copy.copy(self._headers)
                if templates.RELOGIN in url:
                    url = json.loads(templates.LOGIN)['path']
                    conn.connect()
                    self._api_client._wait_for_login(conn, headers)
                    url = self._url

                cookie = self._api_client.auth_cookie(conn)

                if self._url != json.loads(templates.LOGIN)['path'] and cookie:
                    headers['Cookie'] = cookie['Cookie']
                    headers['X-CSRFTOKEN'] = cookie['X-CSRFTOKEN']

                try:
                    if self._body:
                        if self._url == json.loads(templates.LOGIN)['path']:
                            body = urllib.urlencode(self._body)
                        else:
                            body = json.dumps(self._body)
                    else:
                        body = None
                    LOG.warn(
                        _("Issuing request: "
                          "self._method = [%(method)s], "
                          "url=%(url)s, body=%(body)s, "
                          "headers=%(headers)s"), {
                              'method': self._method,
                              "url": url,
                              "body": body,
                              "headers": headers
                          })

                    conn.request(self._method, url, body, headers)
                except Exception as e:
                    with excutils.save_and_reraise_exception():
                        LOG.warn(
                            _("[%(rid)d] Exception issuing request: "
                              "%(e)s"), {
                                  'rid': self._rid(),
                                  'e': e
                              })

                response = conn.getresponse()
                response.body = response.read()
                response.headers = response.getheaders()
                elapsed_time = time.time() - issued_time
                LOG.debug(
                    _("@@@@@@ [ _issue_request ] [%(rid)d] "
                      "Completed request '%(conn)s': "
                      "%(status)s (%(elapsed)s seconds), "
                      "response.headers %(response.headers)s"
                      "response.body %(response.body)s"), {
                          'rid': self._rid(),
                          'conn': self._request_str(conn, url),
                          'status': response.status,
                          'elapsed': elapsed_time,
                          'response.headers': response.headers,
                          'response.body': response.body
                      })

                if response.status in (httplib.UNAUTHORIZED, httplib.FOUND):
                    if cookie is None and \
                       self._url != json.loads(templates.LOGIN)['path']:
                        # The connection still has no valid cookie despite
                        # attempts to authenticate and the request has failed
                        # with unauthorized status code. If this isn't
                        # a request to authenticate, we should abort the
                        # request since there is no point in retrying.
                        self._abort = True
                    LOG.debug("self._api_client=%s" % self._api_client)
                    # If request is unauthorized, clear the session cookie
                    # for the current provider so that subsequent requests
                    # to the same provider triggers re-authentication.
                    self._api_client.set_auth_cookie(conn, None)
                elif response.status == httplib.SERVICE_UNAVAILABLE:
                    is_conn_service_unavail = True

                if response.status not in [
                        httplib.MOVED_PERMANENTLY, httplib.FOUND,
                        httplib.TEMPORARY_REDIRECT
                ]:
                    break
                elif redirects >= self._redirects:
                    LOG.info(
                        _("[%d] Maximum redirects exceeded, aborting "
                          "request"), self._rid())
                    break
                redirects += 1
                conn, url = self._redirect_params(conn, response.headers,
                                                  self._client_conn is None)
                if url is None:
                    response.status = httplib.INTERNAL_SERVER_ERROR
                    break
                LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"), {
                    'rid': self._rid(),
                    'conn': self._request_str(conn, url)
                })
                # yield here, just in case we are not out of the loop yet
                eventlet.greenthread.sleep(0)
            # If we receive any of these responses, then
            # our server did not process our request and may be in an
            # errored state. Raise an exception, which will cause
            # the conn to be released with is_conn_error == True
            # which puts the conn on the back of the client's priority
            # queue.
            if (response.status == httplib.INTERNAL_SERVER_ERROR
                    and response.status > httplib.NOT_IMPLEMENTED):
                LOG.warn(
                    _("[%(rid)d] Request '%(method)s %(url)s' "
                      "received: %(status)s"), {
                          'rid': self._rid(),
                          'method': self._method,
                          'url': self._url,
                          'status': response.status
                      })
                raise Exception(_('Server error return: %s') % response.status)
            return response
        except Exception as e:
            if isinstance(e, httplib.BadStatusLine):
                msg = (_("Invalid server response"))
            else:
                msg = unicode(e)
            if response is None:
                elapsed_time = time.time() - issued_time
            LOG.warn(
                _("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
                  "(%(elapsed)s seconds)"), {
                      'rid': self._rid(),
                      'conn': self._request_str(conn, url),
                      'msg': msg,
                      'elapsed': elapsed_time
                  })
            self._request_error = e
            is_conn_error = True
            return e
        finally:
            # Make sure we release the original connection provided by the
            # acquire_connection() call above.
            if self._client_conn is None:
                self._api_client.release_connection(conn,
                                                    is_conn_error,
                                                    is_conn_service_unavail,
                                                    rid=self._rid())
Example #57
    def update(self, request, id, body=None, **kwargs):
        """Updates the specified entity's attributes."""
        parent_id = kwargs.get(self._parent_id_name)
        try:
            payload = body.copy()
        except AttributeError:
            msg = _("Invalid format: %s") % request.body
            raise exceptions.BadRequest(resource='body', msg=msg)
        payload['id'] = id
        self._notifier.info(request.context, self._resource + '.update.start',
                            payload)
        body = Controller.prepare_request_body(request.context,
                                               body,
                                               False,
                                               self._resource,
                                               self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.UPDATE]
        # Load object to check authz
        # but pass only attributes in the original body and required
        # by the policy engine to the policy 'brain'
        field_list = [
            name for (name, value) in self._attr_info.iteritems()
            if (value.get('required_by_policy') or value.get('primary_key')
                or 'default' not in value)
        ]
        # Ensure policy engine is initialized
        policy.init()
        orig_obj = self._item(request,
                              id,
                              field_list=field_list,
                              parent_id=parent_id)
        orig_object_copy = copy.copy(orig_obj)
        orig_obj.update(body[self._resource])
        # Make a list of attributes to be updated to inform the policy engine
        # which attributes are set explicitly so that it can distinguish them
        # from the ones that are set to their default values.
        orig_obj[const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
        try:
            policy.enforce(request.context, action, orig_obj)
        except exceptions.PolicyNotAuthorized:
            with excutils.save_and_reraise_exception() as ctxt:
                # If a tenant is modifying its own object, it's safe to return
                # a 403. Otherwise, pretend that it doesn't exist to avoid
                # giving away information.
                if request.context.tenant_id != orig_obj['tenant_id']:
                    ctxt.reraise = False
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

        obj_updater = getattr(self._plugin, action)
        kwargs = {self._resource: body}
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj = obj_updater(request.context, id, **kwargs)
        result = {self._resource: self._view(request.context, obj)}
        notifier_method = self._resource + '.update.end'
        self._notifier.info(request.context, notifier_method, result)
        self._send_dhcp_notification(request.context, result, notifier_method)
        self._send_nova_notification(action, orig_object_copy, result)
        return result
Example #58
    def delete_network(self, context, id):
        # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
        # function is not used because it auto-deletes ports and
        # subnets from the DB without invoking the derived class's
        # delete_port() or delete_subnet(), preventing mechanism
        # drivers from being called. This approach should be revisited
        # when the API layer is reworked during icehouse.

        LOG.debug(_("Deleting network %s"), id)
        session = context.session
        while True:
            try:
                with session.begin(subtransactions=True):
                    # Get ports to auto-delete.
                    ports = (session.query(
                        models_v2.Port).enable_eagerloads(False).filter_by(
                            network_id=id).with_lockmode('update').all())
                    LOG.debug(_("Ports to auto-delete: %s"), ports)
                    only_auto_del = all(
                        p.device_owner in
                        db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS
                        for p in ports)
                    if not only_auto_del:
                        LOG.debug(_("Tenant-owned ports exist"))
                        raise exc.NetworkInUse(net_id=id)

                    # Get subnets to auto-delete.
                    subnets = (session.query(
                        models_v2.Subnet).enable_eagerloads(False).filter_by(
                            network_id=id).with_lockmode('update').all())
                    LOG.debug(_("Subnets to auto-delete: %s"), subnets)

                    if not (ports or subnets):
                        network = self.get_network(context, id)
                        mech_context = driver_context.NetworkContext(
                            self, context, network)
                        self.mechanism_manager.delete_network_precommit(
                            mech_context)

                        record = self._get_network(context, id)
                        LOG.debug(_("Deleting network record %s"), record)
                        session.delete(record)

                        for segment in mech_context.network_segments:
                            self.type_manager.release_segment(session, segment)

                        # The segment records are deleted via cascade from the
                        # network record, so explicit removal is not necessary.
                        LOG.debug(_("Committing transaction"))
                        break
            except os_db.exception.DBError as e:
                with excutils.save_and_reraise_exception() as ctxt:
                    if isinstance(e.inner_exception, sql_exc.IntegrityError):
                        ctxt.reraise = False
                        msg = _("A concurrent port creation has occurred")
                        LOG.warning(msg)
                        continue

            for port in ports:
                try:
                    self.delete_port(context, port.id)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Exception auto-deleting port %s"),
                                      port.id)

            for subnet in subnets:
                try:
                    self.delete_subnet(context, subnet.id)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Exception auto-deleting subnet %s"),
                                      subnet.id)

        try:
            self.mechanism_manager.delete_network_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism driver failed to
            # delete the network.  Ideally we'd notify the caller of
            # the fact that an error occurred.
            LOG.error(_("mechanism_manager.delete_network_postcommit failed"))
        self.notifier.network_delete(context, id)
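
In delete_network() the DBError handler sets ctxt.reraise = False and issues continue, so an IntegrityError caused by a concurrent port creation simply restarts the outer while loop instead of failing the delete. A much-reduced sketch of that retry shape (oslo.utils assumed; the collection object and ConcurrentUpdate exception are hypothetical):

from oslo_utils import excutils


class ConcurrentUpdate(Exception):
    """Hypothetical stand-in for a DB integrity error."""


def delete_when_empty(collection):
    """Delete the parent once no children remain, tolerating concurrent adds."""
    while True:
        try:
            children = collection.list_children()
            if not children:
                collection.delete()
                break
        except ConcurrentUpdate:
            with excutils.save_and_reraise_exception() as ctxt:
                # Another worker slipped a child in; retry the loop rather
                # than failing the whole operation.
                ctxt.reraise = False
                continue
        for child in children:
            collection.delete_child(child)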
Example #59
    def remove_router_interface(self, context, router_id, interface_info):
        LOG.debug(
            _("Remove router interface in progress: "
              "router_id=%(router_id)s "
              "interface_info=%(interface_info)r"), {
                  'router_id': router_id,
                  'interface_info': interface_info
              })

        subnet_id = interface_info.get('subnet_id')
        port_id = interface_info.get('port_id')
        if not subnet_id:
            if not port_id:
                raise sdnve_exc.BadInputException(msg=_('No port ID'))
            myport = super(SdnvePluginV2, self).get_port(context, port_id)
            LOG.debug(_("SdnvePluginV2.remove_router_interface port: %s"),
                      myport)
            myfixed_ips = myport.get('fixed_ips')
            if not myfixed_ips:
                raise sdnve_exc.BadInputException(msg=_('No fixed IP'))
            subnet_id = myfixed_ips[0].get('subnet_id')
            if subnet_id:
                interface_info['subnet_id'] = subnet_id
                LOG.debug(
                    _("SdnvePluginV2.remove_router_interface subnet_id: %s"),
                    subnet_id)
        else:
            if not port_id:
                # The backend requires port id info in the request
                subnet = super(SdnvePluginV2,
                               self).get_subnet(context, subnet_id)
                df = {
                    'device_id': [router_id],
                    'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF],
                    'network_id': [subnet['network_id']]
                }
                ports = self.get_ports(context, filters=df)
                if ports:
                    pid = ports[0]['id']
                    interface_info['port_id'] = pid
                    msg = ("SdnvePluginV2.remove_router_interface "
                           "subnet_id: %(sid)s  port_id: %(pid)s")
                    LOG.debug(msg, {'sid': subnet_id, 'pid': pid})

        (res, data) = self.sdnve_client.sdnve_update(
            'router', router_id + '/remove_router_interface', interface_info)

        if res not in constants.HTTP_ACCEPTABLE:
            raise sdnve_exc.SdnveException(
                msg=(_('Update router-remove-interface failed SDN-VE: %s') %
                     res))

        session = context.session
        with session.begin(subtransactions=True):
            try:
                info = super(SdnvePluginV2, self).remove_router_interface(
                    context, router_id, interface_info)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self._add_router_interface_only(context, router_id,
                                                    interface_info)

        return info
Example #60
    def add_router_interface(self, context, router_id, interface_info):
        """Creates SVI on NOS device and assigns IP address to SVI."""
        LOG.debug(
            "BrocadeSVIPlugin.add_router_interface on VDX: "
            "router_id=%(router_id)s "
            "interface_info=%(interface_info)r", {
                'router_id': router_id,
                'interface_info': interface_info
            })

        with context.session.begin(subtransactions=True):

            info = super(BrocadeSVIPlugin,
                         self).add_router_interface(context, router_id,
                                                    interface_info)

            port = db.get_port(context.session, info["port_id"])

            # shutting down neutron port to allow NOS to do Arp/Routing
            port['admin_state_up'] = False
            port['port'] = port
            self._core_plugin.update_port(context, info["port_id"], port)

            interface_info = info
            subnet = self._core_plugin._get_subnet(context,
                                                   interface_info["subnet_id"])
            cidr = subnet["cidr"]
            net_addr, net_len = self.net_addr(cidr)
            gateway_ip = subnet["gateway_ip"]
            network_id = subnet['network_id']
            bnet = brocade_db.get_network(context, network_id)
            vlan_id = bnet['vlan']
            gateway_ip_cidr = gateway_ip + '/' + str(net_len)
            LOG.debug("Allocated cidr %(cidr)s from the pool, "
                      "network_id %(net_id)s "
                      "bnet %(bnet)s "
                      "vlan %(vlan_id)d " % ({
                          'cidr': gateway_ip_cidr,
                          'net_id': network_id,
                          'bnet': bnet,
                          'vlan_id': int(vlan_id)
                      }))
            port_filters = {
                'network_id': [network_id],
                'device_owner': [DEVICE_OWNER_ROUTER_INTF]
            }
            port_count = self._core_plugin.get_ports_count(
                context, port_filters)
            LOG.info(_("BrocadeSVIPlugin.add_router_interface ports_count %d"),
                     port_count)

            # port count is checked against 2 since the current port is already
            # added to db
            if port_count == 2:
                # This subnet is already part of some router
                # (this is not supported in this version of brocade svi plugin)
                LOG.error(
                    _("BrocadeSVIPlugin: adding redundant router "
                      "interface is not supported"))
                raise Exception(
                    _("BrocadeSVIPlugin:adding redundant router "
                      "interface is not supported"))

        try:
            switch = self._switch
            self._driver.create_svi(switch['address'], switch['username'],
                                    switch['password'], switch['rbridge_id'],
                                    vlan_id, gateway_ip_cidr, str(router_id))
        except Exception:
            LOG.error(
                _("Failed to create Brocade resources to add router "
                  "interface. info=%(info)s, router_id=%(router_id)s"), {
                      "info": info,
                      "router_id": router_id
                  })
            with excutils.save_and_reraise_exception():
                with context.session.begin(subtransactions=True):
                    self.remove_router_interface(context, router_id,
                                                 interface_info)
        return info