def __init__(self, configfile=None):
    """Set up the plugin DB, the OpenFlow endpoints, and RPC."""
    db.configure_db({
        "sql_connection": cfg.CONF.DATABASE.sql_connection,
        "base": models_v2.model_base.BASEV2,
        "reconnect_interval": cfg.CONF.DATABASE.reconnect_interval,
    })
    self.tunnel_key = db_api_v2.TunnelKey(cfg.CONF.OVS.tunnel_key_min,
                                          cfg.CONF.OVS.tunnel_key_max)
    ofp_con_host = cfg.CONF.OVS.openflow_controller
    ofp_api_host = cfg.CONF.OVS.openflow_rest_api
    if ofp_con_host is None or ofp_api_host is None:
        raise q_exc.Invalid(_('invalid configuration. check ryu.ini'))
    db_api_v2.set_ofp_servers([
        (ofp_con_host, ofp_service_type.CONTROLLER),
        (ofp_api_host, ofp_service_type.REST_API),
    ])
    self.client = client.OFPClient(ofp_api_host)
    self.tun_client = client.TunnelClient(ofp_api_host)
    # Pre-register every reserved network except the "unknown" id.
    for nw_id in rest_nw_id.RESERVED_NETWORK_IDS:
        if nw_id != rest_nw_id.NW_ID_UNKNOWN:
            self.client.update_network(nw_id)
    self._setup_rpc()
    # register known all network list on startup
    self._create_all_tenant_network()
def __init__(self):
    """Initialize the PLUMgrid plugin: NOS connection plus plugin DB."""
    LOG.info(_('QuantumPluginPLUMgrid Status: Starting Plugin'))

    # PLUMgrid NOS configuration
    nos_plumgrid = cfg.CONF.PLUMgridNOS.nos_server
    nos_port = cfg.CONF.PLUMgridNOS.nos_server_port
    timeout = cfg.CONF.PLUMgridNOS.servertimeout
    self.topology_name = cfg.CONF.PLUMgridNOS.topologyname
    self.snippets = plumgrid_nos_snippets.DataNOSPLUMgrid()

    # TODO(Edgar): username/password are placeholders for the next
    # PLUMgrid release.  Read (but do not store) them for now; the
    # original bound them to locals that were never used.
    cfg.CONF.PLUMgridNOS.username
    cfg.CONF.PLUMgridNOS.password

    self.rest_conn = rest_connection.RestConnection(nos_plumgrid,
                                                    nos_port, timeout)
    if self.rest_conn is None:
        raise SystemExit(_('QuantumPluginPLUMgrid Status: '
                           'Aborting Plugin'))
    else:
        # Plugin DB initialization
        db.configure_db()

        # PLUMgrid NOS info validation
        LOG.info(_('QuantumPluginPLUMgrid NOS: %s'), nos_plumgrid)
        if not nos_plumgrid:
            raise SystemExit(_('QuantumPluginPLUMgrid Status: '
                               'NOS value is missing in config file'))

        LOG.debug(_('QuantumPluginPLUMgrid Status: Quantum server with '
                    'PLUMgrid Plugin has started'))
def __init__(self, configfile=None):
    """Locate and parse the config file, then set up the DB and VLAN map."""
    parser = ConfigParser.ConfigParser()
    if configfile is None:
        if os.path.exists(CONF_FILE):
            configfile = CONF_FILE
        else:
            configfile = find_config(
                os.path.abspath(os.path.dirname(__file__)))
    if configfile is None:
        raise Exception("Configuration file \"%s\" doesn't exist" %
                        (configfile))
    LOG.debug("Using configuration file: %s" % configfile)
    parser.read(configfile)
    LOG.debug("Config: %s" % parser)
    db.configure_db(
        {"sql_connection": parser.get("DATABASE", "sql_connection")})
    self.vmap = VlanMap()
    # Seed the map with VLANs already recorded in the database.
    for vlan_id, network_id in ovs_db.get_vlans():
        self.vmap.set(vlan_id, network_id)
def __init__(self, configfile=None):
    """Parse config, connect the DB, validate the VLAN range, build the map."""
    if configfile is None:
        if os.path.exists(CONF_FILE):
            configfile = CONF_FILE
        else:
            configfile = find_config(
                os.path.abspath(os.path.dirname(__file__)))
    if configfile is None:
        raise Exception("Configuration file \"%s\" doesn't exist" %
                        (configfile))
    LOG.debug("Using configuration file: %s" % configfile)
    conf = config.parse(configfile)
    db.configure_db({
        "sql_connection": conf.DATABASE.sql_connection,
        "reconnect_interval": conf.DATABASE.reconnect_interval,
    })
    vlan_min = conf.OVS.vlan_min
    vlan_max = conf.OVS.vlan_max
    # An inverted range is a config error: warn and fall back to defaults.
    if vlan_min > vlan_max:
        LOG.warn("Using default VLAN values! vlan_min = %s is larger"
                 " than vlan_max = %s!" % (vlan_min, vlan_max))
        vlan_min = 1
        vlan_max = 4094
    self.vmap = VlanMap(vlan_min, vlan_max)
    # Seed the map with VLANs already present in the database.
    for vlan_id, network_id in ovs_db.get_vlans():
        LOG.debug("Adding already populated vlan %s -> %s" %
                  (vlan_id, network_id))
        self.vmap.already_used(vlan_id, network_id)
def setUp(self):
    """Prepare an isolated metaplugin backed by a fully mocked client."""
    super(MetaQuantumPluginV2Test, self).setUp()
    # Reset module-level DB state so each test gets a fresh engine.
    db._ENGINE = None
    db._MAKER = None
    self.fake_tenant_id = uuidutils.generate_uuid()
    self.context = context.get_admin_context()
    db.configure_db()
    setup_metaplugin_conf()
    self.mox = mox.Mox()
    self.stubs = stubout.StubOutForTesting()
    # Patch the quantum client class so plugin calls never hit the network.
    self.client_cls_p = mock.patch('quantumclient.v2_0.client.Client')
    client_cls = self.client_cls_p.start()
    self.client_inst = mock.Mock()
    client_cls.return_value = self.client_inst
    # Stub every CRUD call with a canned response.
    self.client_inst.create_network.return_value = \
        {'id': 'fake_id'}
    self.client_inst.create_port.return_value = \
        {'id': 'fake_id'}
    self.client_inst.create_subnet.return_value = \
        {'id': 'fake_id'}
    self.client_inst.update_network.return_value = \
        {'id': 'fake_id'}
    self.client_inst.update_port.return_value = \
        {'id': 'fake_id'}
    self.client_inst.update_subnet.return_value = \
        {'id': 'fake_id'}
    self.client_inst.delete_network.return_value = True
    self.client_inst.delete_port.return_value = True
    self.client_inst.delete_subnet.return_value = True
    self.plugin = MetaPluginV2(configfile=None)
def __init__(self):
    """Start the PLUMgrid plugin: connect to NOS and initialize the DB."""
    LOG.info(_('QuantumPluginPLUMgrid Status: Starting Plugin'))

    # PLUMgrid NOS configuration
    nos_server = cfg.CONF.PLUMgridNOS.nos_server
    nos_port = cfg.CONF.PLUMgridNOS.nos_server_port
    nos_timeout = cfg.CONF.PLUMgridNOS.servertimeout
    self.topology_name = cfg.CONF.PLUMgridNOS.topologyname
    self.snippets = plumgrid_nos_snippets.DataNOSPLUMgrid()

    # TODO(Edgar) These are placeholders for next PLUMgrid release
    cfg.CONF.PLUMgridNOS.username
    cfg.CONF.PLUMgridNOS.password

    self.rest_conn = rest_connection.RestConnection(
        nos_server, nos_port, nos_timeout)
    if self.rest_conn is None:
        raise SystemExit(
            _('QuantumPluginPLUMgrid Status: '
              'Aborting Plugin'))

    # The branch above raises, so the original 'else' is unnecessary.
    # Plugin DB initialization
    db.configure_db()

    # PLUMgrid NOS info validation
    LOG.info(_('QuantumPluginPLUMgrid NOS: %s'), nos_server)
    if not nos_server:
        raise SystemExit(
            _('QuantumPluginPLUMgrid Status: '
              'NOS value is missing in config file'))

    LOG.debug(
        _('QuantumPluginPLUMgrid Status: Quantum server with '
          'PLUMgrid Plugin has started'))
def __init__(self):
    # NOTE(jkoelker) Incomplete base implementation: subclasses must
    #                override __init__, set up their own database, and
    #                never call into this __init__.  The in-memory sqlite
    #                connection below exists purely for the tests.
    options = {
        "sql_connection": "sqlite:///:memory:",
        "base": models_v2.model_base.BASEV2,
    }
    db.configure_db(options)
def setUp(self):
    """Wire up the VPN test plugin as both core and service plugin."""
    super(VPNTestCase, self).setUp()
    plugin = ("quantum.tests.unit.test_vpn.VPNTestPlugin")
    # point config file to: quantum/tests/etc/quantum.conf.test
    args = ['--config-file', test_api_v2.etcdir('quantum.conf.test')]
    config.parse(args=args)
    #just stubbing core plugin with VPN plugin
    cfg.CONF.set_override('core_plugin', plugin)
    cfg.CONF.set_override('service_plugins', [plugin])
    self.addCleanup(cfg.CONF.reset)
    # Ensure 'stale' patched copies of the plugin are never returned
    quantum.manager.QuantumManager._instance = None
    # Ensure the database is reset between tests
    db._ENGINE = None
    db._MAKER = None
    db.configure_db()
    # Ensure existing ExtensionManager is not used
    ext_mgr = extensions.PluginAwareExtensionManager(
        extensions_path,
        {constants.VPN: VPNTestPlugin()}
    )
    extensions.PluginAwareExtensionManager._instance = ext_mgr
    # Build the API router (registers core resources) before the
    # extension middleware is layered on top of the paste app.
    router.APIRouter()
    app = config.load_paste_app('extensions_test_app')
    self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
    # Fixed ids used by the individual tests.
    self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1"
    self._subnet_id = "0c798ed8-33ba-11e2-8b28-000c291c4d14"
def setUp(self):
    """Prepare the metaplugin test with a mocked quantum client."""
    super(MetaQuantumPluginV2Test, self).setUp()
    # Reset module-level DB state so each test gets a fresh engine.
    db._ENGINE = None
    db._MAKER = None
    self.fake_tenant_id = uuidutils.generate_uuid()
    self.context = context.get_admin_context()
    db.configure_db()
    setup_metaplugin_conf()
    self.mox = mox.Mox()
    self.stubs = stubout.StubOutForTesting()
    # NOTE(review): 'args' appears unused in this setUp — confirm whether
    # a config.parse(args=args) call was dropped here.
    args = ['--config-file', etcdir('quantum.conf.test')]
    # Patch the quantum client class so no real API calls are made.
    self.client_cls_p = mock.patch('quantumclient.v2_0.client.Client')
    client_cls = self.client_cls_p.start()
    self.client_inst = mock.Mock()
    client_cls.return_value = self.client_inst
    # Stub every CRUD call with a canned response.
    self.client_inst.create_network.return_value = \
        {'id': 'fake_id'}
    self.client_inst.create_port.return_value = \
        {'id': 'fake_id'}
    self.client_inst.create_subnet.return_value = \
        {'id': 'fake_id'}
    self.client_inst.update_network.return_value = \
        {'id': 'fake_id'}
    self.client_inst.update_port.return_value = \
        {'id': 'fake_id'}
    self.client_inst.update_subnet.return_value = \
        {'id': 'fake_id'}
    self.client_inst.delete_network.return_value = True
    self.client_inst.delete_port.return_value = True
    self.client_inst.delete_subnet.return_value = True
    self.plugin = MetaPluginV2(configfile=None)
def __init__(self, configfile=None):
    """Read the plugin config, connect the DB, and build the VLAN map."""
    cp = ConfigParser.ConfigParser()
    if configfile is None:
        configfile = CONF_FILE if os.path.exists(CONF_FILE) else \
            find_config(os.path.abspath(os.path.dirname(__file__)))
    if configfile is None:
        raise Exception("Configuration file \"%s\" doesn't exist" %
                        (configfile))
    LOG.debug("Using configuration file: %s" % configfile)
    cp.read(configfile)
    LOG.debug("Config: %s" % cp)
    options = {"sql_connection": cp.get("DATABASE", "sql_connection")}
    db.configure_db(options)
    self.vmap = VlanMap()
    # Seed the map with VLANs already recorded in the database.
    for vlan_id, network_id in ovs_db.get_vlans():
        self.vmap.set_vlan(vlan_id, network_id)
def __init__(self, configfile=None):
    """Configure the DB, the Ryu clients (incl. iface client) and RPC."""
    database = cfg.CONF.DATABASE
    db.configure_db({
        "sql_connection": database.sql_connection,
        "base": models_v2.model_base.BASEV2,
        "reconnect_interval": database.reconnect_interval,
    })
    self.tunnel_key = db_api_v2.TunnelKey(cfg.CONF.OVS.tunnel_key_min,
                                          cfg.CONF.OVS.tunnel_key_max)
    ofp_con_host = cfg.CONF.OVS.openflow_controller
    ofp_api_host = cfg.CONF.OVS.openflow_rest_api
    if ofp_con_host is None or ofp_api_host is None:
        raise q_exc.Invalid(_("invalid configuration. check ryu.ini"))
    db_api_v2.set_ofp_servers([
        (ofp_con_host, ofp_service_type.CONTROLLER),
        (ofp_api_host, ofp_service_type.REST_API),
    ])
    self.client = client.OFPClient(ofp_api_host)
    self.tun_client = client.TunnelClient(ofp_api_host)
    self.iface_client = client.QuantumIfaceClient(ofp_api_host)
    # Register every reserved network except the catch-all "unknown" id.
    for nw_id in rest_nw_id.RESERVED_NETWORK_IDS:
        if nw_id != rest_nw_id.NW_ID_UNKNOWN:
            self.client.update_network(nw_id)
    self._setup_rpc()
    # register known all network list on startup
    self._create_all_tenant_network()
def __init__(self, configfile=None):
    """Configure the DB and Ryu REST clients, then register networks."""
    db.configure_db()
    self.tunnel_key = db_api_v2.TunnelKey(
        cfg.CONF.OVS.tunnel_key_min, cfg.CONF.OVS.tunnel_key_max)
    ofp_con_host = cfg.CONF.OVS.openflow_controller
    ofp_api_host = cfg.CONF.OVS.openflow_rest_api
    if ofp_con_host is None or ofp_api_host is None:
        raise q_exc.Invalid(_('Invalid configuration. check ryu.ini'))
    hosts = [(ofp_con_host, ofp_service_type.CONTROLLER),
             (ofp_api_host, ofp_service_type.REST_API)]
    db_api_v2.set_ofp_servers(hosts)
    self.client = client.OFPClient(ofp_api_host)
    self.tun_client = client.TunnelClient(ofp_api_host)
    self.iface_client = client.QuantumIfaceClient(ofp_api_host)
    # Skip only the "unknown" id; all other reserved networks are created.
    for nw_id in rest_nw_id.RESERVED_NETWORK_IDS:
        if nw_id == rest_nw_id.NW_ID_UNKNOWN:
            continue
        self.client.update_network(nw_id)
    self._setup_rpc()
    # register known all network list on startup
    self._create_all_tenant_network()
def initialize():
    """Configure the plugin database from the [DATABASE] options."""
    db.configure_db({
        "sql_connection": "%s" % cfg.CONF.DATABASE.sql_connection,
        "sql_max_retries": cfg.CONF.DATABASE.sql_max_retries,
        "reconnect_interval": cfg.CONF.DATABASE.reconnect_interval,
        "base": models_v2.model_base.BASEV2,
    })
def initialize():
    """Configure the DB with the HW model base and the pool option."""
    database = cfg.CONF.DATABASE
    db.configure_db({
        "sql_connection": "%s" % database.sql_connection,
        "sql_max_retries": database.sql_max_retries,
        "reconnect_interval": database.reconnect_interval,
        "base": HW_BASE,
        "sql_dbpool_enable": database.sql_dbpool_enable,
    })
def __init__(self):
    """Initialize the segmentation manager.

    Checks which device plugins are configured, and load the inventories
    those device plugins for which the inventory is configured.
    """
    conf.CiscoConfigOptions()

    # Instantiate every configured device plugin.
    for key in conf.CISCO_PLUGINS.keys():
        plugin_obj = conf.CISCO_PLUGINS[key]
        self._plugins[key] = importutils.import_object(plugin_obj)
        LOG.debug(_("Loaded device plugin %s\n"),
                  conf.CISCO_PLUGINS[key])

    # Re-export the vswitch plugin's extension aliases, if it has any.
    if ((const.VSWITCH_PLUGIN in self._plugins) and
        hasattr(self._plugins[const.VSWITCH_PLUGIN],
                "supported_extension_aliases")):
        self.supported_extension_aliases.extend(
            self._plugins[const.VSWITCH_PLUGIN].
            supported_extension_aliases)

    # At this point, all the database models should have been loaded. It's
    # possible that configure_db() may have been called by one of the
    # plugins loaded in above. Otherwise, this call is to make sure that
    # the database is initialized
    db_api.configure_db()

    # Initialize credential store after database initialization
    cred.Store.initialize()
    LOG.debug(_("%(module)s.%(name)s init done"),
              {'module': __name__,
               'name': self.__class__.__name__})
def initialize():
    """Set up the plugin database using the [DATABASE] options."""
    options = dict(
        sql_connection="%s" % cfg.CONF.DATABASE.sql_connection,
        sql_max_retries=cfg.CONF.DATABASE.sql_max_retries,
        reconnect_interval=cfg.CONF.DATABASE.reconnect_interval,
        base=models_v2.model_base.BASEV2,
    )
    db.configure_db(options)
def __init__(self):
    """Read MidoNet config, connect the API client and look up routers."""
    # Read config values
    midonet_conf = cfg.CONF.MIDONET
    midonet_uri = midonet_conf.midonet_uri
    admin_user = midonet_conf.username
    admin_pass = midonet_conf.password
    admin_project_id = midonet_conf.project_id
    provider_router_id = midonet_conf.provider_router_id
    metadata_router_id = midonet_conf.metadata_router_id
    mode = midonet_conf.mode

    self.mido_api = api.MidonetApi(midonet_uri, admin_user, admin_pass,
                                   project_id=admin_project_id)
    self.client = midonet_lib.MidoClient(self.mido_api)

    if provider_router_id and metadata_router_id:
        # get MidoNet provider router and metadata router
        self.provider_router = self.client.get_router(provider_router_id)
        self.metadata_router = self.client.get_router(metadata_router_id)
    else:
        # At least one id is missing.  (The original used an 'elif' whose
        # condition was exactly the negation of the 'if' above, i.e. a
        # plain 'else'.)  In dev mode this is tolerated; otherwise abort.
        if mode == 'dev':
            msg = _('No provider router and metadata device ids found. '
                    'But skipping because running in dev env.')
            LOG.debug(msg)
        else:
            msg = _('provider_router_id and metadata_router_id '
                    'should be configured in the plugin config file')
            LOG.exception(msg)
            raise MidonetPluginException(msg=msg)

    db.configure_db()
def setUp(self):
    """Point the DB at in-memory sqlite, create the schema, build IPAM."""
    super(QuarkIpamBaseTest, self).setUp()
    cfg.CONF.set_override("sql_connection", "sqlite://", "DATABASE")
    quantum_db_api.configure_db()
    # Create all model tables on the freshly configured engine.
    engine = quantum_db_api._ENGINE
    models.BASEV2.metadata.create_all(engine)
    self.ipam = quark.ipam.QuarkIpam()
def __init__(self, configfile=None):
    """Resolve the config file, configure the DB, and preload VLANs."""
    if configfile is None:
        configfile = CONF_FILE if os.path.exists(CONF_FILE) else \
            find_config(os.path.abspath(os.path.dirname(__file__)))
    if configfile is None:
        raise Exception("Configuration file \"%s\" doesn't exist" %
                        (configfile))
    LOG.debug("Using configuration file: %s" % configfile)
    conf = config.parse(configfile)
    db.configure_db({
        "sql_connection": conf.DATABASE.sql_connection,
        "reconnect_interval": conf.DATABASE.reconnect_interval,
    })
    self.vmap = VlanMap()
    # Seed the map with VLANs already recorded in the database.
    for vlan_id, network_id in ovs_db.get_vlans():
        LOG.debug("Adding already populated vlan %s -> %s" %
                  (vlan_id, network_id))
        self.vmap.already_used(vlan_id, network_id)
def __init__(self, loglevel=None):
    """Parse NVP config, build an API client per cluster, set up DB/RPC."""
    if loglevel:
        # Propagate the requested level to the helper modules' loggers.
        logging.basicConfig(level=loglevel)
        nvplib.LOG.setLevel(loglevel)
        NvpApiClient.LOG.setLevel(loglevel)

    self.db_opts, self.nvp_opts, self.clusters_opts = parse_config()
    self.clusters = []
    for c_opts in self.clusters_opts:
        # Password is guaranteed to be the same across all controllers
        # in the same NVP cluster.
        cluster = NVPCluster(c_opts['name'])
        for controller_connection in c_opts['nvp_controller_connection']:
            args = controller_connection.split(':')
            try:
                args.extend([
                    c_opts['default_tz_uuid'],
                    c_opts['nvp_cluster_uuid'],
                    c_opts['nova_zone_id']
                ])
                cluster.add_controller(*args)
            except Exception:
                LOG.exception(
                    "Invalid connection parameters for "
                    "controller %s in cluster %s",
                    controller_connection,
                    c_opts['name'])
                raise

        api_providers = [(x['ip'], x['port'], True)
                         for x in cluster.controllers]
        cluster.api_client = NvpApiClient.NVPApiHelper(
            api_providers, cluster.user, cluster.password,
            request_timeout=cluster.request_timeout,
            http_timeout=cluster.http_timeout,
            retries=cluster.retries,
            redirects=cluster.redirects,
            failover_time=self.nvp_opts['failover_time'],
            concurrent_connections=self.nvp_opts['concurrent_connections'])

        # TODO(salvatore-orlando): do login at first request,
        # not when plugin, is instantiated
        cluster.api_client.login()

        # TODO(pjb): What if the cluster isn't reachable this
        # instant?  It isn't good to fall back to invalid cluster
        # strings.
        # Default for future-versions
        self.clusters.append(cluster)

    # Connect and configure ovs_quantum db
    options = {
        'sql_connection': self.db_opts['sql_connection'],
        'sql_max_retries': self.db_opts['sql_max_retries'],
        'reconnect_interval': self.db_opts['reconnect_interval'],
        'base': models_v2.model_base.BASEV2,
    }
    db.configure_db(options)
    self.setup_rpc()
def initialize():
    """Establish the plugin database connection."""
    options = {"sql_connection": "%s" % config.DATABASE.sql_connection}
    options["sql_max_retries"] = config.DATABASE.sql_max_retries
    options["reconnect_interval"] = config.DATABASE.reconnect_interval
    options["base"] = model_base.BASEV2
    db.configure_db(options)
def __init__(self):
    # NOTE(jkoelker) Incomplete base implementation: subclasses must
    #                override __init__, configure their own database, and
    #                never call into this __init__.  The in-memory sqlite
    #                connection below exists purely for the tests.
    db.configure_db({'sql_connection': 'sqlite:///:memory:',
                     'base': models_v2.model_base.BASEV2})
def initialize():
    """Establish the database connection and load the network models."""
    db.configure_db({
        'sql_connection': "mysql://%s:%s@%s/%s" % (conf.DB_USER,
                                                   conf.DB_PASS,
                                                   conf.DB_HOST,
                                                   conf.DB_NAME),
        'base': network_models_v2.model_base.BASEV2,
    })
def setUp(self):
    """Configure the test database and the mox/stubout helpers."""
    self.config = utils.get_config()
    db.configure_db(
        {"sql_connection": self.config.get("DATABASE", "sql_connection")})
    self.mox = mox.Mox()
    self.stubs = stubout.StubOutForTesting()
def initialize(base=None):
    """Configure the DB (optionally with a custom base) and seed VLAN ids."""
    global L2_MODEL
    options = {
        "sql_connection": "%s" % CONF.DATABASE.sql_connection,
        "reconnect_interval": CONF.DATABASE.reconnect_interval,
    }
    if base:
        # A caller-supplied declarative base also selects the L2 models.
        options["base"] = base
        L2_MODEL = l2network_models_v2
    db.configure_db(options)
    create_vlanids()
def __init__(self, loglevel=None):
    """Build per-cluster NVP API clients, then configure the DB and RPC."""
    if loglevel:
        # Propagate the requested level to the helper modules' loggers.
        logging.basicConfig(level=loglevel)
        nvplib.LOG.setLevel(loglevel)
        NvpApiClient.LOG.setLevel(loglevel)
    self.db_opts, self.nvp_opts, self.clusters_opts = parse_config()
    self.clusters = []
    for c_opts in self.clusters_opts:
        # Password is guaranteed to be the same across all controllers
        # in the same NVP cluster.
        cluster = NVPCluster(c_opts["name"])
        for controller_connection in c_opts["nvp_controller_connection"]:
            args = controller_connection.split(":")
            try:
                args.extend([c_opts["default_tz_uuid"],
                             c_opts["nvp_cluster_uuid"],
                             c_opts["nova_zone_id"]])
                cluster.add_controller(*args)
            except Exception:
                LOG.exception(
                    "Invalid connection parameters for "
                    "controller %s in cluster %s",
                    controller_connection,
                    c_opts["name"],
                )
                raise
        api_providers = [(x["ip"], x["port"], True)
                         for x in cluster.controllers]
        cluster.api_client = NvpApiClient.NVPApiHelper(
            api_providers,
            cluster.user,
            cluster.password,
            request_timeout=cluster.request_timeout,
            http_timeout=cluster.http_timeout,
            retries=cluster.retries,
            redirects=cluster.redirects,
            failover_time=self.nvp_opts["failover_time"],
            concurrent_connections=self.nvp_opts["concurrent_connections"],
        )
        # TODO(salvatore-orlando): do login at first request,
        # not when plugin, is instantiated
        cluster.api_client.login()
        # TODO(pjb): What if the cluster isn't reachable this
        # instant?  It isn't good to fall back to invalid cluster
        # strings.
        # Default for future-versions
        self.clusters.append(cluster)
    # Connect and configure ovs_quantum db
    options = {
        "sql_connection": self.db_opts["sql_connection"],
        "sql_max_retries": self.db_opts["sql_max_retries"],
        "reconnect_interval": self.db_opts["reconnect_interval"],
        "base": models_v2.model_base.BASEV2,
    }
    db.configure_db(options)
    self.setup_rpc()
def new_nexus_init(self):
    """Replacement Nexus plugin __init__ used by the tests."""
    self._client = importutils.import_object(NEXUS_DRIVER)
    self._nexus_ip = NEXUS_IP_ADDRESS
    self._nexus_username = NEXUS_USERNAME
    self._nexus_password = NEXUS_PASSWORD
    self._nexus_ports = NEXUS_PORTS
    self._nexus_ssh_port = NEXUS_SSH_PORT
    # Credential store: one username/password pair keyed by switch ip.
    self.credentials = {self._nexus_ip: dict(username=self._nexus_username,
                                             password=self._nexus_password)}
    db.configure_db()
def __init__(self, configfile=None):
    """Initialize the DB and the proxied quantum client."""
    db.configure_db()
    proxy = cfg.CONF.PROXY
    self.quantum = client.Client(
        username=proxy.admin_user,
        password=proxy.admin_password,
        tenant_name=proxy.admin_tenant_name,
        auth_url=proxy.auth_url,
        auth_strategy=proxy.auth_strategy,
        auth_region=proxy.auth_region)
def test_warn_when_no_connection(self):
    """configure_db() without a connection must log exactly one warning
    that mentions 'sql_connection'.
    """
    with mock.patch.object(db, 'register_models') as mock_register:
        mock_register.return_value = False
        with mock.patch.object(db.LOG, 'warn') as mock_log:
            mock_log.return_value = False
            db.configure_db()
            self.assertEqual(mock_log.call_count, 1)
            # BUG FIX: mock_log.call_args is a call tuple, which has no
            # .find() — inspect the first positional argument (the
            # warning message) instead.
            msg = mock_log.call_args[0][0]
            self.assertNotEqual(msg.find('sql_connection'), -1)
def setUp(self):
    """Configure the DB (with the v2 model base) and mocking helpers."""
    self.config = utils.get_config()
    options = {
        "sql_connection": self.config.get("DATABASE", "sql_connection"),
        'base': models_v2.model_base.BASEV2,
    }
    db.configure_db(options)
    self.mox = mox.Mox()
    self.stubs = stubout.StubOutForTesting()
def __init__(self, configfile=None):
    """Parse CONF_FILE, configure the DB, and preload used VLANs."""
    conf = config.parse(CONF_FILE)
    db.configure_db({
        "sql_connection": conf.DATABASE.sql_connection,
        'base': models_v2.model_base.BASEV2,
        "reconnect_interval": conf.DATABASE.reconnect_interval,
    })
    self.vmap = VlanMap(conf.OVS.vlan_min, conf.OVS.vlan_max)
    self.vmap.populate_already_used(ovs_db_v2.get_vlans())
def initialize():
    """Configure the plugin database, including SQL pool settings."""
    options = {"sql_connection": "%s" % config.DATABASE.sql_connection}
    options["sql_max_retries"] = config.DATABASE.sql_max_retries
    options["reconnect_interval"] = config.DATABASE.reconnect_interval
    options["base"] = model_base.BASEV2
    # NOTE: the original mixes config.DATABASE and config.CONF.DATABASE
    # accesses for the pool settings; preserved as-is.
    options["sql_min_pool_size"] = config.CONF.DATABASE.sql_min_pool_size
    options["sql_max_pool_size"] = config.CONF.DATABASE.sql_max_pool_size
    options["sql_idle_timeout"] = config.CONF.DATABASE.sql_idle_timeout
    options["sql_dbpool_enable"] = config.CONF.DATABASE.sql_dbpool_enable
    db.configure_db(options)
def initialize(base=None):
    """Configure the plugin DB (optionally overriding the model base)
    and pre-create the VLAN id pool.
    """
    # NOTE: unlike the sibling initialize() variant, this one never
    # assigns L2_MODEL, so the original 'global L2_MODEL' statement was
    # dead and has been removed.
    options = {"sql_connection": "%s" % cfg.CONF.DATABASE.sql_connection}
    options.update({"sql_max_retries": cfg.CONF.DATABASE.sql_max_retries})
    options.update({"reconnect_interval":
                    cfg.CONF.DATABASE.reconnect_interval})
    if base:
        options.update({"base": base})
    db.configure_db(options)
    create_vlanids()
def __init__(self, configfile=None):
    """Set up the DB and the authenticated quantum proxy client."""
    db.configure_db()
    conf = cfg.CONF.PROXY
    self.quantum = client.Client(
        username=conf.admin_user,
        password=conf.admin_password,
        tenant_name=conf.admin_tenant_name,
        auth_url=conf.auth_url,
        auth_strategy=conf.auth_strategy,
        auth_region=conf.auth_region,
    )
def __init__(self, configfile=None):
    """Parse CONF_FILE, connect the DB, and preload the VLAN map."""
    conf = config.parse(CONF_FILE)
    db.configure_db({
        "sql_connection": conf.DATABASE.sql_connection,
        "reconnect_interval": conf.DATABASE.reconnect_interval,
    })
    self.vmap = VlanMap(conf.OVS.vlan_min, conf.OVS.vlan_max)
    # Seed the map with VLANs already present in the database.
    self.vmap.populate_already_used(ovs_db.get_vlans())
def __init__(self, configfile=None):
    """Configure tunneling and the DB, then refresh the VLAN id pool."""
    self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
    database = cfg.CONF.DATABASE
    db.configure_db({
        "sql_connection": database.sql_connection,
        'base': models_v2.model_base.BASEV2,
        "sql_max_retries": database.sql_max_retries,
        "reconnect_interval": database.reconnect_interval,
    })
    # update the vlan_id table based on current configuration
    ovs_db_v2.update_vlan_id_pool()
def setUp(self):
    """Configure a file-backed sqlite DB and register the OFP servers."""
    db.configure_db({
        "sql_connection": 'sqlite:///quantum.test.db',
        'base': models_v2.model_base.BASEV2,
        "reconnect_interval": cfg.CONF.DATABASE.reconnect_interval,
    })
    self.hosts = [
        (cfg.CONF.OVS.openflow_controller, ofp_service_type.CONTROLLER),
        (cfg.CONF.OVS.openflow_rest_api, ofp_service_type.REST_API),
    ]
    db_api_v2.set_ofp_servers(self.hosts)
def __init__(self, configfile=None):
    """Configure tunneling and the DB, then preload the VLAN map."""
    self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
    database = cfg.CONF.DATABASE
    db.configure_db({
        "sql_connection": database.sql_connection,
        'base': models_v2.model_base.BASEV2,
        "sql_max_retries": database.sql_max_retries,
        "reconnect_interval": database.reconnect_interval,
    })
    self.vmap = VlanMap(cfg.CONF.OVS.vlan_min, cfg.CONF.OVS.vlan_max)
    self.vmap.populate_already_used(ovs_db_v2.get_vlans())
def __init__(self, configfile=None):
    """Configure the DB and preload VLANs already in use."""
    database = cfg.CONF.DATABASE
    db.configure_db({
        "sql_connection": database.sql_connection,
        "sql_max_retries": database.sql_max_retries,
        "reconnect_interval": database.reconnect_interval,
    })
    self.vmap = VlanMap(cfg.CONF.OVS.vlan_min, cfg.CONF.OVS.vlan_max)
    # Seed the map with anything already present in the database.
    self.vmap.populate_already_used(ovs_db.get_vlans())
def setUp(self):
    """Configure an in-memory DB and register the OFP servers."""
    db.configure_db({
        "sql_connection": "sqlite:///:memory:",
        "base": models_v2.model_base.BASEV2,
        "reconnect_interval": cfg.CONF.DATABASE.reconnect_interval,
    })
    self.hosts = [
        (cfg.CONF.OVS.openflow_controller, ofp_service_type.CONTROLLER),
        (cfg.CONF.OVS.openflow_rest_api, ofp_service_type.REST_API),
    ]
    db_api_v2.set_ofp_servers(self.hosts)
def __init__(self, conf_file, mod_file, configfile=None):
    """Base OVS plugin init: configure the DB; subclasses set the driver."""
    super(OVSQuantumPluginBase, self).__init__()
    database = cfg.CONF.DATABASE
    db.configure_db({
        "sql_connection": database.sql_connection,
        "sql_max_retries": database.sql_max_retries,
        "reconnect_interval": database.reconnect_interval,
    })
    self.conf = cfg.CONF
    # Subclass must set self.driver to its own OVSQuantumPluginDriverBase
    self.driver = None
def __init__(self, configfile=None):
    """Load every configured sub-plugin (core and L3) and build the
    flavor -> plugin dispatch tables.
    """
    LOG.debug(_("Start initializing metaplugin"))
    self.supported_extension_aliases = \
        cfg.CONF.META.supported_extension_aliases.split(",")
    self.supported_extension_aliases += ["flavor", "router"]

    # Ignore config option overlapping: sub-plugins may register the
    # same options, so turn duplicate registration into a no-op.
    def _is_opt_registered(opts, opt):
        return opt.dest in opts

    cfg._is_opt_registered = _is_opt_registered

    # Keep existing tables if multiple plugin use same table name.
    db.model_base.QuantumBase.__table_args__ = {"keep_existing": True}

    self.plugins = {}
    plugin_list = [plugin_set.split(":")
                   for plugin_set in cfg.CONF.META.plugin_list.split(",")]
    for flavor, plugin_provider in plugin_list:
        self.plugins[flavor] = self._load_plugin(plugin_provider)
        # Needed to clear _ENGINE for each plugin
        db._ENGINE = None

    self.l3_plugins = {}
    l3_plugin_list = [
        plugin_set.split(":")
        for plugin_set in cfg.CONF.META.l3_plugin_list.split(",")]
    for flavor, plugin_provider in l3_plugin_list:
        if flavor in self.plugins:
            self.l3_plugins[flavor] = self.plugins[flavor]
        else:
            # For l3 only plugin
            self.l3_plugins[flavor] = self._load_plugin(plugin_provider)
            db._ENGINE = None

    # Idiom fix: 'x not in y' instead of 'not x in y'.
    self.default_flavor = cfg.CONF.META.default_flavor
    if self.default_flavor not in self.plugins:
        raise exc.Invalid(_("default_flavor %s is not plugin list") %
                          self.default_flavor)

    self.default_l3_flavor = cfg.CONF.META.default_l3_flavor
    if self.default_l3_flavor not in self.l3_plugins:
        raise exc.Invalid(_("default_l3_flavor %s is not plugin list") %
                          self.default_l3_flavor)

    db.configure_db()

    # Map method names to the flavor that should handle them.
    self.extension_map = {}
    if cfg.CONF.META.extension_map != "":
        extension_list = [
            method_set.split(":")
            for method_set in cfg.CONF.META.extension_map.split(",")]
        for method_name, flavor in extension_list:
            self.extension_map[method_name] = flavor
    # (dropped a redundant trailing re-assignment of self.default_flavor
    # that simply repeated the value set above)
def setUp(self):
    """Build a metaplugin backed by in-memory sqlite and a mocked client."""
    super(MetaQuantumPluginV2Test, self).setUp()
    # Reset module-level DB state so each test starts with a fresh engine.
    db._ENGINE = None
    db._MAKER = None
    self.fake_tenant_id = str(uuid.uuid4())
    self.context = context.get_admin_context()
    sql_connection = 'sqlite:///:memory:'
    options = {"sql_connection": sql_connection}
    options.update({'base': models_v2.model_base.BASEV2})
    db.configure_db(options)
    self.mox = mox.Mox()
    self.stubs = stubout.StubOutForTesting()
    # NOTE(review): 'args' is built but the config.parse below is
    # commented out, so it is currently unused — confirm whether both
    # lines can be removed.
    args = ['--config-file', etcdir('quantum.conf.test')]
    #config.parse(args=args)
    # Update the plugin
    cfg.CONF.set_override('auth_url', 'http://localhost:35357/v2.0',
                          'PROXY')
    cfg.CONF.set_override('auth_region', 'RegionOne', 'PROXY')
    cfg.CONF.set_override('admin_user', 'quantum', 'PROXY')
    cfg.CONF.set_override('admin_password', 'password', 'PROXY')
    cfg.CONF.set_override('admin_tenant_name', 'service', 'PROXY')
    cfg.CONF.set_override('plugin_list', PLUGIN_LIST, 'META')
    cfg.CONF.set_override('l3_plugin_list', L3_PLUGIN_LIST, 'META')
    cfg.CONF.set_override('default_flavor', 'fake2', 'META')
    cfg.CONF.set_override('default_l3_flavor', 'fake1', 'META')
    cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
    #TODO(nati) remove this after subnet quota change is merged
    cfg.CONF.max_dns_nameservers = 10
    # Patch the quantum client class so no real API calls are made.
    self.client_cls_p = mock.patch('quantumclient.v2_0.client.Client')
    client_cls = self.client_cls_p.start()
    self.client_inst = mock.Mock()
    client_cls.return_value = self.client_inst
    # Stub every CRUD call with a canned response.
    self.client_inst.create_network.return_value = \
        {'id': 'fake_id'}
    self.client_inst.create_port.return_value = \
        {'id': 'fake_id'}
    self.client_inst.create_subnet.return_value = \
        {'id': 'fake_id'}
    self.client_inst.update_network.return_value = \
        {'id': 'fake_id'}
    self.client_inst.update_port.return_value = \
        {'id': 'fake_id'}
    self.client_inst.update_subnet.return_value = \
        {'id': 'fake_id'}
    self.client_inst.delete_network.return_value = True
    self.client_inst.delete_port.return_value = True
    self.client_inst.delete_subnet.return_value = True
    self.plugin = MetaPluginV2(configfile=None)
def new_nexus_init(self):
    """Replacement Nexus device-plugin __init__ used by the tests."""
    self._client = importutils.import_object(NEXUS_DRIVER)
    self._nexus_ip = NEXUS_IP_ADDRESS
    self._nexus_username = NEXUS_USERNAME
    self._nexus_password = NEXUS_PASSWORD
    self._nexus_ports = NEXUS_PORTS
    self._nexus_ssh_port = NEXUS_SSH_PORT
    # One credential record, keyed by the switch ip address.
    creds = {'username': self._nexus_username,
             'password': self._nexus_password}
    self.credentials = {self._nexus_ip: creds}
    db.configure_db()
def __init__(self, configfile=None):
    """Configure the DB, then build the proxied quantum client."""
    database = cfg.CONF.DATABASE
    db.configure_db({
        "sql_connection": database.sql_connection,
        'base': models_v2.model_base.BASEV2,
        "sql_max_retries": database.sql_max_retries,
        "reconnect_interval": database.reconnect_interval,
    })
    proxy = cfg.CONF.PROXY
    self.quantum = client.Client(
        username=proxy.admin_user,
        password=proxy.admin_password,
        tenant_name=proxy.admin_tenant_name,
        auth_url=proxy.auth_url,
        auth_strategy=proxy.auth_strategy,
        auth_region=proxy.auth_region)
def __init__(self): LOG.info('QuantumRestProxy: Starting plugin. Version=%s' % version_string_with_vcs()) # init DB, proxy's persistent store defaults to in-memory sql-lite DB options = { "sql_connection": "%s" % cfg.CONF.DATABASE.sql_connection, "sql_max_retries": cfg.CONF.DATABASE.sql_max_retries, "reconnect_interval": cfg.CONF.DATABASE.reconnect_interval, "base": models_v2.model_base.BASEV2 } db.configure_db(options) # 'servers' is the list of network controller REST end-points # (used in order specified till one suceeds, and it is sticky # till next failure). Use 'serverauth' to encode api-key servers = cfg.CONF.RESTPROXY.servers serverauth = cfg.CONF.RESTPROXY.serverauth serverssl = cfg.CONF.RESTPROXY.serverssl syncdata = cfg.CONF.RESTPROXY.syncdata timeout = cfg.CONF.RESTPROXY.servertimeout # validate config assert servers is not None, 'Servers not defined. Aborting plugin' servers = tuple(s.rsplit(':', 1) for s in servers.split(',')) servers = tuple((server, int(port)) for server, port in servers) assert all(len(s) == 2 for s in servers), SYNTAX_ERROR_MESSAGE # init network ctrl connections self.servers = ServerPool(servers, serverssl, serverauth, timeout) # init dhcp support self.topic = topics.PLUGIN self.rpc_context = glbcontext.RequestContext('quantum', 'quantum', is_admin=False) self.conn = rpc.create_connection(new=True) self.callbacks = RpcProxy(self.rpc_context) self.dispatcher = self.callbacks.create_rpc_dispatcher() self.conn.create_consumer(self.topic, self.dispatcher, fanout=False) # Consume from all consumers in a thread self.conn.consume_in_thread() if syncdata: self._send_all_data() LOG.debug("QuantumRestProxyV2: initialization done")
def __init__(self, configfile=None):
    """Initialize the Ryu plugin: DB, tunnel-key allocator, OpenFlow
    REST clients, reserved networks, and RPC.
    """
    db.configure_db()

    # Tunnel keys are allocated out of the configured [min, max] range.
    self.tunnel_key = db_api_v2.TunnelKey(cfg.CONF.OVS.tunnel_key_min,
                                          cfg.CONF.OVS.tunnel_key_max)

    self.ofp_api_host = cfg.CONF.OVS.openflow_rest_api
    if not self.ofp_api_host:
        raise q_exc.Invalid(_('Invalid configuration. check ryu.ini'))

    # One REST client per Ryu-side API surface.
    api_host = self.ofp_api_host
    self.client = client.OFPClient(api_host)
    self.tun_client = client.TunnelClient(api_host)
    self.iface_client = client.QuantumIfaceClient(api_host)

    # Register every reserved network except the "unknown" placeholder.
    for nw in rest_nw_id.RESERVED_NETWORK_IDS:
        if nw == rest_nw_id.NW_ID_UNKNOWN:
            continue
        self.client.update_network(nw)

    self._setup_rpc()

    # register known all network list on startup
    self._create_all_tenant_network()
def __init__(self):
    """Initialize Brocade Plugin.

    Specify switch address and db configuration.
    """
    self.supported_extension_aliases = ["binding", "security-group",
                                        "agent", "agent_scheduler"]
    self.physical_interface = (
        cfg.CONF.PHYSICAL_INTERFACE.physical_interface)

    db.configure_db()

    # Admin context bound to a fresh DB session; the VLAN bitmap uses
    # it to track allocated VLAN ids.
    admin_ctx = context.get_admin_context()
    admin_ctx.session = db.get_session()
    self.ctxt = admin_ctx
    self._vlan_bitmap = vbm.VlanBitmap(admin_ctx)

    self._setup_rpc()

    # Network/router schedulers are pluggable via config.
    self.network_scheduler = importutils.import_object(
        cfg.CONF.network_scheduler_driver)
    self.router_scheduler = importutils.import_object(
        cfg.CONF.router_scheduler_driver)

    self.brocade_init()
def __init__(self, conf_file, mod_file, configfile=None):
    """Locate and parse the plugin config file, configure the DB, and
    leave self.driver for the subclass to provide.
    """
    super(OVSQuantumPluginBase, self).__init__()

    # Resolve the config file: explicit argument, then conf_file if it
    # exists on disk, then a search next to this module.
    if configfile is None:
        configfile = (conf_file if os.path.exists(conf_file)
                      else find_config(
                          os.path.abspath(os.path.dirname(__file__))))
    if configfile is None:
        raise Exception("Configuration file \"%s\" doesn't exist" %
                        (configfile))
    LOG.debug("Using configuration file: %s" % configfile)

    conf = config.parse(configfile)
    db.configure_db({
        "sql_connection": conf.DATABASE.sql_connection,
        "reconnect_interval": conf.DATABASE.reconnect_interval,
    })
    self.conf = conf

    # Subclass must set self.driver to its own OVSQuantumPluginDriverBase
    self.driver = None
def __init__(self):
    """Start the (legacy) REST-proxy plugin.

    Reads configuration (the RESTPROXY_CONFIG env var overrides the
    packaged ini), validates the controller endpoints, and opens the
    DB / nova / network-controller connections.

    - proxy's persistent store defaults to an in-memory sqlite DB
    - nova can be configured to read its DB directly using 'novadb' or
      to query the nova server using 'novaapi' as REST end-points with
      'novauth' for encoding a user:pass to access it as admin
      (novaapi is not currently supported)
    - 'servers' is the list of network controller REST end-points
      (used in order specified till one succeeds, and it is sticky
      till next failure). Use 'serverauth' to encode api-key

    Raises:
        AssertionError: if nova access or 'servers' config is invalid.
    """
    config_file = os.environ.get('RESTPROXY_CONFIG')
    if config_file:
        self.conf = ConfigProxy(None, config_file, 'restproxy')
    else:
        self.conf = ConfigProxy('quantum/plugins/restproxy',
                                'restproxy.ini', 'restproxy')
    self.setup_logging()
    LOG.info('QuantumRestProxy: Starting plugin. Version=%s' % version())

    # read config
    proxydb = (self.conf.get('proxydb') or
               'mysql://root@localhost/restproxy')
    novadb = (self.conf.get('novadb') or
              'mysql://root@localhost/nova')
    # BUG FIX: 'novaauth' was referenced in the validation below without
    # ever being defined — a NameError whenever novadb was falsy. Read
    # it from the config key the header comment describes ('novauth' —
    # verify the key name against deployed config files).
    novaauth = self.conf.get('novauth')
    servers = self.conf.get('servers')
    serverauth = self.conf.get('serverauth')
    serverssl = self.conf.get_bool('serverssl')

    # validate config
    assert novadb or novaauth, 'Nova must be accessible from plugin'
    assert servers is not None, 'Servers not defined. Aborting plugin'
    servers = tuple(s.split(':') for s in servers.split(','))
    # BUG FIX: validate the host:port syntax BEFORE unpacking and int()
    # conversion, so a malformed entry aborts with the message below
    # rather than a tuple-unpacking error or ValueError.
    assert all(len(s) == 2 for s in servers), \
        'Syntax error in servers in config file. Aborting plugin'
    servers = tuple((server, int(port)) for server, port in servers)

    # init DB, nova and network ctrl connections
    db.configure_db({'sql_connection': proxydb})
    self.nova = NovaDbProxy(novadb)
    self.servers = ServerPool(servers, serverssl, serverauth)
def __init__(self, conf_file, mod_file, configfile=None):
    """Parse the plugin ini with ConfigParser, configure the DB, and
    leave self.driver for the subclass to provide.
    """
    super(OVSQuantumPluginBase, self).__init__()
    parser = ConfigParser.ConfigParser()

    # Resolve the config file: explicit argument, then conf_file if it
    # exists on disk, then a search next to the caller's module file.
    if configfile is None:
        if conf_file and os.path.exists(conf_file):
            configfile = conf_file
        else:
            configfile = (
                find_config(os.path.abspath(os.path.dirname(mod_file))))
    if configfile is None:
        raise Exception("Configuration file \"%s\" doesn't exist" %
                        (configfile))

    LOG.debug("Using configuration file: %s", configfile)
    parser.read(configfile)
    LOG.debug("Config: %s", parser)

    db.configure_db(
        {"sql_connection": parser.get("DATABASE", "sql_connection")})
    self.config = parser

    # Subclass must set self.driver to its own OVSQuantumPluginDriverBase
    self.driver = None
def __init__(self):
    """Wire up the MidoNet API client, resolve the provider/metadata
    routers (tolerating their absence in dev mode), and init the DB.
    """
    # Read config values
    mn_conf = cfg.CONF.MIDONET
    uri = mn_conf.midonet_uri
    user = mn_conf.username
    passwd = mn_conf.password
    project = mn_conf.project_id
    provider_id = mn_conf.provider_router_id
    metadata_id = mn_conf.metadata_router_id
    run_mode = mn_conf.mode

    self.mido_api = api.MidonetApi(uri, user, passwd,
                                   project_id=project)

    # get MidoNet provider router and metadata router
    if provider_id and metadata_id:
        self.provider_router = self.mido_api.get_router(provider_id)
        self.metadata_router = self.mido_api.get_router(metadata_id)
    elif run_mode == 'dev':
        # for dev purpose only: missing router ids are tolerated
        msg = _('No provider router and metadata device ids found. '
                'But skipping because running in dev env.')
        LOG.debug(msg)
    else:
        msg = _('provider_router_id and metadata_router_id '
                'should be configured in the plugin config file')
        LOG.exception(msg)
        raise MidonetPluginException(msg=msg)

    # Managers share the single API handle created above.
    self.chain_manager = midonet_lib.ChainManager(self.mido_api)
    self.pg_manager = midonet_lib.PortGroupManager(self.mido_api)
    self.rule_manager = midonet_lib.RuleManager(self.mido_api)

    db.configure_db()
def __init__(self, configfile=None):
    """Configure the DB, register the Ryu OpenFlow controller/REST
    endpoints, and set up the external network plus RPC.
    """
    # Assemble every DB option in a single literal rather than
    # incremental dict.update() calls.
    options = {
        "sql_connection": cfg.CONF.DATABASE.sql_connection,
        "base": models_v2.model_base.BASEV2,
        "reconnect_interval": cfg.CONF.DATABASE.reconnect_interval,
    }
    db.configure_db(options)

    controller_host = cfg.CONF.OVS.openflow_controller
    rest_api_host = cfg.CONF.OVS.openflow_rest_api
    if controller_host is None or rest_api_host is None:
        raise q_exc.Invalid("invalid configuration. check ryu.ini")

    # Record both Ryu service endpoints in the DB for the agents.
    db_api_v2.set_ofp_servers([
        (controller_host, ofp_service_type.CONTROLLER),
        (rest_api_host, ofp_service_type.REST_API),
    ])

    self.client = client.OFPClient(rest_api_host)
    self.client.update_network(rest_nw_id.NW_ID_EXTERNAL)

    self._setup_rpc()

    # register known all network list on startup
    self._create_all_tenant_network()