def test_call_ok_auth_uri_ks_authtoken(self):
    """EC2Token passes through when auth_uri comes from [keystone_authtoken]."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    dummy_url = 'http://123:5000/v2.0'
    try:
        # Newer keystonemiddleware renamed the option; try the new name
        # first and fall back to the legacy 'auth_uri'.
        cfg.CONF.set_override('www_authenticate_uri', dummy_url,
                              group='keystone_authtoken')
    except cfg.NoSuchOptError:
        cfg.CONF.set_override('auth_uri', dummy_url,
                              group='keystone_authtoken')
    ec2 = ec2token.EC2Token(app='woot', conf={})
    params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
    req_env = {'SERVER_NAME': 'heat',
               'SERVER_PORT': '8000',
               'PATH_INFO': '/v1'}
    dummy_req = self._dummy_GET_request(params, req_env)
    ok_resp = json.dumps({'token': {
        'project': {'name': 'tenant', 'id': 'abcd1234'}}})
    self._stub_http_connection(response=ok_resp,
                               params={'AWSAccessKeyId': 'foo'})
    # On successful verification the middleware returns the wrapped app.
    self.assertEqual('woot', ec2.__call__(dummy_req))
    requests.post.assert_called_with(
        self.verify_req_url, data=self.verify_data,
        verify=self.verify_verify, cert=self.verify_cert,
        headers=self.verify_req_headers)
def _create_auth_plugin(self):
    """Return a keystoneauth v3 plugin for the stored credentials.

    Credential precedence: trust-scoped admin password, pre-validated
    token info, bare token, then plain username/password.

    :raises exception.AuthorizationFailure: when none of the credential
        sources is usable.
    """
    if self.trust_id:
        # Import auth_token to have keystone_authtoken settings setup.
        importutils.import_module('keystonemiddleware.auth_token')
        username = cfg.CONF.keystone_authtoken.admin_user
        password = cfg.CONF.keystone_authtoken.admin_password
        return v3.Password(username=username,
                           password=password,
                           user_domain_id='default',
                           auth_url=self._keystone_v3_endpoint,
                           trust_id=self.trust_id)
    if self.auth_token_info:
        # Reuse the already-validated token body instead of contacting
        # keystone again.
        auth_ref = access.AccessInfo.factory(body=self.auth_token_info,
                                             auth_token=self.auth_token)
        return _AccessInfoPlugin(self._keystone_v3_endpoint, auth_ref)
    if self.auth_token:
        # FIXME(jamielennox): This is broken but consistent. If you
        # only have a token but don't load a service catalog then
        # url_for wont work. Stub with the keystone endpoint so at
        # least it might be right.
        return token_endpoint.Token(endpoint=self._keystone_v3_endpoint,
                                    token=self.auth_token)
    if self.password:
        return v3.Password(username=self.username,
                           password=self.password,
                           project_id=self.tenant_id,
                           user_domain_id='default',
                           auth_url=self._keystone_v3_endpoint)
    LOG.error(_LE("Keystone v3 API connection failed, no password "
                  "trust or auth_token!"))
    raise exception.AuthorizationFailure()
def before(self, state):
    """Build the request context from identity headers set upstream."""
    hdrs = state.request.headers
    # X-Auth-Token wins; X-Storage-Token is the fallback.
    token = hdrs.get('X-Auth-Token', hdrs.get('X-Storage-Token'))
    token_info = state.request.environ.get('keystone.token_info')
    auth_url = hdrs.get('X-Auth-Url')
    if auth_url is None:
        # Import auth_token to have keystone_authtoken settings setup.
        importutils.import_module('keystonemiddleware.auth_token')
        auth_url = cfg.CONF.keystone_authtoken.auth_uri
    state.request.context = context.make_context(
        auth_token=token,
        auth_url=auth_url,
        auth_token_info=token_info,
        user_name=hdrs.get('X-User'),
        user_id=hdrs.get('X-User-Id'),
        project_name=hdrs.get('X-Project-Name'),
        project_id=hdrs.get('X-Project-Id'),
        domain_id=hdrs.get('X-User-Domain-Id'),
        domain_name=hdrs.get('X-User-Domain-Name'))
def __init__(self, context):
    """Set up keystone client state for *context*.

    If a trust_id is specified in the context, we immediately
    authenticate so we can populate the context with a trust token;
    otherwise, we delay client authentication until needed to avoid
    unnecessary calls to keystone.

    Note that when you obtain a token using a trust, it cannot be
    used to reauthenticate and get another token, so we have to
    get a new trust-token even if context.auth_token is set.

    - context.auth_url is expected to contain a versioned keystone
      path; we will work with either a v2.0 or v3 path.
    """
    self.context = context
    self._client = None
    self._admin_client = None

    if self.context.auth_url:
        # Normalize whatever version the context carries to a v3 path.
        self.v3_endpoint = self.context.auth_url.replace('v2.0', 'v3')
    else:
        # Import auth_token to have keystone_authtoken settings setup.
        importutils.import_module('keystonemiddleware.auth_token')
        self.v3_endpoint = cfg.CONF.keystone_authtoken.auth_uri.replace(
            'v2.0', 'v3')

    if self.context.trust_id:
        # Create a client with the specified trust_id, this
        # populates self.context.auth_token with a trust-scoped token
        self._client = self._v3_client_init()
def validate_type_driver(source_type_driver):
    """Ensure the given type-driver module path is importable.

    :param source_type_driver: dotted module path of the driver
    :raises exception.SourceTypeDriverNotFound: if the import fails
    """
    try:
        importutils.import_module(source_type_driver)
    except ImportError as err:
        # BaseException.message was removed in Python 3; str(err) is
        # equivalent and works on both Python 2 and 3.
        raise exception.SourceTypeDriverNotFound(
            type_driver=source_type_driver, message=str(err))
def list_opts():
    """Enumerate cells configuration options for oslo.config generators.

    Returns (group, options) pairs; the contributing modules are loaded
    lazily via importutils.
    """
    return [
        ('cells',
         itertools.chain(
             cells_opts,
             importutils.import_module(
                 "nova.cells.manager").cell_manager_opts,
             importutils.import_module(
                 "nova.cells.messaging").cell_messaging_opts,
             importutils.import_module(
                 "nova.cells.rpc_driver").cell_rpc_driver_opts,
             importutils.import_module(
                 "nova.cells.scheduler").cell_scheduler_opts,
             importutils.import_module(
                 "nova.cells.state").cell_state_manager_opts,
             importutils.import_module(
                 "nova.cells.weights.mute_child").mute_weigher_opts,
             importutils.import_module(
                 "nova.cells.weights.ram_by_instance_type").ram_weigher_opts,
             importutils.import_module(
                 "nova.cells.weights.weight_offset").weigher_opts
         )),
        ('upgrade_levels',
         itertools.chain(
             # Single options are wrapped in lists so chain() can merge
             # them with the other iterables.
             [importutils.import_module(
                 "nova.cells.rpc_driver").rpcapi_cap_opt],
             [importutils.import_module(
                 "nova.cells.rpcapi").rpcapi_cap_opt],
         )),
    ]
def auth_region_name(self):
    """Return the auth region, falling back to context then service config."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    region = cfg.CONF.keystone_authtoken.region_name
    if region:
        return region
    return (self.context.region_name or
            cfg.CONF.region_name_for_services)
def __init__(self, virtapi, read_only=False):
    """Initialize the Ironic virt driver.

    :param virtapi: nova VirtAPI instance
    :param read_only: accepted for driver-interface compatibility
    """
    super(IronicDriver, self).__init__(virtapi)
    global ironic
    if ironic is None:
        # ironicclient is imported lazily, on first instantiation.
        ironic = importutils.import_module('ironicclient')
        # NOTE(deva): work around a lack of symbols in the current version.
        if not hasattr(ironic, 'exc'):
            ironic.exc = importutils.import_module('ironicclient.exc')
        if not hasattr(ironic, 'client'):
            ironic.client = importutils.import_module(
                'ironicclient.client')
    self.firewall_driver = firewall.load_driver(
        default='nova.virt.firewall.NoopFirewallDriver')
    # Cache of ironic node records and the time it was last refreshed.
    self.node_cache = {}
    self.node_cache_time = 0
    # TODO(mrda): Bug ID 1365230 Logging configurability needs
    # to be addressed
    ironicclient_log_level = CONF.ironic.client_log_level
    if ironicclient_log_level:
        level = py_logging.getLevelName(ironicclient_log_level)
        logger = py_logging.getLogger('ironicclient')
        logger.setLevel(level)
    self.ironicclient = client_wrapper.IronicClientWrapper()
def _validate_keystone_opts(self, args):
    """Resolve keystone credentials from CLI args or the murano config.

    :param args: parsed argparse namespace with optional os_* attributes
    :returns: dict with auth_url, username, password and project_name
    """
    # Maps credential keys to their option names in [keystone_authtoken].
    ks_opts_to_config = {
        'auth_url': 'auth_uri',
        'username': '******',
        'password': '******',
        'project_name': 'admin_tenant_name'}
    ks_opts = {'auth_url': getattr(args, 'os_auth_url', None),
               'username': getattr(args, 'os_username', None),
               'password': getattr(args, 'os_password', None),
               'project_name': getattr(args, 'os_project_name', None)}
    if None in ks_opts.values() and not CONF.default_config_files:
        # Neither full CLI credentials nor a config file to fall back on.
        msg = _LE('Please provide murano config file or credentials for '
                  'authorization: {0}').format(
            ', '.join(['--os-auth-url', '--os-username', '--os-password',
                       '--os-project-name', '--os-tenant-id']))
        LOG.error(msg)
        self.error(msg)
    # Load keystone configuration parameters from config
    importutils.import_module('keystonemiddleware.auth_token')
    for param, value in six.iteritems(ks_opts):
        if not value:
            # Fill missing CLI values from [keystone_authtoken].
            ks_opts[param] = getattr(CONF.keystone_authtoken,
                                     ks_opts_to_config[param])
        if param == 'auth_url':
            # Normalize v2.0 auth URLs to v3.
            ks_opts[param] = ks_opts[param].replace('v2.0', 'v3')
    return ks_opts
def _keystone_v3_endpoint(self):
    """Return the keystone endpoint rewritten to its v3 form."""
    if self.auth_url:
        uri = self.auth_url
    else:
        # Import auth_token to have keystone_authtoken settings setup.
        importutils.import_module('keystonemiddleware.auth_token')
        uri = cfg.CONF.keystone_authtoken.auth_uri
    return uri.replace('v2.0', 'v3')
def test_os_params_replaces_config(self, mock_client):
    """CLI-supplied credentials must take precedence over config values."""
    # Load keystone configuration parameters from config
    importutils.import_module('keystonemiddleware.auth_token')
    self.override_config('admin_user', 'new_value', 'keystone_authtoken')
    self.shell('-p io.murano.test.MyTest1 io.murano.test.MyTest2')
    # The client must be built from self.auth_params, not from the
    # overridden config value.
    mock_client.assert_has_calls([mock.call(**self.auth_params)])
def _get_keystone_settings():
    """Collect admin keystone credentials from [keystone_authtoken]."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    ks = cfg.CONF.keystone_authtoken
    return {
        'endpoint': ks.identity_uri,
        'auth_url': ks.auth_uri,
        'username': ks.admin_user,
        'password': ks.admin_password,
        'project_name': ks.admin_tenant_name,
    }
def _get_auth_url(self):
    """Return auth_uri from this object's conf, else from keystone_authtoken."""
    if 'auth_uri' in self.conf:
        return self.conf['auth_uri']
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    return cfg.CONF.keystone_authtoken.auth_uri
def _get_credentials(self):
    """Assemble keystone credential kwargs from [keystone_authtoken]."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    ks = cfg.CONF.keystone_authtoken
    return {
        'username': ks.username,
        'tenant_name': ks.project_name,
        'password': ks.password,
        'auth_url': ks.auth_uri,
    }
def _mk_test_dp(self, name):
    """Build a mock ryu datapath wired with the OpenFlow 1.3 modules."""
    datapath = mock.Mock()
    datapath.ofproto = importutils.import_module(
        'ryu.ofproto.ofproto_v1_3')
    datapath.ofproto_parser = importutils.import_module(
        'ryu.ofproto.ofproto_v1_3_parser')
    # Make repr(dp) show the given name in test failure output.
    datapath.__repr__ = mock.Mock(return_value=name)
    return datapath
def __call__(self, target, creds, enforcer):
    """Check ownership of *target* against *creds*.

    When the target lacks the checked field, look it up from the parent
    resource through the core plugin using an admin context.
    """
    if self.target_field not in target:
        # policy needs a plugin check
        # target field is in the form resource:field
        # however if they're not separated by a colon, use an underscore
        # as a separator for backward compatibility

        def do_split(separator):
            parent_res, parent_field = self.target_field.split(separator, 1)
            return parent_res, parent_field

        for separator in (":", "_"):
            try:
                parent_res, parent_field = do_split(separator)
                break
            except ValueError:
                LOG.debug("Unable to find ':' as separator in %s.",
                          self.target_field)
        else:
            # If we are here split failed with both separators
            err_reason = (_("Unable to find resource name in %s") %
                          self.target_field)
            LOG.error(err_reason)
            raise exceptions.PolicyCheckError(
                policy="%s:%s" % (self.kind, self.match),
                reason=err_reason)
        parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
            "%ss" % parent_res, None)
        if not parent_foreign_key:
            err_reason = _("Unable to verify match:%(match)s as the "
                           "parent resource: %(res)s was not found") % {
                "match": self.match, "res": parent_res}
            LOG.error(err_reason)
            raise exceptions.PolicyCheckError(
                policy="%s:%s" % (self.kind, self.match),
                reason=err_reason)
        # NOTE(salv-orlando): This check currently assumes the parent
        # resource is handled by the core plugin. It might be worth
        # having a way to map resources to plugins so to make this
        # check more general
        # NOTE(ihrachys): if import is put in global, circular
        # import failure occurs
        manager = importutils.import_module("neutron.manager")
        f = getattr(manager.NeutronManager.get_instance().plugin,
                    "get_%s" % parent_res)
        # f *must* exist, if not found it is better to let neutron
        # explode. Check will be performed with admin context
        context = importutils.import_module("neutron.context")
        try:
            data = f(context.get_admin_context(),
                     target[parent_foreign_key],
                     fields=[parent_field])
            target[self.target_field] = data[parent_field]
        except exceptions.NotFound as e:
            # NOTE(kevinbenton): a NotFound exception can occur if a
            # list operation is happening at the same time as one of
            # the parents and its children being deleted. So we issue
            # a RetryRequest so the API will redo the lookup and the
            # problem items will be gone.
            raise db_exc.RetryRequest(e)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Policy check error while calling %s!"), f)
    match = self.match % target
    if self.kind in creds:
        return match == six.text_type(creds[self.kind])
    return False
def _conf_get_auth_uri(self):
    """Return the configured auth_uri, falling back to [keystone_authtoken]."""
    uri = self._conf_get('auth_uri')
    if not uri:
        # Import auth_token to have keystone_authtoken settings setup.
        # We can use the auth_uri from the keystone_authtoken section
        importutils.import_module('keystonemiddleware.auth_token')
        uri = cfg.CONF.keystone_authtoken['auth_uri']
    return uri
def _get_nova_v2_credentials(self):
    """Build nova v2 client credential kwargs from [keystone_authtoken]."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    ks = cfg.CONF.keystone_authtoken
    return {
        'username': ks.username,
        'project_id': ks.project_name,
        'api_key': ks.password,
        'auth_url': ks.auth_uri,
        'version': 2,
    }
def _service_admin_creds(self):
    """Admin credentials for the service user against the v3 endpoint."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    ks = cfg.CONF.keystone_authtoken
    return {
        'username': ks.admin_user,
        'password': ks.admin_password,
        'auth_url': self.v3_endpoint,
        'endpoint': self.v3_endpoint,
        'project_name': ks.admin_tenant_name,
    }
def admin_auth(self):
    """Lazily construct and cache a v3 password plugin for the admin user."""
    if not self._admin_auth:
        # Import auth_token to have keystone_authtoken settings setup.
        importutils.import_module('keystonemiddleware.auth_token')
        ks = cfg.CONF.keystone_authtoken
        self._admin_auth = kc_auth_v3.Password(
            username=ks.admin_user,
            password=ks.admin_password,
            user_domain_id='default',
            auth_url=self.v3_endpoint)
    return self._admin_auth
def _service_admin_creds(self):
    """Service-user credentials in the Default domain."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    ks = cfg.CONF.keystone_authtoken
    return {
        'username': ks.username,
        'password': ks.password,
        'auth_url': self.endpoint,
        'project_name': ks.project_name,
        'user_domain_name': "Default",
        'project_domain_name': "Default",
    }
def setUp(self):
    """Prepare ryu configuration and import the agent module under test."""
    super(OFAAgentTestBase, self).setUp()
    ryu_cfg = importutils.import_module('ryu.cfg')
    # NOTE(review): ryu's global CONF is replaced with a fresh ConfigOpts
    # before registering its CLI options — presumably to isolate it from
    # our own CONF; confirm.
    ryu_cfg.CONF = cfg.ConfigOpts()
    ryu_cfg.CONF.register_cli_opts([
        cfg.StrOpt('ofp-listen-host', default='',
                   help='openflow listen host'),
        cfg.IntOpt('ofp-tcp-listen-port', default=6633,
                   help='openflow tcp listen port')
    ])
    self.mod_agent = importutils.import_module(self._AGENT_NAME)
    self.ryuapp = mock.Mock()
def test_setup_default_table(self):
    """setup_default_table must install the expected default flow set."""
    br = self.br
    with mock.patch.object(br, '_send_msg') as sendmsg:
        br.setup_default_table()
    (dp, ofp, ofpp) = br._get_dp()
    arp = importutils.import_module('ryu.lib.packet.arp')
    ether = importutils.import_module('ryu.ofproto.ether')
    call = mock.call
    expected_calls = [
        # All tables are flushed first.
        call(ofpp.OFPFlowMod(dp, command=ofp.OFPFC_DELETE,
                             match=ofpp.OFPMatch(),
                             out_group=ofp.OFPG_ANY,
                             out_port=ofp.OFPP_ANY,
                             priority=0, table_id=ofp.OFPTT_ALL)),
        call(ofpp.OFPFlowMod(dp, priority=0, table_id=0)),
        call(ofpp.OFPFlowMod(dp, priority=0, table_id=1)),
        call(ofpp.OFPFlowMod(dp, priority=0, table_id=2)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=7)],
            priority=0, table_id=3)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=5)],
            priority=0, table_id=4)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=6)],
            priority=0, table_id=5)),
        # ARP requests are sent to the controller at priority 1.
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionActions(
                ofp.OFPIT_APPLY_ACTIONS,
                [ofpp.OFPActionOutput(ofp.OFPP_CONTROLLER)])],
            match=ofpp.OFPMatch(arp_op=arp.ARP_REQUEST,
                                eth_type=ether.ETH_TYPE_ARP),
            priority=1, table_id=6)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=7)],
            priority=0, table_id=6)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=8)],
            priority=0, table_id=7)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=9)],
            priority=0, table_id=8)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=10)],
            priority=0, table_id=9)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=11)],
            priority=0, table_id=10)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=12)],
            priority=0, table_id=11)),
        call(ofpp.OFPFlowMod(dp, instructions=[
            ofpp.OFPInstructionGotoTable(table_id=13)],
            priority=0, table_id=12)),
        call(ofpp.OFPFlowMod(dp, priority=0, table_id=13)),
    ]
    sendmsg.assert_has_calls(expected_calls, any_order=True)
def _service_admin_creds():
    """Admin credentials for the service user from [keystone_authtoken]."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    ks = cfg.CONF.keystone_authtoken
    return {
        'username': ks.admin_user,
        'password': ks.admin_password,
        'auth_url': ks.auth_uri,
        'tenant_name': ks.admin_tenant_name,
    }
def _service_admin_creds(self):
    """Admin credentials for the service user against the v3 endpoint.

    :returns: dict of keystone auth plugin kwargs
    """
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module("keystonemiddleware.auth_token")
    creds = {
        "username": cfg.CONF.keystone_authtoken.admin_user,
        "password": cfg.CONF.keystone_authtoken.admin_password,
        "auth_url": self.v3_endpoint,
        "endpoint": self.v3_endpoint,
        "project_name": cfg.CONF.keystone_authtoken.admin_tenant_name,
    }
    # SECURITY: never log the whole creds dict -- it contains the admin
    # password.  Log only non-secret fields, using lazy formatting.
    LOG.info(_LI("admin creds: user=%(user)s auth_url=%(url)s"),
             {'user': creds["username"], 'url': creds["auth_url"]})
    return creds
def get_auth_uri(v3=True):
    """Return the keystone auth URI.

    Look for the keystone auth_uri in the configuration. First we
    check the [clients_keystone] section, and if it is not set we
    look in [keystone_authtoken].

    :param v3: when True, rewrite a v2.0 URI taken from
        [keystone_authtoken] to its v3 form.  The [clients_keystone]
        path always resolves the v3 endpoint via discovery.
    """
    if cfg.CONF.clients_keystone.auth_uri:
        # Use keystone version discovery to resolve the v3 URL.
        discover = ks_discover.Discover(
            auth_url=cfg.CONF.clients_keystone.auth_uri)
        return discover.url_for('3.0')
    else:
        # Import auth_token to have keystone_authtoken settings setup.
        importutils.import_module('keystonemiddleware.auth_token')
        auth_uri = cfg.CONF.keystone_authtoken.auth_uri
        # NOTE(review): with v3=False (or an unset auth_uri) the value is
        # returned untouched, even if it points at v2.0.
        return auth_uri.replace('v2.0', 'v3') if auth_uri and v3 else auth_uri
def test_keystone_v3_endpoint_in_keystone_authtoken_config(self):
    """Ensure that the [keystone_authtoken] section of the configuration
    is used when the auth_uri is not defined in the context or the
    [clients_keystone] section.
    """
    importutils.import_module('keystonemiddleware.auth_token')
    cfg.CONF.set_override('auth_uri', 'http://abc/v2.0',
                          group='keystone_authtoken')
    policy_check = 'heat.common.policy.Enforcer.check_is_admin'
    with mock.patch(policy_check) as pc:
        pc.return_value = False
        ctx = context.RequestContext(auth_url=None)
        # The v2.0 URI from config must be rewritten to its v3 form.
        self.assertEqual(ctx.keystone_v3_endpoint, 'http://abc/v3')
def run_task(self):
    """Create a test entry under service credentials and run it."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    username = cfg.CONF.keystone_authtoken.username
    tenant_name = cfg.CONF.keystone_authtoken.project_name
    # password = cfg.CONF.keystone_authtoken.password
    auth_url = cfg.CONF.keystone_authtoken.auth_uri
    context = cloudpulse_context.make_context(
        auth_url=auth_url,
        user=username,
        project=tenant_name)
    new_test = self.create_task_entry(context)
    test_manager.run(test=new_test)
def _get_keystone_admin_parameters(scoped):
    """Return keystone admin auth kwargs, optionally project-scoped."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    ks = cfg.CONF.keystone_authtoken
    params = {
        'auth_url': ks.auth_uri.replace('v2.0', 'v3'),
        'username': ks.admin_user,
        'password': ks.admin_password,
        'user_domain_name': "Default",
    }
    if scoped:
        params['project_name'] = ks.admin_tenant_name
        params['project_domain_name'] = "Default"
    return params
def _get_keystone_settings():
    """Collect admin keystone settings, including SSL options."""
    # Import auth_token to have keystone_authtoken settings setup.
    importutils.import_module('keystonemiddleware.auth_token')
    authtoken = cfg.CONF.keystone_authtoken
    keystone = cfg.CONF.keystone
    return {
        'auth_url': authtoken.auth_uri.replace('v2.0', 'v3'),
        'username': authtoken.admin_user,
        'password': authtoken.admin_password,
        'project_name': authtoken.admin_tenant_name,
        'ssl': {
            'cacert': keystone.ca_file,
            'insecure': keystone.insecure,
            'cert': keystone.cert_file,
            'key': keystone.key_file,
        },
    }
def _sync_extension_repo(extension, version):
    """Migrate the named contrib extension's DB repo to *version*.

    :param extension: contrib extension name
    :param version: target migration version
    """
    init_version = 0
    try:
        package_name = '.'.join((contrib.__name__, extension))
        package = importutils.import_module(package_name)
    except ImportError:
        raise ImportError(_("%s extension does not exist.") % package_name)
    try:
        abs_path = find_migrate_repo(package)
        try:
            migration.db_version_control(sql.get_engine(), abs_path)
        # Register the repo with the version control API
        # If it already knows about the repo, it will throw
        # an exception that we can safely ignore
        except exceptions.DatabaseAlreadyControlledError:
            pass
    except exception.MigrationNotProvided as e:
        print(e)
        sys.exit(1)
    migration.db_sync(sql.get_engine(), abs_path, version=version,
                      init_version=init_version)
def _import_modules_from_package():
    """Yield every driver module found under os_faults/drivers.

    Modules are imported on first sight and cached in sys.modules so a
    second walk reuses the already-imported objects.
    """
    pkg_dir = os.path.dirname(os_faults.__file__)
    lib_root = os.path.normpath(os.path.join(pkg_dir, os.pardir))
    for root, dirs, files in os.walk(os.path.join(pkg_dir, 'drivers')):
        for fname in files:
            # Skip dunder files, tests and anything that is not python.
            if (fname.startswith(('__', 'test')) or
                    not fname.endswith('.py')):
                continue
            rel_path = os.path.relpath(os.path.join(root, fname), lib_root)
            dotted = '.'.join(os.path.splitext(rel_path)[0].split(os.sep))
            if dotted in sys.modules:
                module = sys.modules[dotted]
            else:
                module = importutils.import_module(dotted)
                sys.modules[dotted] = module
            yield module
def __init__(self, image, partition=None):
    """Create a new local VFS instance

    :param image: instance of nova.virt.image.model.Image
    :param partition: the partition number of access
    """
    super(VFSGuestFS, self).__init__(image, partition)
    global guestfs
    if guestfs is None:
        # guestfs is imported on first use; a failed import is reported
        # as a NovaException rather than crashing at module load.
        try:
            guestfs = importutils.import_module('guestfs')
        except Exception as e:
            raise exception.NovaException(
                _("libguestfs is not installed (%s)") % e)
    self.handle = None
    self.mount = False
    # PF9 : support for IP address injection in CentOS VMs
    self.gos_type = None
    self.gos_distro = None
    self.gos_major_version = None
def __init__(self, uri, read_only=False, conn_event_handler=None,
             lifecycle_event_handler=None):
    """Set up (but do not open) the libvirt connection wrapper.

    :param uri: libvirt connection URI
    :param read_only: open the connection read-only when True
    :param conn_event_handler: callback queued via
        _conn_event_handler_queue; presumably invoked on connection
        events -- confirm against callers
    :param lifecycle_event_handler: callback for domain lifecycle
        events -- confirm against callers
    """
    global libvirt
    if libvirt is None:
        # libvirt is imported lazily, on first instantiation.
        libvirt = importutils.import_module('libvirt')
    self._uri = uri
    self._read_only = read_only
    self._initial_connection = True
    self._conn_event_handler = conn_event_handler
    self._conn_event_handler_queue = six.moves.queue.Queue()
    self._lifecycle_event_handler = lifecycle_event_handler
    self._caps = None
    self._domain_caps = None
    self._hostname = None
    self._wrapped_conn = None
    self._wrapped_conn_lock = threading.Lock()
    self._event_queue = None
    self._events_delayed = {}
    # Note(toabctl): During a reboot of a domain, STOPPED and
    #                STARTED events are sent. To prevent shutting
    #                down the domain during a reboot, delay the
    #                STOPPED lifecycle event some seconds.
    self._lifecycle_delay = 15
    self._initialized = False
    # AMD SEV is conditional on support in the hardware, kernel,
    # qemu, and libvirt. This is determined on demand and
    # memoized by the supports_amd_sev property below.
    self._supports_amd_sev = None
def setup_bridge_mock(self, name, cls):
    """Instantiate bridge *cls* with ryu protocol modules and mock plumbing."""
    self.br = cls(name)
    self.dp = mock.Mock()
    self.ofp = importutils.import_module(self._OFP_MODULE)
    self.ofpp = importutils.import_module(self._OFPP_MODULE)
    self.arp = importutils.import_module(self._ARP_MODULE)
    self.ether_types = importutils.import_module(self._ETHER_TYPES_MODULE)
    self.icmpv6 = importutils.import_module(self._ICMPV6_MODULE)
    self.in_proto = importutils.import_module(self._IN_PROTO_MODULE)
    # _get_dp on the bridge always returns this fixture's fake datapath.
    mock.patch.object(self.br, '_get_dp', autospec=True,
                      return_value=self._get_dp()).start()
    mock__send_msg = mock.patch.object(self.br, '_send_msg').start()
    mock_delete_flows = mock.patch.object(self.br, 'delete_flows').start()
    # Attach both mocks to one parent so tests can assert their relative
    # call order through self.mock.mock_calls.
    self.mock = mock.Mock()
    self.mock.attach_mock(mock__send_msg, '_send_msg')
    self.mock.attach_mock(mock_delete_flows, 'delete_flows')
def monkey_patch():
    """Apply eventlet monkey patching with platform-specific workarounds."""
    # NOTE(slaweq): to workaround issue with import cycles in
    # eventlet < 0.22.0;
    # This issue is fixed in eventlet with patch
    # https://github.com/eventlet/eventlet/commit/b756447bab51046dfc6f1e0e299cc997ab343701
    # For details please check https://bugs.launchpad.net/neutron/+bug/1745013
    hub = eventlet.hubs.get_hub()
    hub.is_available = lambda: True
    if os.name != 'nt':
        eventlet.monkey_patch()
        p_c_e = importutils.import_module('pyroute2.config.asyncio')
        p_c_e.asyncio_config()
    else:
        # eventlet monkey patching the os module causes subprocess.Popen to
        # fail on Windows when using pipes due to missing non-blocking IO
        # support.
        eventlet.monkey_patch(os=False)
    # Monkey patch the original current_thread to use the up-to-date _active
    # global variable. See https://bugs.launchpad.net/bugs/1863021 and
    # https://github.com/eventlet/eventlet/issues/592
    import __original_module_threading as orig_threading
    import threading  # noqa
    orig_threading.current_thread.__globals__['_active'] = threading._active
def driver_module(driver):
    """Import and return the client module registered for *driver*."""
    return utils.import_module(_client_modules[driver])
# License for the specific language governing permissions and limitations # under the License. import socket import cffi import mock from neutron.tests import base from oslo_utils import importutils import testtools # mock for dlopen cffi.FFI = mock.Mock() cffi.FFI.dlopen = mock.Mock(return_value=mock.Mock()) lib_log = importutils.import_module( 'neutron_fwaas.privileged.netfilter_log.libnetfilter_log' ) class NFLogAppTestCase(base.BaseTestCase): def setUp(self): self.nflog_app = lib_log.NFLogApp() self.spawn = mock.patch('eventlet.spawn').start() super(NFLogAppTestCase, self).setUp() def test_register_packet_handler(self): def fake_method(): pass self.nflog_app.register_packet_handler(fake_method)
def __init__(self, **kwargs):
    """Load the DB driver named in kwargs, defaulting to CONF.db_driver."""
    driver_name = kwargs.get('db_driver') or CONF.db_driver
    self.db = importutils.import_module(driver_name)
def __init__(self, hashtype='SHA256'):
    """Wrap a PyCrypto hash module selected by name.

    max_okm_length follows the RFC 5869 limit of 255 * hash length.
    """
    self.hashfn = importutils.import_module('Crypto.Hash.%s' % hashtype)
    self.max_okm_length = 255 * self.hashfn.digest_size
def __call__(self, target, creds, enforcer):
    """Check ownership of *target* against *creds*.

    When the target lacks the checked field, look it up from the parent
    resource through the core plugin using an admin context.
    """
    if self.target_field not in target:
        # policy needs a plugin check
        # target field is in the form resource:field
        # however if they're not separated by a colon, use an underscore
        # as a separator for backward compatibility

        def do_split(separator):
            parent_res, parent_field = self.target_field.split(
                separator, 1)
            return parent_res, parent_field

        for separator in (':', '_'):
            try:
                parent_res, parent_field = do_split(separator)
                break
            except ValueError:
                LOG.debug("Unable to find ':' as separator in %s.",
                          self.target_field)
        else:
            # If we are here split failed with both separators
            err_reason = ("Unable to find resource name in %s" %
                          self.target_field)
            LOG.error(err_reason)
            raise exceptions.PolicyCheckError(
                policy="%s:%s" % (self.kind, self.match),
                reason=err_reason)
        parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
            "%ss" % parent_res, None)
        if not parent_foreign_key:
            err_reason = ("Unable to verify match:%(match)s as the "
                          "parent resource: %(res)s was not found" % {
                              'match': self.match, 'res': parent_res})
            LOG.error(err_reason)
            raise exceptions.PolicyCheckError(
                policy="%s:%s" % (self.kind, self.match),
                reason=err_reason)
        # NOTE(salv-orlando): This check currently assumes the parent
        # resource is handled by the core plugin. It might be worth
        # having a way to map resources to plugins so to make this
        # check more general
        # NOTE(ihrachys): if import is put in global, circular
        # import failure occurs
        manager = importutils.import_module('tacker.manager')
        f = getattr(manager.TackerManager.get_instance().plugin,
                    'get_%s' % parent_res)
        # f *must* exist, if not found it is better to let tacker
        # explode. Check will be performed with admin context
        context = importutils.import_module('tacker.context')
        try:
            data = f(context.get_admin_context(),
                     target[parent_foreign_key],
                     fields=[parent_field])
            target[self.target_field] = data[parent_field]
        except exceptions.NotFound as e:
            # NOTE(kevinbenton): a NotFound exception can occur if a
            # list operation is happening at the same time as one of
            # the parents and its children being deleted. So we issue
            # a RetryRequest so the API will redo the lookup and the
            # problem items will be gone.
            raise db_exc.RetryRequest(e)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception('Policy check error while calling %s!', f)
    match = self.match % target
    if self.kind in creds:
        return match == six.text_type(creds[self.kind])
    return False
def get_api():
    """Import the configured data API module, running its optional hook."""
    api = importutils.import_module(CONF.data_api)
    # Some API modules expose a configure() hook; call it when present.
    try:
        configure = api.configure
    except AttributeError:
        pass
    else:
        configure()
    return api
def _import_resources():
    """Import the RPC callbacks resources module lazily and return it."""
    module = importutils.import_module('neutron.api.rpc.callbacks.resources')
    return module
def setUp(self):
    """Create the OpenFlow switch under test with a mock datapath."""
    super(TestOFAgentFlows, self).setUp()
    self.mod = importutils.import_module(self._MOD)
    switch = self.mod.OpenFlowSwitch()
    switch.set_dp(self._mk_test_dp("dp"))
    self.br = switch
def get_connection_params(driver_path):
    """Import the driver module and return its connection parameter dict."""
    driver = importutils.import_module(driver_path)
    return driver.get_connection_params_dict()
def ensure_c460c5682e74_migration():
    """Apply migration c460c5682e74 when nfp_port_infos lacks project_id."""
    if migration.schema_has_column('nfp_port_infos', 'project_id'):
        return
    importutils.import_module(DB_c460c5682e74).upgrade()
def setUp(self):
    """Wire up a fake InStorage driver with a simulated replication target."""
    super(InStorageMCSReplicationTestCase, self).setUp()

    def _run_ssh_aux(cmd, check_exit_code=True, attempts=1):
        # Route 'lssystem' to the simulator's auxiliary variant so the
        # replica side reports its own system info.
        utils.check_ssh_injection(cmd)
        if len(cmd) > 2 and cmd[1] == 'lssystem':
            cmd[1] = 'lssystem_aux'
        ret = self.sim.execute_command(cmd, check_exit_code)
        return ret

    aux_connect_patcher = mock.patch(
        'cinder.volume.drivers.inspur.instorage.'
        'replication.InStorageMCSReplicationManager._run_ssh')
    self.aux_ssh_mock = aux_connect_patcher.start()
    self.addCleanup(aux_connect_patcher.stop)
    self.aux_ssh_mock.side_effect = _run_ssh_aux
    self.driver = fakes.FakeInStorageMCSISCSIDriver(
        configuration=conf.Configuration(None))
    # Replication target the driver is configured with (see _def_flags).
    self.rep_target = {"backend_id": "mcs_aux_target_1",
                       "san_ip": "192.168.10.22",
                       "san_login": "******",
                       "san_password": "******",
                       "pool_name": fakes.get_test_pool()}
    # NOTE(review): presumably an unconfigured target for negative
    # tests -- it is not listed in replication_device; confirm.
    self.fake_target = {"backend_id": "mcs_id_target",
                        "san_ip": "192.168.10.23",
                        "san_login": "******",
                        "san_password": "******",
                        "pool_name": fakes.get_test_pool()}
    self._def_flags = {'san_ip': '192.168.10.21',
                       'san_login': '******',
                       'san_password': '******',
                       'instorage_mcs_volpool_name': fakes.MCS_POOLS,
                       'replication_device': [self.rep_target]}
    wwpns = ['1234567890123451', '6543210987654326']
    initiator = 'test.initiator.%s' % 123451
    self._connector = {'ip': '1.234.56.78',
                       'host': 'instorage-mcs-test',
                       'wwpns': wwpns,
                       'initiator': initiator}
    self.sim = fakes.FakeInStorage(fakes.MCS_POOLS)
    self.driver.set_fake_storage(self.sim)
    self.ctxt = context.get_admin_context()
    self._reset_flags()
    self.ctxt = context.get_admin_context()
    db_driver = self.driver.configuration.db_driver
    self.db = importutils.import_module(db_driver)
    self.driver.db = self.db
    self.driver.do_setup(None)
    self.driver.check_for_setup_error()
    self._create_test_volume_types()
    # Avoid real sleeps during retry loops in the driver.
    self.mock_object(greenthread, 'sleep')
def ensure_da6a25bbcfa8_migration():
    """Apply migration da6a25bbcfa8 when gpm_qos_policy_mappings is absent."""
    if migration.schema_has_table('gpm_qos_policy_mappings'):
        return
    importutils.import_module(DB_da6a25bbcfa8).upgrade()
def setUp(self):
    """Register plugin config options, then import the agent module."""
    # NOTE(review): register_config() runs before the agent import --
    # presumably the module reads these options at import time; confirm.
    pl_config.register_config()
    super(BaseAgentTestCase, self).setUp()
    self.mod_agent = importutils.import_module(AGENTMOD)
def test_import_module(self): dt = importutils.import_module('datetime') self.assertEqual(sys.modules['datetime'], dt)
def __init__(self, db_driver=None):
    """Load the DB driver module (defaults to CONF.db_driver)."""
    super(Base, self).__init__()
    driver_name = db_driver or CONF.db_driver
    self.db = importutils.import_module(driver_name)
def ensure_bff1774e749e_migration():
    """Apply migration bff1774e749e when status_details is missing."""
    if migration.schema_has_column(
            'ncp_node_instance_network_function_mappings',
            'status_details'):
        return
    importutils.import_module(DB_bff1774e749e).upgrade()
def setUp(self): super(TestSockets, self).setUp() # http patch must not be running or it will mangle the servermanager # import where the https connection classes are defined self.httpPatch.stop() self.sm = importutils.import_module(SERVERMANAGER)
# Copyright 2015 - Huawei Technologies Co. Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import importutils # NOTE(xylan): import modules for WorkflowHandler subclasses iteration importutils.import_module('mistral.workflow.direct_workflow') importutils.import_module('mistral.workflow.reverse_workflow')
POWER_RESET=mock.sentinel.POWER_RESET, MOUNT_CD=mock.sentinel.MOUNT_CD, UNMOUNT_CD=mock.sentinel.UNMOUNT_CD, MOUNT_FD=mock.sentinel.MOUNT_FD, UNMOUNT_FD=mock.sentinel.UNMOUNT_FD) sys.modules['scciclient.irmc.elcm'] = mock.MagicMock( spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC) # if anything has loaded the iRMC driver yet, reload it now that the # external library has been mocked if 'ironic.drivers.modules.irmc' in sys.modules: importlib.reload(sys.modules['ironic.drivers.modules.irmc']) # install mock object to prevent the irmc-virtual-media boot interface from # checking whether NFS/CIFS share file system is mounted or not. irmc_boot = importutils.import_module('ironic.drivers.modules.irmc.boot') irmc_boot.check_share_fs_mounted_orig = irmc_boot.check_share_fs_mounted class MockKwargsException(Exception): def __init__(self, *args, **kwargs): super(MockKwargsException, self).__init__(*args) self.kwargs = kwargs sushy = importutils.try_import('sushy') if not sushy: sushy = mock.MagicMock(spec_set=mock_specs.SUSHY_SPEC, BOOT_SOURCE_TARGET_PXE='Pxe', BOOT_SOURCE_TARGET_HDD='Hdd', BOOT_SOURCE_TARGET_CD='Cd',
def ensure_5239b0a50036_migration():
    """Apply migration 5239b0a50036 when gp_l2_policies lacks project_id."""
    if migration.schema_has_column('gp_l2_policies', 'project_id'):
        return
    importutils.import_module(DB_5239b0a50036).upgrade()
def _import_agents_db():
    """Import neutron.db.agents_db lazily and return the module."""
    # NOTE(review): imported at call time rather than module scope --
    # presumably to avoid an import cycle; confirm.
    module = importutils.import_module('neutron.db.agents_db')
    return module
def __init__(self, enctype='AES', hashtype='SHA256'):
    """Load the PyCrypto cipher and hash modules selected by name."""
    self.cipher = importutils.import_module('Crypto.Cipher.%s' % enctype)
    self.hashfn = importutils.import_module('Crypto.Hash.%s' % hashtype)
def import_versioned_module(version, submodule=None):
    """Import ironicclient.v<version>[.<submodule>] and return it."""
    parts = ['ironicclient.v%s' % version]
    if submodule:
        parts.append(submodule)
    return importutils.import_module('.'.join(parts))
# under the License. # # Copyright (c) 2017 Wind River Systems, Inc. # # The right to copy, distribute, modify, or otherwise make use # of this software may be licensed only pursuant to the terms # of an applicable Wind River license agreement. # """ File to store all the configurations """ from oslo_config import cfg from oslo_utils import importutils # Ensure keystonemiddleware options are imported importutils.import_module('keystonemiddleware.auth_token') global_opts = [ cfg.BoolOpt('use_default_quota_class', default=True, help='Enables or disables use of default quota class ' 'with default quota.'), cfg.IntOpt('report_interval', default=60, help='Seconds between running periodic reporting tasks.'), ] # OpenStack credentials used for Endpoint Cache # We need to register the below non-standard config # options to dcmanager engine keystone_opts = [