def test_encode_as_text(self):
    """encode_as_text must accept bytes or text and honor *encoding*."""
    cases = [
        (u'dGV4dA==', b'text', {}),
        (u'dGV4dA==', u'text', {}),
        (u'ZTrDqQ==', u'e:\xe9', {}),
        (u'ZTrp', u'e:\xe9', {'encoding': 'latin1'}),
    ]
    for expected, value, kwargs in cases:
        self.assertEqual(expected, base64.encode_as_text(value, **kwargs))
def create_auth_token(self):
    """Create the authentication token.

    Creates the authentication token to use in the authentication header
    return authentication header (Base64(username:password:random no)).

    :returns: Authentication Header
    :raises: BrocadeZoningHttpException
    """
    try:
        # Send GET request to secinfo.html to get random number
        response = self.connect(zone_constant.GET_METHOD,
                                zone_constant.SECINFO_PAGE)
        parsed_data = self.get_parsed_data(response,
                                           zone_constant.SECINFO_BEGIN,
                                           zone_constant.SECINFO_END)
        # Extract the random no from secinfo.html response
        self.random_no = self.get_nvp_value(parsed_data,
                                            zone_constant.RANDOM)
        # Form the authentication string
        auth_string = (self.switch_user + ":" + self.switch_pwd + ":" +
                       self.random_no)
        # strip() drops any trailing newline the b64 helper may append
        auth_token = base64.encode_as_text(auth_string).strip()
        auth_header = (zone_constant.AUTH_STRING +
                       auth_token)  # Build the proper header
    except Exception as e:
        # Any failure (connect, parse, encode) is wrapped uniformly.
        msg = (_("Error while creating authentication token: %s")
               % six.text_type(e))
        LOG.error(msg)
        raise exception.BrocadeZoningHttpException(reason=msg)
    return auth_header
def _base64_encode_payload(self, secret_doc):
    """Ensures secret document payload is compatible with Barbican.

    :param secret_doc: Secret document whose ``data`` is inspected.
    :returns: Tuple of (secret_type, payload) where payload may have been
        base64-encoded and secret_type set to ``'opaque'``.
    :raises UnknownSubstitutionError: If the payload cannot be
        base64-encoded.
    """
    payload = secret_doc.data
    secret_type = None
    # Explicitly list the "empty" payloads we are refusing to store.
    # We don't use ``if not payload`` because that would not encrypt
    # and store something like ``data: !!int 0``
    if payload in ('', {}, [], None):
        # There is no point in even bothering to encrypt an empty
        # body, which just leads to needless overhead, so return
        # early.
        LOG.info('Barbican does not accept empty payloads so '
                 'Deckhand will not encrypt document [%s, %s] %s.',
                 secret_doc.schema, secret_doc.layer, secret_doc.name)
        secret_doc.storage_policy = types.CLEARTEXT
    else:
        LOG.debug('Setting secret_type=opaque and '
                  'base64-encoding payload of type %s for '
                  'document [%s, %s] %s.', type(payload),
                  secret_doc.schema, secret_doc.layer, secret_doc.name)
        secret_type = 'opaque'  # nosec # not a hardcoded password
        try:
            payload = base64.encode_as_text(repr(payload))
        except Exception:
            # Fix: interpolate the type into the message.  The previous
            # code built a ``(format, arg)`` tuple which was logged and
            # passed to the exception verbatim, never formatted.
            message = ('Failed to base64-encode payload of type %s '
                       'for Barbican storage.' % type(payload))
            LOG.error(message)
            raise errors.UnknownSubstitutionError(
                src_schema=secret_doc.schema,
                src_layer=secret_doc.layer, src_name=secret_doc.name,
                schema='N/A', layer='N/A', name='N/A', details=message)
    return secret_type, payload
def _update_volume_status(self):
    """Retrieve status info from volume group."""
    LOG.debug("Updating volume status")
    self._stats = None
    lcfg = self.configuration
    backend_name = lcfg.safe_get('volume_backend_name')
    data = {
        "volume_backend_name": backend_name or self.__class__.__name__,
        "vendor_name": 'Oracle',
        "driver_version": self.VERSION,
        "storage_protocol": self.protocol,
    }
    avail, total = self.zfssa.get_project_stats(lcfg.zfssa_pool,
                                                lcfg.zfssa_project)
    # Without capacity numbers leave self._stats unset (None).
    if avail is None or total is None:
        return
    encoded_auth = base64.encode_as_text(
        '%s:%s' % (lcfg.san_login, lcfg.san_password))
    data['location_info'] = "%s:%s:%s:%s:%s:%s" % (lcfg.san_ip,
                                                   encoded_auth,
                                                   lcfg.zfssa_pool,
                                                   lcfg.zfssa_project,
                                                   lcfg.zfssa_target_group,
                                                   lcfg.zfssa_replication_ip)
    data['total_capacity_gb'] = int(total) / units.Gi
    data['free_capacity_gb'] = int(avail) / units.Gi
    data['reserved_percentage'] = 0
    data['QoS_support'] = False
    self._stats = data
def create_auth_token(self):
    """Build the Base64(username:password:random_no) auth header.

    :returns: Authentication Header
    :raises: BrocadeZoningHttpException
    """
    try:
        # secinfo.html supplies the random number for the token.
        response = self.connect(zone_constant.GET_METHOD,
                                zone_constant.SECINFO_PAGE)
        parsed_data = self.get_parsed_data(response,
                                           zone_constant.SECINFO_BEGIN,
                                           zone_constant.SECINFO_END)
        self.random_no = self.get_nvp_value(parsed_data,
                                            zone_constant.RANDOM)
        credentials = ":".join((self.switch_user, self.switch_pwd,
                                self.random_no))
        token = base64.encode_as_text(credentials).strip()
        return zone_constant.AUTH_STRING + token
    except Exception as e:
        msg = (_("Error while creating authentication token: %s")
               % six.text_type(e))
        LOG.error(msg)
        raise exception.BrocadeZoningHttpException(reason=msg)
def test_create_server_with_personality(self):
    # Boot a server with an injected file and, when validation is
    # enabled, verify the file landed with the expected contents.
    file_contents = 'This is a test file.'
    file_path = '/test.txt'
    personality = [{'path': file_path,
                    'contents': base64.encode_as_text(file_contents)}]
    password = data_utils.rand_password()
    validation_resources = self.get_test_validation_resources(
        self.os_primary)
    created_server = self.create_test_server(
        personality=personality, adminPass=password, wait_until='ACTIVE',
        validatable=True, validation_resources=validation_resources)
    # Cleanups run LIFO: the delete below fires first, then we wait for
    # the server to actually terminate.
    self.addCleanup(waiters.wait_for_server_termination,
                    self.servers_client, created_server['id'])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.servers_client.delete_server,
                    created_server['id'])
    server = self.client.show_server(created_server['id'])['server']
    if CONF.validation.run_validation:
        # SSH in and read the injected file back.
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server, validation_resources),
            self.ssh_user, password,
            validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.client)
        self.assertEqual(file_contents, linux_client.exec_command(
            'sudo cat %s' % file_path))
def _login(self):
    """Log in to the switch REST interface.

    Sends HTTP Basic credentials to the login endpoint and, on success,
    replaces the session headers with the Authorization token the switch
    returned.

    :returns: HTTP status code of the login request
    :raises BrocadeZoningRestException: if the login does not return 200
    """
    # Normalize the configured protocol to a plain http/https scheme.
    if self.protocol == fc_zone_constants.REST_HTTPS:
        self.protocol = fc_zone_constants.HTTPS
    else:
        self.protocol = fc_zone_constants.HTTP
    if self.session is None:
        self.session = requests.Session()
        # NOTE(review): pool sized to a single connection — presumably
        # the switch session is used sequentially; confirm.
        adapter = requests.adapters.HTTPAdapter(pool_connections=1,
                                                pool_maxsize=1)
        self.session.mount(self.protocol + '://', adapter)
    # Drop any newline the b64 helper may embed before header use.
    credentials = base64.encode_as_text(
        '%s:%s' % (self.sw_user, self.sw_pwd)).replace('\n', '')
    self.session.headers = {
        rest_constants.USER_AGENT: rest_constants.ZONE_DRIVER,
        rest_constants.ACCEPT: rest_constants.YANG,
        rest_constants.AUTHORIZATION: "Basic %s" % credentials
    }
    response = self.session.post(self._build_url(rest_constants.LOGIN))
    if response.status_code == 200:
        # Subsequent requests authenticate with the returned token.
        auth = response.headers.get('Authorization')
        LOG.info("REST login success, setting auth: %s", auth)
        self.session.headers = {
            rest_constants.USER_AGENT: rest_constants.ZONE_DRIVER,
            rest_constants.ACCEPT: rest_constants.YANG,
            rest_constants.CONTENT_TYPE: rest_constants.YANG,
            rest_constants.AUTHORIZATION: auth
        }
    else:
        msg = (_("REST login failed: %s") % six.text_type(response.text))
        LOG.error(msg)
        raise exception.BrocadeZoningRestException(reason=msg)
    return response.status_code
def gzip_and_b64encode(io_dict=None, file_list=None):
    """Gzip and base64 encode files and BytesIO buffers.

    :param io_dict: A dictionary whose keys are file names and whose
        values are BytesIO objects.
    :param file_list: A list of file paths.
    :returns: A gzipped and base64 encoded string.
    """
    io_dict = io_dict or {}
    file_list = file_list or []
    with io.BytesIO() as fp:
        with tarfile.open(fileobj=fp, mode='w:gz') as tar:
            # Iterate items() directly rather than re-looking up each key.
            for fname, ioobj in io_dict.items():
                tarinfo = tarfile.TarInfo(name=fname)
                # seek(0, 2) returns the end offset, i.e. the buffer size.
                tarinfo.size = ioobj.seek(0, 2)
                tarinfo.mtime = time.time()
                # Rewind so addfile() reads the whole buffer.
                ioobj.seek(0)
                tar.addfile(tarinfo, ioobj)
            for f in file_list:
                tar.add(f)
        fp.seek(0)
        return base64.encode_as_text(fp.getvalue())
def test_can_create_server_with_max_number_personality_files(self):
    # Server should be created successfully if maximum allowed number of
    # files is injected into the server during creation.
    file_contents = 'This is a test file.'
    limits = self.user_client.show_limits()['limits']
    max_file_limit = limits['absolute']['maxPersonality']
    # A quota of -1 means unlimited, so there is no boundary to probe.
    if max_file_limit == -1:
        raise self.skipException("No limit for personality files")
    person = []
    for i in range(0, int(max_file_limit)):
        # NOTE(andreaf) The cirros disk image is blank before boot
        # so we can only inject safely to /
        path = '/test' + str(i) + '.txt'
        person.append({
            'path': path,
            # Unique content per file so the check below is exact.
            'contents': base64.encode_as_text(file_contents + str(i)),
        })
    password = data_utils.rand_password()
    created_server = self.create_test_server(personality=person,
                                             adminPass=password,
                                             wait_until='ACTIVE',
                                             validatable=True)
    server = self.client.show_server(created_server['id'])['server']
    if CONF.validation.run_validation:
        # SSH in and read every injected file back.
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server),
            self.ssh_user, password,
            self.validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.client)
        for i in person:
            self.assertEqual(base64.decode_as_text(i['contents']),
                             linux_client.exec_command(
                                 'sudo cat %s' % i['path']))
def _update_volume_status(self):
    """Retrieve status info from volume group."""
    LOG.debug("Updating volume status")
    self._stats = None
    data = {}
    backend_name = self.configuration.safe_get('volume_backend_name')
    data["volume_backend_name"] = backend_name or self.__class__.__name__
    data["vendor_name"] = 'Oracle'
    data["driver_version"] = self.VERSION
    data["storage_protocol"] = self.protocol
    lcfg = self.configuration
    (avail, total) = self.zfssa.get_project_stats(lcfg.zfssa_pool,
                                                  lcfg.zfssa_project)
    # If the appliance did not report capacity, leave _stats as None.
    if avail is None or total is None:
        return
    host = lcfg.san_ip
    pool = lcfg.zfssa_pool
    project = lcfg.zfssa_project
    # NOTE(review): credentials are only base64-obfuscated, not
    # encrypted, inside location_info — presumably consumed by
    # migration/replication code; confirm against callers.
    auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password)
    auth_str = base64.encode_as_text(auth_str)
    zfssa_tgt_group = lcfg.zfssa_target_group
    repl_ip = lcfg.zfssa_replication_ip
    data['location_info'] = "%s:%s:%s:%s:%s:%s" % (host, auth_str,
                                                   pool, project,
                                                   zfssa_tgt_group,
                                                   repl_ip)
    data['total_capacity_gb'] = int(total) / units.Gi
    data['free_capacity_gb'] = int(avail) / units.Gi
    data['reserved_percentage'] = 0
    data['QoS_support'] = False
    self._stats = data
def __init__(self, server='localhost', secondary_server=None, user=None,
             password=None, port=2189, ssl=True, timeout=5000, base_uri=''):
    """REST client; requires both *user* and *password* to be supplied.

    :raises AuthenticationMissing: when either credential is absent.
    """
    self.server = server
    self.secondary_server = secondary_server
    self.port = port
    self.ssl = ssl
    self.base_uri = base_uri
    self.timeout = timeout
    if not (user and password):
        raise r_exc.AuthenticationMissing()
    encoded = base64.encode_as_text('%s:%s' % (user, password))
    # Strip any embedded newline so the value is header-safe.
    self.auth = encoded.replace('\n', '')
    LOG.debug('vDirectRESTClient:init server=%(server)s, '
              'secondary server=%(sec_server)s, '
              'port=%(port)d, '
              'ssl=%(ssl)r',
              {'server': self.server,
               'sec_server': self.secondary_server,
               'port': self.port,
               'ssl': self.ssl})
def _get_auth_header(task):
    """Based on setup of configuration mold storage gets authentication header

    :param task: A TaskManager instance.
    :raises IronicException: If using Swift storage and no authentication
        token found in task's context.
    """
    storage = CONF.molds.storage
    if storage == 'swift':
        # TODO(ajya) Need to update to use Swift client and context session
        auth_token = swift.get_swift_session().get_token()
        if not auth_token:
            raise exception.IronicException(
                _('Missing auth_token for configuration mold access for node '
                  '%s') % task.node.uuid)
        return {'X-Auth-Token': auth_token}
    if storage == 'http' and CONF.molds.user and CONF.molds.password:
        credentials = base64.encode_as_text(
            '%s:%s' % (CONF.molds.user, CONF.molds.password))
        return {'Authorization': 'Basic %s' % credentials}
    # No storage-specific credentials configured.
    return None
def test_create_and_retrieve_base64_encoded_payload(self):
    # Validate base64-encoded encryption.
    payload = {'foo': 'bar'}
    secret_doc = self.factory.gen_test(
        'Certificate', 'encrypted', payload)
    # Non-string payloads are stringified then base64-encoded before
    # being handed to Barbican as an 'opaque' secret.
    expected_payload = base64.encode_as_text(six.text_type({'foo': 'bar'}))
    expected_kwargs = {
        'name': secret_doc['metadata']['name'],
        'secret_type': 'opaque',
        'payload': expected_payload
    }
    self._mock_barbican_client_call(payload)
    secret_ref = secrets_manager.SecretsManager.create(secret_doc)
    self.assertEqual(self.secret_ref, secret_ref)
    self.assertEqual('encrypted', secret_doc['metadata']['storagePolicy'])
    self.mock_barbicanclient.call.assert_called_once_with(
        "secrets.create", **expected_kwargs)
    # Validate base64-encoded decryption.
    self.mock_barbicanclient.get_secret.return_value = (
        mock.Mock(payload=expected_payload, secret_type='opaque'))
    dummy_document = document_wrapper.DocumentDict({})
    retrieved_payload = secrets_manager.SecretsManager.get(
        secret_ref, dummy_document)
    # The round trip must restore the original dict, not its string form.
    self.assertEqual(payload, retrieved_payload)
def test_can_create_server_with_max_number_personality_files(self):
    # Server should be created successfully if maximum allowed number of
    # files is injected into the server during creation.
    file_contents = 'This is a test file.'
    limits = self.user_client.show_limits()['limits']
    max_file_limit = limits['absolute']['maxPersonality']
    # A quota of -1 means unlimited personality files; nothing to probe.
    if max_file_limit == -1:
        raise self.skipException("No limit for personality files")
    person = []
    for i in range(0, int(max_file_limit)):
        path = '/etc/test' + str(i) + '.txt'
        person.append({
            'path': path,
            'contents': base64.encode_as_text(file_contents),
        })
    password = data_utils.rand_password()
    created_server = self.create_test_server(personality=person,
                                             adminPass=password,
                                             wait_until='ACTIVE',
                                             validatable=True)
    server = self.client.show_server(created_server['id'])['server']
    if CONF.validation.run_validation:
        # SSH in and compare each injected file with its source contents.
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server),
            self.ssh_user, password,
            self.validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.client)
        for i in person:
            self.assertEqual(base64.decode_as_text(i['contents']),
                             linux_client.exec_command(
                                 'sudo cat %s' % i['path']))
def _login(self):
    """Open a REST session against the switch and store its auth token.

    :returns: HTTP status code of the login request
    :raises BrocadeZoningRestException: when login is not answered 200
    """
    # Resolve the scheme from the configured REST protocol constant.
    if self.protocol == fc_zone_constants.REST_HTTPS:
        self.protocol = fc_zone_constants.HTTPS
    else:
        self.protocol = fc_zone_constants.HTTP
    if self.session is None:
        self.session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(pool_connections=1,
                                                pool_maxsize=1)
        self.session.mount(self.protocol + '://', adapter)
    raw_credentials = '%s:%s' % (self.sw_user, self.sw_pwd)
    credentials = base64.encode_as_text(raw_credentials).replace('\n', '')
    self.session.headers = {
        rest_constants.USER_AGENT: rest_constants.ZONE_DRIVER,
        rest_constants.ACCEPT: rest_constants.YANG,
        rest_constants.AUTHORIZATION: "Basic %s" % credentials,
    }
    response = self.session.post(self._build_url(rest_constants.LOGIN))
    if response.status_code != 200:
        msg = (_("REST login failed: %s") % six.text_type(response.text))
        LOG.error(msg)
        raise exception.BrocadeZoningRestException(reason=msg)
    # Switch to the token returned by the switch for later requests.
    auth = response.headers.get('Authorization')
    LOG.info("REST login success, setting auth: %s", auth)
    self.session.headers = {
        rest_constants.USER_AGENT: rest_constants.ZONE_DRIVER,
        rest_constants.ACCEPT: rest_constants.YANG,
        rest_constants.CONTENT_TYPE: rest_constants.YANG,
        rest_constants.AUTHORIZATION: auth,
    }
    return response.status_code
def test_create_server_with_personality(self):
    """Test creating server with file injection"""
    file_contents = 'This is a test file.'
    file_path = '/test.txt'
    personality = [{
        'path': file_path,
        'contents': base64.encode_as_text(file_contents)
    }]
    password = data_utils.rand_password()
    validation_resources = self.get_test_validation_resources(
        self.os_primary)
    created_server = self.create_test_server(
        personality=personality, adminPass=password, wait_until='ACTIVE',
        validatable=True, validation_resources=validation_resources)
    # Cleanups run LIFO: the delete below executes first, then we wait
    # for the server to actually terminate.
    self.addCleanup(waiters.wait_for_server_termination,
                    self.servers_client, created_server['id'])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.servers_client.delete_server,
                    created_server['id'])
    server = self.client.show_server(created_server['id'])['server']
    if CONF.validation.run_validation:
        # SSH in and verify the injected file holds the original text.
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server, validation_resources),
            self.ssh_user, password,
            validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.client)
        self.assertEqual(
            file_contents,
            linux_client.exec_command('sudo cat %s' % file_path))
def test_max_injected_file_path_bytes(self):
    """A file path exactly at the 255-byte quota must be accepted."""
    # Quota is 255.  'a' * 255 replaces the needless join/comprehension.
    path = 'a' * 255
    contents = base64.encode_as_text('some content')
    server = self._build_server()
    personality = [{'path': path, 'contents': contents}]
    server['personality'] = personality
    self.api.post_server({'server': server})
def _save_instance_password_if_sshkey_present(self, new_pass):
    """Persist *new_pass* in system metadata, encrypted with the instance
    ssh-rsa public key (no-op for other key types or no key).
    """
    sshkey = self.instance.get('key_data')
    if not (sshkey and sshkey.startswith("ssh-rsa")):
        return
    ctxt = context.get_admin_context()
    encrypted = crypto.ssh_encrypt_text(sshkey, new_pass)
    self.instance.system_metadata.update(
        password.convert_password(ctxt, base64.encode_as_text(encrypted)))
    self.instance.save()
def _metadata_as_json(self, version, path):
    """Build the OpenStack metadata JSON document for *version*."""
    metadata = {'uuid': self.uuid}
    if self.launch_metadata:
        metadata['meta'] = self.launch_metadata
    if self.files:
        metadata['files'] = self.files
    if self.extra_md:
        metadata.update(self.extra_md)
    if self.network_config:
        metadata['network_config'] = self.network_config
    if self.instance.key_name:
        if cells_opts.get_cell_type() == 'compute':
            # In a compute cell the keypair is fetched from the top cell.
            cells_api = cells_rpcapi.CellsAPI()
            keypair = cells_api.get_keypair_at_top(
                context.get_admin_context(), self.instance.user_id,
                self.instance.key_name)
        else:
            keypairs = self.instance.keypairs
            # NOTE(mriedem): It's possible for the keypair to be deleted
            # before it was migrated to the instance_extra table, in which
            # case lazy-loading instance.keypairs will handle the 404 and
            # just set an empty KeyPairList object on the instance.
            keypair = keypairs[0] if keypairs else None
        if keypair:
            metadata['public_keys'] = {
                keypair.name: keypair.public_key,
            }
            metadata['keys'] = [{
                'name': keypair.name,
                'type': keypair.type,
                'data': keypair.public_key
            }]
        else:
            LOG.debug(
                "Unable to find keypair for instance with "
                "key name '%s'.", self.instance.key_name,
                instance=self.instance)
    metadata['hostname'] = self._get_hostname()
    metadata['name'] = self.instance.display_name
    metadata['launch_index'] = self.instance.launch_index
    metadata['availability_zone'] = self.availability_zone
    # The fields below are gated on the metadata API version requested.
    if self._check_os_version(GRIZZLY, version):
        metadata['random_seed'] = base64.encode_as_text(os.urandom(512))
    if self._check_os_version(LIBERTY, version):
        metadata['project_id'] = self.instance.project_id
    if self._check_os_version(NEWTON_ONE, version):
        metadata['devices'] = self._get_device_metadata(version)
    self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
    return jsonutils.dump_as_bytes(metadata)
def setUp(self):
    """Create a Controller and a mocked request carrying auth headers."""
    super(TestController, self).setUp()
    self.controller = api.Controller()
    self.request = mock.MagicMock()
    token = base64.encode_as_text(b'test:test')
    self.request.headers = {
        'Authorization': 'Basic {}'.format(token),
        'X-Auth-Token': 'foo-bar',
        'X-Project-Id': 'bar-baz',
    }
def _metadata_as_json(self, version, path):
    """Build the OpenStack metadata JSON document for *version*."""
    metadata = {'uuid': self.uuid}
    if self.launch_metadata:
        metadata['meta'] = self.launch_metadata
    if self.files:
        metadata['files'] = self.files
    if self.extra_md:
        metadata.update(self.extra_md)
    if self.network_config:
        metadata['network_config'] = self.network_config
    if self.instance.key_name:
        if cells_opts.get_cell_type() == 'compute':
            # In a compute cell the keypair is fetched from the top cell.
            cells_api = cells_rpcapi.CellsAPI()
            keypair = cells_api.get_keypair_at_top(
                context.get_admin_context(), self.instance.user_id,
                self.instance.key_name)
        else:
            keypairs = self.instance.keypairs
            # NOTE(mriedem): It's possible for the keypair to be deleted
            # before it was migrated to the instance_extra table, in which
            # case lazy-loading instance.keypairs will handle the 404 and
            # just set an empty KeyPairList object on the instance.
            keypair = keypairs[0] if keypairs else None
        if keypair:
            metadata['public_keys'] = {
                keypair.name: keypair.public_key,
            }
            metadata['keys'] = [
                {'name': keypair.name,
                 'type': keypair.type,
                 'data': keypair.public_key}
            ]
        else:
            LOG.debug("Unable to find keypair for instance with "
                      "key name '%s'.", self.instance.key_name,
                      instance=self.instance)
    metadata['hostname'] = self._get_hostname()
    metadata['name'] = self.instance.display_name
    metadata['launch_index'] = self.instance.launch_index
    metadata['availability_zone'] = self.availability_zone
    # The fields below are gated on the metadata API version requested.
    if self._check_os_version(GRIZZLY, version):
        metadata['random_seed'] = base64.encode_as_text(os.urandom(512))
    if self._check_os_version(LIBERTY, version):
        metadata['project_id'] = self.instance.project_id
    if self._check_os_version(NEWTON_ONE, version):
        metadata['devices'] = self._get_device_metadata(version)
    self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
    return jsonutils.dump_as_bytes(metadata)
def setUp(self):
    """Create a Controller and a mocked request carrying auth headers."""
    super(TestController, self).setUp()
    self.controller = api.Controller()
    self.request = mock.MagicMock()
    encoded = base64.encode_as_text(b'test:test')
    auth_header = 'Basic {}'.format(encoded)
    self.request.headers = {'Authorization': auth_header,
                            'X-Auth-Token': 'foo-bar',
                            'X-Project-Id': 'bar-baz'}
    # Stop any mock.patch that a test left running.
    self.addCleanup(mock.patch.stopall)
def _build_v1_execution_plan(self, template, resources):
    """Replace script file names in *template* with b64-encoded bodies.

    Fix: the previous code base64-encoded the *path* string and then
    tried to load a resource by that encoded name; the resource must be
    loaded first and its binary content encoded (compare the v1 plan
    builder variant elsewhere in this module).

    :param template: Execution plan dict; its 'Scripts' list is rewritten
        in place.
    :param resources: Resource loader exposing ``string(name, binary)``.
    :returns: The mutated template.
    """
    scripts_folder = 'scripts'
    script_files = template.get('Scripts', [])
    scripts = []
    for script in script_files:
        script_path = os.path.join(scripts_folder, script)
        # Encode the script body, not its path.
        body = resources.string(script_path, binary=True)
        scripts.append(base64.encode_as_text(body) + "\n")
    template['Scripts'] = scripts
    return template
def test_create_instance_with_user_data_enabled(self):
    # Verify that user_data supplied in the request body is forwarded to
    # compute_api.API.create as the 'user_data' kwarg.
    params = {user_data.ATTRIBUTE_NAME: base64.encode_as_text('fake')}
    old_create = compute_api.API.create

    def create(*args, **kwargs):
        # The stub asserts the kwarg is present, then delegates to the
        # real create so the request completes normally.
        self.assertIn('user_data', kwargs)
        return old_create(*args, **kwargs)

    self.stubs.Set(compute_api.API, 'create', create)
    self._test_create_extra(params)
def test_middleware_ext_context_default(self):
    """Basic-auth requests must be exchanged for an X-Auth-Token header."""
    middleware = ext_context.ExternalContextMiddleware(None)
    middleware.get_keystone_token = mock.MagicMock(return_value="token?")
    basic_auth = 'Basic {}'.format(base64.encode_as_text(b'test:test'))
    request = webob.Request.blank('/environments',
                                  headers={'Authorization': basic_auth})
    middleware.process_request(request)
    self.assertEqual(request.headers.get('X-Auth-Token'), "token?")
def _get_body(self, file, resources, folder):
    """Return the resource body; base64-encode it when marked binary."""
    if self._is_base64(file):
        # Strip the surrounding marker characters (e.g. '<name>') to
        # recover the real resource name.
        path = os.path.join(folder, file[1:-1])
        raw = resources.string(path, binary=True)
        return base64.encode_as_text(raw) + "\n"
    return resources.string(os.path.join(folder, file))
def _get_body(self, file, resources, folder):
    """Return the body of *file* loaded via *resources*.

    Names flagged by ``_is_base64`` are loaded as bytes and returned
    base64-encoded with a trailing newline; other names are returned
    as text.
    """
    use_base64 = self._is_base64(file)
    if use_base64:
        # file[1:-1] strips the surrounding marker characters
        # (e.g. '<name>' -> 'name').
        path = os.path.join(folder, file[1:-1])
        body = resources.string(path, binary=True)
        body = base64.encode_as_text(body) + "\n"
    else:
        path = os.path.join(folder, file)
        body = resources.string(path)
    return body
def encode_record(self, **kwargs):
    """Serialize backup object, with optional extra info, into a string."""
    # We can't use dict(self)/obj_to_primitive here: extra fields must
    # not be exported and lazy loading has to be forced.
    record = {}
    for name, field in self.fields.items():
        record[name] = field.to_primitive(self, name, getattr(self, name))
    # Update kwargs with record (not the reverse) so real backup data
    # always wins over caller-supplied extras.
    kwargs.update(record)
    return base64.encode_as_text(jsonutils.dump_as_bytes(kwargs))
def _encode_name(self, name):
    """Return *name* (a UUID string) base64-encoded in 3par-safe form."""
    uuid_str = name.replace("-", "")
    vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
    encoded = base64.encode_as_text(vol_uuid.bytes)
    # 3par doesn't allow '+', '/' or the trailing '=' padding.
    for old, new in (('+', '.'), ('/', '-'), ('=', '')):
        encoded = encoded.replace(old, new)
    return encoded
def test_middleware_ext_context_except_unauthorized(self):
    """A keystone Unauthorized must surface as HTTPUnauthorized."""
    middleware = ext_context.ExternalContextMiddleware(None)
    middleware.get_keystone_token = mock.MagicMock(
        side_effect=exceptions.Unauthorized(''))
    basic_auth = 'Basic {}'.format(base64.encode_as_text(b'test:test'))
    request = webob.Request.blank('/environments',
                                  headers={'Authorization': basic_auth})
    self.assertRaises(webob.exc.HTTPUnauthorized,
                      middleware.process_request,
                      request)
def test_max_injected_files(self):
    """Exactly five injected files (the quota) must be accepted."""
    # Quota is 5.  A single comprehension replaces the append loop plus
    # the intermediate tuple list.
    contents = base64.encode_as_text('some content')
    personality = [{'path': '/my/path%d' % i, 'contents': contents}
                   for i in range(5)]
    server = self._build_server()
    server['personality'] = personality
    self.api.post_server({'server': server})
def test_rebuild_server_with_personality(self):
    """Rebuild with file injection must end up on the alternate image."""
    server = self.create_test_server(wait_until='ACTIVE', validatable=True)
    server_id = server['id']
    contents = base64.encode_as_text('Test server rebuild.')
    personality = [{'path': 'rebuild.txt', 'contents': contents}]
    rebuilt_server = self.client.rebuild_server(server_id,
                                                self.image_ref_alt,
                                                personality=personality)
    waiters.wait_for_server_status(self.client, server_id, 'ACTIVE')
    self.assertEqual(self.image_ref_alt,
                     rebuilt_server['server']['image']['id'])
def _build_v1_execution_plan(self, template, resources):
    """Replace script names in the template with base64-encoded bodies.

    Also stamps the template via ``_generate_stamp`` and returns it.
    """
    scripts_folder = 'scripts'
    script_files = template.get('Scripts', [])
    scripts = []
    for script in script_files:
        script_path = os.path.join(scripts_folder, script)
        # NOTE(review): the resource is loaded binary, so the 'latin1'
        # encoding argument presumably only matters if string() can
        # return text here — confirm against encode_as_text semantics.
        scripts.append(
            base64.encode_as_text(resources.string(script_path,
                                                   binary=True),
                                  encoding='latin1'))
    template['Scripts'] = scripts
    template['Stamp'] = self._generate_stamp()
    return template
def _attempt_to_stringify_content(self, content, content_tag):
    """Return a loggable text rendering of *content*.

    ASCII is attempted first because the logging module ends up trying
    to decode these strings as ascii; anything non-ascii falls back to
    a base64 rendering.  This is best-effort, debug-logging-only output.
    """
    if content is None:
        return None
    try:
        return self._attempt_ascii(content)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # Base64 is already the transport default, so the encoded form
        # is an acceptable stand-in for logging purposes.
        return base64.encode_as_text(content)
def test_can_create_server_with_max_number_personality_files(self):
    """Test creating server with maximum allowed number of injected files

    Server should be created successfully if maximum allowed number of
    files is injected into the server during creation.
    """
    file_contents = 'This is a test file.'
    limits = self.limits_client.show_limits()['limits']
    max_file_limit = limits['absolute']['maxPersonality']
    # A quota of -1 means unlimited, so there is no boundary to probe.
    if max_file_limit == -1:
        raise self.skipException("No limit for personality files")
    person = []
    for i in range(0, max_file_limit):
        # NOTE(andreaf) The cirros disk image is blank before boot
        # so we can only inject safely to /
        path = '/test' + str(i) + '.txt'
        person.append({
            'path': path,
            # Unique content per file so the verification loop is exact.
            'contents': base64.encode_as_text(file_contents + str(i)),
        })
    password = data_utils.rand_password()
    validation_resources = self.get_test_validation_resources(
        self.os_primary)
    created_server = self.create_test_server(
        personality=person, adminPass=password, wait_until='ACTIVE',
        validatable=True, validation_resources=validation_resources)
    # Cleanups run LIFO: delete first, then wait for termination.
    self.addCleanup(waiters.wait_for_server_termination,
                    self.servers_client, created_server['id'])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.servers_client.delete_server,
                    created_server['id'])
    server = self.client.show_server(created_server['id'])['server']
    if CONF.validation.run_validation:
        # SSH in and read every injected file back.
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server, validation_resources),
            self.ssh_user, password,
            validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.client)
        for i in person:
            self.assertEqual(
                base64.decode_as_text(i['contents']),
                linux_client.exec_command('sudo cat %s' % i['path']))
def test_file_line_endings(self, _get_package):
    """Text resources get normalized line endings; binary ones do not."""
    class FakeResources(object):
        """Class with only string() method from ResourceManager class"""

        @staticmethod
        def string(name, owner=None, binary=False):
            return resource_manager.ResourceManager.string(receiver=None,
                                                           name=name,
                                                           owner=owner,
                                                           binary=binary)

    # make path equal to provided name inside resources.string()
    package = mock.Mock()
    package.get_resource.side_effect = lambda m: m
    _get_package.return_value = package
    # Mixed \n, \r and \r\n endings in the raw bytes ...
    text = b"First line\nSecond line\rThird line\r\nFourth line"
    # ... are all expected to normalize to \n when read as text.
    modified_text = u"First line\nSecond line\nThird line\nFourth line"
    encoded_text = base64.encode_as_text(text) + "\n"
    resources = FakeResources()
    with tempfile.NamedTemporaryFile() as script_file:
        script_file.write(text)
        script_file.file.flush()
        os.fsync(script_file.file.fileno())
        # check that data has been written correctly
        script_file.seek(0)
        file_data = script_file.read()
        self.assertEqual(text, file_data)
        # check resources.string() output
        # text file
        result = resources.string(script_file.name)
        self.assertEqual(modified_text, result)
        # binary file
        result = resources.string(script_file.name, binary=True)
        self.assertEqual(text, result)
        # check _get_body() output
        filename = os.path.basename(script_file.name)
        folder = os.path.dirname(script_file.name)
        # text file
        body = self.agent._get_body(filename, resources, folder)
        self.assertEqual(modified_text, body)
        # binary file: angle brackets mark the name as binary
        filename = '<{0}>'.format(filename)
        body = self.agent._get_body(filename, resources, folder)
        self.assertEqual(encoded_text, body)
def test_file_line_endings(self, _get_package):
    """Text resources get normalized line endings; binary ones do not."""
    class FakeResources(object):
        """Class with only string() method from ResourceManager class"""

        @staticmethod
        def string(name, owner=None, binary=False):
            return resource_manager.ResourceManager.string(
                receiver=None, name=name, owner=owner, binary=binary)

    # make path equal to provided name inside resources.string()
    package = mock.Mock()
    package.get_resource.side_effect = lambda m: m
    _get_package.return_value = package
    # Mixed \n, \r and \r\n endings in the raw bytes ...
    text = b"First line\nSecond line\rThird line\r\nFourth line"
    # ... are all expected to normalize to \n when read as text.
    modified_text = u"First line\nSecond line\nThird line\nFourth line"
    encoded_text = base64.encode_as_text(text) + "\n"
    resources = FakeResources()
    with tempfile.NamedTemporaryFile() as script_file:
        script_file.write(text)
        script_file.file.flush()
        os.fsync(script_file.file.fileno())
        # check that data has been written correctly
        script_file.seek(0)
        file_data = script_file.read()
        self.assertEqual(text, file_data)
        # check resources.string() output
        # text file
        result = resources.string(script_file.name)
        self.assertEqual(modified_text, result)
        # binary file
        result = resources.string(script_file.name, binary=True)
        self.assertEqual(text, result)
        # check _get_body() output
        filename = os.path.basename(script_file.name)
        folder = os.path.dirname(script_file.name)
        # text file
        body = self.agent._get_body(filename, resources, folder)
        self.assertEqual(modified_text, body)
        # binary file: angle brackets mark the name as binary
        filename = '<{0}>'.format(filename)
        body = self.agent._get_body(filename, resources, folder)
        self.assertEqual(encoded_text, body)
def _base64_encode_payload(self, secret_doc):
    """Ensures secret document payload is compatible with Barbican.

    :param secret_doc: Secret document whose ``data`` is inspected.
    :returns: Tuple of (secret_type, payload) where the payload may have
        been base64-encoded and secret_type forced to ``'opaque'``.
    :raises UnknownSubstitutionError: If the payload cannot be
        base64-encoded.
    """
    payload = secret_doc.data
    secret_type = self._get_secret_type(secret_doc.schema)
    # NOTE(felipemonteiro): The logic for the 2 conditions below is
    # enforced from Barbican's Python client. Some pre-processing and
    # transformation is needed to make Barbican work with non-compatible
    # formats.
    if not payload and payload is not False:
        # There is no point in even bothering to encrypt an empty
        # body, which just leads to needless overhead, so return
        # early.
        LOG.info(
            'Barbican does not accept empty payloads so '
            'Deckhand will not encrypt document [%s, %s] %s.',
            secret_doc.schema, secret_doc.layer, secret_doc.name)
        secret_doc.storage_policy = types.CLEARTEXT
    elif not isinstance(payload, (six.text_type, six.binary_type)):
        LOG.debug(
            'Forcibly setting secret_type=opaque and '
            'base64-encoding non-string payload for '
            'document [%s, %s] %s.', secret_doc.schema,
            secret_doc.layer, secret_doc.name)
        # NOTE(felipemonteiro): base64-encoding the non-string payload is
        # done for serialization purposes, not for security purposes.
        # 'opaque' is used to avoid Barbican doing any further
        # serialization server-side.
        secret_type = 'opaque'
        try:
            payload = base64.encode_as_text(six.text_type(payload))
        except Exception:
            # Fix: interpolate the type into the message.  The previous
            # code built a ``(format, arg)`` tuple which was logged and
            # passed to the exception verbatim, never formatted.
            message = ('Failed to base64-encode payload of type %s '
                       'for Barbican storage.' % type(payload))
            LOG.error(message)
            raise errors.UnknownSubstitutionError(
                src_schema=secret_doc.schema,
                src_layer=secret_doc.layer, src_name=secret_doc.name,
                schema='N/A', layer='N/A', name='N/A', details=message)
    return secret_type, payload
def test_personality_files_exceed_limit(self):
    """Injecting more than the allowed number of files must be rejected."""
    limits = self.user_client.show_limits()['limits']
    max_file_limit = limits['absolute']['maxPersonality']
    if max_file_limit == -1:
        raise self.skipException("No limit for personality files")

    encoded = base64.encode_as_text('This is a test file.')
    # Build one more personality entry than the quota allows.
    personality = [
        {'path': 'etc/test%d.txt' % idx, 'contents': encoded}
        for idx in range(int(max_file_limit) + 1)
    ]

    # A 403 Forbidden or 413 Overlimit (old behaviour) exception
    # will be raised when out of quota
    self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                      self.create_test_server, personality=personality)
def authenticate(self):
    """Authenticate with the switch.

    Returns authentication status with modified authentication header
    (Base64(username:xxx:random no)).

    :returns: Tuple of (authentication status, authentication header)
    :raises BrocadeZoningHttpException: on bad credentials or any
        communication/parsing error
    """
    headers = {zone_constant.AUTH_HEADER: self.auth_header}
    try:
        # GET Request to authenticate.html to verify the credentials
        response = self.connect(zone_constant.GET_METHOD,
                                zone_constant.AUTHEN_PAGE,
                                header=headers)
        parsed_data = self.get_parsed_data(response,
                                           zone_constant.AUTHEN_BEGIN,
                                           zone_constant.AUTHEN_END)
        isauthenticated = self.get_nvp_value(
            parsed_data, zone_constant.AUTHENTICATED)
        if isauthenticated == "yes":
            if self.auth_version == "3":
                auth_id = self.get_nvp_value(parsed_data,
                                             zone_constant.IDENTIFIER)
                auth_string = '%s:xxx:%s' % (self.switch_user, auth_id)
            else:
                # Replace password in the authentication string with xxx
                auth_string = '%s:xxx:%s' % (self.switch_user,
                                             self.random_no)
            auth_token = base64.encode_as_text(auth_string).strip()
            auth_header = zone_constant.AUTH_STRING + auth_token
            return True, auth_header
        else:
            auth_error_code = self.get_nvp_value(parsed_data, "errCode")
            msg = (_("Authentication failed, verify the switch "
                     "credentials, error code %s.") % auth_error_code)
            LOG.error(msg)
            raise b_exception.BrocadeZoningHttpException(reason=msg)
    except b_exception.BrocadeZoningHttpException:
        # Propagate the specific credential-failure exception raised
        # above unchanged; previously it fell into the generic handler
        # below, which logged twice and buried the switch error code
        # under a generic "Error while authenticating" message.
        raise
    except Exception as e:
        msg = (_("Error while authenticating with switch: %s.")
               % six.text_type(e))
        LOG.error(msg)
        raise b_exception.BrocadeZoningHttpException(reason=msg)
def test_rebuild_server_with_personality(self):
    """Rebuild a server with a personality file onto the alternate image."""
    validation_resources = self.get_test_validation_resources(
        self.os_primary)
    server = self.create_test_server(
        wait_until='ACTIVE',
        validatable=True,
        validation_resources=validation_resources)
    server_id = server['id']
    # Cleanups run LIFO: delete first, then wait for termination.
    self.addCleanup(waiters.wait_for_server_termination,
                    self.servers_client, server_id)
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.servers_client.delete_server, server_id)

    file_contents = 'Test server rebuild.'
    personality = [{
        'path': 'rebuild.txt',
        'contents': base64.encode_as_text(file_contents),
    }]
    rebuilt_server = self.client.rebuild_server(server_id,
                                                self.image_ref_alt,
                                                personality=personality)
    waiters.wait_for_server_status(self.client, server_id, 'ACTIVE')
    self.assertEqual(self.image_ref_alt,
                     rebuilt_server['server']['image']['id'])
def test_create_server_with_personality(self):
    """Boot a server with an injected file and verify its contents."""
    file_contents = 'This is a test file.'
    file_path = '/test.txt'
    personality = [{'path': file_path,
                    'contents': base64.encode_as_text(file_contents)}]
    password = data_utils.rand_password()
    created_server = self.create_test_server(personality=personality,
                                             adminPass=password,
                                             wait_until='ACTIVE',
                                             validatable=True)
    server = self.client.show_server(created_server['id'])['server']
    if not CONF.validation.run_validation:
        return
    # SSH in and confirm the injected file landed with the right body.
    linux_client = remote_client.RemoteClient(
        self.get_server_ip(server),
        self.ssh_user,
        password,
        self.validation_resources['keypair']['private_key'],
        server=server,
        servers_client=self.client)
    self.assertEqual(
        file_contents,
        linux_client.exec_command('sudo cat %s' % file_path))
def _update_volume_status(self):
    """Retrieve status info from volume group."""
    LOG.debug("Updating volume status")
    self._stats = None

    backend_name = self.configuration.safe_get("volume_backend_name")
    data = {
        "volume_backend_name": backend_name or self.__class__.__name__,
        "vendor_name": "Oracle",
        "driver_version": self.VERSION,
        "storage_protocol": self.protocol,
    }

    lcfg = self.configuration
    avail, total = self.zfssa.get_project_stats(lcfg.zfssa_pool,
                                                lcfg.zfssa_project)
    # Leave _stats unset when the appliance returned no usable numbers.
    if avail is None or total is None:
        return

    # location_info = host:b64(user:pwd):pool:project:tgt_group:repl_ip
    auth_str = base64.encode_as_text(
        "%s:%s" % (lcfg.san_login, lcfg.san_password))
    data["location_info"] = "%s:%s:%s:%s:%s:%s" % (
        lcfg.san_ip, auth_str, lcfg.zfssa_pool, lcfg.zfssa_project,
        lcfg.zfssa_target_group, lcfg.zfssa_replication_ip)

    data["total_capacity_gb"] = int(total) / units.Gi
    data["free_capacity_gb"] = int(avail) / units.Gi
    data["reserved_percentage"] = 0
    data["QoS_support"] = False

    pool_details = self.zfssa.get_pool_details(lcfg.zfssa_pool)
    data["zfssa_poolprofile"] = pool_details["profile"]
    data["zfssa_volblocksize"] = lcfg.zfssa_lun_volblocksize
    data["zfssa_sparse"] = six.text_type(lcfg.zfssa_lun_sparse)
    data["zfssa_compression"] = lcfg.zfssa_lun_compression
    data["zfssa_logbias"] = lcfg.zfssa_lun_logbias

    self._stats = data
def do_setup(self, context):
    """Validate config, connect to the ZFSSA appliance and create the share.

    Checks max_over_subscription_ratio, verifies mount.nfs is installed,
    logs in to the appliance REST service with base64(user:password)
    credentials, creates the NFS project and share, and configures
    authenticated WebDAV (HTTP) access to it.

    :param context: security context (not referenced in this method)
    :raises NfsException: invalid over-subscription ratio or missing
        mount.nfs helper
    :raises CinderException: a required credential option is unset
    """
    # max_over_subscription_ratio must be strictly positive.
    if not self.configuration.max_over_subscription_ratio > 0:
        msg = _("Config 'max_over_subscription_ratio' invalid. Must be > "
                "0: %s") % self.configuration.max_over_subscription_ratio
        LOG.error(msg)
        raise exception.NfsException(msg)

    # The NFS mount helper must be present on this host; invoking it
    # with check_exit_code=False only tests that the binary exists.
    package = 'mount.nfs'
    try:
        self._execute(package, check_exit_code=False, run_as_root=True)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            msg = _('%s is not installed') % package
            raise exception.NfsException(msg)
        else:
            raise

    lcfg = self.configuration
    LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)

    host = lcfg.san_ip
    user = lcfg.san_login
    password = lcfg.san_password
    https_port = lcfg.zfssa_https_port

    # Fail fast if any required connection option is missing.
    credentials = ['san_ip', 'san_login', 'san_password', 'zfssa_data_ip']

    for cred in credentials:
        if not getattr(lcfg, cred, None):
            exception_msg = _('%s not set in cinder.conf') % cred
            LOG.error(exception_msg)
            raise exception.CinderException(exception_msg)

    self.zfssa = factory_zfssa()
    self.zfssa.set_host(host, timeout=lcfg.zfssa_rest_timeout)
    # REST login takes base64(user:password) basic-auth style credentials.
    auth_str = base64.encode_as_text('%s:%s' % (user, password))
    self.zfssa.login(auth_str)

    self.zfssa.create_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                              compression=lcfg.zfssa_nfs_share_compression,
                              logbias=lcfg.zfssa_nfs_share_logbias)

    share_args = {
        'sharedav': 'rw',
        'sharenfs': 'rw',
        'root_permissions': '777',
        'compression': lcfg.zfssa_nfs_share_compression,
        'logbias': lcfg.zfssa_nfs_share_logbias
    }

    self.zfssa.create_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                            lcfg.zfssa_nfs_share, share_args)

    share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
                                         lcfg.zfssa_nfs_project,
                                         lcfg.zfssa_nfs_share)

    mountpoint = share_details['mountpoint']

    self.mount_path = lcfg.zfssa_data_ip + ':' + mountpoint
    # NOTE(review): string concatenation assumes zfssa_https_port is a
    # string option — confirm against the driver's config declaration.
    https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \
        '/shares' + mountpoint

    LOG.debug('NFS mount path: %s', self.mount_path)
    LOG.debug('WebDAV path to the share: %s', https_path)

    self.shares = {}
    mnt_opts = self.configuration.zfssa_nfs_mount_options
    # NOTE(review): `len(mnt_opts) > 1` treats a one-character option
    # string the same as empty — presumably intentional; verify.
    self.shares[self.mount_path] = mnt_opts if len(mnt_opts) > 1 else None

    # Initialize the WebDAV client
    self.zfssa.set_webdav(https_path, auth_str)

    # Edit http service so that WebDAV requests are always authenticated
    args = {'https_port': https_port,
            'require_login': True}
    self.zfssa.modify_service('http', args)
    self.zfssa.enable_service('http')

    if lcfg.zfssa_enable_local_cache:
        LOG.debug('Creating local cache directory %s.',
                  lcfg.zfssa_cache_directory)
        self.zfssa.create_directory(lcfg.zfssa_cache_directory)
def do_setup(self, context):
    """Setup - create multiple elements.

    Project, initiators, initiatorgroup, target and targetgroup.
    """
    lcfg = self.configuration
    LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)
    self.zfssa = factory_zfssa()
    self.tgt_zfssa = factory_zfssa()
    self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout)
    # REST login takes base64(user:password) basic-auth style credentials.
    auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_pwd) if False else '%s:%s' % (lcfg.san_login, lcfg.san_password)
    auth_str = base64.encode_as_text(auth_str)
    self.zfssa.login(auth_str)

    self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project,
                              compression=lcfg.zfssa_lun_compression,
                              logbias=lcfg.zfssa_lun_logbias)

    if lcfg.zfssa_enable_local_cache:
        self.zfssa.create_project(lcfg.zfssa_pool,
                                  lcfg.zfssa_cache_project,
                                  compression=lcfg.zfssa_lun_compression,
                                  logbias=lcfg.zfssa_lun_logbias)
        # Custom schema properties used to track cached images.
        schemas = [
            {'property': 'image_id',
             'description': 'OpenStack image ID',
             'type': 'String'},
            {'property': 'updated_at',
             'description': 'Most recent updated time of image',
             'type': 'String'}]
        self.zfssa.create_schemas(schemas)

    if (lcfg.zfssa_initiator_config != ''):
        # Preferred path: a dict literal mapping initiator-group name to
        # a list of initiator descriptions (iqn/user/password).
        initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
        for initiator_group in initiator_config:
            zfssa_initiator_group = initiator_group
            for zfssa_initiator in initiator_config[zfssa_initiator_group]:
                self.zfssa.create_initiator(zfssa_initiator['iqn'],
                                            zfssa_initiator_group + '-' +
                                            zfssa_initiator['iqn'],
                                            chapuser=
                                            zfssa_initiator['user'],
                                            chapsecret=
                                            zfssa_initiator['password'])
                if (zfssa_initiator_group != 'default'):
                    self.zfssa.add_to_initiatorgroup(
                        zfssa_initiator['iqn'], zfssa_initiator_group)
    else:
        # Legacy path driven by the older flat config options.
        LOG.warning(_LW('zfssa_initiator_config not found. '
                        'Using deprecated configuration options.'))
        if (lcfg.zfssa_initiator != '' and
            (lcfg.zfssa_initiator_group == '' or
             lcfg.zfssa_initiator_group == 'default')):
            LOG.warning(_LW('zfssa_initiator: %(ini)s'
                            ' wont be used on '
                            'zfssa_initiator_group= %(inigrp)s.'),
                        {'ini': lcfg.zfssa_initiator,
                         'inigrp': lcfg.zfssa_initiator_group})

        # Setup initiator and initiator group
        if (lcfg.zfssa_initiator != '' and
           lcfg.zfssa_initiator_group != '' and
           lcfg.zfssa_initiator_group != 'default'):
            for initiator in lcfg.zfssa_initiator.split(','):
                self.zfssa.create_initiator(
                    initiator, lcfg.zfssa_initiator_group + '-' +
                    initiator, chapuser=lcfg.zfssa_initiator_user,
                    chapsecret=lcfg.zfssa_initiator_password)
                self.zfssa.add_to_initiatorgroup(
                    initiator, lcfg.zfssa_initiator_group)

    # Parse interfaces
    interfaces = []
    for interface in lcfg.zfssa_target_interfaces.split(','):
        if interface == '':
            continue
        interfaces.append(interface)

    # Setup target and target group
    iqn = self.zfssa.create_target(
        self._get_target_alias(),
        interfaces,
        tchapuser=lcfg.zfssa_target_user,
        tchapsecret=lcfg.zfssa_target_password)

    self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
def do_setup(self, context):
    """Validate configuration, connect to the ZFSSA and create the share.

    Checks the over-subscription and used ratios, verifies mount.nfs is
    installed, logs in to the appliance REST service, creates the NFS
    project and share, and configures authenticated WebDAV access.

    :param context: security context (not referenced in this method)
    :raises NfsException: invalid ratio config or missing mount.nfs
    :raises CinderException: a required credential option is unset
    """
    if not self.configuration.nfs_oversub_ratio > 0:
        msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
                "%s") % self.configuration.nfs_oversub_ratio
        LOG.error(msg)
        raise exception.NfsException(msg)

    # BUG FIX: the old test `(not ratio > 0) and (ratio <= 1)` only
    # rejected ratios <= 0 and silently accepted invalid values > 1,
    # despite what the error message says.  Enforce 0 < ratio <= 1.0.
    if not 0 < self.configuration.nfs_used_ratio <= 1:
        msg = (
            _("NFS config 'nfs_used_ratio' invalid. Must be > 0 "
              "and <= 1.0: %s") % self.configuration.nfs_used_ratio
        )
        LOG.error(msg)
        raise exception.NfsException(msg)

    # The NFS mount helper must be present on this host.
    package = "mount.nfs"
    try:
        self._execute(package, check_exit_code=False, run_as_root=True)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            msg = _("%s is not installed") % package
            raise exception.NfsException(msg)
        else:
            raise

    lcfg = self.configuration
    LOG.info(_LI("Connecting to host: %s."), lcfg.san_ip)

    host = lcfg.san_ip
    user = lcfg.san_login
    password = lcfg.san_password
    https_port = lcfg.zfssa_https_port

    # Fail fast if any required connection option is missing.
    credentials = ["san_ip", "san_login", "san_password", "zfssa_data_ip"]

    for cred in credentials:
        if not getattr(lcfg, cred, None):
            exception_msg = _("%s not set in cinder.conf") % cred
            LOG.error(exception_msg)
            raise exception.CinderException(exception_msg)

    self.zfssa = factory_zfssa()
    self.zfssa.set_host(host, timeout=lcfg.zfssa_rest_timeout)
    # BUG FIX: encode_as_text() does not append a trailing newline (the
    # legacy base64.encodestring() did), so the previous `[:-1]` slice
    # dropped the final character of the encoded credentials.
    auth_str = base64.encode_as_text("%s:%s" % (user, password))
    self.zfssa.login(auth_str)

    self.zfssa.create_project(
        lcfg.zfssa_nfs_pool,
        lcfg.zfssa_nfs_project,
        compression=lcfg.zfssa_nfs_share_compression,
        logbias=lcfg.zfssa_nfs_share_logbias,
    )

    share_args = {
        "sharedav": "rw",
        "sharenfs": "rw",
        "root_permissions": "777",
        "compression": lcfg.zfssa_nfs_share_compression,
        "logbias": lcfg.zfssa_nfs_share_logbias,
    }

    self.zfssa.create_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                            lcfg.zfssa_nfs_share, share_args)

    share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
                                         lcfg.zfssa_nfs_project,
                                         lcfg.zfssa_nfs_share)

    mountpoint = share_details["mountpoint"]

    self.mount_path = lcfg.zfssa_data_ip + ":" + mountpoint
    https_path = ("https://" + lcfg.zfssa_data_ip + ":" + https_port +
                  "/shares" + mountpoint)

    LOG.debug("NFS mount path: %s", self.mount_path)
    LOG.debug("WebDAV path to the share: %s", https_path)

    self.shares = {}
    mnt_opts = self.configuration.zfssa_nfs_mount_options
    self.shares[self.mount_path] = mnt_opts if len(mnt_opts) > 1 else None

    # Initialize the WebDAV client
    self.zfssa.set_webdav(https_path, auth_str)

    # Edit http service so that WebDAV requests are always authenticated
    args = {"https_port": https_port, "require_login": True}
    self.zfssa.modify_service("http", args)
    self.zfssa.enable_service("http")

    if lcfg.zfssa_enable_local_cache:
        LOG.debug("Creating local cache directory %s.",
                  lcfg.zfssa_cache_directory)
        self.zfssa.create_directory(lcfg.zfssa_cache_directory)
def _config_cluster_kubernetes(cluster, cluster_template, cfg_dir,
                               force=False, certs=None,
                               use_keystone=False):
    """Return and write configuration for the given kubernetes cluster.

    Writes a kubeconfig to ``<cfg_dir>/config`` and returns the shell
    snippet that exports KUBECONFIG for the caller's shell.

    :param cluster: object providing ``name`` and ``api_address``
    :param cluster_template: template; ``tls_disabled`` selects the
        unauthenticated kubeconfig variant
    :param cfg_dir: directory in which to write the config file
    :param force: overwrite an existing config file when True
    :param certs: dict with 'ca', 'cert', 'key' material (or None)
    :param use_keystone: emit the exec-based Keystone-token kubeconfig
    :returns: shell command exporting KUBECONFIG
    :raises CommandError: config file exists and ``force`` is False
    """
    cfg_file = "%s/config" % cfg_dir
    if cluster_template.tls_disabled or certs is None:
        cfg = ("apiVersion: v1\n"
               "clusters:\n"
               "- cluster:\n"
               "    server: %(api_address)s\n"
               "  name: %(name)s\n"
               "contexts:\n"
               "- context:\n"
               "    cluster: %(name)s\n"
               "    user: %(name)s\n"
               "  name: %(name)s\n"
               "current-context: %(name)s\n"
               "kind: Config\n"
               "preferences: {}\n"
               "users:\n"
               # BUG FIX: the user name previously ended with a stray
               # apostrophe ("%(name)s'"), producing invalid kubeconfig.
               "- name: %(name)s\n"
               % {'name': cluster.name,
                  'api_address': cluster.api_address})
    else:
        if not use_keystone:
            cfg = ("apiVersion: v1\n"
                   "clusters:\n"
                   "- cluster:\n"
                   "    certificate-authority-data: %(ca)s\n"
                   "    server: %(api_address)s\n"
                   "  name: %(name)s\n"
                   "contexts:\n"
                   "- context:\n"
                   "    cluster: %(name)s\n"
                   "    user: admin\n"
                   "  name: default\n"
                   "current-context: default\n"
                   "kind: Config\n"
                   "preferences: {}\n"
                   "users:\n"
                   "- name: admin\n"
                   "  user:\n"
                   "    client-certificate-data: %(cert)s\n"
                   "    client-key-data: %(key)s\n"
                   % {'name': cluster.name,
                      'api_address': cluster.api_address,
                      'key': base64.encode_as_text(certs['key']),
                      'cert': base64.encode_as_text(certs['cert']),
                      'ca': base64.encode_as_text(certs['ca'])})
        else:
            cfg = ("apiVersion: v1\n"
                   "clusters:\n"
                   "- cluster:\n"
                   "    certificate-authority-data: %(ca)s\n"
                   "    server: %(api_address)s\n"
                   "  name: %(name)s\n"
                   "contexts:\n"
                   "- context:\n"
                   "    cluster: %(name)s\n"
                   "    user: openstackuser\n"
                   "  name: openstackuser@kubernetes\n"
                   "current-context: openstackuser@kubernetes\n"
                   "kind: Config\n"
                   "preferences: {}\n"
                   "users:\n"
                   "- name: openstackuser\n"
                   "  user:\n"
                   "    exec:\n"
                   "      command: /bin/bash\n"
                   "      apiVersion: client.authentication.k8s.io/v1alpha1\n"
                   "      args:\n"
                   "      - -c\n"
                   "      - >\n"
                   "        if [ -z ${OS_TOKEN} ]; then\n"
                   "            echo 'Error: Missing OpenStack credential from environment variable $OS_TOKEN' > /dev/stderr\n"  # noqa
                   "            exit 1\n"
                   "        else\n"
                   "            echo '{ \"apiVersion\": \"client.authentication.k8s.io/v1alpha1\", \"kind\": \"ExecCredential\", \"status\": { \"token\": \"'\"${OS_TOKEN}\"'\"}}'\n"  # noqa
                   "        fi\n"
                   % {'name': cluster.name,
                      'api_address': cluster.api_address,
                      'ca': base64.encode_as_text(certs['ca'])})

    if os.path.exists(cfg_file) and not force:
        raise exc.CommandError("File %s exists, aborting." % cfg_file)
    else:
        # Use a context manager so the handle is closed even on write
        # errors (previously: bare open/write/close).
        with open(cfg_file, "w") as f:
            f.write(cfg)

    # BUG FIX: SHELL may be unset (e.g. under cron); default to the
    # bourne-style export instead of raising KeyError.
    if 'csh' in os.environ.get('SHELL', ''):
        return "setenv KUBECONFIG %s\n" % cfg_file
    else:
        return "export KUBECONFIG=%s\n" % cfg_file
def base64encode(value):
    """Return *value* base64-encoded as a text (unicode) string."""
    encoded = base64.encode_as_text(value)
    return encoded
def test_create_instance_with_user_data(self):
    """Server create with base64 user_data yields the fake instance id."""
    encoded = base64.encode_as_text("A random string")
    server = self._test_create_extra({user_data.ATTRIBUTE_NAME: encoded})
    self.assertEqual(FAKE_UUID, server['id'])
def do_setup(self, context):
    """Setup - create multiple elements.

    Project, initiators, initiatorgroup, target and targetgroup.
    """
    lcfg = self.configuration
    LOG.info(_LI("Connecting to host: %s."), lcfg.san_ip)
    self.zfssa = factory_zfssa()
    self.tgt_zfssa = factory_zfssa()
    self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout)
    # REST login takes base64(user:password) basic-auth style credentials.
    auth_str = "%s:%s" % (lcfg.san_login, lcfg.san_password)
    auth_str = base64.encode_as_text(auth_str)
    self.zfssa.login(auth_str)

    self.zfssa.create_project(
        lcfg.zfssa_pool, lcfg.zfssa_project,
        compression=lcfg.zfssa_lun_compression,
        logbias=lcfg.zfssa_lun_logbias
    )

    # Base schema: flag volumes managed by Cinder.
    schemas = [{"property": "cinder_managed",
                "description": "Managed by Cinder",
                "type": "Boolean"}]

    if lcfg.zfssa_enable_local_cache:
        self.zfssa.create_project(
            lcfg.zfssa_pool,
            lcfg.zfssa_cache_project,
            compression=lcfg.zfssa_lun_compression,
            logbias=lcfg.zfssa_lun_logbias,
        )
        # Extra schema properties used to track locally cached images.
        schemas.extend(
            [
                {"property": "image_id",
                 "description": "OpenStack image ID",
                 "type": "String"},
                {"property": "updated_at",
                 "description": "Most recent updated time of image",
                 "type": "String"},
            ]
        )

    self.zfssa.create_schemas(schemas)

    if lcfg.zfssa_initiator_config != "":
        # Preferred path: a dict literal mapping initiator-group name to
        # a list of initiator descriptions (iqn/user/password).
        initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
        for initiator_group in initiator_config:
            zfssa_initiator_group = initiator_group
            for zfssa_initiator in initiator_config[zfssa_initiator_group]:
                self.zfssa.create_initiator(
                    zfssa_initiator["iqn"],
                    zfssa_initiator_group + "-" + zfssa_initiator["iqn"],
                    chapuser=zfssa_initiator["user"],
                    chapsecret=zfssa_initiator["password"],
                )
                if zfssa_initiator_group != "default":
                    self.zfssa.add_to_initiatorgroup(
                        zfssa_initiator["iqn"], zfssa_initiator_group)
    else:
        # Legacy path driven by the older flat config options.
        LOG.warning(_LW("zfssa_initiator_config not found. "
                        "Using deprecated configuration options."))

        if not lcfg.zfssa_initiator and (
            not lcfg.zfssa_initiator_group and
            lcfg.zfssa_initiator_group != "default"
        ):
            LOG.error(_LE("zfssa_initiator cannot be empty when "
                          "creating a zfssa_initiator_group."))
            raise exception.InvalidConfigurationValue(
                value="", option="zfssa_initiator")

        if lcfg.zfssa_initiator != "" and (
            lcfg.zfssa_initiator_group == "" or
            lcfg.zfssa_initiator_group == "default"
        ):
            LOG.warning(
                _LW("zfssa_initiator: %(ini)s"
                    " wont be used on "
                    "zfssa_initiator_group= %(inigrp)s."),
                {"ini": lcfg.zfssa_initiator,
                 "inigrp": lcfg.zfssa_initiator_group},
            )

        # Setup initiator and initiator group
        if (
            lcfg.zfssa_initiator != ""
            and lcfg.zfssa_initiator_group != ""
            and lcfg.zfssa_initiator_group != "default"
        ):
            for initiator in lcfg.zfssa_initiator.split(","):
                initiator = initiator.strip()
                self.zfssa.create_initiator(
                    initiator,
                    lcfg.zfssa_initiator_group + "-" + initiator,
                    chapuser=lcfg.zfssa_initiator_user,
                    chapsecret=lcfg.zfssa_initiator_password,
                )
                self.zfssa.add_to_initiatorgroup(
                    initiator, lcfg.zfssa_initiator_group)

    # Parse interfaces
    interfaces = []
    for intrface in lcfg.zfssa_target_interfaces.split(","):
        if intrface == "":
            continue
        interfaces.append(intrface)

    # Setup target and target group
    iqn = self.zfssa.create_target(
        self._get_target_alias(),
        interfaces,
        tchapuser=lcfg.zfssa_target_user,
        tchapsecret=lcfg.zfssa_target_password,
    )

    self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)

    if lcfg.zfssa_manage_policy not in ("loose", "strict"):
        err_msg = (
            _("zfssa_manage_policy property needs to be set to"
              " 'strict' or 'loose'. Current value is: %s.")
            % lcfg.zfssa_manage_policy
        )
        LOG.error(err_msg)
        raise exception.InvalidInput(reason=err_msg)

    # Lookup the zfssa_target_portal DNS name to an IP address
    host, port = lcfg.zfssa_target_portal.split(":")
    host_ip_addr = utils.resolve_hostname(host)
    self.zfssa_target_portal = host_ip_addr + ":" + port
def _encode_backup(self, backup):
    """Serialize *backup* to JSON and return it base64-encoded as text."""
    return base64.encode_as_text(json.dumps(backup))