def test_conf_load_skip_reload(self):
    """Test conf load skip_reload argument.

    With skip_reload=True a second Conf.load() on an already-loaded
    index must be a no-op, so the key list stays unchanged.
    """
    Conf.load('skip_index', 'json:///tmp/file1.json')
    expected_lst = Conf.get_keys('skip_index')
    Conf.load('skip_index', 'toml:///tmp/document.toml', skip_reload=True)
    out_lst = Conf.get_keys('skip_index')
    # assertEqual replaces the redundant `True if ... else False` pattern
    # and reports the differing lists on failure.
    self.assertEqual(expected_lst, out_lst)
def test_get_keys_delete(self):
    """Test get_keys after deletion of a key."""
    load_config('get_keys_delete', 'json:///tmp/file1.json')
    Conf.set('get_keys_delete', 'bridge>delete>key', 'del_val')
    pre_key_list = Conf.get_keys('get_keys_delete')
    Conf.delete('get_keys_delete', 'bridge>delete>key')
    post_key_list = Conf.get_keys('get_keys_delete')
    # assertNotEqual gives a clearer failure message than assertTrue(a != b).
    self.assertNotEqual(pre_key_list, post_key_list)
def test_conf_load_fail_and_skip_non_hap(self):
    """Test conf load fail_reload True and skip_reload as True argument.

    When both flags are passed, skip_reload must win and the reload is
    skipped, leaving the key list unchanged.
    """
    Conf.load('non_happy_index', 'json:///tmp/file1.json')
    expected_lst = Conf.get_keys('non_happy_index')
    Conf.load('non_happy_index', 'toml:///tmp/document.toml',
        fail_reload=True, skip_reload=True)
    out_lst = Conf.get_keys('non_happy_index')
    # assertEqual replaces the redundant `True if ... else False` pattern.
    self.assertEqual(expected_lst, out_lst)
def test_conf_store_a_backup_and_save_a_copy(self):
    """Test by creating a backup file and copying then saving it back."""
    conf_file = 'json:/tmp/file1.json'
    load_config('csm_local', conf_file)
    # Load a sibling ".bak" store, mirror the source into it and persist.
    Conf.load('backup', f"{conf_file}.bak")
    Conf.copy('csm_local', 'backup')
    Conf.save('backup')
    result_data = Conf.get_keys('backup', key_index=True)
    # The backup must expose exactly the same keys as the source store.
    expected_list = Conf.get_keys('csm_local')
    self.assertListEqual(expected_list, result_data)
def test_conf_store_merge_02no_key(self):
    """A key added only in the source must appear in the merged destination."""
    Conf.set('src_index', 'new_key', 'new_value')
    Conf.save('src_index')
    Conf.merge('dest_index', 'src_index')
    Conf.save('dest_index')
    merged_value = Conf.get('dest_index', 'new_key')
    self.assertEqual(merged_value, 'new_value')
    self.assertIn('new_key', Conf.get_keys('dest_index'))
def get_keys(args) -> list:
    """Return the list of keys present in the store.

    Args:
        args: parsed CLI arguments. ``args.key_index`` is an optional
            string, 'true' or 'false' (case-insensitive, surrounding
            whitespace ignored); a missing value defaults to 'true'.

    Raises:
        ConfError: if ``args.key_index`` is neither 'true' nor 'false'.
    """
    raw_value = 'true' if args.key_index is None else args.key_index.lower().strip()
    if raw_value == 'true':
        key_index = True
    elif raw_value == 'false':
        key_index = False
    else:
        # Bug fix: report the user-supplied (normalized) value. The old code
        # reassigned key_index to None first, so the message always said
        # "invalid key_index value None".
        raise ConfError(errno.EINVAL, "invalid key_index value %s", raw_value)
    return Conf.get_keys(ConfCli._index, key_index=key_index)
def upgrade(config_path: str, change_set_path: str):
    """Perform upgrade steps.

    Applies a change-set (delta) onto the global configuration (gconf):
    if the message-bus backend itself changed, switch the backend key and
    add/delete the backend-specific entries; otherwise update only the
    changed parameters of the current backend.

    Args:
        config_path: URL of the global configuration store.
        change_set_path: URL of the change-set produced by the diff step.

    Returns:
        0 on success.
    """
    Conf.load(DELTA_INDEX, change_set_path)
    # skip_reload avoids re-loading gconf if this index is already in memory.
    Conf.load(GCONF_INDEX, config_path, skip_reload=True)
    delta_keys = Conf.get_keys(DELTA_INDEX)
    # if message_bus_backend changed, add new and delete old msg bus entries
    if CHANGED_PREFIX + MSG_BUS_BACKEND_KEY in delta_keys:
        # Delta values appear to be '<old>|<new>'; [1] picks the new value
        # — TODO confirm the change-set value format.
        new_msg_bus_backend = Conf.get(
            DELTA_INDEX, CHANGED_PREFIX + MSG_BUS_BACKEND_KEY).split('|')[1]
        Conf.set(GCONF_INDEX, MSG_BUS_BACKEND_KEY, new_msg_bus_backend)
        for key in delta_keys:
            # Copy keys newly added for the new backend into gconf.
            if NEW_PREFIX + EXTERNAL_KEY + new_msg_bus_backend in key:
                new_key = key.split(NEW_PREFIX)[1]
                new_val = Conf.get(DELTA_INDEX, key).split('|')[1]
                Conf.set(GCONF_INDEX, new_key, new_val)
            # Remove keys flagged as deleted for the new backend.
            if DELETED_PREFIX + EXTERNAL_KEY + new_msg_bus_backend in key:
                delete_key = key.split(DELETED_PREFIX)[1]
                Conf.delete(GCONF_INDEX, delete_key)
    # update existing messagebus parameters
    else:
        msg_bus_backend = Conf.get(GCONF_INDEX, MSG_BUS_BACKEND_KEY)
        for key in delta_keys:
            if CHANGED_PREFIX + EXTERNAL_KEY + msg_bus_backend in key:
                new_val = Conf.get(DELTA_INDEX, key).split('|')[1]
                change_key = key.split(CHANGED_PREFIX)[1]
                Conf.set(GCONF_INDEX, change_key, new_val)
    Conf.save(GCONF_INDEX)
    Utils.init(config_path)
    return 0
def save(self, cortx_conf, cortx_solution_config):
    """Save cortx-config into confstore"""
    try:
        # Copy only the 'cortx'-rooted keys from the solution config.
        cortx_solution_config_keys = filter(
            lambda x: x.startswith('cortx'),
            Conf.get_keys(cortx_solution_config))
        cortx_conf.copy(cortx_solution_config, cortx_solution_config_keys)
        # Change environment_type to setup_type
        # TODO: remove this code once setup_type key is deleted.
        cortx_conf.set('cortx>common>setup_type',
            cortx_conf.get('cortx>common>environment_type'))
        cortx_conf.delete('cortx>common>environment_type')
        # Check for release key.
        release_spec = self._cortx_solution_config.get('common').get(
            'release')
        is_valid, release_info = self._cortx_release.validate(release_spec)
        if is_valid is False:
            # validate() returned corrections: write each missing/incorrect
            # release key back into the confstore with the validated value.
            for key in release_info.keys():
                release_key_path = f'cortx>common>release>{key}'
                Log.warn(
                    f'Release key {release_key_path} is missing or has '
                    'incorrect value.')
                Log.info(f'Adding key "{release_key_path}" '
                    f'and value "{release_info[key]}" in confstore.')
                cortx_conf.set(release_key_path, release_info[key])
    except KeyError as e:
        # Missing 'common'/'release' sections surface here as KeyError.
        raise CortxProvisionerError(
            errno.EINVAL,
            f'Error occurred while adding CORTX config information into confstore {e}')
def merge(args):
    """Merge the source conf file into the destination conf file."""
    src_index = 'src_index'
    dest_index = ConfCli._index
    ConfCli.load(args.src_url, src_index)
    src_keys = Conf.get_keys(src_index)
    if args.keys:
        keys = args.keys[0].split(';')
        # Every explicitly requested key must exist in the source store.
        for key in keys:
            if key not in src_keys:
                raise ConfError(errno.ENOENT, "%s is not present in %s", \
                    key, args.src_url)
    else:
        # No keys provided: merge every key found in the source file.
        keys = src_keys
    Conf.merge(dest_index, src_index, keys)
    Conf.save(dest_index)
def test_conf_key_index_b_False(self):
    """Test confStore get_key api with key_index argument as False."""
    # With key_index=False, list entries collapse to their parent key
    # (no [0]/[1] suffixes on 'bridge>lte_type').
    expected_list = [
        'bridge>name', 'bridge>username', 'bridge>manufacturer',
        'bridge>model', 'bridge>pin', 'bridge>port', 'bridge>lte_type'
    ]
    key_lst = Conf.get_keys("getKeys_local", key_index=False)
    self.assertListEqual(key_lst, expected_list)
def test_json_message_kv_store(self):
    """Tests jsonmessage basic operation."""
    index = 'json_message_kv_store_index'
    # Load an inline JSON document as the backing store.
    Conf.load(index, 'jsonmessage:{"key1":"val1"}')
    self.assertEqual(Conf.get(index, 'key1'), 'val1')
    # set / get_keys round-trip.
    Conf.set(index, 'key2', 'val2')
    self.assertEqual(Conf.get_keys(index), ['key1', 'key2'])
    # Nested set followed by delete leaves the leaf unset.
    Conf.set(index, 'key2>key3>key4', 'val4')
    Conf.delete(index, 'key2>key3>key4')
    deleted = Conf.get(index, 'key2>key3>key4')
    self.assertEqual(deleted, None)
def test_get_keys_starts_with(self):
    """set and get keys which starts with a string. """
    key_list = ['swtest_k1', 'swtest_k2', 'swtest_k3', 'swtest_k4']
    for index in TestConfStore.indexes:
        for key in key_list:
            Conf.set(index, key, '#!random_value')
        get_key_list = Conf.get_keys(index, starts_with='swtest')
        # Generator form replaces the redundant
        # `all([True if ... else False ...])` list-comprehension pattern.
        self.assertTrue(all(key in get_key_list for key in key_list))
def test_get_keys(self):
    """ set keys and get keys """
    key_list = ['test_k1', 'test_k2', 'test_k3', 'test_k4']
    for index in TestConfStore.indexes:
        for key in key_list:
            Conf.set(index, key, '#!random_value')
        get_key_list = Conf.get_keys(index)
        # Generator form replaces the redundant
        # `all([True if ... else False ...])` list-comprehension pattern.
        self.assertTrue(all(key in get_key_list for key in key_list))
def _get_keys_for_phase(self, phase_name: str):
    """Extract keylist to be used as yardstick for validating keys.

    Collects the value of every provisioner key whose name begins with
    *phase_name*; keys with no value are skipped.

    Args:
        phase_name: prefix identifying the phase (e.g. an upper-case
            phase label used as the key root).

    Returns:
        List of non-None values for the matching keys.
    """
    prov_keys_list = Conf.get_keys(self.prov)
    phase_key_list = []
    for key in prov_keys_list:
        # str.startswith is the idiomatic form of `key.find(...) == 0`.
        if key.startswith(phase_name):
            value = Conf.get(self.prov, key)
            if value is not None:
                phase_key_list.append(value)
    return phase_key_list
def test_new_key_should_be_added_in_config(self):
    """Keys added only in the new config must survive into the merged config."""
    # Seed the existing config with one key, the new config with three.
    Conf.set("existing", "FOO>bar", "spam")
    Conf.save("existing")
    for key, value in (("FOO>bar", "spam"),
                       ("FOO>eggs", "ham"),
                       ("BAZ>bar", "spam")):
        Conf.set("new", key, value)
    Conf.save("new")
    conf_upgrade = ConfUpgrade(self.existing_config_url,
                               self.new_config_url,
                               self.merged_config_url)
    conf_upgrade.create_merged_config()
    Conf.load("merged", self.merged_config_url)
    # Each newly-added key must exist in the merged config with the new value.
    for added_key in ("FOO>eggs", "BAZ>bar"):
        self.assertIsNotNone(Conf.get("merged", added_key))
        self.assertEqual(Conf.get("new", added_key),
                         Conf.get("merged", added_key))
    # Every pre-existing key must be retained in the merged config.
    merged_keys = Conf.get_keys("merged", key_index=False)
    for key in Conf.get_keys("existing", key_index=False):
        self.assertIn(key, merged_keys)
def create_merged_config(self):
    """Create merged config file using existing and new configs.

    Set algebra over the two key sets decides where each merged value
    comes from: added keys from the new config, retained keys from the
    existing config, and renamed (changed) keys take the new key name
    with the old value.
    """
    existing_keys = set(Conf.get_keys(EXISTING_CONF, key_index=False))
    new_keys = set(Conf.get_keys(NEW_CONF, key_index=False))
    # Mapping of old key name -> new key name (per .items() usage below).
    changed_keys = self.get_changed_keys()
    removed_keys = existing_keys - new_keys
    added_keys = new_keys - existing_keys
    retained_keys = existing_keys - removed_keys
    # For newly added keys, get key and value both from new config file
    for key in added_keys:
        Conf.set(MERGED_CONF, key, Conf.get(NEW_CONF, key))
    # For changed keys, get key from new config file and value from old
    # config file
    for old_key, new_key in changed_keys.items():
        Conf.set(MERGED_CONF, new_key, Conf.get(EXISTING_CONF, old_key))
    # For retained keys. get key and value both from existing config file
    for key in retained_keys:
        Conf.set(MERGED_CONF, key, Conf.get(EXISTING_CONF, key))
    # OBSOLETE and CHANGED should always come from new config
    Conf.set(MERGED_CONF, CHANGED, Conf.get(NEW_CONF, CHANGED))
    Conf.set(MERGED_CONF, OBSOLETE, Conf.get(NEW_CONF, OBSOLETE))
    Conf.save(MERGED_CONF)
def test_conf_key_index_a_True(self):
    """Test confStore get_key api with key_index argument as True.

    Default key_index will be True.
    """
    load_config('getKeys_local', 'json:///tmp/file1.json')
    # With key_index=True, list entries are expanded with [n] suffixes.
    expected_list = [
        'bridge>name', 'bridge>username', 'bridge>manufacturer',
        'bridge>model', 'bridge>pin', 'bridge>port',
        'bridge>lte_type[0]', 'bridge>lte_type[1]'
    ]
    key_lst = Conf.get_keys("getKeys_local", key_index=True)
    self.assertListEqual(key_lst, expected_list)
def test_conf_store_backup_and_save_a_copy(self):
    """Test by creating a backup file and copying then saving it back."""
    conf_file = 'json:/tmp/file1.json'
    load_config('csm_local', conf_file)
    # Mirror the source store into a sibling ".bak" store and persist it.
    Conf.load('backup', f"{conf_file}.bak")
    Conf.copy('csm_local', 'backup')
    Conf.save('backup')
    # Expected list should match the result_data list output
    expected_list = [
        'bridge>name', 'bridge>username', 'bridge>manufacturer',
        'bridge>model', 'bridge>pin', 'bridge>port',
        'bridge>lte_type[0]', 'bridge>lte_type[1]'
    ]
    result_data = Conf.get_keys('backup')
    self.assertListEqual(expected_list, result_data)
def cluster_bootstrap(cortx_conf_url: str, force_override: bool = False):
    """
    Description:
    Configures Cluster Components
    1. Compares current installed version with New version
    2. Invoke Mini Provisioners of cluster components deploy/upgrade based on version compatibility
    Parameters:
    [IN] CORTX Config URL
    """
    Conf.load(CortxProvisioner._conf_index, cortx_conf_url)
    Conf.load(CortxProvisioner._tmp_index, CortxProvisioner._tmp_cortx_conf_url)
    tmp_conf_keys = Conf.get_keys(CortxProvisioner._tmp_index)
    node_id = Conf.machine_id
    # Version recorded for this node during a previous provisioning run;
    # None means the node was never deployed.
    installed_version = Conf.get(CortxProvisioner._conf_index,
        f'node>{node_id}>provisioning>version')
    release_version = CortxProvisioner.cortx_release.get_release_version()
    if installed_version is None:
        # Fresh node: seed gconf from the tmp config and deploy.
        Conf.copy(CortxProvisioner._tmp_index, CortxProvisioner._conf_index,
            tmp_conf_keys)
        Conf.save(CortxProvisioner._conf_index)
        CortxProvisioner._apply_consul_config(CortxProvisioner._conf_index)
        CortxProvisioner.cluster_deploy(cortx_conf_url, force_override)
    else:
        # TODO: add a case where release_version > installed_version but is not compatible.
        # ret_code: 1 = upgrade needed, 0 = same version, -1 = downgrade
        # (per the branches below) — TODO confirm against version_check().
        ret_code = CortxProvisioner.cortx_release.version_check(
            release_version, installed_version)
        if ret_code == 1:
            CortxProvisioner._prepare_diff(CortxProvisioner._conf_index,
                CortxProvisioner._tmp_index, CortxProvisioner._changeset_index)
            CortxProvisioner.cluster_upgrade(cortx_conf_url, force_override)
            # TODO: update_conf needs to be removed once gconf moves to consul.
            # Gconf update after upgrade should not be handled here if gconf is in consul.
            CortxProvisioner._update_conf(CortxProvisioner._conf_index,
                CortxProvisioner._tmp_index)
        # TODO: This will be removed once downgrade is also supported.
        elif ret_code == -1:
            raise CortxProvisionerError(errno.EINVAL,
                'Downgrade is Not Supported')
        elif ret_code == 0:
            # Same version: re-seed gconf and run deploy (idempotent path).
            Conf.copy(CortxProvisioner._tmp_index, CortxProvisioner._conf_index,
                tmp_conf_keys)
            Conf.save(CortxProvisioner._conf_index)
            CortxProvisioner._apply_consul_config(CortxProvisioner._conf_index)
            CortxProvisioner.cluster_deploy(cortx_conf_url, force_override)
        else:
            raise CortxProvisionerError(errno.EINVAL,
                'Internal error. Could not determine version. Invalid image.')
def _apply_consul_config(_conf_idx: str):
    """Copy gconf from the given index into the consul-backed conf store.

    Scans 'cortx>external>consul>endpoints', validates each one, and uses
    the first http(s) endpoint (or the last endpoint scanned) to build the
    consul gconf URL, which is also persisted to const.CONSUL_CONF_URL.
    """
    try:
        num_endpoints = int(Conf.get(_conf_idx,
            'cortx>external>consul>num_endpoints'))
        if num_endpoints == 0:
            raise CortxProvisionerError(errno.EINVAL,
                f"Invalid value for num_endpoints '{num_endpoints}'")
        for idx in range(0, num_endpoints):
            consul_endpoint = Conf.get(_conf_idx,
                f'cortx>external>consul>endpoints[{idx}]')
            if not consul_endpoint:
                raise CortxProvisionerError(errno.EINVAL,
                    "Consul Endpoint can't be empty.")
            if urlparse(consul_endpoint).scheme not in ['http', 'https', 'tcp']:
                raise CortxProvisionerError(errno.EINVAL,
                    f"Invalid Consul Endpoint {consul_endpoint}")
            # Stop at the first http/https endpoint ('http' is a substring
            # of 'https' too).
            if 'http' in consul_endpoint:
                break
    except ConfError as e:
        raise CortxProvisionerError(errno.EINVAL,
            f"Unable to get consul endpoint detail , Error:{e}")
    # NOTE(review): for an 'https://' endpoint this replace yields scheme
    # 'consuls' ('https' -> 'consuls') — confirm that is intended.
    gconf_consul_url = consul_endpoint.replace('http','consul') + '/conf'
    Conf.load(CortxProvisioner._cortx_gconf_consul_index, gconf_consul_url)
    Conf.copy(_conf_idx, CortxProvisioner._cortx_gconf_consul_index,
        Conf.get_keys(_conf_idx))
    Conf.save(CortxProvisioner._cortx_gconf_consul_index)
    # TODO: place the below code at a proper location when this function is removed.
    with open(const.CONSUL_CONF_URL, 'w') as f:
        f.write(gconf_consul_url)
def test_conf_store_get_keys(self):
    """Test listing all available keys for given index"""
    load_config('get_keys_local', 'json:///tmp/file1.json')
    result_data = Conf.get_keys('get_keys_local')
    # assertIn replaces the redundant `True if ... else False` pattern and
    # shows the container contents on failure.
    self.assertIn('bridge>name', result_data)
def test_conf_store_load_and_get(self):
    """Test by loading the give config file to in-memory"""
    load_config('sspl_local', 'json:///tmp/file1.json')
    result_data = Conf.get_keys('sspl_local')
    # assertIn produces a clearer failure message than assertTrue(x in y).
    self.assertIn('bridge>name', result_data)
def get_all_keys(self):
    """Return every key present in the confstore."""
    all_keys = Conf.get_keys(self.default_index)
    return all_keys
def test_conf_store_by_load_and_get(self):
    """ Test by loading the give properties config file to in-memory """
    load_config('pro_local', 'properties:///tmp/example.properties')
    result_data = Conf.get_keys('pro_local')
    # assertIn produces a clearer failure message than assertTrue(x in y).
    self.assertIn('bridge', result_data)
def test_006_conf_dictkvstore_delete_single_key(self):
    """Test conf store delete kv."""
    Conf.delete('dict', 'k1')
    deleted_value = Conf.get('dict', 'k1')
    # The key must be gone from both the key list and lookups.
    self.assertNotIn('k1', Conf.get_keys('dict'))
    self.assertIsNone(deleted_value)
def extract_yardstick_list(self, phase_name: str): """Extract keylist to be used as yardstick for validating keys of each phase.""" # The openldap prov config file has below pairs : # "Key Constant" : "Actual Key" # Example of "Key Constant" : # CONFSTORE_SITE_COUNT_KEY # PREPARE # CONFIG>CONFSTORE_LDAPADMIN_USER_KEY # INIT # Example of "Actual Key" : # cluster>cluster-id>site>storage_set_count # cortx>software>openldap>sgiam>user # # When we call get_all_keys on openldap prov config # file, it returns all the "Key Constant", # which will contain PHASE(name) as the root # attribute (except for unsolicited keys). # To get "Actual Key" from each "Key Constant", # we need to call get_confkey on every such key. # # Note that for each of these "Key Constant", # there may not exist an "Actual Key" because # some phases do not have any "Actual Key". # Example of such cases - # POST_INSTALL # PREPARE # For such examples, we skip and continue with # remaining keys. prov_keys_list = Conf.get_keys('openldap_keys_index') # We have all "Key Constant" in prov_keys_list, # now extract "Actual Key" if it exists and # depending on phase and hierarchy, decide # whether it should be added to the yardstick # list for the phase passed here. yardstick_list = [] prev_phase = True next_phase = False for key in prov_keys_list: # If PHASE is not relevant, skip the key. # Or set flag as appropriate. For test, # reset and cleanup, do not inherit keys # from previous phases. if next_phase: break if key.find(phase_name) == 0: prev_phase = False else: if (phase_name == "TEST" or phase_name == "RESET" or phase_name == "CLEANUP"): continue if not prev_phase: next_phase = True break value = self.get_confkey(key) # If value does not exist which can be the # case for certain phases as mentioned above, # skip the value. if value is None: continue yardstick_list.append(value) return yardstick_list
def phase_keys_validate(self, arg_file: str, phase_name: str):
    """Validate that *arg_file* supplies every key required by *phase_name*.

    Compares tokenized yardstick keys against tokenized argument-file keys,
    treating 'machine-id', 'cluster-id' and 'storage-set-count' tokens as
    placeholders for this node's concrete values.

    Raises:
        Exception: if any yardstick key has no matching argument-file key.
    """
    # Setting the desired values before we begin
    if self.machine_id is not None:
        machine_id_val = self.machine_id
    if self.cluster_id is not None:
        cluster_id_val = self.cluster_id
    # The 'storage_set_count' is read using
    # below hard-coded key which is the max
    # array size for storage set.
    storage_set_count_key = "cluster>cluster-id>site>storage_set_count"
    if self.cluster_id is not None:
        storage_set_count_key = storage_set_count_key.replace(
            "cluster-id", cluster_id_val)
    storage_set_count_str = self.get_confvalue(storage_set_count_key)
    if storage_set_count_str is not None:
        storage_set_val = int(storage_set_count_str)
    else:
        storage_set_val = 0
    # Set phase name to upper case required for inheritance
    phase_name = phase_name.upper()
    try:
        # Extract keys from yardstick file for current phase considering inheritance
        yardstick_list = self.extract_yardstick_list(phase_name)
        # Set argument file confstore
        Conf.load('argument_file_index', arg_file)
        # Extract keys from argument file
        # NOTE(review): Conf.get_keys() is called without an index here;
        # presumably it should be Conf.get_keys('argument_file_index') —
        # confirm against the Conf API before changing.
        arg_keys_list = Conf.get_keys()
        # Since get_all_keys misses out listing entries inside
        # an array, the below code is required to fetch such
        # array entries. The result will be stored in a full
        # list which will be complete and will be used to verify
        # keys required for each phase.
        full_arg_keys_list = []
        for key in arg_keys_list:
            if ((key.find('[') != -1) and (key.find(']') != -1)):
                storage_set = self.get_confvalue(key)
                base_key = key
                for set_key in storage_set:
                    key = base_key + ">" + set_key
                    full_arg_keys_list.append(key)
            else:
                full_arg_keys_list.append(key)
        # Below algorithm uses tokenization
        # of both yardstick and argument key
        # based on delimiter to generate
        # smaller key-tokens. Then check if
        # (A) all the key-tokens are pairs of
        #     pre-defined token. e.g.,
        #     if key_yard is machine-id, then
        #     key_arg must have corresponding
        #     value of machine_id_val.
        # OR
        # (B) both the key-tokens from key_arg
        #     and key_yard are the same.
        list_match_found = True
        key_match_found = False
        for key_yard in yardstick_list:
            key_yard_token_list = re.split('>|\[|\]', key_yard)
            key_match_found = False
            for key_arg in full_arg_keys_list:
                if key_match_found is False:
                    key_arg_token_list = re.split('>|\[|\]', key_arg)
                    if len(key_yard_token_list) == len(key_arg_token_list):
                        for key_x, key_y in zip(key_yard_token_list,
                                key_arg_token_list):
                            key_match_found = False
                            if key_x == "machine-id":
                                if key_y != machine_id_val:
                                    break
                            elif key_x == "cluster-id":
                                if key_y != cluster_id_val:
                                    break
                            elif key_x == "storage-set-count":
                                # Argument index must stay below the
                                # configured storage-set count.
                                if int(key_y) >= storage_set_val:
                                    break
                            elif key_x != key_y:
                                break
                            key_match_found = True
            if key_match_found is False:
                list_match_found = False
                break
        if list_match_found is False:
            raise Exception(f'No match found for {key_yard}')
        sys.stdout.write("Validation complete\n")
    except Exception as e:
        raise Exception(f'ERROR : Validating keys failed, exception {e}\n')
def config_apply(solution_config_url: str, cortx_conf_url: str = None,
        force_override: bool = False):
    """
    Description:
    Parses input config and store in CORTX config location
    Parameters:
    [IN]  Solution Config URL
    [OUT] CORTX Config URL
    """
    if Log.logger is None:
        CortxProvisionerLog.initialize(const.SERVICE_NAME, const.TMP_LOG_PATH)
    if cortx_conf_url is None:
        cortx_conf_url = CortxProvisioner._cortx_conf_url
    cortx_conf = MappedConf(CortxProvisioner._tmp_cortx_conf_url)
    # Load same config again if force_override is True
    try:
        cs_option = {"fail_reload": False} if force_override else {"skip_reload": True}
        Log.info('Applying config %s' % solution_config_url)
        Conf.load(CortxProvisioner._solution_index, solution_config_url,
            **cs_option)
    except ConfError as e:
        # Best-effort: a failed (re)load is logged, not fatal.
        Log.error(f'Unable to load {solution_config_url} url, Error:{e}')
    # Secrets path from config file
    if cortx_conf.get('cortx>common>storage>local'):
        CortxProvisioner._secrets_path = cortx_conf.get(
            'cortx>common>storage>local') + CortxProvisioner._rel_secret_path
    # source code for encrypting and storing secret key
    if Conf.get(CortxProvisioner._solution_index, 'cluster') is not None:
        CortxProvisioner.apply_cluster_config(cortx_conf,
            CortxProvisioner.cortx_release)
    if Conf.get(CortxProvisioner._solution_index, 'cortx') is not None:
        # generating cipher key
        cipher_key = None
        cluster_id = Conf.get(CortxProvisioner._solution_index, 'cluster>id')
        if cluster_id is None:
            # Fall back to the already-applied conf store for the cluster id.
            cluster_id = cortx_conf.get('cluster>id')
            if cluster_id is None:
                raise CortxProvisionerError(errno.EINVAL,
                    'Cluster ID not specified')
        cipher_key = Cipher.gen_key(cluster_id, 'cortx')
        if cipher_key is None:
            raise CortxProvisionerError(errno.EINVAL,
                'Cipher key not specified')
        for key in Conf.get_keys(CortxProvisioner._solution_index):
            # using path /etc/cortx/solution/secret to confirm secret
            if key.endswith('secret'):
                # The conf value is a file name under the secrets path;
                # read, encrypt, and store the ciphertext back in place.
                secret_val = Conf.get(CortxProvisioner._solution_index, key)
                val = None
                with open(os.path.join(CortxProvisioner._secrets_path,
                        secret_val), 'rb') as secret:
                    val = secret.read()
                if val is None:
                    raise CortxProvisionerError(errno.EINVAL,
                        f'Could not find the Secret in {CortxProvisioner._secrets_path}')
                val = Cipher.encrypt(cipher_key, val)
                # decoding the byte string in val variable
                Conf.set(CortxProvisioner._solution_index, key,
                    val.decode('utf-8'))
        CortxProvisioner.apply_cortx_config(cortx_conf,
            CortxProvisioner.cortx_release)
        # Adding array count key in conf
        cortx_conf.add_num_keys()
        Conf.save(cortx_conf._conf_idx)
def get_all_keys(self):
    """Get all the key value pairs from confstore."""
    # TODO recurse flag will be deprecated in future.
    # Do changes in all places wherever its applicable
    # refer validate_config_files() and phase_keys_validate() in setupcmd.py
    all_keys = Conf.get_keys(self.default_index, recurse=True)
    return all_keys
def test_007_conf_dictkvstore_delete_nested_key(self):
    """Test conf store delete nested kv."""
    Conf.delete('dict', 'k2>k4>k6>k7')
    deleted_value = Conf.get('dict', 'k2>k4>k6>k7')
    # The nested key must be gone from both the key list and lookups.
    self.assertNotIn('k2>k4>k6>k7', Conf.get_keys('dict'))
    self.assertIsNone(deleted_value)