def __init__(self, config: str):
    """Constructor.

    :param config: confstore URL of the provisioner config.  ``None``
        logs a warning and skips provisioner-confstore setup; an
        empty/blank string raises.
    :raises Exception: when ``config`` is a blank url path.
    """
    self.endpoint = None
    self._url = None
    self._provisioner_confstore = None
    self._s3_confkeys_store = None
    self.machine_id = None
    self.cluster_id = None
    # NOTE(review): masked placeholder — presumably replaced later by a
    # credentials-reading step; confirm.
    self.ldap_user = "******"
    # host-tagged logger name so multi-node logs are distinguishable
    s3deployment_logger_name = (
        "s3-deployment-logger-" + "[" + str(socket.gethostname()) + "]")
    self.logger = logging.getLogger(s3deployment_logger_name)
    # s3 keys store is loaded unconditionally, before the config checks
    self._s3_confkeys_store = S3CortxConfStore(
        f'yaml://{self.s3_prov_config}', 'setup_s3keys_index')
    if config is None:
        # fix: message had an f-prefix with no placeholders
        self.logger.warning('Empty Config url')
        return
    if not config.strip():
        self.logger.error(f'Config url:[{config}] must be a valid url path')
        raise Exception('Empty config URL path')
    self._url = config
    self._provisioner_confstore = S3CortxConfStore(self._url,
                                                   'setup_prov_index')
    # Get machine-id of current node from confstore
    self.machine_id = self._provisioner_confstore.get_machine_id()
    self.logger.info(f'Machine id : {self.machine_id}')
    self.cluster_id = self.get_confvalue_with_defaults(
        'CONFIG>CONFSTORE_CLUSTER_ID_KEY')
def __init__(self, config: str):
    """Constructor.

    :param config: confstore URL.  ``None`` logs a warning and returns
        with all attributes left as ``None``; a blank string raises.
    :raises Exception: when ``config`` is a blank url path.
    """
    self.endpoint = None
    self._url = None
    self._provisioner_confstore = None
    self._s3_confkeys_store = None
    self.machine_id = None
    self.cluster_id = None
    # host-tagged logger name so multi-node logs are distinguishable
    s3deployment_logger_name = "s3-deployment-logger-" + "[" + str(
        socket.gethostname()) + "]"
    self.logger = logging.getLogger(s3deployment_logger_name)
    if config is None:
        self.logger.warning(f'Empty Config url')
        return
    if not config.strip():
        self.logger.error(
            f'Config url:[{config}] must be a valid url path')
        raise Exception('Empty config URL path')
    self._url = config
    self._provisioner_confstore = S3CortxConfStore(self._url,
                                                   'setup_prov_index')
    self._s3_confkeys_store = S3CortxConfStore(
        f'yaml://{self.s3_prov_config}', 'setup_s3keys_index')
    # machine_id will be used to read confstore keys
    with open('/etc/machine-id') as f:
        self.machine_id = f.read().strip()
    # the cluster-id confkey is a template containing the literal token
    # "machine-id"; substitute the current node's machine id before lookup
    self.cluster_id = self.get_confvalue(
        self.get_confkey('CONFIG>CONFSTORE_CLUSTER_ID_KEY').replace(
            "machine-id", self.machine_id))
def validate_config_files(self, phase_name: str):
    """Validate the sample file and config file keys.

    Both files should have the same keys; a key mismatch indicates a
    broken config file.

    :param phase_name: setup phase being validated (used for logging only).
    :raises Exception: when a config file's keys differ from its sample's.
    """
    self.logger.info(f'validating S3 config files for {phase_name}.')
    upgrade_items = {
        's3': {
            'configFile': "/opt/seagate/cortx/s3/conf/s3config.yaml",
            'SampleFile': "/opt/seagate/cortx/s3/conf/s3config.yaml.sample",
            'fileType': 'yaml://'
        },
        'auth': {
            'configFile': "/opt/seagate/cortx/auth/resources/authserver.properties",
            'SampleFile': "/opt/seagate/cortx/auth/resources/authserver.properties.sample",
            'fileType': 'properties://'
        },
        'keystore': {
            'configFile': "/opt/seagate/cortx/auth/resources/keystore.properties",
            'SampleFile': "/opt/seagate/cortx/auth/resources/keystore.properties.sample",
            'fileType': 'properties://'
        },
        'bgdelete': {
            'configFile': "/opt/seagate/cortx/s3/s3backgrounddelete/config.yaml",
            'SampleFile': "/opt/seagate/cortx/s3/s3backgrounddelete/config.yaml.sample",
            'fileType': 'yaml://'
        },
        'cluster': {
            'configFile': "/opt/seagate/cortx/s3/s3backgrounddelete/s3_cluster.yaml",
            'SampleFile': "/opt/seagate/cortx/s3/s3backgrounddelete/s3_cluster.yaml.sample",
            'fileType': 'yaml://'
        }
    }
    for upgrade_item in upgrade_items:
        configFile = upgrade_items[upgrade_item]['configFile']
        SampleFile = upgrade_items[upgrade_item]['SampleFile']
        filetype = upgrade_items[upgrade_item]['fileType']
        self.logger.info(f'validating config file {str(configFile)}.')
        # new sample file
        conf_sample = filetype + SampleFile
        cs_conf_sample = S3CortxConfStore(config=conf_sample,
                                          index=conf_sample + "validator")
        conf_sample_keys = cs_conf_sample.get_all_keys()
        # active config file
        conf_file = filetype + configFile
        cs_conf_file = S3CortxConfStore(config=conf_file,
                                        index=conf_file + "validator")
        conf_file_keys = cs_conf_file.get_all_keys()
        # BUG FIX: the original compared `conf_sample_keys.sort() ==
        # conf_file_keys.sort()`.  list.sort() sorts in place and returns
        # None, so that comparison was always `None == None` -> True and
        # the validation could never fail.  Compare sorted copies instead.
        if sorted(conf_sample_keys) == sorted(conf_file_keys):
            self.logger.info(
                f'config file {str(configFile)} validated successfully.')
        else:
            self.logger.error(
                f'config file {str(conf_file)} and sample file'
                f' {str(conf_sample)} keys does not matched.')
            self.logger.error(f'sample file keys: {str(conf_sample_keys)}')
            self.logger.error(f'config file keys: {str(conf_file_keys)}')
            raise Exception(
                f'ERROR: Failed to validate config file {str(configFile)}.')
def test_validate_configfile_doesnotexist(self):
    """Building a confstore over a missing json file must exit with code 1."""
    with self.assertRaises(SystemExit) as ctx:
        S3CortxConfStore(
            "json:///s3confstoreut-config-file-does-not-exist.json",
            "dummy_index_7")
    self.assertEqual(ctx.exception.code, 1)
def test_validate_configfile_unsupportedformat(self, mock_path):
    """An unsupported config file extension must exit with code 1."""
    # pretend the file exists so only the format check can fail
    mock_path.isfile.return_value = True
    with self.assertRaises(SystemExit) as ctx:
        S3CortxConfStore("/s3confstoreut-unsupportedfileformat.txt",
                         "dummy_index_8")
    self.assertEqual(ctx.exception.code, 1)
def update_rootdn_credentials(self):
    """Set rootdn username and password to opfile.

    Reads rootdn user and (encrypted) password from the provisioner
    config, verifies the decryption works, and writes both into
    s3_cluster.yaml.

    :raises S3PROVError: when no encrypted rootdn password is configured.
    """
    try:
        s3cipher_obj = CortxS3Cipher(
            None, False, 0, self.get_confkey('CONFSTORE_OPENLDAP_CONST_KEY'))
        cipher_key = s3cipher_obj.generate_key()
        self.ldap_root_user = self.get_confvalue_with_defaults(
            'CONFIG>CONFSTORE_ROOTDN_USER_KEY')
        encrypted_rootdn_pass = self.get_confvalue_with_defaults(
            'CONFIG>CONFSTORE_ROOTDN_PASSWD_KEY')
        # Fail fast before decrypting.  (The original checked `is not None`
        # to decrypt and then separately raised on None afterwards; same
        # outcome, but this order avoids the redundant second check.)
        if encrypted_rootdn_pass is None:
            raise S3PROVError('password cannot be None.')
        self.rootdn_passwd = s3cipher_obj.decrypt(cipher_key,
                                                  encrypted_rootdn_pass)
        op_file = "/opt/seagate/cortx/s3/s3backgrounddelete/s3_cluster.yaml"
        opfileconfstore = S3CortxConfStore(f'yaml://{op_file}',
                                           'write_rootdn_idx')
        key = 'cluster_config>rootdn_user'
        opfileconfstore.set_config(f'{key}', f'{self.ldap_root_user}', True)
        # the password is stored still-encrypted, not the decrypted value
        key = 'cluster_config>rootdn_pass'
        opfileconfstore.set_config(f'{key}', f'{encrypted_rootdn_pass}', True)
    except Exception as e:
        self.logger.error(f'update rootdn credentials failed, error: {e}')
        raise e
def _load_and_fetch_config(self):
    """Populate configuration data.

    Ensures the background-delete config.yaml exists (copying the
    packaged default when missing), loads it via yaml into self._config,
    and also loads it into a confstore at self.s3confstore.
    """
    conf_home_dir = os.path.join('/', 'opt', 'seagate', 'cortx', 's3',
                                 's3backgrounddelete')
    self._conf_file = os.path.join(conf_home_dir, 'config.yaml')
    if not os.path.isfile(self._conf_file):
        try:
            os.stat(conf_home_dir)
        except BaseException:
            # directory missing -> create it before copying the default
            os.mkdir(conf_home_dir)
        shutil.copy(
            os.path.join(self.get_conf_dir(),
                         's3_background_delete_config.yaml'),
            self._conf_file)
    if not os.access(self._conf_file, os.R_OK):
        self.logger.error("Failed to read " + self._conf_file +
                          " it doesn't have read access")
        # BUG FIX: bare sys.exit() exits with status 0, reporting success
        # on an error path; exit non-zero instead.
        sys.exit(1)
    with open(self._conf_file, 'r') as file_config:
        self._config = yaml.safe_load(file_config)
    # Load config.yaml file through confstore.
    self._conf_file = 'yaml://' + self._conf_file
    self.s3confstore = S3CortxConfStore(config=self._conf_file,
                                        index=str(uuid.uuid1()))
def __init__(self, confstore: str):
    """Constructor.

    Sets up a host-tagged deployment logger (adding a stdout handler only
    when the logger has none), reads this node's machine-id, and opens the
    provisioner confstore at the given URL.

    :param confstore: confstore URL; a blank string raises.
    :raises Exception: when ``confstore`` is an empty url path.
    """
    s3deployment_logger_name = "s3-deployment-logger-" + "[" + str(
        socket.gethostname()) + "]"
    self.logger = logging.getLogger(s3deployment_logger_name)
    if self.logger.hasHandlers():
        self.logger.info("Logger has valid handler")
    else:
        self.logger.setLevel(logging.DEBUG)
        # create console handler with a higher log level
        chandler = logging.StreamHandler(sys.stdout)
        chandler.setLevel(logging.DEBUG)
        s3deployment_log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        formatter = logging.Formatter(s3deployment_log_format)
        # create formatter and add it to the handlers
        chandler.setFormatter(formatter)
        # add the handlers to the logger
        self.logger.addHandler(chandler)
    # Read machine-id of current node
    with open('/etc/machine-id', 'r') as mcid_file:
        self.machine_id = mcid_file.read().strip()
    if not confstore.strip():
        self.logger.error(
            f'config url:[{confstore}] must be a valid url path')
        raise Exception('empty config URL path')
    self.provisioner_confstore = S3CortxConfStore(confstore,
                                                  'haproxy_config_index')
def test_get_s3instancecount_success(self, mock_get_return):
    """get_privateip returns the second mocked lookup value.

    NOTE(review): the test name says "s3instancecount" but the body
    exercises get_privateip — presumably a copy-paste leftover; confirm
    and rename if so.
    """
    # two sequential mocked lookups: a machine-id map, then the value "5"
    mock_get_return.side_effect = [{
        "mockmachineid-A": "mockserver_1"
    }, "5"]
    s3confstore = S3CortxConfStore()
    self.assertEqual(s3confstore.get_privateip("mockmachineid-A"), "5")
    # exactly two confstore lookups expected
    self.assertEqual(mock_get_return.call_count, 2)
def test_get_nodenames_list_success(self, mock_get_return):
    """get_nodenames_list returns the mocked hostname wrapped in a list."""
    mock_get_return.side_effect = [
        {"mockmachineid-A": "mockserver_1"},
        "mock-host1",
    ]
    store = S3CortxConfStore()
    self.assertEqual(store.get_nodenames_list(), ["mock-host1"])
    self.assertEqual(mock_get_return.call_count, 2)
def test_json_conf(self):
    """Round-trip a real json:// confstore file: load, get, set, re-get."""
    index = "dummy_idx_1"
    test_config = {
        'cluster': {
            "cluster_id": 'abcd-efgh-ijkl-mnop',
            "cluster_hosts": 'myhost-1,myhost2,myhost-3',
        }
    }
    tmpdir = tempfile.mkdtemp()
    saved_umask = os.umask(0o077)
    path = os.path.join(tmpdir, 'cortx_s3_confstoreuttest.json')
    try:
        with open(path, 'w+') as file:
            json.dump(test_config, file, indent=2)
        s3confstore = S3CortxConfStore('json://' + path, index)
        # loaded config must contain the seeded cluster_id
        result_data = s3confstore.get_config('cluster')
        self.assertIn('cluster_id', result_data)
        # a set must be visible on the next get
        s3confstore.set_config('cluster>cluster_id', '1234', False)
        self.assertEqual(s3confstore.get_config('cluster>cluster_id'), '1234')
    finally:
        # FIX: the original duplicated this cleanup on all three exit
        # paths and leaked the temp dir on any unexpected exception;
        # try/finally runs it exactly once on every path.
        os.remove(path)
        os.umask(saved_umask)
        os.rmdir(tmpdir)
def test_mock_copy(self, mock_copy_return):
    """merge_config with a mocked copy: merged state must NOT all hold.

    With the underlying copy mocked out, the merge into store1 is a no-op,
    so store1 must not contain store2's keys/values.
    """
    index1 = "dummy_idx_copy_1"
    index2 = "dummy_idx_copy_2"
    filename1 = "/tmp/dummy1"
    filename2 = "/tmp/dummy2"
    self.make_dummy_conf(filename1)
    self.make_dummy_conf(filename2)
    s3confstore1 = S3CortxConfStore("yaml://" + filename1, index1)
    s3confstore2 = S3CortxConfStore("yaml://" + filename2, index2)
    mock_copy_return.return_value = None
    s3confstore1.set_config("dummykey1", "Test1", False)
    s3confstore2.set_config("dummykey2", "Test2", False)
    s3confstore2.set_config("dummykey1", "Test3", False)
    s3confstore1.merge_config("dummy3")
    result = s3confstore1.get_all_keys()
    resultdata = s3confstore1.get_config("dummykey1")
    # BUG FIX: the original asserted on `result_data`, an undefined name
    # (the variable above is `resultdata`), raising NameError at runtime.
    self.assertFalse(("dummykey2" in result) and ("dummykey1" in result)
                     and (resultdata == "Test3"))
def __init__(self, config: str):
    """Constructor.

    :param config: confstore URL; must be a non-blank url path.
    :raises Exception: when ``config`` is blank.
    """
    stripped = config.strip()
    if not stripped:
        sys.stderr.write(f'config url:[{config}] must be a valid url path\n')
        raise Exception('empty config URL path')
    self._url = config
    self._s3confstore = S3CortxConfStore(self._url)
def test_mock_load(self, mock_load_return, mock_get_return):
    """With load and get mocked to None, get_config returns None."""
    mock_load_return.return_value = None
    mock_get_return.return_value = None
    store = S3CortxConfStore()
    store.load_config("dummy_idx_3", "dummy")
    fetched = store.get_config('bridge')
    self.assertTrue(fetched == mock_get_return.return_value)
def read_cluster_id(self):
    """Get 'cluster>cluster_id' from confstore.

    Looks up the cluster-id key *name* in s3_prov_config, then reads that
    key's value from self.s3confstore into self.cluster_id.

    :raises S3PROVError: wrapping any underlying failure.
    """
    try:
        # s3_prov_config maps the logical name to the actual confstore key
        localconfstore = S3CortxConfStore(f'yaml://{self.s3_prov_config}', 'read_cluster_ididx')
        self.cluster_id = self.s3confstore.get_config(localconfstore.get_config('CONFSTORE_CLUSTER_ID_KEY'))
    except Exception as e:
        raise S3PROVError(f'exception: {e}\n')
def test_get_publicip_success(self, mock_get_return):
    """get_publicip returns the mocked address after two lookups."""
    mock_get_return.side_effect = [
        {"mockmachineid-A": "mockserver_1"},
        "1.2.3.4",
    ]
    store = S3CortxConfStore()
    self.assertEqual(store.get_publicip("mockmachineid-A"), "1.2.3.4")
    self.assertEqual(mock_get_return.call_count, 2)
def test_copy(self):
    """merge_config pulls index2's keys into store1, overwriting dummykey1."""
    src_a = "/tmp/dummy1"
    src_b = "/tmp/dummy2"
    self.make_dummy_conf(src_a)
    self.make_dummy_conf(src_b)
    store_a = S3CortxConfStore("yaml://" + src_a, "dummy_idx_copy_1")
    store_b = S3CortxConfStore("yaml://" + src_b, "dummy_idx_copy_2")
    store_a.set_config("dummykey1", "Test1", False)
    store_b.set_config("dummykey2", "Test2", False)
    store_b.set_config("dummykey1", "Test3", False)
    store_a.merge_config("dummy_idx_copy_2")
    all_keys = store_a.get_all_keys()
    merged_value = store_a.get_config("dummykey1")
    print("Result: " + str(all_keys))
    print("resultdata: " + str(merged_value))
    self.assertTrue(("dummykey2" in all_keys) and ("dummykey1" in all_keys)
                    and (merged_value == "Test3"))
def test_delete(self):
    """delete_key must remove the key from the store's key set."""
    conf_path = "/tmp/dummy1"
    self.make_dummy_conf(conf_path)
    store = S3CortxConfStore("yaml://" + conf_path, "dummy_idx_delete")
    store.set_config("dummykey1", "Test1", False)
    store.set_config("dummykey2", "Test2", False)
    store.delete_key("dummykey1", False)
    self.assertNotIn("dummykey1", store.get_all_keys())
def test_get_nodenames_list_exception(self, mock_get_return):
    """A None lookup result makes get_nodenames_list exit with code 1."""
    mock_get_return.side_effect = [
        {"mockmachineid-A": "mockserver_1"},
        None,
    ]
    store = S3CortxConfStore()
    with self.assertRaises(SystemExit) as ctx:
        store.get_nodenames_list()
    self.assertEqual(ctx.exception.code, 1)
    self.assertEqual(mock_get_return.call_count, 2)
def test_mock_save(self, mock_load_return, mock_get_return,
                   mock_set_return, mock_save_return):
    """With load/get/set/save mocked to None, get_config returns None."""
    mock_load_return.return_value = None
    mock_get_return.return_value = None
    mock_set_return.return_value = None
    mock_save_return.return_value = None
    store = S3CortxConfStore()
    store.load_config("dummy_idx_6", "dummy")
    store.set_config("dummykey", "bridge:NA", True)
    fetched = store.get_config('bridge')
    self.assertTrue(fetched == mock_get_return.return_value)
def get_config_param_for_BG_delete_account(self):
    """To get the config parameters required in init and reset phase.

    Reads each background_delete_account>* parameter from the BG delete
    config file and returns them as a dict keyed by parameter name.
    """
    store = S3CortxConfStore(f'yaml://{self.BG_delete_config_file}',
                             'read_bg_delete_config_idx')
    param_list = ['account_name', 'account_id', 'canonical_id', 'mail',
                  's3_user_id', 'const_cipher_secret_str',
                  'const_cipher_access_str']
    return {
        param: store.get_config('background_delete_account' + '>' + param)
        for param in param_list
    }
def get_iam_admin_credentials(self):
    """Used for reset and cleanup phase to get the iam-admin user and decrypted passwd.

    Reads the encrypted ldap login password from the auth properties file
    and, when present, decrypts it into self.ldap_passwd.
    """
    opfileconfstore = S3CortxConfStore(f'properties://{self.auth_conf_file}',
                                       'read_ldap_idx')
    s3cipher_obj = CortxS3Cipher(
        None, False, 0, self.get_confkey('CONFSTORE_OPENLDAP_CONST_KEY'))
    enc_ldap_passwd = opfileconfstore.get_config('ldapLoginPW')
    cipher_key = s3cipher_obj.generate_key()
    # idiom fix: compare against None with `is not`, not `!=` (PEP 8)
    if enc_ldap_passwd is not None:
        self.ldap_passwd = s3cipher_obj.decrypt(cipher_key, enc_ldap_passwd)
def process(self):
    """Main processing function: run pre-requisite validations.

    Loads the prereqs json file and validates rpms, services, pip3
    packages and file existence against it.
    """
    sys.stdout.write("Running validations..\n")
    # FIX: the original wrapped this in `try/except Exception as e:
    # raise e`, which catches everything only to immediately re-raise —
    # a no-op that mangles the traceback.  Let exceptions propagate.
    localconfstore = S3CortxConfStore(
        f'json://{self._preqs_conf_file}', 'confindex')
    self.validate_pre_requisites(
        rpms=localconfstore.get_config('rpms'),
        services=localconfstore.get_config('services'),
        pip3s=localconfstore.get_config('pip3s'),
        files=localconfstore.get_config('exists'))
def __init__(self, config: str):
    """Constructor.

    :param config: confstore URL of the provisioner config.  ``None``
        returns immediately (attributes stay unset); a blank string raises.
    :raises Exception: when ``config`` is an empty url path.
    """
    if config is None:
        return
    if not config.strip():
        sys.stderr.write(
            f'config url:[{config}] must be a valid url path\n')
        raise Exception('empty config URL path')
    self._url = config
    self._provisioner_confstore = S3CortxConfStore(self._url,
                                                   'setup_prov_index')
    self._s3_confkeys_store = S3CortxConfStore(
        f'yaml://{self.s3_prov_config}', 'setup_s3keys_index')
    # machine_id will be used to read confstore keys
    with open('/etc/machine-id') as f:
        self.machine_id = f.read().strip()
    # the cluster-id confkey is a format template; substitute this
    # node's machine-id before the value lookup
    self.cluster_id = self.get_confvalue(
        self.get_confkey('CONFSTORE_CLUSTER_ID_KEY').format(
            self.machine_id))
def write_cluster_id(self, op_file: str = "/opt/seagate/cortx/s3/s3backgrounddelete/s3_cluster.yaml"):
    """Set 'cluster>cluster_id' to op_file.

    Writes self.cluster_id under 'cluster_config>cluster_id' and reads it
    back to confirm the write landed.

    :param op_file: target yaml file; must already exist.
    :raises S3PROVError: when op_file is missing, the read-back mismatches,
        or any underlying error occurs (wrapped).
    """
    try:
        # idiom fix: `not path.isfile(...)` instead of `... == False`
        if not path.isfile(f'{op_file}'):
            raise S3PROVError(f'{op_file} must be present\n')
        key = 'cluster_config>cluster_id'
        opfileconfstore = S3CortxConfStore(f'yaml://{op_file}',
                                           'write_cluster_id_idx')
        opfileconfstore.set_config(f'{key}', f'{self.cluster_id}', True)
        # read back to verify the write actually took effect
        new_cluster_id = opfileconfstore.get_config(f'{key}')
        if new_cluster_id != self.cluster_id:
            raise S3PROVError(
                f'set_config failed to set {key}: {self.cluster_id} in {op_file} \n')
    except Exception as e:
        raise S3PROVError(f'exception: {e}\n')
def phase_keys_validate(self, arg_file: str, phase_name: str):
    """Validate keys of each phase derived from s3_prov_config and compare with argument file.

    :param arg_file: confstore URL of the argument file to validate against.
    :param phase_name: setup phase whose yardstick keys are checked
        (upper-cased for inheritance lookup).
    :raises Exception: when a yardstick key has no match in the argument
        file or a server_nodes lookup resolves to None.
    """
    storage_set_count_str = self.get_confvalue_with_defaults('CONFIG>CONFSTORE_STORAGE_SET_COUNT_KEY')
    if storage_set_count_str is not None:
        storage_set_val = int(storage_set_count_str)
    else:
        storage_set_val = 0
    # Set phase name to upper case required for inheritance
    phase_name = phase_name.upper()
    # Extract keys from yardstick file for current phase considering inheritance
    yardstick_list = self.extract_yardstick_list(phase_name)
    self.logger.info(f"yardstick_list -> {yardstick_list}")
    # Set argument file confstore
    argument_file_confstore = S3CortxConfStore(arg_file, 'argument_file_index')
    # Extract keys from argument file
    arg_keys_list = argument_file_confstore.get_all_keys()
    # Below algorithm uses tokenization
    # of both yardstick and argument key
    # based on delimiter to generate
    # smaller key-tokens. Then check if
    # (A) all the key-tokens are pairs of
    # pre-defined token. e.g.,
    # if key_yard is machine-id, then
    # key_arg must have corresponding
    # value of machine_id_val.
    # OR
    # (B) both the key-tokens from key_arg
    # and key_yard are the same.
    for key_yard in yardstick_list:
        # substitute this node's runtime identifiers into templated keys
        if "machine-id" in key_yard:
            key_yard = key_yard.replace("machine-id", self.machine_id)
        if "cluster-id" in key_yard:
            key_yard = key_yard.replace("cluster-id", self.cluster_id)
        if "server_nodes" in key_yard:
            # one lookup per storage set; each must resolve to a value
            index = 0
            while index < storage_set_val:
                key_yard_server_nodes = self.get_confvalue(key_yard.replace("storage-set-count", str(index)))
                if key_yard_server_nodes is None:
                    raise Exception("Validation for server_nodes failed")
                index += 1
        else:
            if key_yard in arg_keys_list:
                self.key_value_verify(key_yard)
            else:
                raise Exception(f'No match found for {key_yard}')
    self.logger.info("Validation complete")
def phase_prereqs_validate(self, phase_name: str):
    """Validate pre requisites using cortx-py-utils validator for the 'phase_name'.

    :param phase_name: phase whose prereqs block (rpms/services/pip3s/files)
        is read from the prereqs json file and validated.
    :raises FileNotFoundError: when the prereqs json file is missing.
    :raises ValueError: when self.ldap_user is not "sgiamadmin".
    :raises S3PROVError: wrapping any validation failure.
    """
    if not os.path.isfile(self._preqs_conf_file):
        raise FileNotFoundError(f'pre-requisite json file: {self._preqs_conf_file} not found')
    _prereqs_confstore = S3CortxConfStore(f'json://{self._preqs_conf_file}', f'{phase_name}')
    # NOTE(review): this username guard looks unrelated to prereq
    # validation — presumably enforces the expected IAM admin account
    # before proceeding; confirm why it lives here.
    if self.ldap_user != "sgiamadmin":
        raise ValueError('Username should be "sgiamadmin"')
    try:
        # phases without a prereqs block are skipped silently
        prereqs_block = _prereqs_confstore.get_config(f'{phase_name}')
        if prereqs_block is not None:
            self.validate_pre_requisites(rpms=_prereqs_confstore.get_config(f'{phase_name}>rpms'),
                                         services=_prereqs_confstore.get_config(f'{phase_name}>services'),
                                         pip3s=_prereqs_confstore.get_config(f'{phase_name}>pip3s'),
                                         files=_prereqs_confstore.get_config(f'{phase_name}>files'))
    except Exception as e:
        raise S3PROVError(f'ERROR: {phase_name} prereqs validations failed, exception: {e} ')
def get_ldap_root_credentials(self):
    """Used for reset and cleanup phase to get the ldap root user and decrypted passwd.

    Reads rootdn user and encrypted password from s3_cluster.yaml and,
    when a password is present, decrypts it into self.rootdn_passwd.
    """
    opfileconfstore = S3CortxConfStore(f'yaml://{self.s3_cluster_file}',
                                       'read_rootdn_idx')
    key = 'cluster_config>rootdn_user'
    self.ldap_root_user = opfileconfstore.get_config(f'{key}')
    key = 'cluster_config>rootdn_pass'
    enc_rootdn_passwd = opfileconfstore.get_config(f'{key}')
    s3cipher_obj = CortxS3Cipher(
        None, False, 0, self.get_confkey('CONFSTORE_OPENLDAP_CONST_KEY'))
    cipher_key = s3cipher_obj.generate_key()
    # idiom fix: compare against None with `is not`, not `!=` (PEP 8)
    if enc_rootdn_passwd is not None:
        self.rootdn_passwd = s3cipher_obj.decrypt(cipher_key,
                                                  enc_rootdn_passwd)
def read_ldap_credentials(self):
    """Get 'ldapadmin' user name and password from confstore.

    Resolves the actual confstore key names through s3_prov_config,
    decrypts both the ldapadmin and rootdn passwords with the openldap
    cipher key, and stores ldap_user, ldap_passwd and rootdn_passwd on
    self.

    :raises Exception: re-raised after logging to stderr on any failure.
    """
    try:
        # s3_prov_config maps logical names to the actual confstore keys
        localconfstore = S3CortxConfStore(f'yaml://{self.s3_prov_config}', 'read_ldap_credentialsidx')
        s3cipher_obj = CortxS3Cipher(None, False, 0, localconfstore.get_config('CONFSTORE_OPENLDAP_CONST_KEY'))
        cipher_key = s3cipher_obj.generate_key()
        encrypted_ldapadmin_pass = self.s3confstore.get_config(localconfstore.get_config('CONFSTORE_LDAPADMIN_PASSWD_KEY'))
        self.ldap_passwd = s3cipher_obj.decrypt(cipher_key, encrypted_ldapadmin_pass)
        self.ldap_user = self.s3confstore.get_config(localconfstore.get_config('CONFSTORE_LDAPADMIN_USER_KEY'))
        encrypted_rootdn_pass = self.s3confstore.get_config(localconfstore.get_config('CONFSTORE_ROOTDN_PASSWD_KEY'))
        self.rootdn_passwd = s3cipher_obj.decrypt(cipher_key, encrypted_rootdn_pass)
    except Exception as e:
        sys.stderr.write(f'read ldap credentials failed, error: {e}\n')
        raise e
def update_motr_max_units_per_request(self):
    """Update S3_MOTR_MAX_UNITS_PER_REQUEST in the s3config file based on VM/OVA/HW.

    S3_MOTR_MAX_UNITS_PER_REQUEST = 8 for VM/OVA
    S3_MOTR_MAX_UNITS_PER_REQUEST = 32 for HW

    :raises S3PROVError: when the configured value is out of range, not a
        power of 2, or the s3config file is missing.
    """
    # get the motr_max_units_per_request count from the config file
    motr_max_units_per_request = self.get_confvalue(
        self.get_confkey('CONFIG>CONFSTORE_S3_MOTR_MAX_UNITS_PER_REQUEST'))
    self.logger.info(
        f'motr_max_units_per_request: {motr_max_units_per_request}')
    # validate: value must be a power of 2 within [2, 128]
    # (guard clauses replace the original nested if/else pyramid)
    if not 2 <= int(motr_max_units_per_request) <= 128:
        raise S3PROVError(
            "motr_max_units_per_request should be between 2 to 128")
    if not math.log2(int(motr_max_units_per_request)).is_integer():
        raise S3PROVError(
            "motr_max_units_per_request should be power of 2")
    self.logger.info("motr_max_units_per_request is in valid range")
    # update the S3_MOTR_MAX_UNITS_PER_REQUEST in s3config.yaml file
    s3configfile = self.get_confkey('S3_CONFIG_FILE')
    # idiom fix: `not path.isfile(...)` instead of `... == False`
    if not path.isfile(f'{s3configfile}'):
        self.logger.error(f'{s3configfile} file is not present')
        raise S3PROVError(f'{s3configfile} file is not present')
    motr_max_units_per_request_key = 'S3_MOTR_CONFIG>S3_MOTR_MAX_UNITS_PER_REQUEST'
    s3configfileconfstore = S3CortxConfStore(
        f'yaml://{s3configfile}', 'write_s3_motr_max_unit_idx')
    s3configfileconfstore.set_config(motr_max_units_per_request_key,
                                     int(motr_max_units_per_request), True)
    self.logger.info(
        f'Key {motr_max_units_per_request_key} updated successfully in {s3configfile}'
    )