Example no. 1
    def run(self, storage_set_name=None, server_node=None):
        try:
            index = 'storage_add_index'
            cluster_id = get_cluster_id()
            machine_id = []

            self.load_conf_store(index, f'json://{CONFSTORE_CLUSTER_FILE}')

            try:
                storageset = Conf.get(index,
                                      f'cluster>{cluster_id}>storage_set')
                storage_set_len = len(storageset)
            except Exception:
                self.logger.debug(
                    "No storage-set found, setting storage_set_len to 0")
                storage_set_len = 0

            if storage_set_len == 0:
                self.logger.debug("storage-set object is empty")
                raise Exception(
                    f"Error: Storage-set {storage_set_name} does not exist."
                    " Use command - cortx_setup storageset create")

            ss_found = False
            for ss_index in range(storage_set_len):
                ss_name = Conf.get(
                    index,
                    f'cluster>{cluster_id}>storage_set[{ss_index}]>name')
                if ss_name == storage_set_name:
                    ss_found = True
                    # Keep ss_index pointing at the matched storage-set for
                    # the Conf.set call below.
                    break

            if not ss_found:
                self.logger.debug(
                    f"Cannot find storage-set: {storage_set_name}")
                raise Exception(
                    f"Error: Storage-set {storage_set_name} does not exist."
                    " Use command - cortx_setup storageset create")

            node_count = get_pillar_data("cluster/storage_set/count")

            # TODO: Addnl validation needed. Change server_node from list
            # to string and allow only one node to be updated at a time?

            input_nodes_count = len(server_node)

            if input_nodes_count > node_count:
                raise ValueError(
                    f"Invalid count: {input_nodes_count} number of nodes received. "
                    f"Given Storageset can accept a maximum of {node_count} nodes. "
                    "Update it with `cortx_setup storageset create` command.")

            # Get corresponding machine-id of each node
            for node in server_node:
                machine_id.append(get_machine_id(node))

            self.logger.debug(
                f"Adding machine_id '{machine_id}' to storage-set "
                f"'{storage_set_name}' in ConfStore.")

            PillarSet().run('cluster/storage_set/server_nodes', machine_id)
            Conf.set(
                index,
                f'cluster>{cluster_id}>storage_set[{ss_index}]>server_nodes',
                machine_id)

            for node in server_node:
                node_machine_id = get_machine_id(node)
                self.logger.debug(
                    f"Adding storage set ID:{storage_set_name} to "
                    f"server {node} with machine id: {node_machine_id}")
                Conf.set(index, f'server_node>{node_machine_id}>storage_set_id',
                         storage_set_name)

            Conf.save(index)
            self.logger.debug(f"Server nodes {server_node} with correspoding "
                              f"machine_ids added to Storageset")

        except ValueError as exc:
            raise ValueError(
                f"Failed to add node to storageset. Reason: {str(exc)}")
    def perform_base_config(rootpassword, forcecleanup, config_values):
        ROOTDNPASSWORD = rootpassword
        if ROOTDNPASSWORD is None:
            Log.error('Password not provided for ldap configuration')
            quit()
        forceclean = forcecleanup if forcecleanup is not None else False
        adminuser = config_values.get('bind_base_dn').split('dc=')[0]
        mdb_dir = Conf.get(index='util_config_file_index',
                           key='install_path') + '/cortx/utils/conf'
        BaseConfig.cleanup(forceclean)
        copyfile(mdb_dir + '/olcDatabase={2}mdb.ldif',
                 '/etc/openldap/slapd.d/cn=config/olcDatabase={2}mdb.ldif')
        os.system('chgrp ldap /etc/openldap/certs/password')
        cmd = 'slappasswd -s ' + str(ROOTDNPASSWORD)
        pwd = os.popen(cmd).read()
        # str.replace returns a new string; assign it back to escape slashes.
        pwd = pwd.replace('/', '\\/')
        #restart slapd post cleanup
        os.system('systemctl restart slapd')
        dn = 'olcDatabase={0}config,cn=config'
        BaseConfig.modify_attribute(dn, 'olcRootDN', (adminuser + 'cn=config'))
        BaseConfig.modify_attribute(dn, 'olcRootPW', pwd)
        BaseConfig.modify_attribute(
            dn, 'olcAccess',
            '{0}to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write by self write by * read'
        )
        dn = 'olcDatabase={2}mdb,cn=config'
        BaseConfig.modify_attribute(dn, 'olcSuffix',
                                    config_values.get('base_dn'))
        BaseConfig.modify_attribute(dn, 'olcRootDN',
                                    config_values.get('bind_base_dn'))
        ldap_conn = ldap.initialize("ldapi:///")
        ldap_conn.sasl_non_interactive_bind_s('EXTERNAL')
        mod_attrs = [(ldap.MOD_ADD, 'olcDbMaxSize', [b'10737418240'])]
        try:
            ldap_conn.modify_s(dn, mod_attrs)
        except Exception:
            Log.error(
                'Error while modifying olcDbMaxSize attribute for olcDatabase={2}mdb'
            )
            raise Exception(
                'Error while modifying olcDbMaxSize attribute for olcDatabase={2}mdb'
            )
        ldap_conn.unbind_s()
        BaseConfig.modify_attribute(dn, 'olcRootPW', pwd)
        BaseConfig.modify_attribute(
            dn, 'olcAccess',
            '{0}to attrs=userPassword by self write by dn.base="' +
            config_values.get('bind_base_dn') +
            '" write by anonymous auth by * none')
        BaseConfig.modify_attribute(
            dn, 'olcAccess',
            '{1}to * by dn.base="' + config_values.get('bind_base_dn') +
            '" write by self write by * none')

        #add_s - init.ldif
        base = config_values.get('base_dn').split(',')[0].split('=')[1]
        add_record = [('dc', (bytes(base, 'utf-8'))),
                      ('o', (bytes(base, 'utf-8'))),
                      ('description', [b'Root entry for seagate.com.']),
                      ('objectClass', [b'top', b'dcObject', b'organization'])]
        BaseConfig.add_attribute(config_values.get('bind_base_dn'),
                                 config_values.get('base_dn'), add_record,
                                 ROOTDNPASSWORD)

        #add iam constraint
        BaseConfig.perform_ldif_operation(
            '/opt/seagate/cortx/utils/conf/iam-constraints.ldif',
            (adminuser + 'cn=config'), ROOTDNPASSWORD)
        #add ppolicy schema
        BaseConfig.perform_ldif_operation('/etc/openldap/schema/ppolicy.ldif',
                                          (adminuser + 'cn=config'),
                                          ROOTDNPASSWORD)
        BaseConfig.perform_ldif_operation(
            '/opt/seagate/cortx/utils/conf/ppolicymodule.ldif',
            (adminuser + 'cn=config'), ROOTDNPASSWORD)
        add_record = [('objectClass',
                       [b'olcOverlayConfig', b'olcPPolicyConfig']),
                      ('olcOverlay', [b'ppolicy']),
                      ('olcPPolicyDefault',
                       bytes(('cn=passwordDefault,ou=Policies,' +
                              config_values.get('base_dn')), 'utf-8')),
                      ('olcPPolicyHashCleartext', [b'FALSE']),
                      ('olcPPolicyUseLockout', [b'FALSE']),
                      ('olcPPolicyForwardUpdates', [b'FALSE'])]
        BaseConfig.add_attribute(
            (adminuser + 'cn=config'),
            "olcOverlay=ppolicy,olcDatabase={2}mdb,cn=config", add_record,
            ROOTDNPASSWORD)

        add_record = [('objectClass', [b'organizationalUnit']),
                      ('ou', [b'Policies'])]
        BaseConfig.add_attribute(
            config_values.get('bind_base_dn'),
            str("ou=Policies," + config_values.get('base_dn')), add_record,
            ROOTDNPASSWORD)

        add_record = [('objectClass', [b'pwdPolicy', b'person',
                                       b'top']), ('cn', [b'passwordDefault']),
                      ('sn', [b'passwordDefault']),
                      ('pwdAttribute', [b'userPassword']),
                      ('pwdReset', [b'TRUE']), ('pwdCheckQuality', [b'0']),
                      ('pwdMinAge', [b'0']), ('pwdMaxAge', [b'0']),
                      ('pwdMinLength', [b'0']), ('pwdInHistory', [b'0']),
                      ('pwdMaxFailure', [b'0']),
                      ('pwdFailureCountInterval', [b'0']),
                      ('pwdLockout', [b'FALSE']),
                      ('pwdLockoutDuration', [b'0']),
                      ('pwdAllowUserChange', [b'TRUE']),
                      ('pwdExpireWarning', [b'0']),
                      ('pwdGraceAuthNLimit', [b'0']),
                      ('pwdMustChange', [b'FALSE']),
                      ('pwdSafeModify', [b'FALSE'])]
        BaseConfig.add_attribute(
            config_values.get('bind_base_dn'),
            str("cn=passwordDefault,ou=Policies," +
                config_values.get('base_dn')), add_record, ROOTDNPASSWORD)
 def __init__(self, conf_url):
     """Initialize class."""
     Conf.load(self.index, conf_url)
Example no. 4
    def upgrade(config_path: str, change_set_path: str):
        """Perform upgrade steps."""

        Conf.load(DELTA_INDEX, change_set_path)
        Conf.load(GCONF_INDEX, config_path, skip_reload=True)
        delta_keys = Conf.get_keys(DELTA_INDEX)

        # if message_bus_backend changed, add new and delete old msg bus entries
        if CHANGED_PREFIX + MSG_BUS_BACKEND_KEY in delta_keys:
            new_msg_bus_backend = Conf.get(
                DELTA_INDEX,
                CHANGED_PREFIX + MSG_BUS_BACKEND_KEY).split('|')[1]
            Conf.set(GCONF_INDEX, MSG_BUS_BACKEND_KEY, new_msg_bus_backend)
            for key in delta_keys:
                if NEW_PREFIX + EXTERNAL_KEY + new_msg_bus_backend in key:
                    new_key = key.split(NEW_PREFIX)[1]
                    new_val = Conf.get(DELTA_INDEX, key).split('|')[1]
                    Conf.set(GCONF_INDEX, new_key, new_val)
                if DELETED_PREFIX + EXTERNAL_KEY + new_msg_bus_backend in key:
                    delete_key = key.split(DELETED_PREFIX)[1]
                    Conf.delete(GCONF_INDEX, delete_key)

        # update existing messagebus parameters
        else:
            msg_bus_backend = Conf.get(GCONF_INDEX, MSG_BUS_BACKEND_KEY)
            for key in delta_keys:
                if CHANGED_PREFIX + EXTERNAL_KEY + msg_bus_backend in key:
                    new_val = Conf.get(DELTA_INDEX, key).split('|')[1]
                    change_key = key.split(CHANGED_PREFIX)[1]
                    Conf.set(GCONF_INDEX, change_key, new_val)

        Conf.save(GCONF_INDEX)
        Utils.init(config_path)
        return 0
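The routine above assumes a changeset layout like the one produced by the _prepare_diff example later in this collection: keys grouped under new>/changed>/deleted> prefixes, with changed values stored as 'old|new' pairs. A minimal, hedged sketch of that layout built with the same Conf API (the index name, URL and keys here are illustrative, not the real constants):

from cortx.utils.conf_store import Conf

# Illustrative changeset; URL and key names are assumptions.
Conf.load('demo_delta', 'yaml:///tmp/changeset.yaml')
Conf.set('demo_delta', 'changed>cortx>external>kafka>endpoints[0]',
         'old-endpoint|new-endpoint')
Conf.set('demo_delta', 'new>cortx>external>kafka>admin', 'admin')
Conf.set('demo_delta', 'deleted>cortx>external>rabbitmq>endpoints[0]',
         'old-endpoint')
Conf.save('demo_delta')

# upgrade() splits each changed value on '|' and applies the new part:
new_val = Conf.get('demo_delta',
                   'changed>cortx>external>kafka>endpoints[0]').split('|')[1]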
Example no. 5
 def init(url: str):
     """ Load ConfStore URL """
     Conf.load(ConfCli._index, url)
Example no. 6
    def config_apply(solution_config_url: str, cortx_conf_url: str = None,
        force_override: bool = False):
        """
        Description:

        Parses the input config and stores it in the CORTX config location
        Parameters:
        [IN]  Solution Config URL
        [OUT] CORTX Config URL
        """
        if Log.logger is None:
            CortxProvisionerLog.initialize(const.SERVICE_NAME, const.TMP_LOG_PATH)

        if cortx_conf_url is None:
            cortx_conf_url = CortxProvisioner._cortx_conf_url
        cortx_conf = MappedConf(CortxProvisioner._tmp_cortx_conf_url)

        # Load same config again if force_override is True
        try:
            cs_option = {"fail_reload": False} if force_override else {"skip_reload": True}
            Log.info('Applying config %s' % solution_config_url)
            Conf.load(CortxProvisioner._solution_index, solution_config_url,
                **cs_option)
        except ConfError as e:
            Log.error(f'Unable to load {solution_config_url} url, Error:{e}')

        # Secrets path from config file
        if cortx_conf.get('cortx>common>storage>local'):
            CortxProvisioner._secrets_path = cortx_conf.get('cortx>common>storage>local')+CortxProvisioner._rel_secret_path

        # source code for encrypting and storing secret key
        if Conf.get(CortxProvisioner._solution_index, 'cluster') is not None:
            CortxProvisioner.apply_cluster_config(cortx_conf, CortxProvisioner.cortx_release)

        if Conf.get(CortxProvisioner._solution_index, 'cortx') is not None:
            # generating cipher key
            cipher_key = None
            cluster_id = Conf.get(CortxProvisioner._solution_index, 'cluster>id')
            if cluster_id is None:
                cluster_id = cortx_conf.get('cluster>id')
                if cluster_id is None:
                    raise CortxProvisionerError(errno.EINVAL, 'Cluster ID not specified')
            cipher_key = Cipher.gen_key(cluster_id, 'cortx')
            if cipher_key is None:
                raise CortxProvisionerError(errno.EINVAL, 'Cipher key not specified')
            for key in Conf.get_keys(CortxProvisioner._solution_index):
                # using path /etc/cortx/solution/secret to confirm secret
                if key.endswith('secret'):
                    secret_val = Conf.get(CortxProvisioner._solution_index, key)
                    val = None
                    with open(os.path.join(CortxProvisioner._secrets_path, secret_val), 'rb') as secret:
                        val = secret.read()
                    if val is None:
                        raise CortxProvisionerError(errno.EINVAL,
                            f'Could not find the secret in {CortxProvisioner._secrets_path}')
                    val = Cipher.encrypt(cipher_key, val)
                    # decoding the byte string in val variable
                    Conf.set(CortxProvisioner._solution_index, key, val.decode('utf-8'))
            CortxProvisioner.apply_cortx_config(cortx_conf, CortxProvisioner.cortx_release)
            # Adding array count key in conf
            cortx_conf.add_num_keys()
            Conf.save(cortx_conf._conf_idx)
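A hedged, standalone sketch of the secret-encryption step above, reusing only the Cipher calls that appear in this example; the import path, cluster id and plaintext bytes are assumptions:

# Import path assumed; Cipher.gen_key/Cipher.encrypt are the calls used above.
from cortx.utils.security.cipher import Cipher

cluster_id = '00000000-0000-0000-0000-000000000000'  # illustrative cluster id
cipher_key = Cipher.gen_key(cluster_id, 'cortx')
plaintext = b'secret-value-read-from-file'           # illustrative secret bytes
encrypted = Cipher.encrypt(cipher_key, plaintext)
# config_apply() stores the decoded string back into the solution index:
# Conf.set(CortxProvisioner._solution_index, key, encrypted.decode('utf-8'))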
Example no. 7
def get_nodes(self):
    nodes_info = Conf.get(self._index, 'server_node')
    nodes = []
    for value in nodes_info.values():
        nodes.append(value["hostname"])
    return nodes
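A small usage sketch for the pattern above, assuming a config with per-machine entries under 'server_node' that each carry a 'hostname' field; the index name and URL are placeholders:

from cortx.utils.conf_store import Conf

# Placeholder index/URL.
Conf.load('cluster_info', 'json:///etc/cortx/cluster.conf', skip_reload=True)
nodes_info = Conf.get('cluster_info', 'server_node')
hostnames = [value['hostname'] for value in nodes_info.values()]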
Example no. 8
 def _create_cluster_config(server_info: dict):
     """ Create the config file required for Event Message """
     for key, value in server_info.items():
         Conf.set('cluster', f'server_node>{key}', value)
     Conf.save('cluster')
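A hedged round-trip sketch of what the helper above writes, using the same Conf calls; the index name, URL and field values are illustrative:

from cortx.utils.conf_store import Conf

# Illustrative index/URL; the helper itself expects a 'cluster' index
# to be loaded already.
Conf.load('cluster_demo', 'json:///tmp/cluster_demo.conf')
server_info = {'name': 'srvnode-1', 'hostname': 'srvnode-1.example.com'}
for key, value in server_info.items():
    Conf.set('cluster_demo', f'server_node>{key}', value)
Conf.save('cluster_demo')
assert Conf.get('cluster_demo', 'server_node>hostname') == 'srvnode-1.example.com'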
Example no. 9
    def config(config_template: str):
        """Performs configurations."""
        # Copy cluster.conf.sample file to /etc/cortx/cluster.conf
        Utils._copy_conf_sample_to_conf()

        # Load required files
        config_template_index = 'cluster_config'
        Conf.load('cluster',
                  'json:///etc/cortx/cluster.conf',
                  skip_reload=True)
        Conf.load(config_template_index, config_template)

        try:
            server_list, port_list, config = \
                MessageBrokerFactory.get_server_list(config_template_index)
        except SetupError:
            Log.error(
                f"Could not find server information in {config_template}")
            raise SetupError(errno.EINVAL, \
                "Could not find server information in %s", config_template)

        Utils._create_msg_bus_config(server_list, port_list, config)
        # Cluster config
        server_info = \
            Utils._get_server_info(config_template_index, Conf.machine_id)
        if server_info is None:
            Log.error(
                f"Could not find server information in {config_template}")
            raise SetupError(errno.EINVAL, "Could not find server " +\
                "information in %s", config_template)
        Utils._create_cluster_config(server_info)

        #set cluster nodename:hostname mapping to cluster.conf
        Utils._copy_cluster_map(config_template_index)
        Utils._configure_rsyslog()

        # get shared storage info from config phase input conf template file
        shared_storage = Conf.get('cluster_config', 'cortx>support')

        # set shared storage info to cortx.conf conf file
        if shared_storage:
            Utils._set_to_conf_file('support>shared_path', shared_storage)

        # Temporary fix for a common message bus log file.
        # The issue happens when a user other than root:root tries to write
        # logs in these log dirs/files. This needs to be removed soon!
        LOG_DIR = '/var/log'
        utils_log_dir = os.path.join(LOG_DIR, 'cortx/utils')
        #message_bus
        os.makedirs(os.path.join(utils_log_dir, 'message_bus'), exist_ok=True)
        os.chmod(os.path.join(utils_log_dir, 'message_bus'), 0o0777)
        Path(os.path.join(utils_log_dir,'message_bus/message_bus.log')) \
            .touch(exist_ok=True)
        os.chmod(os.path.join(utils_log_dir, 'message_bus/message_bus.log'),
                 0o0666)
        #iem
        os.makedirs(os.path.join(utils_log_dir, 'iem'), exist_ok=True)
        os.chmod(os.path.join(utils_log_dir, 'iem'), 0o0777)
        Path(os.path.join(utils_log_dir, 'iem/iem.log')).touch(exist_ok=True)
        os.chmod(os.path.join(utils_log_dir, 'iem/iem.log'), 0o0666)
        return 0
 def get_manifest_info(rpath):
     """
     Fetch manifest information for given FRU
     rpath: Resource id (Example: node>server[0]>hw>disk)
     """
     return Conf.get(mock_manifest, rpath)
Example no. 11
# Load cortx common config
store_type = "json"
config_url = "%s://%s" % (store_type, const.CORTX_CONF_FILE)
common_config = KvStoreFactory.get_instance(config_url)
common_config.load()

# Load mock data
test_dir = os.path.dirname(os.path.realpath(__file__))
health_store_path = os.path.join(test_dir, 'solution/lr2/health.json')
manifest_store_path = os.path.join(test_dir, 'solution/lr2/manifest.json')
mock_health_data_url = "%s://%s" % (store_type, health_store_path)
mock_manifest_data_url = "%s://%s" % (store_type, manifest_store_path)
mock_health = "mock-health"
mock_manifest = "mock-manifest"
Conf.load(mock_health, mock_health_data_url)
Conf.load(mock_manifest, mock_manifest_data_url)

# Sample rpaths
#valid_rpath = "node"
#valid_rpath = "node>server[0]"
#valid_rpath = "node>storage[0]"
valid_rpath = "node>storage[0]>hw>controller"
invalid_rpath = "node>notexist[0]"


class TestDiscovery(unittest.TestCase):
    """Test Discovery module interfaces."""
    def setUp(self):
        self.solution_platform_monitor = common_config.get(
            ["discovery>solution_platform_monitor"])[0]
 def get_health_info(rpath):
     """
     Fetch health information for given FRU
     rpath: Resource id (Example: node>server[0]>hw>disk)
     """
     return Conf.get(mock_health, rpath)
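For completeness, a short hedged usage of the mock accessors against the rpaths defined in the test module above; the returned values depend entirely on the bundled mock JSON data:

# Assumes the mock indexes were loaded as in the test module above.
controller_health = get_health_info(valid_rpath)   # "node>storage[0]>hw>controller"
missing = get_health_info(invalid_rpath)           # "node>notexist[0]" is expected to yield None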
Example no. 13
 def __init__(self):
     """initial variables and ConfStor setup."""
     self.role = None
     self.dp = True
     Conf.load('sspl', f"yaml://{file_store_config_path}")
Example no. 14
# please email [email protected] or [email protected].

from aiohttp import web
from cortx.utils.log import Log


class RestServer:
    """ Base class for Cortx Rest Server implementation """
    def __init__(self):
        app = web.Application()
        from cortx.utils.iem_framework import IemRequestHandler
        from cortx.utils.message_bus import MessageBusRequestHandler
        app.add_routes([web.post('/EventMessage/event', IemRequestHandler.send), \
            web.get('/EventMessage/event', IemRequestHandler.receive), \
            web.post('/MessageBus/message/{message_type}', \
            MessageBusRequestHandler.send), \
            web.get('/MessageBus/message/{message_type}', \
            MessageBusRequestHandler.receive)])

        Log.info("Starting Message Server 127.0.0.1 on port 28300")
        web.run_app(app, host='127.0.0.1', port=28300)


if __name__ == '__main__':
    from cortx.utils.conf_store import Conf

    Conf.load('config_file', 'json:///etc/cortx/cortx.conf')
    log_level = Conf.get('config_file', 'utils>log_level', 'INFO')
    Log.init('utils_server', '/var/log/cortx/utils/utils_server', \
        level=log_level, backup_count=5, file_size_in_mb=5)
    RestServer()
Example no. 15
 def _prepare_diff(idx1: str, idx2: str, diff_idx: str):
     """
     Description:
     Compare two conf index and prepare changeset diff config.
     1. Fetch new/deleted/updated keys by comparing idx1 and idx2
     2. Prepare changeset config on diff_index
      Parameters:
     [idx1] conf index 1
     [idx2] conf index 2
     [diff_idx] changeset diff index
     """
     new_keys, deleted_keys, changed_keys = Conf.compare(idx1, idx2)
     Conf.load(diff_idx, const.CORTX_CHANGESET_URL)
     for key in new_keys:
         Conf.set(diff_idx, f'new>{key}', Conf.get(idx2, key))
     for key in deleted_keys:
         Conf.set(diff_idx, f'deleted>{key}', Conf.get(idx1, key))
     for key in changed_keys:
         value = f"{Conf.get(idx1, key)}|{Conf.get(idx2, key)}"
         Conf.set(diff_idx, f'changed>{key}', value)
     Conf.save(diff_idx)
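A minimal sketch of driving this diff logic, assuming two GConf snapshots are already available; the index names and URLs are placeholders:

from cortx.utils.conf_store import Conf

# Placeholder indexes/URLs for the old and new config snapshots.
Conf.load('gconf_old', 'yaml:///tmp/cluster.conf.old')
Conf.load('gconf_new', 'yaml:///tmp/cluster.conf.new')

# Conf.compare returns (new_keys, deleted_keys, changed_keys), exactly as
# consumed above; _prepare_diff then records them under the new>/deleted>/
# changed> prefixes in the changeset index and saves it.
new_keys, deleted_keys, changed_keys = Conf.compare('gconf_old', 'gconf_new')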
Example no. 16
 def _set_to_conf_file(key, value):
     """ Add key value pair to cortx.conf file """
     config_file = 'json:///etc/cortx/cortx.conf'
     Conf.load('config_file', config_file, skip_reload=True)
     Conf.set('config_file', key, value)
     Conf.save('config_file')
Example no. 17
    def _update_provisioning_status(_conf_idx: str, node_id: str,
        phase: str, status: str = ProvisionerStatus.DEFAULT.value):
        """
        Description:

        Add phase, status, version, release keys in confstore.
        Args:
        _conf_idx: config store index (e.g. loaded from yaml:///etc/cortx/cluster.conf)
        node_id: machine-id
        phase: deployment/upgrade
        status: default/progress/success/error."""
        key_prefix = f'node>{node_id}>provisioning'
        Conf.set(_conf_idx, f'{key_prefix}>phase', phase)
        Conf.set(_conf_idx, f'{key_prefix}>status', status)
        Conf.save(_conf_idx)

        # TODO: Remove the following section once gconf is moved to consul completely.
        CortxProvisioner._load_consul_conf(CortxProvisioner._cortx_gconf_consul_index)
        Conf.set(CortxProvisioner._cortx_gconf_consul_index, f'{key_prefix}>phase', phase)
        Conf.set(CortxProvisioner._cortx_gconf_consul_index, f'{key_prefix}>status', status)
        Conf.save(CortxProvisioner._cortx_gconf_consul_index)
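A small read-back sketch for the keys written above, assuming the same config index is still loaded; the index name and machine-id are placeholders:

from cortx.utils.conf_store import Conf

_conf_idx, node_id = 'cortx_conf', 'machine-id-0001'   # placeholders
phase = Conf.get(_conf_idx, f'node>{node_id}>provisioning>phase')
status = Conf.get(_conf_idx, f'node>{node_id}>provisioning>status')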
Example no. 18
    def _create_msg_bus_config(message_server_list: list, port_list: list, \
        config: dict):
        """ Create the config file required for message bus """

        with open(r'/etc/cortx/message_bus.conf.sample', 'w+') as file:
            json.dump({}, file, indent=2)
        Conf.load('index', 'json:///etc/cortx/message_bus.conf.sample')
        Conf.set('index', 'message_broker>type', 'kafka')
        for i in range(len(message_server_list)):
            Conf.set('index', f'message_broker>cluster[{i}]>server', \
                     message_server_list[i])
            Conf.set('index', f'message_broker>cluster[{i}]>port',
                     port_list[i])
        Conf.set('index', 'message_broker>message_bus', config)
        Conf.save('index')
        # copy this conf file as message_bus.conf
        try:
            os.rename('/etc/cortx/message_bus.conf.sample', \
                      '/etc/cortx/message_bus.conf')
        except OSError as e:
            raise SetupError(
                e.errno, "Failed to create /etc/cortx/message_bus.conf %s", e)
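Based only on the keys set above, the generated /etc/cortx/message_bus.conf should take roughly this shape; the server name, port and the contents of the final 'message_bus' section are illustrative:

# Approximate structure of the resulting JSON, written out as a Python dict.
message_bus_conf = {
    "message_broker": {
        "type": "kafka",
        "cluster": [
            {"server": "server-1.example.com", "port": "9092"},  # one entry per broker
        ],
        "message_bus": {},  # filled from the 'config' dict passed in
    }
}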
Example no. 19
    def process(self):
        self.plan = self.args.plan[0]
        self.avoid_rmq = self.args.avoid_rmq

        # Take back up of sspl test config
        sspl_test_backup = '/etc/sspl_tests.conf.back'
        shutil.copyfile(sspl_test_file_path, sspl_test_backup)

        # Add global config in sspl_test config and revert the changes once test completes.
        # Global config path in sspl_tests.conf will be referred by sspl_tests later
        global_config_copy_url = Conf.get(
            SSPL_CONFIG_INDEX, "SYSTEM_INFORMATION>global_config_copy_url")
        Conf.copy(GLOBAL_CONFIG_INDEX, SSPL_TEST_CONFIG_INDEX)
        Conf.set(SSPL_CONFIG_INDEX,
                 "SYSTEM_INFORMATION>global_config_copy_url",
                 sspl_test_config_path)
        Conf.save(SSPL_CONFIG_INDEX)

        # Enable & disable sensors based on environment
        update_sensor_info(SSPL_TEST_CONFIG_INDEX)

        # Get rabbitmq values from sspl.conf and update sspl_tests.conf
        rmq_passwd = Conf.get(SSPL_CONFIG_INDEX,
                              "RABBITMQEGRESSPROCESSOR>password")
        Conf.set(SSPL_TEST_CONFIG_INDEX, "RABBITMQEGRESSPROCESSOR>password",
                 rmq_passwd)
        Conf.save(SSPL_TEST_CONFIG_INDEX)

        # TODO: Convert shell script to python
        # from cortx.sspl.sspl_test.run_qa_test import RunQATest
        # RunQATest(self.plan, self.avoid_rmq).run()
        CMD = "%s/run_qa_test.sh %s %s" % (TEST_DIR, self.plan, self.avoid_rmq)
        output, error, returncode = SimpleProcess(CMD).run(
            realtime_output=True)
        # Restore the original path/file & service, then throw exception
        # if execution is failed.
        Conf.set(SSPL_CONFIG_INDEX,
                 "SYSTEM_INFORMATION>global_config_copy_url",
                 global_config_copy_url)
        Conf.save(SSPL_CONFIG_INDEX)
        shutil.copyfile(sspl_test_backup, sspl_test_file_path)
        Service('dbus').process('restart', 'sspl-ll.service')
        if returncode != 0:
            raise SetupError(returncode, "%s - ERROR: %s - CMD %s", self.name,
                             error, CMD)
Example no. 20
 def setUpClass(cls):
     """Test Setup class."""
     Conf.load('cluster_conf', 'json:///etc/cortx/cluster.conf')
     cls.node_name = Conf.get('cluster_conf', 'cluster>srvnode-1')
Example no. 21
    def config(self):
        """Performs configurations. Raises exception on error."""
        config_path = Conf.get(self.index, "cortx>software>consul>config_path",
                               "/etc/consul.d")
        data_path = Conf.get(self.index, "cortx>software>consul>data_path",
                             "/opt/consul")
        os.makedirs(config_path, exist_ok=True)
        os.makedirs(data_path, exist_ok=True)
        content = ""
        with open("/usr/lib/systemd/system/consul.service", "r+") as f:
            content = f.read()
            content = re.sub("config-dir=.*", f"config-dir={config_path}",
                             content)
            content = re.sub(
                "ConditionFileNotEmpty=.*",
                f"ConditionFileNotEmpty={config_path}/consul.hcl", content)
            f.seek(0)
            f.truncate()
            f.write(content)

        command = "systemd-analyze verify consul.service"
        _, err, returncode = SimpleProcess(command).run()
        if returncode != 0:
            raise ConsulSetupError(
                returncode,
                "Consul Setup systemd service file validation failed with error: %s",
                err)

        command = "systemctl daemon-reload"
        _, err, returncode = SimpleProcess(command).run()
        if returncode != 0:
            raise ConsulSetupError(
                returncode,
                "Consul Setup systemd daemon-reload failed with error: %s",
                err)

        bind_addr = Conf.get(
            self.index,
            f"server_node>{Conf.machine_id}>network>data>private_interfaces[0]"
        )
        # server_node_fqdns holds the FQDNs of the nodes on which consul will
        # run in server mode. It is used for the retry-join config.
        server_node_fqdns = []
        bootstrap_expect = 0
        is_server_node = False
        for machine_id in Conf.get(self.index, "server_node").keys():
            if "consul_server" in Conf.get(self.index,
                                           f"server_node>{machine_id}>roles",
                                           []):
                bootstrap_expect += 1
                if machine_id != Conf.machine_id:
                    server_node_fqdns.append(
                        Conf.get(
                            self.index,
                            f"server_node>{machine_id}>network>data>private_fqdn"
                        ))
                else:
                    is_server_node = True

        env = Environment(
            loader=FileSystemLoader("/opt/seagate/cortx/utils/conf"))
        template = env.get_template('consul.hcl.tmpl')
        template.stream(bind_addr=bind_addr,
                        data_dir=data_path,
                        retry_join=json.dumps(server_node_fqdns),
                        server=str(is_server_node).lower(),
                        bootstrap_expect=bootstrap_expect).dump(
                            f"{config_path}/consul.hcl")

        command = f"consul validate {config_path}/consul.hcl"

        _, err, returncode = SimpleProcess(command).run()
        if returncode != 0:
            raise ConsulSetupError(
                returncode,
                "Consul Setup config file %s validation failed with error :%s",
                f"{config_path}/consul.hcl", err)
        command = f"chown -R consul:consul {config_path} {data_path}"
        _, err, returncode = SimpleProcess(command).run()
        if returncode != 0:
            raise ConsulSetupError(
                returncode,
                "Consul Setup changing ownership failed for %s %s with error: %s",
                config_path, data_path, err)
Example no. 22
 def load_config(cls, solution_conf_url, cortx_conf_url):
     """Load config"""
     cls.solution_conf_url = solution_conf_url
     cls.cortx_conf_url = cortx_conf_url
     Conf.load(cls._solution_index, cls.solution_conf_url)
     cls.cortx_conf = MappedConf(cls.cortx_conf_url)
Example no. 23
 def create_merged_config(self):
     """Create merged config file using existing and new configs."""
     existing_keys = set(Conf.get_keys(EXISTING_CONF, key_index=False))
     new_keys = set(Conf.get_keys(NEW_CONF, key_index=False))
     changed_keys = self.get_changed_keys()
     removed_keys = existing_keys - new_keys
     added_keys = new_keys - existing_keys
     retained_keys = existing_keys - removed_keys
     # For newly added keys, get key and value both from new config file
     for key in added_keys:
         Conf.set(MERGED_CONF, key, Conf.get(NEW_CONF, key))
     # For changed keys, get key from new config file and value from old
     # config file
     for old_key, new_key in changed_keys.items():
         Conf.set(MERGED_CONF, new_key, Conf.get(EXISTING_CONF, old_key))
      # For retained keys, get both key and value from the existing config file
     for key in retained_keys:
         Conf.set(MERGED_CONF, key, Conf.get(EXISTING_CONF, key))
     # OBSOLETE and CHANGED should always come from new config
     Conf.set(MERGED_CONF, CHANGED, Conf.get(NEW_CONF, CHANGED))
     Conf.set(MERGED_CONF, OBSOLETE, Conf.get(NEW_CONF, OBSOLETE))
     Conf.save(MERGED_CONF)
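A hedged sketch of the indexes this merge expects to be loaded beforehand; EXISTING_CONF, NEW_CONF and MERGED_CONF are module-level index names in the original, and the URLs below are placeholders:

from cortx.utils.conf_store import Conf

EXISTING_CONF, NEW_CONF, MERGED_CONF = 'existing', 'new', 'merged'  # assumed index names
Conf.load(EXISTING_CONF, 'yaml:///etc/sspl.conf')        # currently deployed config (path assumed)
Conf.load(NEW_CONF, 'yaml:///tmp/sspl.conf.new')         # newly shipped config (path assumed)
Conf.load(MERGED_CONF, 'yaml:///tmp/sspl.conf.merged')   # target store for the merge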
Example no. 24
 def apply_cortx_config(cortx_conf, cortx_release):
     """Convert CORTX config into confstore keys"""
     config_info = Conf.get(CortxProvisioner._solution_index, 'cortx')
     cortx_solution_config = CortxConfig(config_info, cortx_release)
     cortx_solution_config.save(cortx_conf, CortxProvisioner._solution_index)
Example no. 25
 def load(url: str, index: str):
     """ Load ConfStore URL """
     Conf.load(index, url)
Example no. 26
    def _provision_components(cortx_conf_url: str, _conf_idx: str, interfaces: Enum, apply_phase: str):
        """Invoke Mini Provisioners of cluster components."""
        node_id, _ = CortxProvisioner._get_node_info(_conf_idx)
        num_components = int(Conf.get(_conf_idx, f'node>{node_id}>num_components'))
        for interface in interfaces:
            for comp_idx in range(0, num_components):
                key_prefix = f'node>{node_id}>components[{comp_idx}]'
                component_name = Conf.get(_conf_idx, f'{key_prefix}>name')
                # Check if an RPM exists for the component; if it does, get the build version.
                component_version = CortxProvisioner.cortx_release.get_component_version(
                    component_name)
                # Get services.
                service_idx = 0
                services = []
                while (Conf.get(_conf_idx, f'{key_prefix}>services[{service_idx}]') is not None):
                    services.append(Conf.get(_conf_idx, f'{key_prefix}>services[{service_idx}]'))
                    service_idx = service_idx + 1
                service = 'all' if service_idx == 0 else ','.join(services)
                if apply_phase == ProvisionerStages.UPGRADE.value:
                    version = Conf.get(_conf_idx, f'{key_prefix}>version')
                    # Skip update for component if it is already updated.
                    is_updated = CortxProvisioner._is_component_updated(component_name, version)
                    if is_updated is True:
                        Log.info(f'{component_name} is already updated with {version} version.')
                        continue
                CortxProvisioner._update_provisioning_status(
                        _conf_idx, node_id, apply_phase, ProvisionerStatus.PROGRESS.value)
                if interface.value == 'upgrade':
                    # TODO: add --changeset parameter once all components support config upgrade
                    cmd = (
                        f"/opt/seagate/cortx/{component_name}/bin/{component_name}_setup {interface.value}"
                        f" --config {cortx_conf_url} --services {service}")
                else:
                    cmd = (
                        f"/opt/seagate/cortx/{component_name}/bin/{component_name}_setup {interface.value}"
                        f" --config {cortx_conf_url} --services {service}")
                Log.info(f"{cmd}")
                cmd_proc = SimpleProcess(cmd)
                _, err, rc = cmd_proc.run()
                if rc != 0:
                    CortxProvisioner._update_provisioning_status(
                        _conf_idx, node_id, apply_phase, ProvisionerStatus.ERROR.value)
                    raise CortxProvisionerError(
                        rc, "%s phase of %s, failed. %s", interface.value,
                        component_name, err)

                # Update version for each component if Provisioning successful.
                Conf.set(_conf_idx, f'{key_prefix}>version', component_version)

                # TODO: Remove the following code when gconf is completely moved to consul.
                CortxProvisioner._load_consul_conf(CortxProvisioner._cortx_gconf_consul_index)
                Conf.set(CortxProvisioner._cortx_gconf_consul_index,
                        f'{key_prefix}>version', component_version)
                Conf.save(CortxProvisioner._cortx_gconf_consul_index)
Example no. 27
    def config(self):
        """Performs configurations. Raises exception on error."""
        config_path = Conf.get(self.index, "cortx>software>consul>config_path",
                               "/etc/consul.d")
        data_path = Conf.get(self.index, "cortx>software>consul>data_path",
                             "/opt/consul")
        os.makedirs(config_path, exist_ok=True)
        os.makedirs(data_path, exist_ok=True)
        content = ""
        with open("/usr/lib/systemd/system/consul.service", "r+") as f:
            content = f.read()
            content = re.sub("config-dir=.*", f"config-dir={config_path}",
                             content)
            content = re.sub(
                "ConditionFileNotEmpty=.*",
                f"ConditionFileNotEmpty={config_path}/consul.hcl", content)
            content = re.sub(
                "User=.*", "User=root",
                content
            )
            content = re.sub(
                "Group=.*", "Group=root",
                content
            )
            f.seek(0)
            f.truncate()
            f.write(content)

        command = "systemd-analyze verify consul.service"
        _, err, returncode = SimpleProcess(command).run()
        if returncode != 0:
            raise ConsulSetupError(
                returncode,
                "Consul Setup systemd service file validation failed with error: %s",
                err)

        command = "systemctl daemon-reload"
        _, err, returncode = SimpleProcess(command).run()
        if returncode != 0:
            raise ConsulSetupError(
                returncode,
                "Consul Setup systemd daemon-reload failed with error: %s",
                err)

        bind_addr = Conf.get(
            self.index,
            f"server_node>{Conf.machine_id}>network>data>private_interfaces[0]"
        )
        # server_node_fqdns holds the FQDNs of the nodes on which consul will
        # run in server mode. It is used for the retry-join config.
        server_node_fqdns = []
        bootstrap_expect = 0
        is_server_node = False
        for machine_id in Conf.get(self.index, "server_node").keys():
            if "consul_server" in Conf.get(self.index,
                                           f"server_node>{machine_id}>roles",
                                           []):
                bootstrap_expect += 1
                if machine_id != Conf.machine_id:
                    server_node_fqdns.append(
                        Conf.get(
                            self.index,
                            f"server_node>{machine_id}>network>data>private_fqdn"
                        ))
                else:
                    is_server_node = True

        with open(f"{config_path}/consul.hcl", "w") as f:
            with open("/opt/seagate/cortx/utils/conf/consul.hcl.tmpl") as t:
                content = t.read()
                content = content.replace("BIND_ADDR", bind_addr)
                content = content.replace("DATA_DIR", data_path)
                content = content.replace("SERVER", str(is_server_node).lower())
                content = content.replace("BOOTSTRAP_EXPECT", str(bootstrap_expect))
                content = content.replace("RETRY_JOIN", json.dumps(server_node_fqdns))
                f.write(content)

        command = f"consul validate {config_path}/consul.hcl"

        _, err, returncode = SimpleProcess(command).run()
        if returncode != 0:
            raise ConsulSetupError(
                returncode,
                "Consul Setup config file %s validation failed with error :%s",
                f"{config_path}/consul.hcl", err)
        command = f"chown -R root:root {config_path} {data_path}"
        _, err, returncode = SimpleProcess(command).run()
        if returncode != 0:
            raise ConsulSetupError(
                returncode,
                "Consul Setup changing ownership failed for %s %s with error: %s",
                config_path, data_path, err)
Example no. 28
    def cluster_bootstrap(cortx_conf_url: str, force_override: bool = False):
        """
        Description:

        Configures Cluster Components
        1. Compares the currently installed version with the new version
        2. Invokes Mini Provisioners of cluster components (deploy/upgrade) based on version compatibility
        Parameters:
        [IN] CORTX Config URL
        """
        Conf.load(CortxProvisioner._conf_index, cortx_conf_url)
        Conf.load(CortxProvisioner._tmp_index, CortxProvisioner._tmp_cortx_conf_url)
        tmp_conf_keys = Conf.get_keys(CortxProvisioner._tmp_index)
        node_id = Conf.machine_id
        installed_version = Conf.get(CortxProvisioner._conf_index, f'node>{node_id}>provisioning>version')
        release_version = CortxProvisioner.cortx_release.get_release_version()
        if installed_version is None:
            Conf.copy(CortxProvisioner._tmp_index, CortxProvisioner._conf_index, tmp_conf_keys)
            Conf.save(CortxProvisioner._conf_index)
            CortxProvisioner._apply_consul_config(CortxProvisioner._conf_index)
            CortxProvisioner.cluster_deploy(cortx_conf_url, force_override)
        else:
            # TODO: add a case where release_version > installed_version but is not compatible.
            ret_code = CortxProvisioner.cortx_release.version_check(
                release_version, installed_version)
            if ret_code == 1:
                CortxProvisioner._prepare_diff(CortxProvisioner._conf_index, CortxProvisioner._tmp_index, CortxProvisioner._changeset_index)
                CortxProvisioner.cluster_upgrade(cortx_conf_url, force_override)
                # TODO: update_conf needs to be removed once gconf moves to consul.
                # Gconf update after upgrade should not be handled here if gconf is in consul.
                CortxProvisioner._update_conf(CortxProvisioner._conf_index, CortxProvisioner._tmp_index)
            # TODO: This will be removed once downgrade is also supported.
            elif ret_code == -1:
                raise CortxProvisionerError(errno.EINVAL, 'Downgrade is Not Supported')
            elif ret_code == 0:
                Conf.copy(CortxProvisioner._tmp_index, CortxProvisioner._conf_index, tmp_conf_keys)
                Conf.save(CortxProvisioner._conf_index)
                CortxProvisioner._apply_consul_config(CortxProvisioner._conf_index)
                CortxProvisioner.cluster_deploy(cortx_conf_url, force_override)
            else:
                raise CortxProvisionerError(errno.EINVAL, 'Internal error. Could not determine version. Invalid image.')
Example no. 29
 def validate(self):
     """Check for required packages are installed."""
     # RPM dependency
     rpm_deps = {"cortx-sspl-test": None}
     # python 3rd party package dependency
     pip3_3ps_packages_test = {"Flask": "1.1.1"}
     pkg_validator = PkgV()
     pkg_validator.validate_pip3_pkgs(host=socket.getfqdn(),
                                      pkgs=pip3_3ps_packages_test,
                                      skip_version_check=False)
     pkg_validator.validate_rpm_pkgs(host=socket.getfqdn(),
                                     pkgs=rpm_deps,
                                     skip_version_check=True)
     # Load global, sspl and test configs
     Conf.load(SSPL_CONFIG_INDEX, sspl_config_path)
     Conf.load(SSPL_TEST_CONFIG_INDEX, sspl_test_config_path)
     # Take copy of supplied config passed to sspl_test and load it
     with open(self.sspl_test_gc_copy_file, "w") as f:
         f.write("")
     self.sspl_test_gc_copy_url = "yaml://%s" % self.sspl_test_gc_copy_file
     Conf.load(SSPL_TEST_GLOBAL_CONFIG, self.sspl_test_gc_copy_url)
     Conf.load("global_config", self.sspl_test_gc_url)
     Conf.copy("global_config", SSPL_TEST_GLOBAL_CONFIG)
     # Validate input configs
     machine_id = Utility.get_machine_id()
     self.node_type = Conf.get(SSPL_TEST_GLOBAL_CONFIG,
                               "server_node>%s>type" % machine_id)
     enclosure_id = Conf.get(
         SSPL_TEST_GLOBAL_CONFIG,
         "server_node>%s>storage>enclosure_id" % machine_id)
     self.enclosure_type = Conf.get(
         SSPL_TEST_GLOBAL_CONFIG,
         "storage_enclosure>%s>type" % enclosure_id)
Example no. 30
    def process(self):
        self.plan = self.args.plan[0]
        self.avoid_rmq = self.args.avoid_rmq

        # Take back up of sspl test config
        sspl_test_backup = '/etc/sspl_tests.conf.back'
        shutil.copyfile(sspl_test_file_path, sspl_test_backup)

        # Add global config in sspl_test config and revert the changes once test completes.
        # Global config path in sspl_tests.conf will be referred by sspl_tests later
        global_config_copy_url = Conf.get(
            SSPL_CONFIG_INDEX, "SYSTEM_INFORMATION>global_config_copy_url")
        Conf.copy(GLOBAL_CONFIG_INDEX, SSPL_TEST_CONFIG_INDEX)
        Conf.set(SSPL_CONFIG_INDEX,
                 "SYSTEM_INFORMATION>global_config_copy_url",
                 sspl_test_config_path)
        Conf.save(SSPL_CONFIG_INDEX)

        # Enable & disable sensors based on environment
        update_sensor_info(SSPL_TEST_CONFIG_INDEX)

        # Get rabbitmq values from sspl.conf and update sspl_tests.conf
        rmq_passwd = Conf.get(SSPL_CONFIG_INDEX,
                              "RABBITMQEGRESSPROCESSOR>password")
        Conf.set(SSPL_TEST_CONFIG_INDEX, "RABBITMQEGRESSPROCESSOR>password",
                 rmq_passwd)
        Conf.save(SSPL_TEST_CONFIG_INDEX)

        # TODO: Move lines 90-116 & 125-127 to RunQATest class
        # Create dummy service and add service name in /etc/sspl.conf
        service_name = "dummy_service.service"
        service_file_path_src = f"{TEST_DIR}/alerts/os/dummy_service_files/dummy_service.service"
        service_executable_code_src = f"{TEST_DIR}/alerts/os/dummy_service_files/dummy_service.py"
        service_file_path_des = "/etc/systemd/system"
        service_executable_code_des = "/var/cortx/sspl/test"

        os.makedirs(service_executable_code_des, 0o777, exist_ok=True)

        shutil.copy(service_executable_code_src,
                    f'{service_executable_code_des}/dummy_service.py')
        # Make service file executable.
        cmd = f"chmod +x {service_executable_code_des}/dummy_service.py"
        _, error, returncode = SimpleProcess(cmd).run()
        if returncode != 0:
            print("%s error occurred while executing cmd: %s" % (error, cmd))
            print("failed to assign execute permission for dummy_service.py."\
                    " dummy_service will fail.")

        # Copy service file to /etc/systemd/system/ path.
        shutil.copyfile(service_file_path_src,
                        f'{service_file_path_des}/dummy_service.service')
        cmd = "systemctl daemon-reload"
        _, error, returncode = SimpleProcess(cmd).run()
        if returncode != 0:
            print(f"failed to execute '{cmd}', systemctl will be unable"\
                f" to manage the dummy_service.service \nError: {error}")

        self.dbus_service.enable(service_name)
        self.dbus_service.start(service_name)

        service_list = Conf.get(SSPL_CONFIG_INDEX,
                                "SERVICEMONITOR>monitored_services")
        service_list.append(service_name)
        Conf.set(SSPL_CONFIG_INDEX, "SERVICEMONITOR>monitored_services",
                 service_list)

        threshold_inactive_time_original = Conf.get(
            SSPL_CONFIG_INDEX, "SERVICEMONITOR>threshold_inactive_time")
        threshold_inactive_time_new = 30
        Conf.set(SSPL_CONFIG_INDEX, "SERVICEMONITOR>threshold_inactive_time",
                 threshold_inactive_time_new)
        Conf.save(SSPL_CONFIG_INDEX)

        # TODO: Convert shell script to python
        # from cortx.sspl.sspl_test.run_qa_test import RunQATest
        # RunQATest(self.plan, self.avoid_rmq).run()
        CMD = "%s/run_qa_test.sh %s %s" % (TEST_DIR, self.plan, self.avoid_rmq)
        output, error, returncode = SimpleProcess(CMD).run(
            realtime_output=True)
        # Restore the original path/file & service, then throw exception
        # if execution is failed.
        service_list.remove(service_name)
        Conf.set(SSPL_CONFIG_INDEX, "SERVICEMONITOR>monitored_services",
                 service_list)
        Conf.set(SSPL_CONFIG_INDEX, "SERVICEMONITOR>threshold_inactive_time",
                 threshold_inactive_time_original)
        Conf.set(SSPL_CONFIG_INDEX,
                 "SYSTEM_INFORMATION>global_config_copy_url",
                 global_config_copy_url)
        Conf.save(SSPL_CONFIG_INDEX)
        shutil.copyfile(sspl_test_backup, sspl_test_file_path)
        self.dbus_service.restart('sspl-ll.service')
        if returncode != 0:
            raise SetupError(returncode, "%s - ERROR: %s - CMD %s", self.name,
                             error, CMD)