Example #1
0
    def _initial_check(self):
        """Pre-checks for cluster-id assignment.

        Checks the cluster-id setup file and its data. If the file is
        not present or not loaded, generates a new UUID, writes it to
        the file and marks the file immutable.

        Returns:
            str: cluster_id value read from (or newly written to) the file.

        Raises:
            ValueError: if any step of the check/assignment fails; the
                original exception is chained as the cause.
        """
        try:
            if self.is_exists() and self.is_loaded():
                # File already holds an id; strip any embedded newlines.
                cluster_id = CLUSTER_ID_FILE.read_text().replace('\n', '')

            else:
                logger.info("ClusterID file does not exist "
                            "or is empty. Generating an ID to set..")

                cluster_id = str(uuid.uuid4())

                with open(CLUSTER_ID_FILE, "w+") as file_value:
                    file_value.write(cluster_id)

                # TODO: check if there's a better way to make file immutable
                _cmd = f"chattr +i {CLUSTER_ID_FILE} && lsattr {CLUSTER_ID_FILE}"
                run_subprocess_cmd([_cmd], check=False, shell=True)  # nosec

            return cluster_id

        except Exception as exc:
            # Chain the cause so the original traceback is not lost.
            raise ValueError(
                f"Failed: cluster_id assignment initial check: {exc}"
            ) from exc
Example #2
0
 def run(self, certificate=None):
     """Validate the given SSL certificate's expiry and install it.

     Creates the certificate destination directory, then (when a
     certificate path is given) checks its end date via the openssl CLI
     and copies it into CERT_PATH.

     Args:
         certificate: path to a PEM certificate file; if None, only the
             destination directory is created.

     Raises:
         CortxSetupError: if the certificate has expired.
         Exception: re-raised when the certificate cannot be parsed.
     """
     self.logger.debug(f"Certificate path: {certificate}")
     CERT_PATH.mkdir(parents=True, exist_ok=True)
     if certificate:
         try:
             end_date = None
             # pyOpenSSL will not be available by default
             # on centos JIRA: EOS-21290
             res = run_subprocess_cmd(
                 f"openssl x509 -enddate -noout -in {certificate}")
             if not res.returncode:
                 # stdout looks like "notAfter=<date>"; parse the value.
                 res = res.stdout
                 end_date = res.split('=')[1]
                 ed = datetime.strptime(end_date.strip(),
                                        '%b %d %H:%M:%S %Y %Z')
                 current_d = datetime.now()
                 if current_d > ed:
                     raise CortxSetupError(
                         f"Certificate {certificate} is expired "
                         f"End date: {ed}")
         except Exception as exc:
             self.logger.error(f"Invalid certificate file {certificate}\n"
                               f"Error: {exc}")
             raise
         self.logger.debug(f"Copy certificate to {str(CERT_PATH)}")
         run_subprocess_cmd(f"cp -rf {certificate} {str(CERT_PATH)}")
     self.logger.debug("Done")
def fetch_enclosure_serial():
    """Fetch the storage enclosure serial via the controller CLI utility.

    Reads controller connection details from pillar, runs the vendor CLI
    (stdout redirected to /etc/enclosure-id by the shell), then validates
    the generated file holds exactly one line containing a single word.

    Returns:
        bool: True on success; False if the file could not be generated
        or its content looks malformed.
    """
    current_node = getattr(sys.modules[__name__], '__grains__')['id']

    # Node ids look like "srvnode-N"; the matching enclosure is "enclosure-N".
    current_enclosure = "enclosure-" + ((current_node).split('-'))[1]
    __pillar__ = getattr(sys.modules[__name__], '__pillar__')
    ctrl_cli_utility = __pillar__['provisioner']['storage']['controller'][
        'cli_utility_path']

    host = __pillar__['storage'][current_enclosure]['controller']['primary'][
        'ip']
    user = __pillar__['storage'][current_enclosure]['controller']['user']
    secret = __pillar__['storage'][current_enclosure]['controller']['secret']
    logs = "/var/log/seagate/provisioner/controller-cli.log"
    _opt = "--show-license"

    logger.info(
        "[ INFO ] Running controller-cli utility to get enclosure serial...")
    _cmd = (
        f"sh {ctrl_cli_utility} host -h {host} -u {user} -p '{secret}' {_opt}"
        " | grep -A2 Serial | tail -1 > /etc/enclosure-id")
    # Output goes to /etc/enclosure-id via the shell redirection above,
    # so stdout is empty and doesn't need to be captured.
    run_subprocess_cmd([_cmd], check=False, shell=True)  # nosec

    _enc_id_file = Path('/etc/enclosure-id')
    if not _enc_id_file.exists():
        msg = ("ERROR: Could not generate the enclosure id "
               "from controller cli utility, please check "
               f"the {logs} for more details")
        logger.error(msg)
        return False

    # Check if file /etc/enclosure-id has correct content:
    # 1. has only one line and
    # 2. has only one word - the enclosure serial.
    with open(_enc_id_file) as _fp:
        _lines = _fp.read().splitlines()

    for _line_cnt, _line in enumerate(_lines):
        logger.info(f"content of line {_line_cnt}: {_line}")

    if len(_lines) > 1 or (_lines and len(_lines[0].split()) > 1):
        msg = "ERROR: The contents of /etc/enclosure-id looks incorrect, failing"
        logger.error(msg)
        return False

    logger.info(
        "Enclosure id generated successfully and is kept at /etc/enclosure-id")

    return True
Example #4
0
def validate_firewall():  # noqa: C901
    """Validates firewall ports from pillar data.

    Builds (port, service) pairs from the pillar firewall config and
    checks each pair against currently listening TCP/UDP sockets.

    Args:
      Takes no mandatory argument as input.

    Returns:
        bool: True if every configured port/service is open, else False.
    """
    validate = False
    ret_code = False
    _target_node = getattr(sys.modules[__name__], '__grains__')['id']
    data = provisioner.pillar_get()
    fw_pillar = data[_target_node]["firewall"]

    validate_ports = []
    validate_services = []

    for zone in fw_pillar:
        for services, ports in fw_pillar[zone]["ports"].items():
            for pt in ports:
                # Append the service once PER PORT so both lists stay
                # index-aligned for the zip() below. (Appending it only
                # once per service would make the lists unequal lengths
                # and zip() would silently skip the extra ports.)
                validate_services.append(services)
                port_num = int(''.join(filter(str.isdigit, pt)))
                validate_ports.append(port_num)

    # TODO: Future scope: as utilities increase in Salt,
    # opt for wrapper around run_subprocess_cmd and
    # use it in salt utility modules as srv/_utils/<file>.py
    # Ref: https://github.com/Seagate/cortx-prvsnr/pull/1111#discussion_r614055880

    for port, service in zip(validate_ports, validate_services):

        tcp_port = run_subprocess_cmd([f"ss -ltrp | grep {port}"],
                                      check=False,
                                      shell=True).stdout  # nosec
        tcp_svc = run_subprocess_cmd([f"ss -lt | grep {service}"],
                                     check=False,
                                     shell=True).stdout  # nosec

        udp_port = run_subprocess_cmd([f"ss -lurp | grep {port}"],
                                      check=False,
                                      shell=True).stdout  # nosec
        udp_svc = run_subprocess_cmd([f"ss -lu | grep {service}"],
                                     check=False,
                                     shell=True).stdout  # nosec

        # Either of TCP/ UDP port and service should pass
        if not ((tcp_port or udp_port) and (tcp_svc or udp_svc)):
            ret_code = True

    if ret_code:
        logger.error(
            "Failed: Validation of open firewall ports. Ensure all services "
            "and ports mentioned in pillar are running and accessible.")
    else:
        validate = True
        logger.info("Success: Validation of open firewall ports")

    return validate
Example #5
0
    def _factory_backup(self):
        """Back up files and folders that field deployment will modify.

        Ensures the factory backup folder exists, then copies every
        source path listed in BACKUP_FILE_DICT to its destination.
        """
        BACKUP_FACTORY_FOLDER.mkdir(parents=True, exist_ok=True)

        self.logger.debug("Backup factory data")
        for source_path, dest_path in BACKUP_FILE_DICT.items():
            copy_cmd = f"cp -rf {source_path} {str(dest_path)} "
            run_subprocess_cmd(copy_cmd)
def test_run_subprocess_cmd_raises_exception(mocker, patch_logging):
    """A FileNotFoundError from subprocess.run is wrapped into
    SubprocessCmdError with the original error kept in `reason`."""
    cmd_name = "some command"

    mocker.patch.object(
        utils.subprocess, 'run', autospec=True, side_effect=FileNotFoundError
    )
    # Named `excinfo` (not `exec`) to avoid shadowing the builtin.
    with pytest.raises(errors.SubprocessCmdError) as excinfo:
        utils.run_subprocess_cmd(cmd_name)

    assert "FileNotFoundError" in str(excinfo.value.reason)
Example #7
0
def test_run_subprocess_cmd_prepares_str(mocker):
    """A string command is split into an argv list before subprocess.run."""
    cmd_name = "ls -la aaa bbb"
    kwargs = dict(universal_newlines=True,
                  stdout=subprocess.PIPE,
                  stderr=subprocess.PIPE)
    # (dropped the no-op `kwargs.update(kwargs)` left over from editing)
    kwargs["check"] = True

    run_m = mocker.patch.object(utils.subprocess, 'run', autospec=True)

    utils.run_subprocess_cmd(cmd_name)
    run_m.assert_called_once_with(cmd_name.split(), **kwargs)
Example #8
0
def test_run_subprocess_cmd_check_is_true(mocker):
    """`check` is forced to True even when the caller passes False."""
    cmd_name = "ls"
    kwargs = {
        "universal_newlines": True,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE,
        "check": False,
    }

    run_m = mocker.patch.object(utils.subprocess, 'run', autospec=True)

    utils.run_subprocess_cmd(cmd_name, **kwargs)
    kwargs["check"] = True
    run_m.assert_called_once_with([cmd_name], **kwargs)
def test_run_subprocess_cmd_happy_path(mocker):
    """The result object from subprocess.run is returned unchanged."""
    cmd_name = "ls -l"
    expected = "some-return-value"

    mocker.patch.object(
        utils.subprocess, 'run', autospec=True, return_value=expected
    )

    result = utils.run_subprocess_cmd(cmd_name)
    assert result == expected
 def run(self):
     """Remove this minion from the salt cluster and wipe its local keys.

     Deletes the minion's key from the master (salt-key -d), then removes
     the local minion_id file and the cached master public key.
     """
     minion_id = local_minion_id()
     if minion_id:
         self.logger.debug(f"removing minion {minion_id} from salt cluster")
         run_subprocess_cmd(f"salt-key -d {minion_id} -y")
     self.logger.debug("Remove minion_id and minion_master.pub from system")
     # Plain string literals: no interpolation needed here.
     run_subprocess_cmd("rm -rf /etc/salt/minion_id")
     run_subprocess_cmd("rm -rf /etc/salt/pki/minion/minion_master.pub")
     self.logger.debug(f"Minion {minion_id} removed from salt configuration")
def jbod_storage_config():
    """Populate pillar with JBOD metadata/data device paths for this node.

    Lists multipath devices; the first becomes the metadata device and
    the rest become data devices.

    Returns:
        bool: True on success.

    Raises:
        Exception: if no multipath devices are found.
    """
    _target_node = getattr(sys.modules[__name__], '__grains__')['id']
    _data_field = "cluster/{0}/storage/data_devices".format(_target_node)
    _metadata_field = "cluster/{0}/storage/metadata_devices".format(_target_node)

    _cmd = "multipath -ll | grep mpath | sort -k2.2 | awk '{ print $1 }'"
    _device_list = run_subprocess_cmd(
        [_cmd], check=False, shell=True).stdout.splitlines()  # nosec

    if not _device_list:
        # Fail with a clear message instead of an IndexError below.
        raise Exception("[ ERROR ] multipath devices don't exist.")

    metadata_devices = ["/dev/disk/by-id/dm-name-{0}".format(_device_list[0])]
    data_device = [
        "/dev/disk/by-id/dm-name-{0}".format(device)
        for device in _device_list[1:]
    ]

    provisioner.pillar_set(_metadata_field, metadata_devices)
    provisioner.pillar_set(_data_field, data_device)
    return True
def storage_device_config():
    """Populate pillar with multipath device config for all server nodes.

    For each srvnode-* node, selects multipath devices by path priority
    (prio=50 on the primary node, prio=10 otherwise), polls until they
    appear or a timeout elapses, then stores the first device as the
    metadata device and the rest as data devices. Finally verifies both
    nodes expose the same number of data devices.

    Returns:
        bool: True on success.

    Raises:
        Exception: on timeout waiting for multipath devices, or on a
            data-device count mismatch between srvnode-1 and srvnode-2.
    """
    server_nodes = [
        node for node in provisioner.pillar_get("cluster").keys()
        if "srvnode-" in node
    ]
    for node in server_nodes:
        cluster_dict = provisioner.pillar_get(f"cluster/{node}/roles", targets=node)

        # The primary node owns the prio=50 paths; secondaries the prio=10 ones.
        if "primary" in cluster_dict[node][f"cluster/{node}/roles"]:
            cmd = ("multipath -ll | grep prio=50 -B2 |"
                  " grep mpath|sort -k2.2 | awk '{ print $1 }'")
        else:
            cmd = ("multipath -ll | grep prio=10 -B2 |"
                  " grep mpath|sort -k2.2 | awk '{ print $1 }'")

        device_list = []
        _timeout = 60
        _count = 0
        _sleep_time = 5
        # Poll until multipath devices show up or the timeout elapses.
        # The loop only exits with a non-empty device_list or by raising.
        while device_list == []:
            if _count == 0:
                logger.info(f"[ INFO ] Attempt {_count} "
                       "Waiting for multipath device to come up...")

            if _count > _timeout:
                msg = ("[ ERROR ] multipath devices don't exist. "
                        f"Giving up after {_timeout} seconds.")
                raise Exception(msg)
            else:
                time.sleep(_sleep_time)

                logger.info(f"Command to populate multipath devices: {cmd}")
                device_list = run_subprocess_cmd(
                    [cmd], check=False, shell=True).stdout.splitlines()  # nosec

                if len(device_list) > 0:
                    logger.info("[ INFO ] Found multipath devices...")
                else:
                    print(".")
                _count = _count + _sleep_time

        metadata_devices = [f"/dev/disk/by-id/dm-name-{device_list[0]}"]
        # (dropped a no-op `.format(node)` chained after the f-string here)
        metadata_field = f"cluster/{node}/storage/metadata_devices"
        provisioner.pillar_set(metadata_field, metadata_devices)

        data_device = [f"/dev/disk/by-id/dm-name-{device}" for device in device_list[1:]]
        data_field = f"cluster/{node}/storage/data_devices"
        provisioner.pillar_set(data_field, data_device)

    if (len(provisioner.pillar_get("cluster/srvnode-1/storage/data_devices"))
        != len(provisioner.pillar_get("cluster/srvnode-2/storage/data_devices"))):
        msg = ("[ ERROR ] multipath devices don't match for the 2 nodes. "
                "Can't proceed exiting...")
        raise Exception(msg)

    return True
Example #13
0
    def docker_image(request, docker_client, project_path, tmpdir_session):
        """Build and tag the docker image for one (os, env-level) pair.

        NOTE(review): relies on `env_level` and `os_name` from an
        enclosing scope not visible in this chunk — presumably this is
        a fixture-factory closure; confirm against the caller.
        """
        env_spec = ENV_LEVELS_HIERARCHY[env_level]
        docker_spec = {}
        build_type = 'dockerfile'
        pre_scripts = []

        # Resolve the parent env level and build options from the spec
        # shape: 'base' has no parent; a dict spec carries explicit
        # options; otherwise the spec itself names the parent level.
        if env_level == 'base':
            parent_env_level = None
        elif type(env_spec) is dict:
            parent_env_level = env_spec['parent']
            pre_scripts = env_spec.get('pre_scripts', [])
            docker_spec = env_spec.get('docker', {})
            build_type = docker_spec.get('build_type', 'dockerfile')
        else:
            parent_env_level = env_spec

        def _image_short_id(image):
            # Strip the digest prefix so the id can be embedded in a tag.
            return image.short_id.replace('sha256:', '')

        def _image_tag(image, os_name, env_level):
            # TODO better parts separator
            return '-'.join([os_name, env_level, _image_short_id(image)])

        def _image_name(image, os_name, env_level):
            # Full repo:tag name for the image.
            return ("{}:{}".format(DOCKER_IMAGES_REPO,
                                   _image_tag(image, os_name, env_level)))

        def _parent_image_name():
            # Returns (name, image-object). For 'base' only the name from
            # the spec is known, so the image object is None.
            if env_level == 'base':
                # TODO image object would be necessary for 'commit' built_type
                return env_spec[os_name]['docker'], None
            else:
                # Parent image comes from the sibling fixture for the
                # parent env level (built recursively on demand).
                p_image = request.getfixturevalue("docker_image_{}".format(
                    env_fixture_suffix(os_name, parent_env_level)))
                p_image_name = _image_name(p_image, os_name, parent_env_level)
                if p_image_name not in p_image.tags:
                    logger.warning(
                        "parent image {} doesn't have expected tag {}".format(
                            p_image, p_image_name))
                    # TODO what if no tags at all,
                    #      do we allow referencing by id ?
                    p_image_name = p_image.tags[0]
                return p_image_name, p_image

        parent_image_name, parent_image = _parent_image_name()

        # build image
        logger.info("Building docker env '{}' for base env '{}'".format(
            env_level, os_name))
        if pre_scripts:
            logger.info(
                "Running pre-scripts of docker env '{}' for base env '{}'".
                format(env_level, os_name))
            for script in pre_scripts:
                logger.debug(f"Running script '{script}'")
                utils.run_subprocess_cmd(script.split())

        if build_type == 'dockerfile':
            df_name = "Dockerfile.{}".format(env_level)
            dockerfile = project_path / 'images' / 'docker' / df_name
            dockerfile_tmpl = dockerfile.parent / (dockerfile.name + '.tmpl')

            # Prefer a template (parameterized on the parent image) when
            # one exists next to the plain Dockerfile.
            if dockerfile_tmpl.exists():
                dockerfile_str = dockerfile_tmpl.read_text().format(
                    parent=parent_image_name)
            else:
                dockerfile_str = dockerfile.read_text()

            # Write the (possibly rendered) Dockerfile into the session
            # tmpdir so the original sources stay untouched.
            dockerfile = tmpdir_session / dockerfile.name
            dockerfile.write_text(dockerfile_str)
            image = h._docker_image_build(docker_client,
                                          dockerfile,
                                          ctx=project_path)
        else:  # image as container commit
            assert parent_image is not None
            remote = build_remote('docker',
                                  request,
                                  os_name,
                                  env_level=parent_env_level,
                                  base_level=parent_image)

            try:
                mhost = discover_remote(request, remote)
            except Exception:
                # Don't leak the container if discovery fails.
                remote.destroy()
                raise

            with mhost.remote as _:
                # apply scripts to running container
                for script in docker_spec.get('scripts', []):
                    mhost.check_output(script.format(repo_path=mhost.repo))
                # commit it to image
                image = h._docker_container_commit(remote)

        # set image name
        if _image_name(image, os_name, env_level) not in image.tags:
            try:
                image.tag(DOCKER_IMAGES_REPO,
                          tag=_image_tag(image, os_name, env_level))
            except Exception:
                # ensure that image doesn't have any other tags
                # TODO what if it actually was tagged but failed then,
                #      is it possible in docker API
                if not image.tags:
                    docker_client.images.remove(image.id,
                                                force=False,
                                                noprune=False)
                raise
            else:
                # Refresh the local object so .tags reflects the new tag.
                image.reload()

        return image
Example #14
0
def git_head_sha1(project_path):
    """Return the abbreviated SHA1 of HEAD for the repo at *project_path*."""
    cmd = ['git', '-C', str(project_path), 'rev-parse', '--short', 'HEAD']
    result = utils.run_subprocess_cmd(cmd)
    return result.stdout.strip()