def _turnoff_executable_ruby(node):
    """Remove the executable bit from /usr/bin/ruby on a node.

    :param node: dict of node attributes; only node['ip'] is used
    """
    target_ip = node['ip']
    SSHManager().execute_on_remote(target_ip, 'chmod -x /usr/bin/ruby')
Beispiel #2
0
    def _turnon_executable_ruby(node):
        """Add the executable bit to /usr/bin/ruby on a node.

        :param node: dict of node attributes; only node['ip'] is used
        """
        manager = SSHManager()
        manager.execute_on_remote(node['ip'], 'chmod +x /usr/bin/ruby')
    def _turnon_executable_ruby(node):
        """Restore the executable bit on /usr/bin/ruby for a node.

        :param node: dict of node attributes; only node["ip"] is used
        """
        ssh_client = SSHManager()
        command = "chmod +x /usr/bin/ruby"
        ssh_client.execute_on_remote(node["ip"], command)
Beispiel #4
0
def generate_facts(ip):
    """Collect custom facter facts from puppet modules on a remote node.

    Copies every ``*/lib/facter/*.rb`` fact (minus a small blacklist) into
    the facter directory, dumps ``facter -p -y`` to /tmp/facts.yaml and
    removes the copied facts again.

    :param ip: str, IP address of the target node
    """
    ssh = SSHManager()
    facter_dir = '/var/lib/puppet/lib/facter'
    excluded = ['naily.rb']

    if not ssh.isdir_on_remote(ip, facter_dir):
        ssh.mkdir_on_remote(ip, facter_dir)
        logger.debug('Directory {0} was created'.format(facter_dir))

    ssh.execute_on_remote(ip, 'rm -f {0}/*.rb'.format(facter_dir))
    logger.debug('rb files were removed from {0}'.format(facter_dir))

    found = ssh.execute_on_remote(
        ip,
        'find /etc/puppet/modules/ -wholename "*/lib/facter/*.rb"')['stdout']
    fact_paths = [line.strip() for line in found]
    logger.debug('The following facts {0} will'
                 ' be copied to {1}'.format(fact_paths, facter_dir))
    for path in fact_paths:
        # Skip blank lines and blacklisted facts (compare by basename).
        if not path or re.sub(r'.*/', '', path) in excluded:
            continue
        ssh.execute_on_remote(ip, 'cp {0} {1}/'.format(path, facter_dir))
    logger.debug('Facts were copied')

    ssh.execute_on_remote(ip, 'facter -p -y > /tmp/facts.yaml')
    logger.info('Facts yaml was created')

    ssh.execute_on_remote(ip, 'rm -f {0}/*.rb'.format(facter_dir))
    logger.debug('rb files were removed from {0}'.format(facter_dir))
Beispiel #5
0
def generate_facts(ip):
    """Deploy module facter facts on a node and dump them to a YAML file.

    Finds ``*/lib/facter/*.rb`` facts in the puppet modules, copies them
    (except blacklisted ones) into the facter dir, runs facter, cleans up.

    :param ip: str, IP address of the target node
    """
    ssh_manager = SSHManager()
    facter_dir = '/var/lib/puppet/lib/facter'
    blacklist = ['naily.rb']

    if not ssh_manager.isdir_on_remote(ip, facter_dir):
        ssh_manager.mkdir_on_remote(ip, facter_dir)
        logger.debug('Directory {0} was created'.format(facter_dir))

    def _clear_rb_files():
        # Drop any *.rb facts currently in the facter directory.
        ssh_manager.execute_on_remote(ip, 'rm -f {0}/*.rb'.format(facter_dir))
        logger.debug('rb files were removed from {0}'.format(facter_dir))

    _clear_rb_files()

    raw = ssh_manager.execute_on_remote(
        ip,
        'find /etc/puppet/modules/ -wholename "*/lib/facter/*.rb"')['stdout']
    facts = [entry.strip() for entry in raw]
    logger.debug('The following facts {0} will'
                 ' be copied to {1}'.format(facts, facter_dir))
    for fact_path in facts:
        if not fact_path or re.sub(r'.*/', '', fact_path) in blacklist:
            continue
        ssh_manager.execute_on_remote(
            ip, 'cp {0} {1}/'.format(fact_path, facter_dir))
    logger.debug('Facts were copied')

    ssh_manager.execute_on_remote(ip, 'facter -p -y > /tmp/facts.yaml')
    logger.info('Facts yaml was created')

    _clear_rb_files()
Beispiel #6
0
    def delete_astute_log():
        """Remove astute log file(s) on the master node and restart astute.

        Ensures no stale tasks from previous deployments leak into tests.

        :return: None
        """
        manager = SSHManager()
        admin = manager.admin_ip
        for command in ("rm /var/log/astute/astute*",
                        "systemctl restart astute.service"):
            manager.execute_on_remote(admin, command)
Beispiel #7
0
    def delete_astute_log():
        """Wipe astute.log file(s) on the master node.

        Restarts the astute service afterwards so tests never see tasks
        left over from earlier deployments.

        :return: None
        """
        manager = SSHManager()
        manager.execute_on_remote(manager.admin_ip,
                                  "rm /var/log/astute/astute*")
        manager.execute_on_remote(manager.admin_ip,
                                  "systemctl restart astute.service")
Beispiel #8
0
def inject_nailgun_agent_ubuntu_bootstrap(environment):
    """Inject nailgun-agent code from review into the ubuntu bootstrap.

    :param environment: Environment Model object - self.env
    """
    logger.info("Update nailgun-agent code and assemble new ubuntu bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        # Bug fix: the old message formatted the falsy *value* of
        # UPDATE_FUEL (rendering e.g. "None variable don't exist");
        # name the variable so the failure is actionable.
        raise Exception("UPDATE_FUEL variable is not set")
    pack_path = '/var/www/nailgun/nailgun-agent-review/'
    # Step 1 - install squashfs-tools
    cmd = "yum install -y squashfs-tools"
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Step 2 - unpack bootstrap
    bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
    bootstrap_var = "/var/root.squashfs"

    cmd = "unsquashfs -d /var/root.squashfs {}/root.squashfs".format(
        bootstrap)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Step 3 - replace nailgun-agent code in unpacked bootstrap
    agent_path = "/usr/bin/nailgun-agent"
    bootstrap_file = bootstrap + "/root.squashfs"
    logger.info('bootsrap file {0}{1}'.format(bootstrap_var, agent_path))
    old_sum = get_sha_sum('{0}{1}'.format(bootstrap_var, agent_path))
    logger.info('Old sum is {0}'.format(old_sum))
    cmd_etc_sync = ('rsync -r {1}etc/* {0}/etc/'.format(
        bootstrap_var, pack_path))
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd_etc_sync)
    cmd = ("rsync -r {1}usr/* {0}/usr/;"
           "mv {2} /var/root.squashfs.old;"
           "").format(bootstrap_var, pack_path, bootstrap_file)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)
    new_sum = get_sha_sum('{0}{1}'.format(bootstrap_var, agent_path))
    logger.info('new sum is {0}'.format(new_sum))
    # The agent checksum must change once the review code is synced in.
    assert_equal(new_sum != old_sum, True)

    # Step 4 - assemble new bootstrap
    compression = "-comp xz"
    no_progress_bar = "-no-progress"
    no_append = "-noappend"
    image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
        bootstrap_var,
        bootstrap_file,
        compression,
        no_progress_bar,
        no_append)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
    checkers.check_file_exists(ssh.admin_ip, bootstrap_file)
    def centos_setup_fuel(self, hostname):
        """Boot the CentOS master node and install Fuel services on it.

        :param hostname: str, hostname to assign to the master node
        """
        with TimeStat("bootstrap_centos_node", is_uniq=True):
            admin = list(self.env.d_env.get_nodes(role__contains='master'))[0]
            self.env.d_env.start([admin])
            logger.info("Waiting for Centos node to start up")
            wait(lambda: admin.driver.node_active(admin), 60,
                 timeout_msg='Centos node failed to start')
            logger.info("Waiting for Centos node ssh ready")
            self.env.wait_for_provisioning()

        ssh = SSHManager()
        logger.debug("Update host information")
        cmd = "echo HOSTNAME={} >> /etc/sysconfig/network".format(hostname)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "echo {0} {1} {2} >> /etc/hosts".format(
            ssh.admin_ip,
            hostname,
            settings.FUEL_MASTER_HOSTNAME)

        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "hostname {}".format(hostname)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "yum install -y screen"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        install_mos_repos()

        logger.info("Install Fuel services")

        # Run the installer detached in screen; wait_for_external_config=yes
        # pauses it until we push the modified configs below.
        cmd = "screen -dm bash -c 'showmenu=no wait_for_external_config=yes " \
              "bootstrap_admin_node.sh'"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        self.env.wait_for_external_config()
        self.env.admin_actions.modify_configs(self.env.d_env.router())
        if CUSTOM_FUEL_SETTING_YAML:
            self.env.admin_actions.update_fuel_setting_yaml(
                CUSTOM_FUEL_SETTING_YAML)
        # Resume the paused installer now that configs are in place.
        self.env.kill_wait_for_external_config()

        self.env.wait_bootstrap()

        logger.debug("Check Fuel services")
        self.env.admin_actions.wait_for_fuel_ready()

        logger.debug("post-installation configuration of Fuel services")
        self.fuel_post_install_actions()
Beispiel #10
0
def check_service(ip, commands):
    """Check that required nova services are running on controller.

    :param ip: ip address of node
    :param commands: list of nova commands to execute on controller, e.g.
                     ['nova-manage service list | grep vcenter-vmcluster1']
    """
    ssh_manager = SSHManager()
    # NOTE(review): 'source openrc' runs in its own remote shell, so the
    # environment it sets may not persist into the commands below —
    # confirm whether the checks need the credentials at all.
    ssh_manager.execute_on_remote(ip=ip, cmd='source openrc')

    for cmd in commands:
        # Bind cmd as a default argument so the polled predicate does not
        # depend on the loop variable through late binding (flake8 B023).
        wait(lambda cmd=cmd: ':-)' in ssh_manager.execute_on_remote(
            ip=ip, cmd=cmd)['stdout'][-1],
            timeout=200)
Beispiel #11
0
def upload_nailgun_agent_rpm():
    """Upload the nailgun-agent rpm from review to the master node.

    :raises FuelQAVariableNotSet: if UPDATE_FUEL is not set
    """
    ssh = SSHManager()
    logger.info("Upload nailgun-agent")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    pack_path = '/var/www/nailgun/nailgun-agent-review/'
    ssh.upload_to_remote(
        ip=ssh.admin_ip,
        source=settings.UPDATE_FUEL_PATH.rstrip('/'),
        target=pack_path)
    # Extract the rpm payload in place.
    extract_cmd = 'cd {0}; rpm2cpio {1} | cpio -idmv'.format(
        pack_path, 'nailgun-agent-*.noarch.rpm ')
    ssh.execute_on_remote(ssh.admin_ip, extract_cmd)
Beispiel #12
0
    def get_nodes_tasks(node_id):
        """Collect deployment task names for a node from the astute logs.

        Downloads every astute log from the master node and scans the
        "Task time summary" lines that mention the given node.

        :param node_id: an integer number of node id
        :return: a set of deployment tasks for corresponding node
        """
        tasks = set()
        ssh = SSHManager()

        result = ssh.execute_on_remote(ssh.admin_ip, "ls /var/log/astute")
        filenames = [filename.strip() for filename in result['stdout']]

        for filename in filenames:
            ssh.download_from_remote(
                ssh.admin_ip,
                destination="/var/log/astute/{0}".format(filename),
                target="/tmp/{0}".format(filename))

        # Hoist the loop-invariant marker out of the scan loop.
        node_marker = "node {}".format(node_id)
        data = fileinput.FileInput(
            files=["/tmp/{0}".format(filename) for filename in filenames],
            openhook=fileinput.hook_compressed)
        try:
            for line in data:
                if "Task time summary" in line and node_marker in line:
                    # FIXME: define an exact search of task
                    task_name = \
                        line.split("Task time summary: ")[1].split()[0]
                    if any(excluded_task in task_name
                           for excluded_task in TASKS_BLACKLIST):
                        continue
                    tasks.add(task_name)
        finally:
            # Bug fix: FileInput was never closed, leaking the last
            # open file handle.
            data.close()
        return tasks
Beispiel #13
0
    def get_nodes_tasks(node_id):
        """Extract the set of deployment task names for one node.

        Pulls all astute logs from the master node, then scans their
        "Task time summary" lines for entries about this node.

        :param node_id: an integer number of node id
        :return: a set of deployment tasks for corresponding node
        """
        tasks = set()
        ssh = SSHManager()

        result = ssh.execute_on_remote(ssh.admin_ip, "ls /var/log/astute")
        filenames = [filename.strip() for filename in result['stdout']]

        for filename in filenames:
            ssh.download_from_remote(
                ssh.admin_ip,
                destination="/var/log/astute/{0}".format(filename),
                target="/tmp/{0}".format(filename))

        node_marker = "node {}".format(node_id)  # loop invariant, hoisted
        data = fileinput.FileInput(
            files=["/tmp/{0}".format(filename) for filename in filenames],
            openhook=fileinput.hook_compressed)
        try:
            for line in data:
                if "Task time summary" in line and node_marker in line:
                    # FIXME: define an exact search of task
                    task_name = \
                        line.split("Task time summary: ")[1].split()[0]
                    if any(excluded_task in task_name
                           for excluded_task in TASKS_BLACKLIST):
                        continue
                    tasks.add(task_name)
        finally:
            # Bug fix: close the FileInput stream (was leaked before).
            data.close()
        return tasks
Beispiel #14
0
def upload_nailgun_agent_rpm():
    """Push the nailgun-agent rpm under review onto the master node and
    unpack its contents there.

    :raises FuelQAVariableNotSet: if UPDATE_FUEL is not set
    """
    ssh = SSHManager()
    logger.info("Upload nailgun-agent")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    pack_path = '/var/www/nailgun/nailgun-agent-review/'
    ssh.upload_to_remote(ip=ssh.admin_ip,
                         source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                         target=pack_path)
    # Unpack the rpm payload next to it.
    unpack = 'cd {0}; rpm2cpio {1} | cpio -idmv'.format(
        pack_path, 'nailgun-agent-*.noarch.rpm ')
    ssh.execute_on_remote(ssh.admin_ip, unpack)
Beispiel #15
0
def inject_nailgun_agent_ubuntu_bootstrap(environment):
    """Inject the reviewed nailgun-agent code into the ubuntu bootstrap.

    :param environment: Environment Model object - self.env
    """
    logger.info("Update nailgun-agent code and assemble new ubuntu bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        # Bug fix: previously the falsy value of UPDATE_FUEL was formatted
        # into the message ("None variable don't exist"); name the variable.
        raise Exception("UPDATE_FUEL variable is not set")
    pack_path = '/var/www/nailgun/nailgun-agent-review/'
    # Step 1 - install squashfs-tools
    cmd = "yum install -y squashfs-tools"
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Step 2 - unpack bootstrap
    bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
    bootstrap_var = "/var/root.squashfs"

    cmd = "unsquashfs -d /var/root.squashfs {}/root.squashfs".format(bootstrap)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

    # Step 3 - replace nailgun-agent code in unpacked bootstrap
    agent_path = "/usr/bin/nailgun-agent"
    bootstrap_file = bootstrap + "/root.squashfs"
    logger.info('bootsrap file {0}{1}'.format(bootstrap_var, agent_path))
    old_sum = get_sha_sum('{0}{1}'.format(bootstrap_var, agent_path))
    logger.info('Old sum is {0}'.format(old_sum))
    cmd_etc_sync = ('rsync -r {1}etc/* {0}/etc/'.format(
        bootstrap_var, pack_path))
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd_etc_sync)
    cmd = ("rsync -r {1}usr/* {0}/usr/;"
           "mv {2} "
           "/var/root.squashfs.old;"
           "").format(bootstrap_var, pack_path, bootstrap_file)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)
    new_sum = get_sha_sum('{0}{1}'.format(bootstrap_var, agent_path))
    logger.info('new sum is {0}'.format(new_sum))
    # Replacing the agent code must change the checksum.
    assert_equal(new_sum != old_sum, True)

    # Step 4 - assemble new bootstrap
    compression = "-comp xz"
    no_progress_bar = "-no-progress"
    no_append = "-noappend"
    image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
        bootstrap_var, bootstrap_file, compression, no_progress_bar, no_append)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
    checkers.check_file_exists(ssh.admin_ip, bootstrap_file)
def check_service(ip, commands):
    """Check that required nova services are running on controller.

    :param ip: ip address of node
    :param commands: list of nova commands to execute on controller, e.g.
                     ['nova-manage service list | grep vcenter-vmcluster1']
    """
    ssh_manager = SSHManager()
    # NOTE(review): 'source openrc' executes in its own remote shell; the
    # exported variables may not reach the commands below — verify.
    ssh_manager.execute_on_remote(ip=ip, cmd='source openrc')

    for cmd in commands:
        # Default-argument binding avoids the late-binding loop-variable
        # pitfall in the polled lambda (flake8 B023).
        wait(
            lambda cmd=cmd:
            ':-)' in ssh_manager.execute_on_remote(
                ip=ip, cmd=cmd)['stdout'][-1],
            timeout=200)
Beispiel #17
0
def patch_centos_bootstrap():
    """Rebuild the centos bootstrap initramfs with fuel-agent code from
    review, producing /var/initramfs.img.updated on the master node.
    """
    logger.info("Update fuel-agent code and assemble new bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        # Bug fix: name the variable; the old message formatted its falsy
        # value and read "None variable don't exist".
        raise Exception("UPDATE_FUEL variable is not set")
    try:
        pack_path = '/var/www/nailgun/fuel-agent-review/'
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.FUEL_AGENT_REPO_PATH.rstrip('/'),
            target=pack_path)
        # Step 1 - unpack bootstrap
        bootstrap_var = "/var/initramfs"
        bootstrap = "/var/www/nailgun/bootstrap"
        cmd = ("mkdir {0}; cp /{1}/initramfs.img {0}/; cd {0}; "
               "cat initramfs.img | gunzip | cpio -imudv;").format(
            bootstrap_var, bootstrap)
        result = ssh.execute_on_remote(
            ip=ssh.admin_ip, cmd=cmd)['stdout_str']
        # Bug fix: this step only unpacks, but the old message claimed
        # patching finished (and misspelled "bootsrap").
        logger.debug("Unpacking bootstrap finished with {0}".format(result))

        # Step 2 - replace fuel-agent code in unpacked bootstrap
        agent_path = "/usr/lib/python2.7/site-packages/fuel_agent"
        image_rebuild = "{} | {} | {}".format(
            "find . -xdev",
            "cpio --create --format='newc'",
            "gzip -9 > /var/initramfs.img.updated")

        cmd = ("rm -rf {0}/initramfs.img; "
               "rsync -r {2}fuel_agent/* {0}{1}/;"
               "cd {0}/;"
               "{3};").format(bootstrap_var, agent_path, pack_path,
                              image_rebuild)
        result = ssh.execute_on_remote(
            ip=ssh.admin_ip, cmd=cmd)['stdout_str']
        # Bug fix: this log ran on the success path yet claimed failure.
        logger.debug("Image rebuild finished with {0}".format(result))

    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
Beispiel #18
0
def install_mos_repos():
    """
    Upload and install fuel-release packet with mos-repo description
    and install necessary packets for packetary Fuel installation
    :return: nothing
    :raises FuelQAVariableNotSet: if FUEL_RELEASE_PATH is not set
    """
    logger.info("upload fuel-release packet")
    if not settings.FUEL_RELEASE_PATH:
        raise exceptions.FuelQAVariableNotSet('FUEL_RELEASE_PATH', '/path')
    try:
        ssh = SSHManager()
        pack_path = '/tmp/'
        # Glob resolved later by the remote shell in the rpm command.
        full_pack_path = os.path.join(pack_path,
                                      'fuel-release*.noarch.rpm')
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.FUEL_RELEASE_PATH.rstrip('/'),
            target=pack_path)

        if settings.RPM_REPOS_YAML:
            # Write a custom yum repo file generated from the YAML config.
            with ssh.open_on_remote(
                    ip=ssh.admin_ip,
                    path='/etc/yum.repos.d/custom.repo',
                    mode="w") as f:
                f.write(generate_yum_repos_config(settings.RPM_REPOS_YAML))

        if settings.DEB_REPOS_YAML:
            # NOTE(review): pack_path is rebound here; full_pack_path was
            # computed above, so the rpm install below still targets /tmp.
            ssh = SSHManager()
            pack_path = "/root/default_deb_repos.yaml"
            ssh.upload_to_remote(
                ip=ssh.admin_ip,
                source=settings.DEB_REPOS_YAML,
                target=pack_path)

    except Exception:
        logger.exception("Could not upload package")
        raise

    logger.debug("setup MOS repositories")
    cmd = "rpm -ivh {}".format(full_pack_path)
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

    cmd = "yum install -y fuel-setup"
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)
Beispiel #19
0
def patch_and_assemble_ubuntu_bootstrap(environment):
    """Rebuild the active ubuntu bootstrap with fuel-agent code from review.

    :param environment: Environment Model object - self.env
    """
    logger.info("Update fuel-agent code and assemble new ubuntu bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        # Bug fix: the old message formatted the falsy *value* of
        # UPDATE_FUEL ("None variable don't exist"); name the variable.
        raise Exception("UPDATE_FUEL variable is not set")
    try:
        pack_path = '/var/www/nailgun/fuel-agent-review/'
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.FUEL_AGENT_REPO_PATH.rstrip('/'),
            target=pack_path)
        # renew code in bootstrap

        # Step 1 - install squashfs-tools
        cmd = "yum install -y squashfs-tools"
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 2 - unpack bootstrap
        bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
        bootstrap_var = "/var/root.squashfs"

        cmd = "unsquashfs -d /var/root.squashfs {}/root.squashfs".format(
            bootstrap)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 3 - replace fuel-agent code in unpacked bootstrap
        agent_path = "/usr/lib/python2.7/dist-packages/fuel_agent"
        bootstrap_file = bootstrap + "/root.squashfs"
        cmd = ("rsync -r {2}fuel_agent/* {0}{1}/;"
               "mv {3} /var/root.squashfs.old;"
               ).format(bootstrap_var, agent_path, pack_path, bootstrap_file)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 4 - assemble new bootstrap
        compression = "-comp xz"
        no_progress_bar = "-no-progress"
        no_append = "-noappend"
        image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
            bootstrap_var,
            bootstrap_file,
            compression,
            no_progress_bar,
            no_append)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
        with environment.d_env.get_admin_remote() as remote:
            checkers.check_file_exists(remote, '{0}'.format(bootstrap_file))
    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
Beispiel #20
0
class BaseActions(object):
    """Base helpers for running commands on the Fuel admin node."""

    def __init__(self):
        # Shared SSH entry point; admin_ip cached for convenience.
        self.ssh_manager = SSHManager()
        self.admin_ip = self.ssh_manager.admin_ip

    def __repr__(self):
        return "[{klass}({obj_id})]".format(
            klass=type(self), obj_id=hex(id(self)))

    def restart_service(self, service):
        """Restart a systemd service on the admin node."""
        failure_msg = ("Failed to restart service {!r}, please inspect "
                       "logs for details".format(service))
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip,
            cmd="systemctl restart {0}".format(service),
            err_msg=failure_msg)
class BaseActions(object):
    """Common actions executed against the Fuel admin (master) node."""

    def __init__(self):
        self.ssh_manager = SSHManager()
        # Cache the admin node address used by every action.
        self.admin_ip = self.ssh_manager.admin_ip

    def __repr__(self):
        obj_id = hex(id(self))
        klass = type(self)
        return "[{klass}({obj_id})]".format(klass=klass, obj_id=obj_id)

    def restart_service(self, service):
        """Restart *service* via systemctl on the admin node."""
        cmd = "systemctl restart {0}".format(service)
        err = ("Failed to restart service {!r}, please inspect logs for "
               "details".format(service))
        self.ssh_manager.execute_on_remote(
            ip=self.admin_ip, cmd=cmd, err_msg=err)
Beispiel #22
0
def replace_fuel_agent_rpm():
    """Replace the fuel-agent rpm on the master node with the rpm from
    review, installing it only when it differs from the current one.

    :raises FuelQAVariableNotSet: if UPDATE_FUEL is not set
    """
    ssh = SSHManager()
    logger.info("Patching fuel-agent")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        pack_path = '/var/www/nailgun/fuel-agent/'
        full_pack_path = os.path.join(pack_path, 'fuel-agent*.noarch.rpm')
        ssh.upload_to_remote(
            ip=ssh.admin_ip,
            source=settings.UPDATE_FUEL_PATH.rstrip('/'),
            target=pack_path)

        # Compare the installed package with the uploaded one.
        old_package = ssh.execute_on_remote(
            ssh.admin_ip, "rpm -q fuel-agent")['stdout_str']
        new_package = ssh.execute_on_remote(
            ssh.admin_ip,
            "rpm -qp {0}".format(full_pack_path))['stdout_str']
        logger.info("Updating package {0} with {1}"
                    .format(old_package, new_package))

        if old_package != new_package:
            logger.info("Updating fuel-agent package on master node")
            logger.info('Try to install package {0}'.format(new_package))
            ssh.execute_on_remote(
                ssh.admin_ip,
                "rpm -Uvh --oldpackage {0}".format(full_pack_path))

            installed_package = ssh.execute_on_remote(
                ssh.admin_ip, "rpm -q fuel-agent")['stdout_str']

            assert_equal(installed_package, new_package,
                         "The new package {0} was not installed".
                         format(new_package))

    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
Beispiel #23
0
def hiera_json_out(node_ip, parameter):
    """Look up *parameter* via hiera on a node and return it parsed as JSON.

    :param node_ip: str, IP of the node to query
    :param parameter: str, hiera key to look up
    :return: deserialized JSON value of the hiera lookup
    """
    hiera_cmd = ("ruby -rhiera -rjson -e \"h = Hiera.new(); "
                 "Hiera.logger = 'noop'; "
                 "puts JSON.dump(h.lookup('{0}', "
                 "[], {{}}, nil, nil))\"".format(parameter))
    return SSHManager().execute_on_remote(
        ip=node_ip,
        cmd=hiera_cmd,
        jsonify=True,
        err_msg='Cannot get floating ranges')['stdout_json']
Beispiel #24
0
def hiera_json_out(node_ip, parameter):
    """Query hiera on a remote node and return the lookup result as JSON.

    :param node_ip: str, IP of the node to query
    :param parameter: str, hiera key to look up
    :return: deserialized JSON value of the hiera lookup
    """
    lookup_cmd = ("ruby -rhiera -rjson -e \"h = Hiera.new(); "
                  "Hiera.logger = 'noop'; "
                  "puts JSON.dump(h.lookup('{0}', "
                  "[], {{}}, nil, nil))\"".format(parameter))
    manager = SSHManager()
    response = manager.execute_on_remote(
        ip=node_ip,
        cmd=lookup_cmd,
        jsonify=True,
        err_msg='Cannot get floating ranges')
    return response['stdout_json']
Beispiel #25
0
def replace_fuel_agent_rpm():
    """Swap the master node's fuel-agent rpm for the one from review.

    Installs the uploaded rpm only when it differs from what is already
    installed, then verifies the installation.

    :raises FuelQAVariableNotSet: if UPDATE_FUEL is not set
    """
    ssh = SSHManager()
    logger.info("Patching fuel-agent")
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    try:
        pack_path = '/var/www/nailgun/fuel-agent/'
        full_pack_path = os.path.join(pack_path, 'fuel-agent*.noarch.rpm')
        ssh.upload_to_remote(ip=ssh.admin_ip,
                             source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                             target=pack_path)

        # Current vs. candidate package versions.
        query_installed = "rpm -q fuel-agent"
        old_package = ssh.execute_on_remote(
            ssh.admin_ip, query_installed)['stdout_str']
        new_package = ssh.execute_on_remote(
            ssh.admin_ip,
            "rpm -qp {0}".format(full_pack_path))['stdout_str']
        logger.info("Updating package {0} with {1}".format(
            old_package, new_package))

        if old_package != new_package:
            logger.info("Updating fuel-agent package on master node")
            logger.info('Try to install package {0}'.format(new_package))
            ssh.execute_on_remote(
                ssh.admin_ip,
                "rpm -Uvh --oldpackage {0}".format(full_pack_path))

            installed_package = ssh.execute_on_remote(
                ssh.admin_ip, query_installed)['stdout_str']

            assert_equal(
                installed_package, new_package,
                "The new package {0} was not installed".format(new_package))

    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
Beispiel #26
0
def patch_and_assemble_ubuntu_bootstrap(environment):
    """Rebuild the active ubuntu bootstrap using fuel-agent code from review.

    :param environment: Environment Model object - self.env
    """
    logger.info("Update fuel-agent code and assemble new ubuntu bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        # Bug fix: previously the falsy value of UPDATE_FUEL was formatted
        # into the message ("None variable don't exist"); name the variable.
        raise Exception("UPDATE_FUEL variable is not set")
    try:
        pack_path = '/var/www/nailgun/fuel-agent-review/'
        ssh.upload_to_remote(ip=ssh.admin_ip,
                             source=settings.FUEL_AGENT_REPO_PATH.rstrip('/'),
                             target=pack_path)
        # renew code in bootstrap

        # Step 1 - install squashfs-tools
        cmd = "yum install -y squashfs-tools"
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 2 - unpack bootstrap
        bootstrap = "/var/www/nailgun/bootstraps/active_bootstrap"
        bootstrap_var = "/var/root.squashfs"

        cmd = "unsquashfs -d /var/root.squashfs {}/root.squashfs".format(
            bootstrap)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 3 - replace fuel-agent code in unpacked bootstrap
        agent_path = "/usr/lib/python2.7/dist-packages/fuel_agent"
        bootstrap_file = bootstrap + "/root.squashfs"
        cmd = ("rsync -r {2}fuel_agent/* {0}{1}/;"
               "mv {3} /var/root.squashfs.old;").format(
                   bootstrap_var, agent_path, pack_path, bootstrap_file)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)

        # Step 4 - assemble new bootstrap
        compression = "-comp xz"
        no_progress_bar = "-no-progress"
        no_append = "-noappend"
        image_rebuild = "mksquashfs {0} {1} {2} {3} {4}".format(
            bootstrap_var, bootstrap_file, compression, no_progress_bar,
            no_append)
        ssh.execute_on_remote(ip=ssh.admin_ip, cmd=image_rebuild)
        checkers.check_file_exists(ssh.admin_ip, '{0}'.format(bootstrap_file))
    except Exception as e:
        logger.error("Could not upload package {e}".format(e=e))
        raise
Beispiel #27
0
def replace_centos_bootstrap(environment):
    """Swap the active centos bootstrap initramfs for the rebuilt one.

    Requires /var/initramfs.img.updated to exist on the master node
    (produced by patch_centos_bootstrap), then re-syncs cobbler.

    :param environment: Environment Model object - self.env
    """
    logger.info("Updating bootstrap")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        # Bug fix: name the variable; the old message formatted its falsy
        # value and read "None variable don't exist".
        raise Exception("UPDATE_FUEL variable is not set")
    rebuilded_bootstrap = '/var/initramfs.img.updated'
    with environment.d_env.get_admin_remote() as remote:
        checkers.check_file_exists(
            remote,
            '{0}'.format(rebuilded_bootstrap))
    logger.info("Assigning new bootstrap from {}".format(rebuilded_bootstrap))
    bootstrap = "/var/www/nailgun/bootstrap"
    # Keep the old image aside, install the updated one, make it readable.
    cmd = ("mv {0}/initramfs.img /var/initramfs.img;"
           "cp /var/initramfs.img.updated {0}/initramfs.img;"
           "chmod +r {0}/initramfs.img;").format(bootstrap)
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)
    cmd = "cobbler sync"
    ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)
Beispiel #28
0
 def mcollective_nodes_online(self):
     """Return True when every cluster node answers 'mco find'."""
     cluster_nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)
     expected_uids = set([str(node['id']) for node in cluster_nodes])
     ssh_manager = SSHManager()
     output = ssh_manager.execute_on_remote(
         ip=ssh_manager.admin_ip,
         cmd='mco find',
         assert_ec_equal=[0, 1])['stdout_str']
     online_uids = set(output.split('\n'))
     offline = expected_uids - online_uids
     logger.debug('Nodes {0} are not reacheable via'
                  ' mcollective'.format(offline))
     return not offline
 def mcollective_nodes_online(self):
     """Check whether all cluster nodes are visible to mcollective."""
     uids_in_cluster = set([
         str(node['id']) for node in
         self.fuel_web.client.list_cluster_nodes(self.cluster_id)])
     manager = SSHManager()
     stdout_str = manager.execute_on_remote(
         ip=manager.admin_ip,
         cmd='mco find',
         assert_ec_equal=[0, 1])['stdout_str']
     reachable = set(stdout_str.split('\n'))
     missing = uids_in_cluster - reachable
     logger.debug('Nodes {0} are not reacheable via'
                  ' mcollective'.format(missing))
     return not missing
Beispiel #30
0
    def rebalance_swift_ring(controller_ip, retry_count=5, sleep=600):
        """Check Swift ring and rebalance it if needed.

        Replication should be performed on primary controller node.
        Retry check several times. Wait for replication due to LP1498368.

        :param controller_ip: IP of the (primary) controller to check
        :param retry_count: number of check/rebalance attempts
        :param sleep: NOTE(review): currently unused — confirm whether a
            delay between retries was intended here
        """
        ssh = SSHManager()
        cmd = "/usr/local/bin/swift-rings-rebalance.sh"
        logger.debug('Check swift ring and rebalance it.')
        # for/else: the else branch runs only if no break occurred, i.e.
        # every attempt failed — do one final check that may raise.
        for _ in xrange(retry_count):
            try:
                checkers.check_swift_ring(controller_ip)
                break
            except AssertionError:
                result = ssh.execute_on_remote(ip=controller_ip, cmd=cmd)
                logger.debug("command execution result is {0}".format(result))
        else:
            checkers.check_swift_ring(controller_ip)
Beispiel #31
0
    def rebalance_swift_ring(controller_ip, retry_count=5, sleep=600):
        """Check Swift ring and rebalance it if needed.

        Replication should be performed on primary controller node.
        Retry check several times. Wait for replication due to LP1498368.

        :param controller_ip: IP of the (primary) controller to check
        :param retry_count: number of check/rebalance attempts
        :param sleep: NOTE(review): currently unused — confirm whether a
            delay between retries was intended here
        """
        ssh = SSHManager()
        cmd = "/usr/local/bin/swift-rings-rebalance.sh"
        logger.debug('Check swift ring and rebalance it.')
        # for/else: else runs only when no break happened — all attempts
        # failed — so a final check is done that may raise AssertionError.
        for _ in xrange(retry_count):
            try:
                checkers.check_swift_ring(controller_ip)
                break
            except AssertionError:
                result = ssh.execute_on_remote(ip=controller_ip, cmd=cmd)
                logger.debug("command execution result is {0}".format(result))
        else:
            checkers.check_swift_ring(controller_ip)
Beispiel #32
0
def check_package_version_injected_in_bootstraps(package,
                                                 cluster_id=None,
                                                 ironic=None):
    """Verify that the package uploaded from review is injected in bootstrap.

    Uploads the package under test to the master node, unpacks the active
    (or ironic, per-cluster) bootstrap squashfs image, and asserts that the
    package version inside the image matches the uploaded one.

    :param package: package name to check
    :param cluster_id: cluster id, used only for the ironic bootstrap path
    :param ironic: truthy to check the ironic bootstrap instead of the
        active one
    """
    ssh = SSHManager()
    admin_ip = ssh.admin_ip
    try:
        pack_path = '/var/www/nailgun/{}/'.format(package)
        ssh.upload_to_remote(ip=admin_ip,
                             source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                             target=pack_path)
    except Exception:
        logger.exception("Could not upload package")
        raise

    # Step 1 - unpack active bootstrap
    logger.info("unpack active bootstrap")
    if ironic:
        bootstrap_dir = "/var/www/nailgun/bootstrap/ironic/{}".format(
            cluster_id)
    else:
        bootstrap_dir = "/var/www/nailgun/bootstraps/active_bootstrap"
    unpack_target = "/var/root.squashfs"
    ssh.execute_on_remote(
        ip=admin_ip,
        cmd="unsquashfs -d {} {}/root.squashfs".format(unpack_target,
                                                       bootstrap_dir))

    # Step 2 - check package version
    logger.info("check package {} version injected in ubuntu bootstrap".format(
        package))

    list_cmd = "ls {}|grep {} |grep deb |cut -f 2 -d '_'".format(pack_path,
                                                                 package)
    package_from_review = ssh.execute_on_remote(ip=admin_ip,
                                                cmd=list_cmd)['stdout_str']
    logger.info("package from review is {}".format(package_from_review))

    awk_pattern = "awk '{print $2}'"
    version_cmd = "chroot {}/ /bin/bash -c \"dpkg -s {}\"|grep Version|{}".format(
        unpack_target, package, awk_pattern)
    installed_package = ssh.execute_on_remote(ip=admin_ip,
                                              cmd=version_cmd)['stdout_str']
    logger.info("injected package is {}".format(installed_package))

    assert_equal(
        installed_package, package_from_review,
        "The new package {0} wasn't injected in bootstrap".format(
            package_from_review))

    # Step 3 - remove unpacked bootstrap
    ssh.execute_on_remote(ip=admin_ip,
                          cmd="rm -rf {}".format(unpack_target))
# --- Beispiel #33 ---
class EnvironmentModel(object):
    """Facade over the devops virtual environment and the Fuel master node.

    Singleton (enforced by SingletonMeta) that owns the SSH connection to
    the admin node and bundles the helper "actions" objects used by tests.
    """

    __metaclass__ = SingletonMeta

    def __init__(self, config=None):
        """Set up SSH access to the admin node and helper action objects.

        :param config: optional devops template config dict; when None the
            environment is resolved from settings.ENV_NAME (see d_env)
        """
        if not hasattr(self, "_virt_env"):
            self._virt_env = None
        if not hasattr(self, "_fuel_web"):
            self._fuel_web = None
        self._config = config
        self.ssh_manager = SSHManager()
        self.ssh_manager.initialize(
            self.get_admin_node_ip(),
            login=settings.SSH_CREDENTIALS["login"],
            password=settings.SSH_CREDENTIALS["password"],
        )
        self.admin_actions = AdminActions()
        self.base_actions = BaseActions()
        self.cobbler_actions = CobblerActions()
        self.nailgun_actions = NailgunActions()
        self.postgres_actions = PostgresActions()
        self.fuel_bootstrap_actions = FuelBootstrapCliActions()

    @property
    def fuel_web(self):
        """Lazily construct and cache a FuelWebClient bound to this env."""
        if self._fuel_web is None:
            self._fuel_web = FuelWebClient(self)
        return self._fuel_web

    def __repr__(self):
        klass, obj_id = type(self), hex(id(self))
        # Check the cached attribute directly so repr() does not trigger
        # FuelWebClient construction as a side effect.
        if getattr(self, "_fuel_web"):
            ip = self.fuel_web.admin_node_ip
        else:
            ip = None
        return "[{klass}({obj_id}), ip:{ip}]".format(klass=klass, obj_id=obj_id, ip=ip)

    @property
    def admin_node_ip(self):
        """Admin node IP as reported by the Fuel web client."""
        return self.fuel_web.admin_node_ip

    @property
    def collector(self):
        """Client for the statistics collector API."""
        return CollectorClient(settings.ANALYTICS_IP, "api/v1/json")

    @logwrap
    def add_syslog_server(self, cluster_id, port=5514):
        """Point the cluster's syslog at the devops router on *port*."""
        self.fuel_web.add_syslog_server(cluster_id, self.d_env.router(), port)

    def bootstrap_nodes(self, devops_nodes, timeout=settings.BOOTSTRAP_TIMEOUT, skip_timesync=False):
        """Lists registered nailgun nodes
        Start vms and wait until they are registered on nailgun.
        :rtype : List of registered nailgun nodes
        """
        # self.dhcrelay_check()

        for node in devops_nodes:
            logger.info("Bootstrapping node: {}".format(node.name))
            node.start()
            # TODO(aglarendil): LP#1317213 temporary sleep
            # remove after better fix is applied
            time.sleep(5)

        with TimeStat("wait_for_nodes_to_start_and_register_in_nailgun"):
            wait(lambda: all(self.nailgun_nodes(devops_nodes)), 15, timeout)

        if not skip_timesync:
            self.sync_time()
        return self.nailgun_nodes(devops_nodes)

    def sync_time(self, nodes_names=None, skip_sync=False):
        """Synchronize time on the given devops nodes.

        :param nodes_names: node names to sync; defaults to all active
            fuel_master/fuel_slave nodes
        :param skip_sync: passed through to devops' sync_time helper
        """
        if nodes_names is None:
            roles = ["fuel_master", "fuel_slave"]
            nodes_names = [
                node.name for node in self.d_env.get_nodes() if node.role in roles and node.driver.node_active(node)
            ]
        logger.info(
            "Please wait while time on nodes: {0} " "will be synchronized".format(", ".join(sorted(nodes_names)))
        )
        new_time = sync_time(self.d_env, nodes_names, skip_sync)
        for name in sorted(new_time):
            logger.info("New time on '{0}' = {1}".format(name, new_time[name]))

    @logwrap
    def get_admin_node_ip(self):
        """Return the admin node IP on the admin network as a string."""
        return str(self.d_env.nodes().admin.get_ip_address_by_network_name(self.d_env.admin_net))

    @logwrap
    def get_ebtables(self, cluster_id, devops_nodes):
        """Build an Ebtables helper for the cluster's VLANs and node devs."""
        return Ebtables(self.get_target_devs(devops_nodes), self.fuel_web.client.get_cluster_vlans(cluster_id))

    def get_keys(self, node, custom=None, build_images=None, iso_connect_as="cdrom"):
        """Compose the keystrokes for the admin node's boot prompt.

        Builds the kernel command line (kickstart location, network
        parameters, hostname) as a key-send script, with variants for
        usb vs cdrom boot and for CentOS 7 masters.

        :param node: devops admin node (used for its admin-net IP)
        :param custom: unused here; accepted for caller compatibility
        :param build_images: truthy sets build_images=1 on the cmdline
        :param iso_connect_as: "usb" or "cdrom" boot flavor
        :return: keystroke string for node.send_keys()
        """
        params = {
            "ks": "hd:LABEL=Mirantis_Fuel:/ks.cfg" if iso_connect_as == "usb" else "cdrom:/ks.cfg",
            "repo": "hd:LABEL=Mirantis_Fuel:/",  # only required for USB boot
            "ip": node.get_ip_address_by_network_name(self.d_env.admin_net),
            "mask": self.d_env.get_network(name=self.d_env.admin_net).ip.netmask,
            "gw": self.d_env.router(),
            "hostname": "".join((settings.FUEL_MASTER_HOSTNAME, settings.DNS_SUFFIX)),
            "nat_interface": self.d_env.nat_interface,
            "dns1": settings.DNS,
            "showmenu": "no",
            "wait_for_external_config": "yes",
            "build_images": "1" if build_images else "0",
        }
        if iso_connect_as == "usb":
            keys = (
                "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                "<F12>\n"
                "2\n"
                "<Esc><Enter>\n"
                "<Wait>\n"
                "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                " repo=%(repo)s\n"
                " ip=%(ip)s\n"
                " netmask=%(mask)s\n"
                " gw=%(gw)s\n"
                " dns1=%(dns1)s\n"
                " hostname=%(hostname)s\n"
                " dhcp_interface=%(nat_interface)s\n"
                " showmenu=%(showmenu)s\n"
                " wait_for_external_config=%(wait_for_external_config)s\n"
                " build_images=%(build_images)s\n"
                " <Enter>\n"
            ) % params
        else:  # cdrom case is default
            keys = (
                "<Wait>\n"
                "<Wait>\n"
                "<Wait>\n"
                "<Esc>\n"
                "<Wait>\n"
                "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                " ip=%(ip)s\n"
                " netmask=%(mask)s\n"
                " gw=%(gw)s\n"
                " dns1=%(dns1)s\n"
                " hostname=%(hostname)s\n"
                " dhcp_interface=%(nat_interface)s\n"
                " showmenu=%(showmenu)s\n"
                " wait_for_external_config=%(wait_for_external_config)s\n"
                " build_images=%(build_images)s\n"
                " <Enter>\n"
            ) % params
        if MASTER_IS_CENTOS7:
            # CentOS 7 is pretty stable with admin iface.
            # TODO(akostrikov) add tests for menu items/kernel parameters
            # TODO(akostrikov) refactor it.
            iface = "enp0s3"
            if iso_connect_as == "usb":
                keys = (
                    "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                    "<F12>\n"
                    "2\n"
                    "<Esc><Enter>\n"
                    "<Wait>\n"
                    "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                    " repo=%(repo)s\n"
                    " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
                    ":{iface}:off::: dns1=%(dns1)s"
                    " showmenu=%(showmenu)s\n"
                    " wait_for_external_config=%(wait_for_external_config)s\n"
                    " build_images=%(build_images)s\n"
                    " <Enter>\n".format(iface=iface)
                ) % params
            else:  # cdrom case is default
                keys = (
                    "<Wait>\n"
                    "<Wait>\n"
                    "<Wait>\n"
                    "<Esc>\n"
                    "<Wait>\n"
                    "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                    " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
                    ":{iface}:off::: dns1=%(dns1)s"
                    " showmenu=%(showmenu)s\n"
                    " wait_for_external_config=%(wait_for_external_config)s\n"
                    " build_images=%(build_images)s\n"
                    " <Enter>\n".format(iface=iface)
                ) % params
        return keys

    def get_target_devs(self, devops_nodes):
        """Return the target_dev names of all interfaces of *devops_nodes*."""
        return [
            interface.target_dev
            for interface in [val for var in map(lambda node: node.interfaces, devops_nodes) for val in var]
        ]

    @property
    def d_env(self):
        """Devops environment, resolved lazily.

        Tries to fetch an existing environment by name first; on failure
        creates/defines a new one and caches it in self._virt_env.
        """
        if self._virt_env is None:
            if not self._config:
                try:
                    return Environment.get(name=settings.ENV_NAME)
                except Exception:
                    self._virt_env = Environment.describe_environment(boot_from=settings.ADMIN_BOOT_DEVICE)
                    self._virt_env.define()
            else:
                try:
                    return Environment.get(name=self._config["template"]["devops_settings"]["env_name"])
                except Exception:
                    self._virt_env = Environment.create_environment(full_config=self._config)
                    self._virt_env.define()
        return self._virt_env

    def resume_environment(self):
        """Resume the devops env; restart the admin node if it won't revert.

        Falls back to destroy/start of the admin node when port 8000 does
        not come up within 30 seconds after resume.
        """
        self.d_env.resume()
        admin = self.d_env.nodes().admin

        try:
            admin.await(self.d_env.admin_net, timeout=30, by_port=8000)
        except Exception as e:
            logger.warning("From first time admin isn't reverted: " "{0}".format(e))
            admin.destroy()
            logger.info("Admin node was destroyed. Wait 10 sec.")
            time.sleep(10)

            admin.start()
            logger.info("Admin node started second time.")
            self.d_env.nodes().admin.await(self.d_env.admin_net)
            self.set_admin_ssh_password()
            self.admin_actions.wait_for_fuel_ready(timeout=600)

            # set collector address in case of admin node destroy
            if settings.FUEL_STATS_ENABLED:
                self.nailgun_actions.set_collector_address(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT, settings.FUEL_STATS_SSL
                )
                # Restart statsenderd in order to apply new collector address
                self.nailgun_actions.force_fuel_stats_sending()
                self.fuel_web.client.send_fuel_stats(enabled=True)
                logger.info(
                    "Enabled sending of statistics to {0}:{1}".format(
                        settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
                    )
                )
        self.set_admin_ssh_password()
        self.admin_actions.wait_for_fuel_ready()

    def make_snapshot(self, snapshot_name, description="", is_make=False):
        """Suspend the env and take a named snapshot (when enabled).

        :param snapshot_name: snapshot name
        :param description: free-form description stored with the snapshot
        :param is_make: force snapshotting even if settings.MAKE_SNAPSHOT
            is off
        """
        if settings.MAKE_SNAPSHOT or is_make:
            self.d_env.suspend(verbose=False)
            time.sleep(10)

            self.d_env.snapshot(snapshot_name, force=True, description=description)
            revert_info(snapshot_name, self.get_admin_node_ip(), description)

        if settings.FUEL_STATS_CHECK:
            self.resume_environment()

    def nailgun_nodes(self, devops_nodes):
        """Map devops nodes to their nailgun node records (may be None)."""
        return map(lambda node: self.fuel_web.get_nailgun_node_by_devops_node(node), devops_nodes)

    def check_slaves_are_ready(self):
        """Wait until every active slave node is 'online' in nailgun."""
        devops_nodes = [node for node in self.d_env.nodes().slaves if node.driver.node_active(node)]
        # Bug: 1455753
        time.sleep(30)

        for node in devops_nodes:
            try:
                wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(node)["online"], timeout=60 * 6)
            except TimeoutError:
                raise TimeoutError("Node {0} does not become online".format(node.name))
        return True

    def revert_snapshot(self, name, skip_timesync=False):
        """Revert and resume the env snapshot *name*.

        :return: False if the snapshot does not exist, True on success
        """
        if not self.d_env.has_snapshot(name):
            return False

        logger.info("We have snapshot with such name: %s" % name)

        logger.info("Reverting the snapshot '{0}' ....".format(name))
        self.d_env.revert(name)

        logger.info("Resuming the snapshot '{0}' ....".format(name))
        self.resume_environment()

        if not skip_timesync:
            self.sync_time()
        try:
            _wait(self.fuel_web.client.get_releases, expected=EnvironmentError, timeout=300)
        except exceptions.Unauthorized:
            self.set_admin_keystone_password()
            self.fuel_web.get_nailgun_version()

        _wait(lambda: self.check_slaves_are_ready(), timeout=60 * 6)
        return True

    def set_admin_ssh_password(self):
        """Ensure SSH access to the admin node with the configured creds.

        Probes with a 'date' command; on failure re-initializes the SSH
        session with default credentials and changes the password.
        NOTE(review): the "******" literals below look like censored
        defaults from the original paste -- confirm real values upstream.
        """
        new_login = settings.SSH_CREDENTIALS["login"]
        new_password = settings.SSH_CREDENTIALS["password"]
        try:
            self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip, cmd="date")
            logger.debug("Accessing admin node using SSH: SUCCESS")
        except Exception:
            logger.debug("Accessing admin node using SSH credentials:" " FAIL, trying to change password from default")
            self.ssh_manager.initialize(admin_ip=self.ssh_manager.admin_ip, login="******", password="******")
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip, cmd='echo -e "{1}\\n{1}" | passwd {0}'.format(new_login, new_password)
            )
            self.ssh_manager.initialize(admin_ip=self.ssh_manager.admin_ip, login=new_login, password=new_password)
            self.ssh_manager.update_connection(ip=self.ssh_manager.admin_ip, login=new_login, password=new_password)
            logger.debug("Admin node password has changed.")
        logger.info("Admin node login name: '{0}' , password: '******'".format(new_login, new_password))

    def set_admin_keystone_password(self):
        """Reset the Fuel UI (keystone) password if API access fails."""
        try:
            self.fuel_web.client.get_releases()
        # TODO(akostrikov) CENTOS7 except exceptions.Unauthorized:
        except:
            # NOTE(review): bare except deliberately left as-is (see TODO
            # above); it also swallows non-auth errors -- worth narrowing.
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd="fuel user --newpass {0} --change-password".format(settings.KEYSTONE_CREDS["password"]),
            )
            logger.info(
                'New Fuel UI (keystone) username: "******", password: "******"'.format(
                    settings.KEYSTONE_CREDS["username"], settings.KEYSTONE_CREDS["password"]
                )
            )

    def insert_cdrom_tray(self):
        """Close the admin node's virtual cdrom tray via 'virsh edit'."""
        # This is very rude implementation and it SHOULD be changes after
        # implementation this feature in fuel-devops
        name = "{}_{}".format(settings.ENV_NAME, self.d_env.nodes().admin.name)
        NAME_SIZE = 80
        # libvirt domain names are truncated; disambiguate long names by
        # prefixing a hash before cutting to NAME_SIZE.
        if len(name) > NAME_SIZE:
            hash_str = str(hash(name))
            name = (hash_str + name)[:NAME_SIZE]

        cmd = """EDITOR="sed -i s/tray=\\'open\\'//" virsh edit {}""".format(name)
        subprocess.check_call(cmd, shell=True)

    def reinstall_master_node(self):
        """Erase boot sector and run setup_environment"""
        with self.d_env.get_admin_remote() as remote:
            erase_data_from_hdd(remote, mount_point="/boot")
            remote.execute("/sbin/shutdown")
        self.d_env.nodes().admin.destroy()
        self.insert_cdrom_tray()
        self.setup_environment()

    def setup_environment(
        self,
        custom=settings.CUSTOM_ENV,
        build_images=settings.BUILD_IMAGES,
        iso_connect_as=settings.ADMIN_BOOT_DEVICE,
        security=settings.SECURITY_TEST,
    ):
        """Install the Fuel master node from ISO and finish its bootstrap.

        Boots the admin VM from usb/cdrom, drives the boot prompt via
        get_keys(), waits for provisioning and bootstrap to complete, then
        applies optional updates, statistics and repo settings.
        """
        # start admin node
        admin = self.d_env.nodes().admin
        if iso_connect_as == "usb":
            admin.disk_devices.get(device="disk", bus="usb").volume.upload(settings.ISO_PATH)
        else:  # cdrom is default
            admin.disk_devices.get(device="cdrom").volume.upload(settings.ISO_PATH)
        self.d_env.start(self.d_env.nodes().admins)
        logger.info("Waiting for admin node to start up")
        wait(lambda: admin.driver.node_active(admin), 60)
        logger.info("Proceed with installation")
        # update network parameters at boot screen
        admin.send_keys(self.get_keys(admin, custom=custom, build_images=build_images, iso_connect_as=iso_connect_as))
        self.wait_for_provisioning()
        self.set_admin_ssh_password()
        self.wait_for_external_config()
        if custom:
            self.setup_customisation()
        if security:
            nessus_node = NessusActions(self.d_env)
            nessus_node.add_nessus_node()
        # wait while installation complete

        self.admin_actions.modify_configs(self.d_env.router())
        self.kill_wait_for_external_config()
        self.wait_bootstrap()

        if settings.UPDATE_FUEL:
            # Update Ubuntu packages
            self.admin_actions.upload_packages(
                local_packages_dir=settings.UPDATE_FUEL_PATH,
                centos_repo_path=None,
                ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU,
            )

        self.admin_actions.wait_for_fuel_ready()
        time.sleep(10)
        self.set_admin_keystone_password()
        self.sync_time(["admin"])
        if settings.UPDATE_MASTER:
            if settings.UPDATE_FUEL_MIRROR:
                for i, url in enumerate(settings.UPDATE_FUEL_MIRROR):
                    conf_file = "/etc/yum.repos.d/temporary-{}.repo".format(i)
                    cmd = (
                        "echo -e"
                        " '[temporary-{0}]\nname="
                        "temporary-{0}\nbaseurl={1}/"
                        "\ngpgcheck=0\npriority="
                        "1' > {2}"
                    ).format(i, url, conf_file)

                    self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=cmd)
            self.admin_install_updates()
        if settings.MULTIPLE_NETWORKS:
            self.describe_other_admin_interfaces(admin)
        if not MASTER_IS_CENTOS7:
            self.nailgun_actions.set_collector_address(
                settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT, settings.FUEL_STATS_SSL
            )
            # Restart statsenderd to apply settings(Collector address)
            self.nailgun_actions.force_fuel_stats_sending()
        if settings.FUEL_STATS_ENABLED and not MASTER_IS_CENTOS7:
            self.fuel_web.client.send_fuel_stats(enabled=True)
            logger.info(
                "Enabled sending of statistics to {0}:{1}".format(settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT)
            )
        if settings.PATCHING_DISABLE_UPDATES:
            cmd = (
                "find /etc/yum.repos.d/ -type f -regextype posix-egrep"
                " -regex '.*/mos[0-9,\.]+\-(updates|security).repo' | "
                "xargs -n1 -i sed '$aenabled=0' -i {}"
            )
            self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip, cmd=cmd)

    @update_rpm_packages
    @upload_manifests
    def setup_customisation(self):
        """Hook point: decorators install custom packages/manifests."""
        logger.info("Installing custom packages/manifests " "before master node bootstrap...")

    @logwrap
    def wait_for_provisioning(self, timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait until SSH (port 22) answers on the admin node."""
        _wait(
            lambda: _tcp_ping(self.d_env.nodes().admin.get_ip_address_by_network_name(self.d_env.admin_net), 22),
            timeout=timeout,
        )

    @logwrap
    def wait_for_external_config(self, timeout=120):
        """Wait for the wait_for_external_config process on the admin node."""
        check_cmd = "pkill -0 -f wait_for_external_config"

        if MASTER_IS_CENTOS7:
            self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=check_cmd)
        else:
            wait(
                lambda: self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=check_cmd)["exit_code"] == 0,
                timeout=timeout,
            )

    @logwrap
    def kill_wait_for_external_config(self):
        """Kill wait_for_external_config and verify it is gone."""
        kill_cmd = 'pkill -f "^wait_for_external_config"'
        # The check succeeds only when pkill -0 finds no process (exit 1).
        check_cmd = 'pkill -0 -f "^wait_for_external_config"; [[ $? -eq 1 ]]'
        self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip, cmd=kill_cmd)
        self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip, cmd=check_cmd)

    def wait_bootstrap(self):
        """Wait for master node bootstrap to finish, raising on failure.

        Polls the bootstrap log for 'Fuel node deployment' and then checks
        the completion marker line.
        """
        logger.info("Waiting while bootstrapping is in progress")
        log_path = "/var/log/puppet/bootstrap_admin_node.log"
        logger.info("Puppet timeout set in {0}".format(float(settings.PUPPET_TIMEOUT)))
        with self.d_env.get_admin_remote() as admin_remote:
            wait(
                lambda: not admin_remote.execute("grep 'Fuel node deployment' '%s'" % log_path)["exit_code"],
                timeout=(float(settings.PUPPET_TIMEOUT)),
            )
            result = admin_remote.execute("grep 'Fuel node deployment " "complete' '%s'" % log_path)["exit_code"]
        if result != 0:
            raise Exception("Fuel node deployment failed.")
        self.bootstrap_image_check()

    def dhcrelay_check(self):
        """Assert that dhcpcheck discovers the master node's IP."""
        # CentOS 7 is pretty stable with admin iface.
        # TODO(akostrikov) refactor it.
        iface = "enp0s3"
        command = "dhcpcheck discover " "--ifaces {iface} " "--repeat 3 " "--timeout 10".format(iface=iface)

        out = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=command)["stdout"]

        assert_true(self.get_admin_node_ip() in "".join(out), "dhcpcheck doesn't discover master ip")

    def bootstrap_image_check(self):
        """Verify that an Ubuntu bootstrap image was built and activated."""
        fuel_settings = self.admin_actions.get_fuel_settings()
        if fuel_settings["BOOTSTRAP"]["flavor"].lower() != "ubuntu":
            logger.warning("Default image for bootstrap " "is not based on Ubuntu!")
            return

        bootstrap_images = self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip, cmd="fuel-bootstrap --quiet list"
        )["stdout"]
        assert_true(
            any("active" in line for line in bootstrap_images),
            "Ubuntu bootstrap image wasn't built and activated! "
            "See logs in /var/log/fuel-bootstrap-image-build.log "
            "for details.",
        )

    def admin_install_pkg(self, pkg_name):
        """Install a package <pkg_name> on the admin node"""
        # NOTE(review): the rpm query below has a stray trailing quote in
        # the command string ("rpm -q {0}'") -- confirm whether intended.
        remote_status = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd="rpm -q {0}'".format(pkg_name))
        if remote_status["exit_code"] == 0:
            logger.info("Package '{0}' already installed.".format(pkg_name))
        else:
            logger.info("Installing package '{0}' ...".format(pkg_name))
            remote_status = self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip, cmd="yum -y install {0}".format(pkg_name)
            )
            logger.info(
                "Installation of the package '{0}' has been"
                " completed with exit code {1}".format(pkg_name, remote_status["exit_code"])
            )
        return remote_status["exit_code"]

    def admin_run_service(self, service_name):
        """Start a service <service_name> on the admin node"""

        self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd="service {0} start".format(service_name))
        remote_status = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip, cmd="service {0} status".format(service_name)
        )
        if any("running..." in status for status in remote_status["stdout"]):
            logger.info("Service '{0}' is running".format(service_name))
        else:
            logger.info(
                "Service '{0}' failed to start"
                " with exit code {1} :\n{2}".format(service_name, remote_status["exit_code"], remote_status["stdout"])
            )

    # Execute yum updates
    # If updates installed,
    # then `bootstrap_admin_node.sh;`
    def admin_install_updates(self):
        """Run 'yum update' on the admin node and re-bootstrap if needed."""
        logger.info("Searching for updates..")
        update_command = "yum clean expire-cache; yum update -y"

        update_result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=update_command)

        logger.info('Result of "{1}" command on master node: ' "{0}".format(update_result, update_command))
        assert_equal(int(update_result["exit_code"]), 0, "Packages update failed, " "inspect logs for details")

        # Check if any packets were updated and update was successful
        # NOTE(review): if stdout is empty the match_* names below are
        # never bound and the next 'if' raises NameError -- worth fixing.
        for str_line in update_result["stdout"]:
            match_updated_count = re.search("Upgrade(?:\s*)(\d+).*Package", str_line)
            if match_updated_count:
                updates_count = match_updated_count.group(1)
            match_complete_message = re.search("(Complete!)", str_line)
            match_no_updates = re.search("No Packages marked for Update", str_line)

        if (not match_updated_count or match_no_updates) and not match_complete_message:
            logger.warning("No updates were found or update was incomplete.")
            return
        logger.info("{0} packet(s) were updated".format(updates_count))

        cmd = "bootstrap_admin_node.sh;"

        result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=cmd)
        logger.info('Result of "{1}" command on master node: ' "{0}".format(result, cmd))
        assert_equal(int(result["exit_code"]), 0, "bootstrap failed, " "inspect logs for details")

    # Modifies a resolv.conf on the Fuel master node and returns
    # its original content.
    # * adds 'nameservers' at start of resolv.conf if merge=True
    # * replaces resolv.conf with 'nameservers' if merge=False
    def modify_resolv_conf(self, nameservers=None, merge=True):
        if nameservers is None:
            nameservers = []

        resolv_conf = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd="cat /etc/resolv.conf")
        assert_equal(
            0,
            resolv_conf["exit_code"],
            'Executing "{0}" on the admin node has failed with: {1}'.format(
                "cat /etc/resolv.conf", resolv_conf["stderr"]
            ),
        )
        if merge:
            nameservers.extend(resolv_conf["stdout"])
        # Keep only lines that look like resolv.conf directives.
        resolv_keys = ["search", "domain", "nameserver"]
        resolv_new = "".join("{0}\n".format(ns) for ns in nameservers if any(x in ns for x in resolv_keys))
        logger.debug('echo "{0}" > /etc/resolv.conf'.format(resolv_new))
        echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
        echo_result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=echo_cmd)
        assert_equal(
            0,
            echo_result["exit_code"],
            'Executing "{0}" on the admin node has failed with: {1}'.format(echo_cmd, echo_result["stderr"]),
        )
        return resolv_conf["stdout"]

    @logwrap
    def execute_remote_cmd(self, remote, cmd, exit_code=0):
        """Run *cmd* on *remote*, assert the exit code, return stdout."""
        result = remote.execute(cmd)
        assert_equal(result["exit_code"], exit_code, 'Failed to execute "{0}" on remote host: {1}'.format(cmd, result))
        return result["stdout"]

    @logwrap
    def describe_other_admin_interfaces(self, admin):
        """Configure extra admin-network interfaces on the master node."""
        admin_networks = [iface.network.name for iface in admin.interfaces]
        iface_name = None
        for i, network_name in enumerate(admin_networks):
            if "admin" in network_name and "admin" != network_name:
                # This will be replaced with actual interface labels
                # form fuel-devops
                iface_name = "enp0s" + str(i + 3)
                logger.info(
                    "Describe Fuel admin node interface {0} for " "network {1}".format(iface_name, network_name)
                )
                self.describe_admin_interface(iface_name, network_name)

        if iface_name:
            return self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd="cobbler sync")

    @logwrap
    def describe_admin_interface(self, admin_if, network_name):
        """Assign a static IP config to *admin_if* for *network_name*.

        Writes an ifcfg script, brings the interface up, and applies the
        second-admin DHCP/firewall hacks.
        """
        admin_net_object = self.d_env.get_network(name=network_name)
        admin_network = admin_net_object.ip.network
        admin_netmask = admin_net_object.ip.netmask
        admin_ip = str(self.d_env.nodes().admin.get_ip_address_by_network_name(network_name))
        logger.info(
            (
                "Parameters for admin interface configuration: "
                "Network - {0}, Netmask - {1}, Interface - {2}, "
                "IP Address - {3}"
            ).format(admin_network, admin_netmask, admin_if, admin_ip)
        )
        add_admin_ip = (
            "DEVICE={0}\\n"
            "ONBOOT=yes\\n"
            "NM_CONTROLLED=no\\n"
            "USERCTL=no\\n"
            "PEERDNS=no\\n"
            "BOOTPROTO=static\\n"
            "IPADDR={1}\\n"
            "NETMASK={2}\\n"
        ).format(admin_if, admin_ip, admin_netmask)
        cmd = (
            'echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};' "ifup {1}; ip -o -4 a s {1} | grep -w {2}"
        ).format(add_admin_ip, admin_if, admin_ip)
        logger.debug("Trying to assign {0} IP to the {1} on master node...".format(admin_ip, admin_if))

        result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=cmd)
        assert_equal(
            result["exit_code"], 0, ("Failed to assign second admin " "IP address on master node: {0}").format(result)
        )
        logger.debug("Done: {0}".format(result["stdout"]))

        # TODO for ssh manager
        multiple_networks_hacks.configure_second_admin_dhcp(self.ssh_manager.admin_ip, admin_if)
        multiple_networks_hacks.configure_second_admin_firewall(
            self.ssh_manager.admin_ip, admin_network, admin_netmask, admin_if, self.get_admin_node_ip()
        )

    @logwrap
    def get_masternode_uuid(self):
        """Return the master node UID from the nailgun database."""
        return self.postgres_actions.run_query(
            db="nailgun", query="select master_node_uid from master_node_settings limit 1;"
        )
class EnvironmentModel(six.with_metaclass(SingletonMeta, object)):
    """Singleton facade over the devops environment and the Fuel master node.

    Bundles SSH access to the admin node with the various *Actions helpers
    used throughout the system tests.
    """

    def __init__(self, config=None):
        """Initialize SSH connectivity and action helpers.

        :param config: optional devops template dict used to create the env
        """
        # SingletonMeta can re-run __init__ on the shared instance; the
        # hasattr guards preserve lazily created members across re-inits.
        if not hasattr(self, "_virt_env"):
            self._virt_env = None
        if not hasattr(self, "_fuel_web"):
            self._fuel_web = None
        self._config = config
        self.ssh_manager = SSHManager()
        self.ssh_manager.initialize(
            self.get_admin_node_ip(),
            admin_login=settings.SSH_FUEL_CREDENTIALS["login"],
            admin_password=settings.SSH_FUEL_CREDENTIALS["password"],
            slave_login=settings.SSH_SLAVE_CREDENTIALS["login"],
            slave_password=settings.SSH_SLAVE_CREDENTIALS["password"],
        )
        self.admin_actions = AdminActions()
        self.base_actions = BaseActions()
        self.cobbler_actions = CobblerActions()
        self.nailgun_actions = NailgunActions()
        self.postgres_actions = PostgresActions()
        self.fuel_bootstrap_actions = FuelBootstrapCliActions()

    @property
    def fuel_web(self):
        """Lazily constructed FuelWebClient bound to this environment."""
        client = self._fuel_web
        if client is None:
            client = FuelWebClient(self)
            self._fuel_web = client
        return client

    def __repr__(self):
        """Debug representation; includes the admin IP when fuel_web exists."""
        ip = self.fuel_web.admin_node_ip if getattr(self, "_fuel_web") else None
        return "[{klass}({obj_id}), ip:{ip}]".format(
            klass=type(self), obj_id=hex(id(self)), ip=ip
        )

    @property
    def admin_node_ip(self):
        """Admin (master) node IP as reported by the Fuel web client."""
        web_client = self.fuel_web
        return web_client.admin_node_ip

    @property
    def collector(self):
        """Fresh CollectorClient pointed at the analytics JSON endpoint."""
        endpoint = "api/v1/json"
        return CollectorClient(settings.ANALYTICS_IP, endpoint)

    @logwrap
    def add_syslog_server(self, cluster_id, port=5514):
        """Register the devops router as a syslog server for *cluster_id*."""
        router_ip = self.d_env.router()
        self.fuel_web.add_syslog_server(cluster_id, router_ip, port)

    def bootstrap_nodes(self, devops_nodes, timeout=settings.BOOTSTRAP_TIMEOUT, skip_timesync=False):
        """Start the given devops VMs and wait for nailgun registration.

        :param devops_nodes: iterable of devops node objects to boot
        :param timeout: int, seconds to wait for all nodes to register
        :param skip_timesync: bool, skip post-bootstrap time sync when True
        :rtype : List of registered nailgun nodes
        """
        # self.dhcrelay_check()

        for node in devops_nodes:
            logger.info("Bootstrapping node: {}".format(node.name))
            node.start()
            # TODO(aglarendil): LP#1317213 temporary sleep
            # remove after better fix is applied
            time.sleep(5)

        with TimeStat("wait_for_nodes_to_start_and_register_in_nailgun"):
            wait(
                lambda: all(self.nailgun_nodes(devops_nodes)),
                15,
                timeout,
                timeout_msg="Bootstrap timeout for nodes: {}" "".format([node.name for node in devops_nodes]),
            )

        if not skip_timesync:
            self.sync_time()
        return self.nailgun_nodes(devops_nodes)

    def sync_time(self, nodes_names=None, skip_sync=False):
        """Synchronize clocks on the named nodes (all active master/slave
        nodes when *nodes_names* is None) and log the resulting times."""
        if nodes_names is None:
            wanted_roles = ["fuel_master", "fuel_slave"]
            nodes_names = [
                node.name
                for node in self.d_env.get_nodes()
                if node.role in wanted_roles and node.driver.node_active(node)
            ]
        logger.info(
            "Please wait while time on nodes: {0} will be synchronized".format(", ".join(sorted(nodes_names)))
        )
        # ``sync_time`` below is the module-level helper, not this method.
        new_time = sync_time(self.d_env, nodes_names, skip_sync)
        for node_name in sorted(new_time):
            logger.info("New time on '{0}' = {1}".format(node_name, new_time[node_name]))

    @logwrap
    def get_admin_node_ip(self):
        """Return the admin node IP on the admin network, as a string."""
        admin_node = self.d_env.nodes().admin
        return str(admin_node.get_ip_address_by_network_name(self.d_env.admin_net))

    @logwrap
    def get_ebtables(self, cluster_id, devops_nodes):
        """Build an Ebtables helper from node target devs and cluster VLANs."""
        target_devs = self.get_target_devs(devops_nodes)
        cluster_vlans = self.fuel_web.client.get_cluster_vlans(cluster_id)
        return Ebtables(target_devs, cluster_vlans)

    def get_keys(self, node, custom=None, build_images=None, iso_connect_as="cdrom"):
        """Build the keystroke sequence that configures the ISO boot prompt.

        :param node: devops admin node the addressing is read from
        :param custom: unused here; kept for interface compatibility
        :param build_images: truthy -> pass build_images=1 on the kernel line
        :param iso_connect_as: "usb" or "cdrom", selects the boot-menu keys
        :return: str, keys to feed to the VM console
        """
        params = {
            "device_label": settings.ISO_LABEL,
            "iface": iface_alias("eth0"),
            "ip": node.get_ip_address_by_network_name(self.d_env.admin_net),
            "mask": self.d_env.get_network(name=self.d_env.admin_net).ip.netmask,
            "gw": self.d_env.router(),
            "hostname": "".join((settings.FUEL_MASTER_HOSTNAME, settings.DNS_SUFFIX)),
            "nat_interface": self.d_env.nat_interface,
            "nameserver": settings.DNS,
            "showmenu": "yes" if settings.SHOW_FUELMENU else "no",
            "wait_for_external_config": "yes",
            "build_images": "1" if build_images else "0",
            "MASTER_NODE_EXTRA_PACKAGES": settings.MASTER_NODE_EXTRA_PACKAGES,
        }
        # TODO(akostrikov) add tests for menu items/kernel parameters
        # TODO(akostrikov) refactor it.
        if iso_connect_as == "usb":
            keys = "<Wait>\n" "<F12>\n" "2\n"  # USB boot uses boot_menu=yes for master node
        else:  # cdrom is default
            keys = "<Wait>\n" "<Wait>\n" "<Wait>\n"

        # kernel command line typed at the boot prompt (ip=... is the
        # anaconda/dracut static-network syntax)
        keys += (
            "<Esc>\n"
            "<Wait>\n"
            "vmlinuz initrd=initrd.img"
            " inst.ks=cdrom:LABEL=%(device_label)s:/ks.cfg"
            " inst.repo=cdrom:LABEL=%(device_label)s:/"
            " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
            ":%(iface)s:off::: nameserver=%(nameserver)s"
            " showmenu=%(showmenu)s\n"
            " wait_for_external_config=%(wait_for_external_config)s"
            " build_images=%(build_images)s\n"
            " MASTER_NODE_EXTRA_PACKAGES='%(MASTER_NODE_EXTRA_PACKAGES)s'\n"
            " <Enter>\n"
        ) % params
        return keys

    @staticmethod
    def get_target_devs(devops_nodes):
        """Collect ``target_dev`` of every interface across *devops_nodes*."""
        return [
            iface.target_dev
            for node in devops_nodes
            for iface in node.interfaces
        ]

    @property
    def d_env(self):
        """Devops environment handle, fetched or created lazily.

        NOTE(review): when ``Environment.get`` succeeds the result is returned
        without being cached in ``self._virt_env`` -- presumably intentional
        for pre-existing environments, but worth confirming.
        """
        if self._virt_env is None:
            if not self._config:
                try:
                    # reuse an already-defined environment if one exists
                    return Environment.get(name=settings.ENV_NAME)
                except Exception:
                    # none found -- describe and define a fresh one
                    self._virt_env = Environment.describe_environment(boot_from=settings.ADMIN_BOOT_DEVICE)
                    self._virt_env.define()
            else:
                try:
                    return Environment.get(name=self._config["template"]["devops_settings"]["env_name"])
                except Exception:
                    self._virt_env = Environment.create_environment(full_config=self._config)
                    self._virt_env.define()
        return self._virt_env

    def resume_environment(self):
        """Resume the devops env and make sure the admin node comes back.

        If the admin node does not answer after resume, it is destroyed and
        cold-booted, after which Fuel readiness and stats settings are
        restored.
        """
        self.d_env.resume()
        admin = self.d_env.nodes().admin

        # stale SSH sessions are useless after suspend/resume
        self.ssh_manager.clean_all_connections()

        try:
            # NOTE(review): ``await`` as a method name is Python-2-era code;
            # it is a reserved keyword in Python >= 3.7.
            admin.await(self.d_env.admin_net, timeout=30, by_port=8000)
        except Exception as e:
            logger.warning("From first time admin isn't reverted: " "{0}".format(e))
            admin.destroy()
            logger.info("Admin node was destroyed. Wait 10 sec.")
            time.sleep(10)

            admin.start()
            logger.info("Admin node started second time.")
            self.d_env.nodes().admin.await(self.d_env.admin_net)
            self.set_admin_ssh_password()
            self.admin_actions.wait_for_fuel_ready(timeout=600)

            # set collector address in case of admin node destroy
            if settings.FUEL_STATS_ENABLED:
                self.nailgun_actions.set_collector_address(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT, settings.FUEL_STATS_SSL
                )
                # Restart statsenderd in order to apply new collector address
                self.nailgun_actions.force_fuel_stats_sending()
                self.fuel_web.client.send_fuel_stats(enabled=True)
                logger.info(
                    "Enabled sending of statistics to {0}:{1}".format(
                        settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
                    )
                )
        self.set_admin_ssh_password()
        self.admin_actions.wait_for_fuel_ready()

    def make_snapshot(self, snapshot_name, description="", is_make=False):
        """Suspend and snapshot the env when snapshotting is enabled, then
        resume it if FUEL_STATS_CHECK needs a live environment."""
        should_snapshot = settings.MAKE_SNAPSHOT or is_make
        if should_snapshot:
            self.d_env.suspend()
            # let the hypervisor settle before taking the snapshot
            time.sleep(10)
            self.d_env.snapshot(snapshot_name, force=True, description=description)
            revert_info(snapshot_name, self.get_admin_node_ip(), description)

        if settings.FUEL_STATS_CHECK:
            self.resume_environment()

    def nailgun_nodes(self, devops_nodes):
        """Map each devops node to its registered nailgun counterpart."""
        lookup = self.fuel_web.get_nailgun_node_by_devops_node
        return [lookup(node) for node in devops_nodes]

    def check_slaves_are_ready(self):
        """Wait for every active slave to report online; always returns True."""
        active_slaves = [
            node for node in self.d_env.nodes().slaves if node.driver.node_active(node)
        ]
        # Bug: 1455753
        time.sleep(30)

        self.fuel_web.wait_nodes_get_online_state(active_slaves, timeout=60 * 6)
        return True

    def revert_snapshot(self, name, skip_timesync=False, skip_slaves_check=False):
        """Revert the devops env to snapshot *name* and bring it to a usable state.

        :param name: str, snapshot name
        :param skip_timesync: bool, sync time only on the admin node when True
        :param skip_slaves_check: bool, skip waiting for slaves when True
        :return: bool, False if the snapshot does not exist, True otherwise
        """
        if not self.d_env.has_snapshot(name):
            return False

        logger.info("We have snapshot with such name: {:s}".format(name))

        logger.info("Reverting the snapshot '{0}' ....".format(name))
        self.d_env.revert(name)

        logger.info("Resuming the snapshot '{0}' ....".format(name))
        self.resume_environment()

        if not skip_timesync:
            self.sync_time()
        else:
            self.sync_time(["admin"])
        try:
            # wait until the nailgun API answers again after the revert
            with QuietLogger(upper_log_level=logging.CRITICAL):
                # TODO(astudenov): add timeout_msg
                wait_pass(
                    self.fuel_web.client.get_releases,
                    expected=(exceptions.RetriableConnectionFailure, exceptions.UnknownConnectionError),
                    timeout=300,
                )
        except exceptions.Unauthorized:
            # keystone password in the snapshot differs from the configured one
            self.set_admin_keystone_password()
            self.fuel_web.get_nailgun_version()

        if not skip_slaves_check:
            # TODO(astudenov): add timeout_msg
            wait_pass(lambda: self.check_slaves_are_ready(), timeout=60 * 6)
        return True

    def set_admin_ssh_password(self):
        """Ensure SSH to the admin node works with the configured credentials.

        Tries the configured login first; on failure falls back to the
        default credentials and changes the password to the configured one.

        NOTE(review): the literal "******" credentials below look like
        scrubbed placeholders for the distribution defaults -- confirm against
        the original source before relying on this fallback path.
        """
        new_login = settings.SSH_FUEL_CREDENTIALS["login"]
        new_password = settings.SSH_FUEL_CREDENTIALS["password"]
        try:
            self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip, cmd="date")
            logger.debug("Accessing admin node using SSH: SUCCESS")
        except Exception:
            logger.debug("Accessing admin node using SSH credentials:" " FAIL, trying to change password from default")
            self.ssh_manager.initialize(
                admin_ip=self.ssh_manager.admin_ip,
                admin_login="******",
                admin_password="******",
                slave_login=settings.SSH_SLAVE_CREDENTIALS["login"],
                slave_password=settings.SSH_SLAVE_CREDENTIALS["password"],
            )
            # feed the new password twice to `passwd` (new + confirmation)
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip, cmd='echo -e "{1}\\n{1}" | passwd {0}'.format(new_login, new_password)
            )
            self.ssh_manager.initialize(
                admin_ip=self.ssh_manager.admin_ip,
                admin_login=new_login,
                admin_password=new_password,
                slave_login=settings.SSH_SLAVE_CREDENTIALS["login"],
                slave_password=settings.SSH_SLAVE_CREDENTIALS["password"],
            )
            self.ssh_manager.update_connection(ip=self.ssh_manager.admin_ip, login=new_login, password=new_password)
            logger.debug("Admin node password has changed.")
        logger.info("Admin node login name: '{0}' , password: '******'".format(new_login, new_password))

    def set_admin_keystone_password(self):
        """Reset the Fuel keystone password to the configured credentials.

        If the nailgun API is reachable with the current credentials this is
        a no-op; otherwise the password is changed via the fuel CLI and the
        client/server config files are updated to match.
        """
        try:
            self.fuel_web.client.get_releases()
        # TODO(akostrikov) CENTOS7 except exceptions.Unauthorized:
        # Fix: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd="fuel user --newpass {0} --change-password".format(settings.KEYSTONE_CREDS["password"]),
            )
            config_file = self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip, cmd="ls -1 $HOME/.config/fuel/fuel_client.yaml"
            )["stdout_str"]

            # keep the CLI client config in sync with the new credentials
            with YamlEditor(config_file, ip=self.admin_node_ip) as editor:
                editor.content["OS_USERNAME"] = settings.KEYSTONE_CREDS["username"]
                editor.content["OS_PASSWORD"] = settings.KEYSTONE_CREDS["password"]

            # ... and the master node's own settings file
            with YamlEditor(settings.FUEL_SETTINGS_YAML, ip=self.admin_node_ip) as editor:
                editor.content["FUEL_ACCESS"]["user"] = settings.KEYSTONE_CREDS["username"]
                editor.content["FUEL_ACCESS"]["password"] = settings.KEYSTONE_CREDS["password"]

            logger.info(
                'New Fuel UI (keystone) username: "******", password: "******"'.format(
                    settings.KEYSTONE_CREDS["username"], settings.KEYSTONE_CREDS["password"]
                )
            )

    def setup_environment(
        self,
        custom=settings.CUSTOM_ENV,
        build_images=settings.BUILD_IMAGES,
        iso_connect_as=settings.ADMIN_BOOT_DEVICE,
        security=settings.SECURITY_TEST,
    ):
        """Create the environment and install the Fuel master node from ISO.

        :param custom: bool, apply custom packages/manifests before bootstrap
        :param build_images: bool, build target images during installation
        :param iso_connect_as: str, "usb" or "cdrom" boot device
        :param security: bool, additionally deploy a Nessus scan node
        """
        # Create environment and start the Fuel master node
        admin = self.d_env.nodes().admin
        self.d_env.start([admin])

        logger.info("Waiting for admin node to start up")
        wait(lambda: admin.driver.node_active(admin), 60, timeout_msg="Admin node startup timeout")
        logger.info("Proceed with installation")
        # update network parameters at boot screen
        admin.send_keys(self.get_keys(admin, custom=custom, build_images=build_images, iso_connect_as=iso_connect_as))
        if settings.SHOW_FUELMENU:
            self.wait_for_fuelmenu()
        else:
            self.wait_for_provisioning()

        self.set_admin_ssh_password()

        # installer pauses on the wait_for_external_config lock until we
        # finish customisation and kill the waiter below
        self.wait_for_external_config()
        if custom:
            self.setup_customisation()
        if security:
            nessus_node = NessusActions(self.d_env)
            nessus_node.add_nessus_node()
        # wait while installation complete

        self.admin_actions.modify_configs(self.d_env.router())
        if CUSTOM_FUEL_SETTING_YAML:
            self.admin_actions.update_fuel_setting_yaml(CUSTOM_FUEL_SETTING_YAML)
        self.kill_wait_for_external_config()
        self.wait_bootstrap()
        self.admin_actions.wait_for_fuel_ready()

    @logwrap
    def enable_force_https(self, admin_node_ip):
        """Force HTTPS on the Fuel UI by setting force_https and re-applying
        the nginx puppet manifest, then wait for nginx to be alive.

        :param admin_node_ip: str, IP of the Fuel master node
        """
        cmd = """
        echo -e '"SSL":\n  "force_https": "true"' >> /etc/fuel/astute.yaml
        """
        self.ssh_manager.execute_on_remote(admin_node_ip, cmd)
        cmd = 'find / -name "nginx_services.pp"'
        puppet_manifest = self.ssh_manager.execute_on_remote(admin_node_ip, cmd)["stdout"][0].strip()
        cmd = "puppet apply {0}".format(puppet_manifest)
        self.ssh_manager.execute_on_remote(admin_node_ip, cmd)
        # Fix: raw string -- ``\s``, ``\(`` and ``\w`` are invalid escape
        # sequences in a normal string literal (W605, a future SyntaxError);
        # the command bytes are unchanged.
        cmd = r"""
        systemctl status nginx.service |
        awk 'match($0, /\s+Active:.*\((\w+)\)/, a) {print a[1]}'
        """
        wait(
            lambda: (self.ssh_manager.execute_on_remote(admin_node_ip, cmd)["stdout"][0] != "dead"),
            interval=10,
            timeout=30,
            timeout_msg="Nginx service is dead after trying to enable " "it with the command: {}".format(cmd),
        )

    # pylint: disable=no-self-use
    @update_rpm_packages
    @upload_manifests
    def setup_customisation(self):
        """Apply custom RPMs/manifests before master node bootstrap.

        The actual work happens in the ``update_rpm_packages`` and
        ``upload_manifests`` decorators; the body only logs.
        """
        logger.info("Installing custom packages/manifests " "before master node bootstrap...")

    # pylint: enable=no-self-use

    @logwrap
    def wait_for_provisioning(self, timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait until the admin node accepts TCP connections on port 22."""
        def _admin_ssh_port_open():
            admin = self.d_env.nodes().admin
            return tcp_ping_(admin.get_ip_address_by_network_name(self.d_env.admin_net), 22)

        # TODO(astudenov): add timeout_msg
        wait_pass(_admin_ssh_port_open, timeout=timeout)

    @logwrap
    def wait_for_fuelmenu(self, timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait for fuelmenu, repeatedly sending F8 to close it until the
        admin node becomes reachable over SSH."""

        def _fuelmenu_closed():
            """Try to close fuelmenu and check ssh connection"""
            try:
                tcp_ping_(self.d_env.nodes().admin.get_ip_address_by_network_name(self.d_env.admin_net), 22)
            except Exception:
                # send F8 trying to exit fuelmenu
                self.d_env.nodes().admin.send_keys("<F8>\n")
                return False
            return True

        wait(
            _fuelmenu_closed,
            interval=30,
            timeout=timeout,
            timeout_msg="Fuelmenu hasn't appeared during allocated timeout",
        )

    @logwrap
    def wait_for_external_config(self, timeout=120):
        """Wait for the installer's wait_for_external_config pause point.

        First waits for the lock file to appear, then for the waiter process.

        NOTE(review): the lock-file wait hard-codes 600 s and ignores the
        ``timeout`` parameter, which only bounds the process check -- confirm
        whether that is intentional.
        """

        # lock file created by the installer when it starts waiting
        wait(
            lambda: self.ssh_manager.exists_on_remote(self.ssh_manager.admin_ip, "/var/lock/wait_for_external_config"),
            timeout=600,
            timeout_msg="wait_for_external_config lock file timeout " "while bootstrapping the Fuel master node",
        )

        # pkill -0 only probes for a matching process (exit code 0 == found)
        check_cmd = "pkill -0 -f wait_for_external_config"

        wait(
            lambda: self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=check_cmd)["exit_code"] == 0,
            timeout=timeout,
            timeout_msg="wait_for_external_config process timeout " "while bootstrapping the Fuel master node",
        )

    @logwrap
    def kill_wait_for_external_config(self):
        """Terminate wait_for_external_config on the admin node and verify
        that no matching process remains."""
        admin_ip = self.ssh_manager.admin_ip
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd='pkill -f "^wait_for_external_config"')
        # pkill -0 probes for the process; [[ $? -eq 1 ]] succeeds only when none is left
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd='pkill -0 -f "^wait_for_external_config"; [[ $? -eq 1 ]]')

    def wait_bootstrap(self):
        """Wait for master node bootstrap completion and verify its success.

        Polls the puppet bootstrap log for the completion marker, then checks
        the full success message and the bootstrap image.

        :raises Exception: if the log reports deployment did not complete
        """
        logger.info("Waiting while bootstrapping is in progress")
        log_path = "/var/log/puppet/bootstrap_admin_node.log"
        logger.info("Running bootstrap (timeout: {0})".format(float(settings.ADMIN_NODE_BOOTSTRAP_TIMEOUT)))
        with TimeStat("admin_node_bootsrap_time", is_uniq=True):
            wait(
                lambda: self.ssh_manager.execute(
                    ip=self.ssh_manager.admin_ip, cmd="grep 'Fuel node deployment' '{:s}'".format(log_path)
                )["exit_code"]
                == 0,
                timeout=(float(settings.ADMIN_NODE_BOOTSTRAP_TIMEOUT)),
                timeout_msg="Fuel master node bootstrap timeout, " "please check the log {}".format(log_path),
            )
        # the generic marker above matches both success and failure lines;
        # the 'complete' message distinguishes a successful deployment
        result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip, cmd="grep 'Fuel node deployment " "complete' '{:s}'".format(log_path)
        )["exit_code"]
        if result != 0:
            raise Exception("Fuel node deployment failed.")
        self.bootstrap_image_check()

    def dhcrelay_check(self):
        """Assert that DHCP discovery on the admin iface sees the master IP."""
        # CentOS 7 is pretty stable with admin iface.
        # TODO(akostrikov) refactor it.
        command = "dhcpcheck discover --ifaces {iface} --repeat 3 --timeout 10".format(iface=iface_alias("eth0"))
        result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=command)
        discovered = "".join(result["stdout"])
        assert_true(self.get_admin_node_ip() in discovered, "dhcpcheck doesn't discover master ip")

    def bootstrap_image_check(self):
        """Verify the Ubuntu bootstrap image is built and active; no-op for
        non-Ubuntu bootstrap flavors."""
        fuel_settings = self.admin_actions.get_fuel_settings()
        flavor = fuel_settings["BOOTSTRAP"]["flavor"]
        if flavor.lower() != "ubuntu":
            logger.warning("Default image for bootstrap is not based on Ubuntu!")
            return

        listing = self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip, cmd="fuel-bootstrap --quiet list"
        )["stdout"]
        has_active_image = any("active" in line for line in listing)
        assert_true(
            has_active_image,
            "Ubuntu bootstrap image wasn't built and activated! "
            "See logs in /var/log/fuel-bootstrap-image-build.log "
            "for details.",
        )

    def admin_install_pkg(self, pkg_name):
        """Install a package <pkg_name> on the admin node.

        :param pkg_name: str, package name to query/install via yum
        :return: int, exit code of the query or install command
        """
        # Fix: the query command had a stray trailing quote ("rpm -q {0}'")
        # which made the installed-check malformed, so the package was always
        # reinstalled.
        remote_status = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd="rpm -q {0}".format(pkg_name))
        if remote_status["exit_code"] == 0:
            logger.info("Package '{0}' already installed.".format(pkg_name))
        else:
            logger.info("Installing package '{0}' ...".format(pkg_name))
            remote_status = self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip, cmd="yum -y install {0}".format(pkg_name)
            )
            logger.info(
                "Installation of the package '{0}' has been"
                " completed with exit code {1}".format(pkg_name, remote_status["exit_code"])
            )
        return remote_status["exit_code"]

    def admin_run_service(self, service_name):
        """Start a service <service_name> on the admin node and log whether
        it reports running afterwards."""
        admin_ip = self.ssh_manager.admin_ip
        self.ssh_manager.execute(ip=admin_ip, cmd="service {0} start".format(service_name))
        status = self.ssh_manager.execute(ip=admin_ip, cmd="service {0} status".format(service_name))
        is_running = any("running..." in line for line in status["stdout"])
        if is_running:
            logger.info("Service '{0}' is running".format(service_name))
        else:
            logger.info(
                "Service '{0}' failed to start with exit code {1} :\n{2}".format(
                    service_name, status["exit_code"], status["stdout"]
                )
            )

    # Execute yum updates
    # If updates installed,
    # then `bootstrap_admin_node.sh;`
    def admin_install_updates(self):
        """Run yum update on the admin node and re-bootstrap if anything changed.

        Parses the yum output to count upgraded/installed packages; when
        updates were applied, disables the interactive bits of
        bootstrap_admin_node.conf and re-runs bootstrap_admin_node.sh.
        """
        logger.info("Searching for updates..")
        update_command = "yum clean expire-cache && " "yum update -y 2>>/var/log/yum-update-error.log"

        logger.info("Performing yum clean and update commands")
        update_result = self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip, cmd=update_command, err_msg="Packages update failed, inspect logs for details"
        )

        logger.info("Packages were updated successfully")

        # Check if any packets were updated and update was successful
        match_updated_count = re.search(r"Upgrade\s+(\d+)\s+Package", update_result["stdout_str"])
        # In case of package replacement, the new one is marked as
        # installed and the old one as removed
        match_installed_count = re.search(r"Install\s+(\d+)\s+Package", update_result["stdout_str"])
        match_complete_message = re.search(r"Complete!", update_result["stdout_str"])

        match_no_updates = re.search("No Packages marked for Update", update_result["stdout_str"])

        if match_no_updates or not match_complete_message or not (match_updated_count or match_installed_count):
            logger.warning("No updates were found or update was incomplete.")
            return

        updates_count = 0

        if match_updated_count:
            updates_count += int(match_updated_count.group(1))

        if match_installed_count:
            updates_count += int(match_installed_count.group(1))

        logger.info("{0} package(s) were updated".format(updates_count))

        logger.warning(
            "'bootstrap_admin_node.sh' is used for applying 9.x release."
            "It should be replaced with proper procedure when it will be "
            "merged - https://review.openstack.org/#/c/346119/ "
        )
        # this is temporary solution for disabling 50min timeout;
        # should be removed when the main script for 9.0->9.x will be merged
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="sed -i "
            '"s/wait_for_external_config=yes/wait_for_external_config=no/"'
            " /etc/fuel/bootstrap_admin_node.conf",
            raise_on_assert=False,
        )
        # devops is creating ssh connection without associated tty
        # which leads to broken fuelmenu. we do not need to call fuelmenu
        # while applying 9.x so turn it off
        # this is only addition to fix for fuel-devops for unlocking usb thread
        # without devops bump
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd="sed -i " '"s/showmenu=yes/showmenu=no/"' " /etc/fuel/bootstrap_admin_node.conf",
            raise_on_assert=False,
        )
        # end of temporary solutions

        cmd = "bootstrap_admin_node.sh;"

        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip, cmd=cmd, err_msg="bootstrap failed, inspect logs for details"
        )
        logger.info("bootstrap successfull")

    # Modifies a resolv.conf on the Fuel master node and returns
    # its original content.
    # * adds 'nameservers' at start of resolv.conf if merge=True
    # * replaces resolv.conf with 'nameservers' if merge=False
    def modify_resolv_conf(self, nameservers=None, merge=True):
        """Rewrite /etc/resolv.conf on the master node.

        :param nameservers: list of resolv.conf lines to put first (or use
            exclusively when merge=False); only lines containing
            search/domain/nameserver keywords are kept
        :param merge: bool, append the current resolv.conf content when True
        :return: list, original resolv.conf lines
        """
        # Fix: work on a copy -- the old code extended the caller's list
        # in place, leaking remote resolv.conf lines back to the caller.
        nameservers = [] if nameservers is None else list(nameservers)

        resolv_conf = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd="cat /etc/resolv.conf")
        assert_equal(
            0,
            resolv_conf["exit_code"],
            'Executing "{0}" on the admin node has failed with: {1}'.format(
                "cat /etc/resolv.conf", resolv_conf["stderr"]
            ),
        )
        if merge:
            nameservers.extend(resolv_conf["stdout"])
        # keep only meaningful resolver directives
        resolv_keys = ["search", "domain", "nameserver"]
        resolv_new = "".join("{0}\n".format(ns) for ns in nameservers if any(x in ns for x in resolv_keys))
        logger.debug('echo "{0}" > /etc/resolv.conf'.format(resolv_new))
        echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
        echo_result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=echo_cmd)
        assert_equal(
            0,
            echo_result["exit_code"],
            'Executing "{0}" on the admin node has failed with: {1}'.format(echo_cmd, echo_result["stderr"]),
        )
        return resolv_conf["stdout"]

    @staticmethod
    @logwrap
    def execute_remote_cmd(remote, cmd, exit_code=0):
        """Deprecated shim: run *cmd* via remote.check_call and return stdout.

        Kept only for old callers; emits a DeprecationWarning with the call
        stack so remaining usages can be located and migrated.

        :param remote: remote connection object with check_call()
        :param cmd: str, command to run
        :param exit_code: int, expected exit code
        :return: list, stdout lines of the command
        """
        msg = (
            "execute_remote_cmd() is old deprecated method, "
            "which should not be used anymore. "
            "please use remote.check_call() instead.\n"
            "Starting from fuel-devops 2.9.22 this methods will return all "
            "required data.\n"
            "{}".format("".join(traceback.format_stack()))
        )
        warn(msg, DeprecationWarning)
        logger.warning(msg)
        logger.critical("This method could be deleted on 01.09.2016 " "without any announcement!")
        result = remote.check_call(command=cmd, expected=[exit_code])
        return result["stdout"]

    @logwrap
    def describe_other_admin_interfaces(self, admin):
        """Configure every extra admin-* network interface on the master node
        and run ``cobbler sync`` if at least one was configured."""
        last_iface = None
        admin_networks = [iface.network.name for iface in admin.interfaces]
        for index, net_name in enumerate(admin_networks):
            if "admin" not in net_name or net_name == "admin":
                continue
            # This will be replaced with actual interface labels
            # from fuel-devops
            last_iface = "enp0s" + str(index + 3)
            logger.info("Describe Fuel admin node interface {0} for network {1}".format(last_iface, net_name))
            self.describe_admin_interface(last_iface, net_name)

        if last_iface:
            return self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd="cobbler sync")

    @logwrap
    def describe_admin_interface(self, admin_if, network_name):
        """Configure an extra admin interface on the Fuel master node.

        Writes an ifcfg file for *admin_if*, brings the interface up, verifies
        the IP was assigned, then applies second-admin DHCP/firewall hacks.

        :param admin_if: str, interface name on the master node (e.g. enp0s4)
        :param network_name: str, devops network name to take addressing from
        """
        admin_net_object = self.d_env.get_network(name=network_name)
        admin_network = admin_net_object.ip.network
        admin_netmask = admin_net_object.ip.netmask
        admin_ip = str(self.d_env.nodes().admin.get_ip_address_by_network_name(network_name))
        logger.info(
            (
                "Parameters for admin interface configuration: "
                "Network - {0}, Netmask - {1}, Interface - {2}, "
                "IP Address - {3}"
            ).format(admin_network, admin_netmask, admin_if, admin_ip)
        )
        # ifcfg file body; '\\n' stays literal in Python and is expanded by
        # the remote `echo -e` into real newlines.
        add_admin_ip = (
            "DEVICE={0}\\n"
            "ONBOOT=yes\\n"
            "NM_CONTROLLED=no\\n"
            "USERCTL=no\\n"
            "PEERDNS=no\\n"
            "BOOTPROTO=static\\n"
            "IPADDR={1}\\n"
            "NETMASK={2}\\n"
        ).format(admin_if, admin_ip, admin_netmask)
        # write config, bring the iface up, and grep verifies the IP is present
        cmd = (
            'echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};' "ifup {1}; ip -o -4 a s {1} | grep -w {2}"
        ).format(add_admin_ip, admin_if, admin_ip)
        logger.debug("Trying to assign {0} IP to the {1} on master node...".format(admin_ip, admin_if))

        result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=cmd)
        assert_equal(
            result["exit_code"], 0, ("Failed to assign second admin " "IP address on master node: {0}").format(result)
        )
        logger.debug("Done: {0}".format(result["stdout"]))

        # TODO for ssh manager
        multiple_networks_hacks.configure_second_admin_dhcp(self.ssh_manager.admin_ip, admin_if)
        multiple_networks_hacks.configure_second_admin_firewall(
            self.ssh_manager.admin_ip, admin_network, admin_netmask, admin_if, self.get_admin_node_ip()
        )

    @logwrap
    def get_masternode_uuid(self):
        """Fetch the master node UUID from the nailgun database."""
        query = "select master_node_uid from master_node_settings limit 1;"
        return self.postgres_actions.run_query(db="nailgun", query=query)
Beispiel #35
0
    def centos_setup_fuel(self, hostname):
        """Install the fuel-release package and bootstrap Fuel on CentOS.

        :param hostname: str, FQDN to assign to the master node
        :raises exceptions.FuelQAVariableNotSet: if FUEL_RELEASE_PATH is unset
        """
        logger.info("upload fuel-release packet")
        if not settings.FUEL_RELEASE_PATH:
            raise exceptions.FuelQAVariableNotSet('FUEL_RELEASE_PATH', '/path')
        # Fix: ``ssh`` and ``full_pack_path`` used to be assigned inside the
        # try block, so an early failure led to a NameError below instead of
        # the logged "Could not upload package" and a clear rpm failure.
        ssh = SSHManager()
        pack_path = '/tmp/'
        full_pack_path = os.path.join(pack_path,
                                      'fuel-release*.noarch.rpm')
        try:
            ssh.upload_to_remote(ip=ssh.admin_ip,
                                 source=settings.FUEL_RELEASE_PATH.rstrip('/'),
                                 target=pack_path)
        except Exception:
            # best effort: the rpm install below fails loudly if this mattered
            logger.exception("Could not upload package")

        logger.debug("Update host information")
        cmd = "echo HOSTNAME={} >> /etc/sysconfig/network".format(hostname)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "echo {0} {1} {2} >> /etc/hosts".format(
            ssh.admin_ip, hostname, settings.FUEL_MASTER_HOSTNAME)

        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "hostname {}".format(hostname)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        logger.debug("setup MOS repositories")
        cmd = "rpm -ivh {}".format(full_pack_path)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "yum install -y fuel-setup"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "yum install -y screen"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        logger.info("Install Fuel services")

        # run bootstrap in a detached screen; it blocks until completion
        cmd = "screen -dm bash -c 'showmenu=no wait_for_external_config=yes " \
              "bootstrap_admin_node.sh'"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        self.env.wait_for_external_config()
        self.env.admin_actions.modify_configs(self.env.d_env.router())
        self.env.kill_wait_for_external_config()

        self.env.wait_bootstrap()

        logger.debug("Check Fuel services")
        self.env.admin_actions.wait_for_fuel_ready()

        logger.debug("post-installation configuration of Fuel services")
        self.fuel_post_install_actions()
Beispiel #36
0
    def centos_setup_fuel(self, hostname):
        """Install the fuel-release package and bootstrap Fuel on CentOS.

        :param hostname: str, FQDN to assign to the master node
        :raises exceptions.FuelQAVariableNotSet: if FUEL_RELEASE_PATH is unset
        """
        logger.info("upload fuel-release packet")
        if not settings.FUEL_RELEASE_PATH:
            raise exceptions.FuelQAVariableNotSet('FUEL_RELEASE_PATH', '/path')
        # Fix: ``ssh`` and ``full_pack_path`` used to be assigned inside the
        # try block, so an early failure led to a NameError below instead of
        # the logged "Could not upload package" and a clear rpm failure.
        ssh = SSHManager()
        pack_path = '/tmp/'
        full_pack_path = os.path.join(pack_path,
                                      'fuel-release*.noarch.rpm')
        try:
            ssh.upload_to_remote(
                ip=ssh.admin_ip,
                source=settings.FUEL_RELEASE_PATH.rstrip('/'),
                target=pack_path)
        except Exception:
            # best effort: the rpm install below fails loudly if this mattered
            logger.exception("Could not upload package")

        logger.debug("Update host information")
        cmd = "echo HOSTNAME={} >> /etc/sysconfig/network".format(hostname)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "echo {0} {1} {2} >> /etc/hosts".format(
            ssh.admin_ip,
            hostname,
            settings.FUEL_MASTER_HOSTNAME)

        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "hostname {}".format(hostname)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        logger.debug("setup MOS repositories")
        cmd = "rpm -ivh {}".format(full_pack_path)
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "yum install -y fuel-setup"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        cmd = "yum install -y screen"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        logger.info("Install Fuel services")

        # run bootstrap in a detached screen; it blocks until completion
        cmd = "screen -dm bash -c 'showmenu=no wait_for_external_config=yes " \
              "bootstrap_admin_node.sh'"
        ssh.execute_on_remote(ssh.admin_ip, cmd=cmd)

        self.env.wait_for_external_config()
        self.env.admin_actions.modify_configs(self.env.d_env.router())
        self.env.kill_wait_for_external_config()

        self.env.wait_bootstrap()

        logger.debug("Check Fuel services")
        self.env.admin_actions.wait_for_fuel_ready()

        logger.debug("post-installation configuration of Fuel services")
        self.fuel_post_install_actions()
Beispiel #37
0
class CustomRepo(object):
    """Maintain a local mirror with custom packages on the master node.

    Downloads packages from an external custom mirror
    (``settings.CUSTOM_PKGS_MIRROR``) into the local Ubuntu/CentOS mirror
    on the admin node and re-generates the repository metadata there, so
    that the custom packages are picked up during cluster deployment.
    """

    def __init__(self):
        self.ssh_manager = SSHManager()
        self.ip = self.ssh_manager.admin_ip
        # Local directory holding the 'regenerate_*_repo' helper scripts
        self.path_scripts = ('{0}/fuelweb_test/helpers/'
                             .format(os.environ.get("WORKSPACE", "./")))
        self.remote_path_scripts = '/tmp/'
        self.ubuntu_script = 'regenerate_ubuntu_repo'
        self.centos_script = 'regenerate_centos_repo'
        self.local_mirror_ubuntu = settings.LOCAL_MIRROR_UBUNTU
        self.local_mirror_centos = settings.LOCAL_MIRROR_CENTOS
        self.ubuntu_release = settings.UBUNTU_RELEASE
        self.centos_supported_archs = ['noarch', 'x86_64']
        # Package descriptions parsed from the custom mirror indexes
        self.pkgs_list = []

        self.custom_pkgs_mirror_path = ''
        if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
            # Trying to determine the root of Ubuntu repository
            pkgs_path = settings.CUSTOM_PKGS_MIRROR.split('/dists/')
            if len(pkgs_path) == 2:
                self.custom_pkgs_mirror = pkgs_path[0]
                self.custom_pkgs_mirror_path = '/dists/{}'.format(pkgs_path[1])
            else:
                self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR
        else:
            self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR

    def prepare_repository(self):
        """Prepare admin node to packages testing

        Scenario:
            1. Temporary set nameserver to local router on admin node
            2. Install tools to manage rpm/deb repository
            3. Retrieve list of packages from custom repository
            4. Download packages to local rpm/deb repository
            5. Update .yaml file with new packages version
            6. Re-generate repo using shell scripts on admin node

        """
        # Check necessary settings and revert a snapshot
        if not self.custom_pkgs_mirror:
            return
        logger.info("Custom mirror with new packages: {0}"
                    .format(settings.CUSTOM_PKGS_MIRROR))

        if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
            # Ubuntu
            master_tools = ['dpkg', 'dpkg-devel', 'dpkg-dev']
            self.install_tools(master_tools)
            self.get_pkgs_list_ubuntu()
            pkgs_local_path = ('{0}/pool/'
                               .format(self.local_mirror_ubuntu))
            self.download_pkgs(pkgs_local_path)
            self.regenerate_repo(self.ubuntu_script, self.local_mirror_ubuntu)
        else:
            # CentOS
            master_tools = ['createrepo']
            self.install_tools(master_tools)
            self.get_pkgs_list_centos()
            pkgs_local_path = '{0}/Packages/'.format(self.local_mirror_centos)
            self.download_pkgs(pkgs_local_path)
            self.regenerate_repo(self.centos_script, self.local_mirror_centos)

    # Install tools to masternode
    def install_tools(self, master_tools=None):
        """Install the given packages on the admin node.

        :param master_tools: list of package names to install
        :raises AssertionError: if any package fails to install
        """
        if master_tools is None:
            master_tools = []
        logger.info("Installing necessary tools for {0}"
                    .format(settings.OPENSTACK_RELEASE))
        for master_tool in master_tools:
            exit_code = install_pkg_2(
                ip=self.ip,
                pkg_name=master_tool
            )
            assert_equal(0, exit_code, 'Cannot install package {0} '
                         'on admin node.'.format(master_tool))

    # Ubuntu: Creating list of packages from the additional mirror
    def get_pkgs_list_ubuntu(self):
        """Fill self.pkgs_list from the Ubuntu 'Packages' index.

        Falls back to the gzipped 'Packages.gz' index when the plain
        index is not reachable.

        :raises AssertionError: if a package stanza lacks one of the
            mandatory 'Package:', 'Version:', 'Filename:' fields
        """
        url = "{0}/{1}/Packages".format(self.custom_pkgs_mirror,
                                        self.custom_pkgs_mirror_path)
        logger.info("Retrieving additional packages from the custom mirror:"
                    " {0}".format(url))
        try:
            pkgs_release = urllib2.urlopen(url).read()
        except (urllib2.HTTPError, urllib2.URLError):
            logger.error(traceback.format_exc())
            url_gz = '{0}.gz'.format(url)
            logger.info(
                "Retrieving additional packages from the custom mirror:"
                " {0}".format(url_gz))
            try:
                pkgs_release_gz = urllib2.urlopen(url_gz).read()
            except (urllib2.HTTPError, urllib2.URLError):
                logger.error(traceback.format_exc())
                raise
            try:
                # MAX_WBITS | 32 enables gzip header auto-detection
                d = zlib.decompressobj(zlib.MAX_WBITS | 32)
                pkgs_release = d.decompress(pkgs_release_gz)
            except Exception:
                logger.error('Ubuntu mirror error: Could not decompress {0}\n'
                             '{1}'.format(url_gz, traceback.format_exc()))
                raise

        packages = (pkg for pkg in pkgs_release.split("\n\n") if pkg)
        for package in packages:
            # Build a 'field:' -> value dict, skipping continuation lines
            # (they start with whitespace).  The 'pstr and' guard avoids
            # an IndexError when a stanza ends with an empty line.
            upkg = {pstr.split()[0].lower(): ''.join(pstr.split()[1:])
                    for pstr in package.split("\n")
                    if pstr and pstr[0].strip()}

            upkg_keys = ["package:", "version:", "filename:"]
            assert_equal(True, all(x in upkg for x in upkg_keys),
                         'Missing one of the statements ["Package:", '
                         '"Version:", "Filename:"] in {0}'.format(url))
            # TODO: add dependencies list to upkg
            self.pkgs_list.append(upkg)

    # Centos: Creating list of packages from the additional mirror
    def get_pkgs_list_centos(self):
        """Fill self.pkgs_list from the CentOS repodata (repomd.xml).

        Locates the 'primary' metadata file, downloads it (gunzipping
        when needed) and collects name/version/filename for every rpm
        of a supported architecture.
        """
        logger.info("Retrieving additional packages from the custom mirror:"
                    " {0}".format(self.custom_pkgs_mirror))
        url = "{0}/repodata/repomd.xml".format(self.custom_pkgs_mirror)
        try:
            repomd_data = urllib2.urlopen(url).read()
        except (urllib2.HTTPError, urllib2.URLError):
            logger.error(traceback.format_exc())
            raise
        # Remove namespace attribute before parsing XML
        repomd_data = re.sub(' xmlns="[^"]+"', '', repomd_data, count=1)
        tree_repomd_data = ElementTree.fromstring(repomd_data)
        lists_location = ''
        for repomd in tree_repomd_data.findall('data'):
            if repomd.get('type') == 'primary':
                repomd_location = repomd.find('location')
                lists_location = repomd_location.get('href')

        # '!=' instead of "is not ''": identity comparison with a string
        # literal only worked through CPython interning and is a
        # SyntaxWarning on Python 3.8+.
        assert_equal(True, lists_location != '', 'CentOS mirror error:'
                     ' Could not parse {0}\nlists_location = "{1}"\n{2}'
                     .format(url, lists_location, traceback.format_exc()))
        url = "{0}/{1}".format(self.custom_pkgs_mirror, lists_location)
        try:
            lists_data = urllib2.urlopen(url).read()
        except (urllib2.HTTPError, urllib2.URLError):
            logger.error(traceback.format_exc())
            raise
        if '.xml.gz' in lists_location:
            try:
                # MAX_WBITS | 32 enables gzip header auto-detection
                d = zlib.decompressobj(zlib.MAX_WBITS | 32)
                lists_data = d.decompress(lists_data)
            except Exception:
                logger.error('CentOS mirror error: Could not decompress {0}\n'
                             '{1}'.format(url, traceback.format_exc()))
                raise

        # Remove namespace attribute before parsing XML
        lists_data = re.sub(' xmlns="[^"]+"', '', lists_data, count=1)

        tree_lists_data = ElementTree.fromstring(lists_data)

        for flist in tree_lists_data.findall('package'):
            if flist.get('type') == 'rpm':
                flist_arch = flist.find('arch').text
                if flist_arch in self.centos_supported_archs:
                    flist_name = flist.find('name').text
                    flist_location = flist.find('location')
                    flist_file = flist_location.get('href')
                    flist_version = flist.find('version')
                    flist_ver = '{0}-{1}'.format(flist_version.get('ver'),
                                                 flist_version.get('rel'))
                    cpkg = {'package:': flist_name,
                            'version:': flist_ver,
                            'filename:': flist_file}
                    # TODO: add dependencies list to cpkg
                    self.pkgs_list.append(cpkg)

    # Download packages (local_folder)
    def download_pkgs(self, pkgs_local_path):
        """Download every package from self.pkgs_list to the local mirror.

        :param pkgs_local_path: target directory on the admin node
        :raises AssertionError: if any wget invocation fails
        """
        # Process the packages list:
        total_pkgs = len(self.pkgs_list)
        logger.info('Found {0} custom package(s)'.format(total_pkgs))

        for npkg, pkg in enumerate(self.pkgs_list):
            # TODO: Previous versions of the updating packages must be removed
            # to avoid unwanted packet manager dependencies resolution
            # (when some package still depends on other package which
            # is not going to be installed)

            logger.info('({0}/{1}) Downloading package: {2}/{3}'
                        .format(npkg + 1, total_pkgs,
                                self.custom_pkgs_mirror,
                                pkg["filename:"]))

            # deb/udeb packages land in different pool sub-directories
            pkg_ext = pkg["filename:"].split('.')[-1]
            if pkg_ext == 'deb':
                path_suff = 'main/'
            elif pkg_ext == 'udeb':
                path_suff = 'debian-installer/'
            else:
                path_suff = ''

            wget_cmd = "wget --no-verbose --directory-prefix {0} {1}/{2}"\
                       .format(pkgs_local_path + path_suff,
                               self.custom_pkgs_mirror,
                               pkg["filename:"])
            wget_result = self.ssh_manager.execute(
                ip=self.ip,
                cmd=wget_cmd
            )
            assert_equal(0, wget_result['exit_code'],
                         self.assert_msg(wget_cmd, wget_result['stderr']))

    # Upload regenerate* script to masternode (script name)
    def regenerate_repo(self, regenerate_script, local_mirror_path):
        """Upload and run a repo-regeneration script on the admin node.

        :param regenerate_script: script file name (in self.path_scripts)
        :param local_mirror_path: repository root to regenerate
        :raises AssertionError: if the regeneration script fails
        """
        # Uploading scripts that prepare local repositories:
        # 'regenerate_centos_repo' and 'regenerate_ubuntu_repo'
        try:
            self.ssh_manager.upload_to_remote(
                ip=self.ip,
                source='{0}/{1}'.format(self.path_scripts, regenerate_script),
                target=self.remote_path_scripts
            )
            self.ssh_manager.execute_on_remote(
                ip=self.ip,
                cmd='chmod 755 {0}/{1}'.format(self.remote_path_scripts,
                                               regenerate_script)
            )
        except Exception:
            logger.error('Could not upload scripts for updating repositories.'
                         '\n{0}'.format(traceback.format_exc()))
            raise

        # Update the local repository using previously uploaded script.
        script_cmd = '{0}/{1} {2} {3}'.format(self.remote_path_scripts,
                                              regenerate_script,
                                              local_mirror_path,
                                              self.ubuntu_release)
        script_result = self.ssh_manager.execute(
            ip=self.ip,
            cmd=script_cmd
        )
        assert_equal(0, script_result['exit_code'],
                     self.assert_msg(script_cmd, script_result['stderr']))

        logger.info('Local repository {0} has been updated successfully.'
                    .format(local_mirror_path))

    def assert_msg(self, cmd, err):
        """Return a formatted error message for a failed remote command."""
        return 'Executing \'{0}\' on the admin node has failed with: {1}'\
               .format(cmd, err)

    def check_puppet_logs(self):
        """Log every package with unmet dependencies found in puppet logs."""
        logger.info("Check puppet logs for packages with unmet dependencies.")
        if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
            err_deps = self.check_puppet_logs_ubuntu()
        else:
            err_deps = self.check_puppet_logs_centos()

        for err_deps_key in err_deps.keys():
            logger.info('Error: Package: {0} has unmet dependencies:'
                        .format(err_deps_key))
            for dep in err_deps[err_deps_key]:
                logger.info('        {0}'.format(dep.strip()))
        logger.info("Check puppet logs completed.")

    def check_puppet_logs_ubuntu(self):
        """ Check puppet-agent.log files on all nodes for package
            dependency errors during a cluster deployment (ubuntu)

        :return: dict mapping package name to a set of unmet dependencies
        """

        err_start = 'The following packages have unmet dependencies:'
        err_end = ('Unable to correct problems,'
                   ' you have held broken packages.')
        cmd = ('fgrep -h -e " Depends: " -e "{0}" -e "{1}" '
               '/var/log/docker-logs/remote/node-*/'
               'puppet*.log'.format(err_start, err_end))
        result = self.ssh_manager.execute(
            ip=self.ip,
            cmd=cmd
        )['stdout']

        err_deps = {}
        err_deps_key = ''
        err_deps_flag = False

        # Forming a dictionary of package names
        # with sets of required packages.
        for res_str in result:
            if err_deps_flag:
                if err_end in res_str:
                    err_deps_flag = False
                elif ": Depends:" in res_str:
                    # First dependency line: the token before ': Depends:'
                    # is the package name.
                    str0, str1, str2 = res_str.partition(': Depends:')
                    err_deps_key = ''.join(str0.split()[-1:])
                    if err_deps_key not in err_deps:
                        err_deps[err_deps_key] = set()
                    if 'but it is not' in str2 or 'is to be installed' in str2:
                        err_deps[err_deps_key].add('Depends:{0}'
                                                   .format(str2))
                elif 'Depends:' in res_str and err_deps_key:
                    # Continuation line for the current package
                    str0, str1, str2 = res_str.partition('Depends:')
                    if 'but it is not' in str2 or 'is to be installed' in str2:
                        err_deps[err_deps_key].add(str1 + str2)
                else:
                    err_deps_key = ''
            elif err_start in res_str:
                err_deps_flag = True

        return err_deps

    def check_puppet_logs_centos(self):
        """ Check puppet-agent.log files on all nodes for package
            dependency errors during a cluster deployment (centos)

        :return: dict mapping package name to a set of unmet dependencies
        """

        cmd = ('fgrep -h -e "Error: Package: " -e " Requires: " /var/log/'
               'docker-logs/remote/node-*/puppet*.log')
        result = self.ssh_manager.execute(
            ip=self.ip,
            cmd=cmd
        )['stdout']

        err_deps = {}
        err_deps_key = ''

        # Forming a dictionary of package names
        # with sets of required packages.
        for res_str in result:
            if 'Error: Package:' in res_str:
                err_deps_key = res_str.partition('Error: Package: ')[2]
                if err_deps_key not in err_deps:
                    err_deps[err_deps_key] = set()
            elif ' Requires: ' in res_str and err_deps_key:
                str0, str1, str2 = res_str.partition(' Requires: ')
                err_deps[err_deps_key].add(str1 + str2)
            else:
                err_deps_key = ''

        return err_deps
# ---- Beispiel #38 (0) ----
def update_ostf():
    """Replace the fuel-ostf package on the master node with a new build.

    Uploads the rpm found at ``settings.UPDATE_FUEL_PATH`` to the admin
    node; when its version differs from the installed one, stops the OSTF
    service, swaps the package and waits until the service answers again.

    :raises AssertionError: if the new package is not installed afterwards
    """
    logger.info("Uploading new package from {0}".format(
        settings.UPDATE_FUEL_PATH))
    ssh = SSHManager()
    admin = ssh.admin_ip
    pack_path = '/var/www/nailgun/fuel-ostf/'
    full_pack_path = os.path.join(pack_path, 'fuel-ostf*.noarch.rpm')
    ssh.upload_to_remote(admin,
                         source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                         target=pack_path)

    # Compare the currently installed package with the uploaded one
    old_package = ssh.execute_on_remote(
        admin, cmd="rpm -q fuel-ostf")['stdout_str']
    logger.info('Current package version of '
                'fuel-ostf: {0}'.format(old_package))

    new_package = ssh.execute_on_remote(
        admin, cmd="rpm -qp {0}".format(full_pack_path))['stdout_str']
    logger.info('Package from review {0}'.format(new_package))

    if old_package == new_package:
        logger.info('Package {0} is installed'.format(new_package))
        return

    # Stop OSTF and wait until the service reports "dead"
    # (exit code 3 is the LSB "service not running" status)
    ssh.execute_on_remote(admin, cmd="service ostf stop")
    helpers.wait(
        lambda: "dead" in ssh.execute_on_remote(
            admin, cmd="service ostf status",
            raise_on_assert=False, assert_ec_equal=[3])['stdout_str'],
        timeout=60)
    logger.info("OSTF status: inactive")

    # Swap the packages and verify the new one is actually installed
    ssh.execute_on_remote(admin, cmd="rpm -e fuel-ostf")
    ssh.execute_on_remote(
        admin, cmd="rpm -Uvh --oldpackage {0}".format(full_pack_path))
    installed_package = ssh.execute_on_remote(
        admin, cmd="rpm -q fuel-ostf")['stdout_str']
    assert_equal(
        installed_package, new_package,
        "The new package {0} was not installed. Actual {1}".format(
            new_package, installed_package))

    # Start OSTF again and wait until its API answers (401 = up, needs auth)
    ssh.execute_on_remote(admin, cmd="service ostf start")
    helpers.wait(
        lambda: "running" in ssh.execute_on_remote(
            admin, cmd="service ostf status")['stdout_str'],
        timeout=60)
    http_check = "curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:8777"
    helpers.wait(
        lambda: "401" in ssh.execute_on_remote(
            admin, cmd=http_check, raise_on_assert=False)['stdout_str'],
        timeout=60)
    logger.info("OSTF status: RUNNING")
# ---- Beispiel #39 (0) ----
class CustomRepo(object):
    """Maintain a local mirror with custom packages on the master node.

    Downloads packages from an external custom mirror
    (``settings.CUSTOM_PKGS_MIRROR``) into the local Ubuntu/CentOS mirror
    on the admin node and re-generates the repository metadata there, so
    that the custom packages are picked up during cluster deployment.
    """

    def __init__(self):
        self.ssh_manager = SSHManager()
        self.ip = self.ssh_manager.admin_ip
        # Local directory holding the 'regenerate_*_repo' helper scripts
        self.path_scripts = ('{0}/fuelweb_test/helpers/'
                             .format(os.environ.get("WORKSPACE", "./")))
        self.remote_path_scripts = '/tmp/'
        self.ubuntu_script = 'regenerate_ubuntu_repo'
        self.centos_script = 'regenerate_centos_repo'
        self.local_mirror_ubuntu = settings.LOCAL_MIRROR_UBUNTU
        self.local_mirror_centos = settings.LOCAL_MIRROR_CENTOS
        self.ubuntu_release = settings.UBUNTU_RELEASE
        self.centos_supported_archs = ['noarch', 'x86_64']
        # Package descriptions parsed from the custom mirror indexes
        self.pkgs_list = []

        self.custom_pkgs_mirror_path = ''
        if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
            # Trying to determine the root of Ubuntu repository
            pkgs_path = settings.CUSTOM_PKGS_MIRROR.split('/dists/')
            if len(pkgs_path) == 2:
                self.custom_pkgs_mirror = pkgs_path[0]
                self.custom_pkgs_mirror_path = '/dists/{}'.format(pkgs_path[1])
            else:
                self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR
        else:
            self.custom_pkgs_mirror = settings.CUSTOM_PKGS_MIRROR

    def prepare_repository(self):
        """Prepare admin node to packages testing

        Scenario:
            1. Temporary set nameserver to local router on admin node
            2. Install tools to manage rpm/deb repository
            3. Retrieve list of packages from custom repository
            4. Download packages to local rpm/deb repository
            5. Update .yaml file with new packages version
            6. Re-generate repo using shell scripts on admin node

        """
        # Check necessary settings and revert a snapshot
        if not self.custom_pkgs_mirror:
            return
        logger.info("Custom mirror with new packages: {0}"
                    .format(settings.CUSTOM_PKGS_MIRROR))

        if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
            # Ubuntu
            master_tools = ['dpkg', 'dpkg-devel', 'dpkg-dev']
            self.install_tools(master_tools)
            self.get_pkgs_list_ubuntu()
            pkgs_local_path = ('{0}/pool/'
                               .format(self.local_mirror_ubuntu))
            self.download_pkgs(pkgs_local_path)
            self.regenerate_repo(self.ubuntu_script, self.local_mirror_ubuntu)
        else:
            # CentOS
            master_tools = ['createrepo']
            self.install_tools(master_tools)
            self.get_pkgs_list_centos()
            pkgs_local_path = '{0}/Packages/'.format(self.local_mirror_centos)
            self.download_pkgs(pkgs_local_path)
            self.regenerate_repo(self.centos_script, self.local_mirror_centos)

    # Install tools to masternode
    def install_tools(self, master_tools=None):
        """Install the given packages on the admin node.

        :param master_tools: list of package names to install
        :raises AssertionError: if any package fails to install
        """
        if master_tools is None:
            master_tools = []
        logger.info("Installing necessary tools for {0}"
                    .format(settings.OPENSTACK_RELEASE))
        for master_tool in master_tools:
            exit_code = install_pkg_2(
                ip=self.ip,
                pkg_name=master_tool
            )
            assert_equal(0, exit_code, 'Cannot install package {0} '
                         'on admin node.'.format(master_tool))

    # Ubuntu: Creating list of packages from the additional mirror
    def get_pkgs_list_ubuntu(self):
        """Fill self.pkgs_list from the Ubuntu 'Packages' index.

        Falls back to the gzipped 'Packages.gz' index when the plain
        index is not reachable.

        :raises AssertionError: if a package stanza lacks one of the
            mandatory 'Package:', 'Version:', 'Filename:' fields
        """
        url = "{0}/{1}/Packages".format(self.custom_pkgs_mirror,
                                        self.custom_pkgs_mirror_path)
        logger.info("Retrieving additional packages from the custom mirror:"
                    " {0}".format(url))
        try:
            pkgs_release = urlopen(url).read()
        except (HTTPError, URLError):
            logger.error(traceback.format_exc())
            url_gz = '{0}.gz'.format(url)
            logger.info(
                "Retrieving additional packages from the custom mirror:"
                " {0}".format(url_gz))
            try:
                pkgs_release_gz = urlopen(url_gz).read()
            except (HTTPError, URLError):
                logger.error(traceback.format_exc())
                raise
            try:
                # MAX_WBITS | 32 enables gzip header auto-detection
                d = zlib.decompressobj(zlib.MAX_WBITS | 32)
                pkgs_release = d.decompress(pkgs_release_gz)
            except Exception:
                logger.error('Ubuntu mirror error: Could not decompress {0}\n'
                             '{1}'.format(url_gz, traceback.format_exc()))
                raise

        packages = (pkg for pkg in pkgs_release.split("\n\n") if pkg)
        for package in packages:
            # Build a 'field:' -> value dict, skipping continuation lines
            # (they start with whitespace).  The 'pstr and' guard avoids
            # an IndexError when a stanza ends with an empty line.
            upkg = {pstr.split()[0].lower(): ''.join(pstr.split()[1:])
                    for pstr in package.split("\n")
                    if pstr and pstr[0].strip()}

            upkg_keys = ["package:", "version:", "filename:"]
            assert_equal(True, all(x in upkg for x in upkg_keys),
                         'Missing one of the statements ["Package:", '
                         '"Version:", "Filename:"] in {0}'.format(url))
            # TODO: add dependencies list to upkg
            self.pkgs_list.append(upkg)

    # Centos: Creating list of packages from the additional mirror
    def get_pkgs_list_centos(self):
        """Fill self.pkgs_list from the CentOS repodata (repomd.xml).

        Locates the 'primary' metadata file, downloads it (gunzipping
        when needed) and collects name/version/filename for every rpm
        of a supported architecture.
        """
        logger.info("Retrieving additional packages from the custom mirror:"
                    " {0}".format(self.custom_pkgs_mirror))
        url = "{0}/repodata/repomd.xml".format(self.custom_pkgs_mirror)
        try:
            repomd_data = urlopen(url).read()
        except (HTTPError, URLError):
            logger.error(traceback.format_exc())
            raise
        # Remove namespace attribute before parsing XML
        repomd_data = re.sub(' xmlns="[^"]+"', '', repomd_data, count=1)
        tree_repomd_data = ElementTree.fromstring(repomd_data)
        lists_location = ''
        for repomd in tree_repomd_data.findall('data'):
            if repomd.get('type') == 'primary':
                repomd_location = repomd.find('location')
                lists_location = repomd_location.get('href')

        # '!=' instead of "is not ''": identity comparison with a string
        # literal only worked through CPython interning and is a
        # SyntaxWarning on Python 3.8+.
        assert_equal(True, lists_location != '', 'CentOS mirror error:'
                     ' Could not parse {0}\nlists_location = "{1}"\n{2}'
                     .format(url, lists_location, traceback.format_exc()))
        url = "{0}/{1}".format(self.custom_pkgs_mirror, lists_location)
        try:
            lists_data = urlopen(url).read()
        except (HTTPError, URLError):
            logger.error(traceback.format_exc())
            raise
        if '.xml.gz' in lists_location:
            try:
                # MAX_WBITS | 32 enables gzip header auto-detection
                d = zlib.decompressobj(zlib.MAX_WBITS | 32)
                lists_data = d.decompress(lists_data)
            except Exception:
                logger.error('CentOS mirror error: Could not decompress {0}\n'
                             '{1}'.format(url, traceback.format_exc()))
                raise

        # Remove namespace attribute before parsing XML
        lists_data = re.sub(' xmlns="[^"]+"', '', lists_data, count=1)

        tree_lists_data = ElementTree.fromstring(lists_data)

        for flist in tree_lists_data.findall('package'):
            if flist.get('type') == 'rpm':
                flist_arch = flist.find('arch').text
                if flist_arch in self.centos_supported_archs:
                    flist_name = flist.find('name').text
                    flist_location = flist.find('location')
                    flist_file = flist_location.get('href')
                    flist_version = flist.find('version')
                    flist_ver = '{0}-{1}'.format(flist_version.get('ver'),
                                                 flist_version.get('rel'))
                    cpkg = {'package:': flist_name,
                            'version:': flist_ver,
                            'filename:': flist_file}
                    # TODO: add dependencies list to cpkg
                    self.pkgs_list.append(cpkg)

    # Download packages (local_folder)
    def download_pkgs(self, pkgs_local_path):
        """Download every package from self.pkgs_list to the local mirror.

        :param pkgs_local_path: target directory on the admin node
        :raises AssertionError: if any wget invocation fails
        """
        # Process the packages list:
        total_pkgs = len(self.pkgs_list)
        logger.info('Found {0} custom package(s)'.format(total_pkgs))

        for npkg, pkg in enumerate(self.pkgs_list):
            # TODO: Previous versions of the updating packages must be removed
            # to avoid unwanted packet manager dependencies resolution
            # (when some package still depends on other package which
            # is not going to be installed)

            logger.info('({0}/{1}) Downloading package: {2}/{3}'
                        .format(npkg + 1, total_pkgs,
                                self.custom_pkgs_mirror,
                                pkg["filename:"]))

            # deb/udeb packages land in different pool sub-directories
            pkg_ext = pkg["filename:"].split('.')[-1]
            if pkg_ext == 'deb':
                path_suff = 'main/'
            elif pkg_ext == 'udeb':
                path_suff = 'debian-installer/'
            else:
                path_suff = ''

            wget_cmd = "wget --no-verbose --directory-prefix {0} {1}/{2}"\
                       .format(pkgs_local_path + path_suff,
                               self.custom_pkgs_mirror,
                               pkg["filename:"])
            wget_result = self.ssh_manager.execute(
                ip=self.ip,
                cmd=wget_cmd
            )
            assert_equal(0, wget_result['exit_code'],
                         self.assert_msg(wget_cmd, wget_result['stderr']))

    # Upload regenerate* script to masternode (script name)
    def regenerate_repo(self, regenerate_script, local_mirror_path):
        """Upload and run a repo-regeneration script on the admin node.

        :param regenerate_script: script file name (in self.path_scripts)
        :param local_mirror_path: repository root to regenerate
        :raises AssertionError: if the regeneration script fails
        """
        # Uploading scripts that prepare local repositories:
        # 'regenerate_centos_repo' and 'regenerate_ubuntu_repo'
        try:
            self.ssh_manager.upload_to_remote(
                ip=self.ip,
                source='{0}/{1}'.format(self.path_scripts, regenerate_script),
                target=self.remote_path_scripts
            )
            self.ssh_manager.execute_on_remote(
                ip=self.ip,
                cmd='chmod 755 {0}/{1}'.format(self.remote_path_scripts,
                                               regenerate_script)
            )
        except Exception:
            logger.error('Could not upload scripts for updating repositories.'
                         '\n{0}'.format(traceback.format_exc()))
            raise

        # Update the local repository using previously uploaded script.
        script_cmd = '{0}/{1} {2} {3}'.format(self.remote_path_scripts,
                                              regenerate_script,
                                              local_mirror_path,
                                              self.ubuntu_release)
        script_result = self.ssh_manager.execute(
            ip=self.ip,
            cmd=script_cmd
        )
        assert_equal(0, script_result['exit_code'],
                     self.assert_msg(script_cmd, script_result['stderr']))

        logger.info('Local repository {0} has been updated successfully.'
                    .format(local_mirror_path))

    @staticmethod
    def assert_msg(cmd, err):
        """Return a formatted error message for a failed remote command."""
        return 'Executing \'{0}\' on the admin node has failed with: {1}'\
               .format(cmd, err)

    def check_puppet_logs(self):
        """Log every package with unmet dependencies found in puppet logs."""
        logger.info("Check puppet logs for packages with unmet dependencies.")
        if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
            err_deps = self.check_puppet_logs_ubuntu()
        else:
            err_deps = self.check_puppet_logs_centos()

        for err_deps_key in err_deps.keys():
            logger.info('Error: Package: {0} has unmet dependencies:'
                        .format(err_deps_key))
            for dep in err_deps[err_deps_key]:
                logger.info('        {0}'.format(dep.strip()))
        logger.info("Check puppet logs completed.")

    def check_puppet_logs_ubuntu(self):
        """ Check puppet-agent.log files on all nodes for package
            dependency errors during a cluster deployment (ubuntu)

        :return: dict mapping package name to a set of unmet dependencies
        """

        err_start = 'The following packages have unmet dependencies:'
        err_end = ('Unable to correct problems,'
                   ' you have held broken packages.')
        cmd = ('fgrep -h -e " Depends: " -e "{0}" -e "{1}" '
               '/var/log/remote/node-*/'
               'puppet*.log'.format(err_start, err_end))
        result = self.ssh_manager.execute(
            ip=self.ip,
            cmd=cmd
        )['stdout']

        err_deps = {}
        err_deps_key = ''
        err_deps_flag = False

        # Forming a dictionary of package names
        # with sets of required packages.
        for res_str in result:
            if err_deps_flag:
                if err_end in res_str:
                    err_deps_flag = False
                elif ": Depends:" in res_str:
                    # First dependency line: the token before ': Depends:'
                    # is the package name.
                    str0, str1, str2 = res_str.partition(': Depends:')
                    err_deps_key = ''.join(str0.split()[-1:])
                    if err_deps_key not in err_deps:
                        err_deps[err_deps_key] = set()
                    if 'but it is not' in str2 or 'is to be installed' in str2:
                        err_deps[err_deps_key].add('Depends:{0}'
                                                   .format(str2))
                elif 'Depends:' in res_str and err_deps_key:
                    # Continuation line for the current package
                    str0, str1, str2 = res_str.partition('Depends:')
                    if 'but it is not' in str2 or 'is to be installed' in str2:
                        err_deps[err_deps_key].add(str1 + str2)
                else:
                    err_deps_key = ''
            elif err_start in res_str:
                err_deps_flag = True

        return err_deps

    def check_puppet_logs_centos(self):
        """ Check puppet-agent.log files on all nodes for package
            dependency errors during a cluster deployment (centos)

        :return: dict mapping package name to a set of unmet dependencies
        """

        cmd = ('fgrep -h -e "Error: Package: " -e " Requires: " /var/log/'
               'remote/node-*/puppet*.log')
        result = self.ssh_manager.execute(
            ip=self.ip,
            cmd=cmd
        )['stdout']

        err_deps = {}
        err_deps_key = ''

        # Forming a dictionary of package names
        # with sets of required packages.
        for res_str in result:
            if 'Error: Package:' in res_str:
                err_deps_key = res_str.partition('Error: Package: ')[2]
                if err_deps_key not in err_deps:
                    err_deps[err_deps_key] = set()
            elif ' Requires: ' in res_str and err_deps_key:
                _, str1, str2 = res_str.partition(' Requires: ')
                err_deps[err_deps_key].add(str1 + str2)
            else:
                err_deps_key = ''

        return err_deps
# ---- Beispiel #40 (0) ----
class EnvironmentModel(six.with_metaclass(SingletonMeta, object)):
    """Facade over a fuel-devops environment and the Fuel master node.

    Singleton (via SingletonMeta): all callers share one instance and
    therefore one SSH connection pool and one set of action helpers.
    """

    def __init__(self, config=None):
        """Set up SSH access to the admin node and the action helpers.

        :param config: optional devops template configuration, used to
            create the virtual environment lazily (see the d_env property)
        """
        # __init__ may run again on the shared singleton instance:
        # keep already-cached lazy attributes instead of resetting them.
        if not hasattr(self, "_virt_env"):
            self._virt_env = None
        if not hasattr(self, "_fuel_web"):
            self._fuel_web = None
        self._config = config
        self.ssh_manager = SSHManager()
        # Admin and slave nodes may use different SSH credentials.
        self.ssh_manager.initialize(
            self.get_admin_node_ip(),
            admin_login=settings.SSH_FUEL_CREDENTIALS['login'],
            admin_password=settings.SSH_FUEL_CREDENTIALS['password'],
            slave_login=settings.SSH_SLAVE_CREDENTIALS['login'],
            slave_password=settings.SSH_SLAVE_CREDENTIALS['password']
        )
        self.admin_actions = AdminActions()
        self.base_actions = BaseActions()
        self.cobbler_actions = CobblerActions()
        self.nailgun_actions = NailgunActions()
        self.postgres_actions = PostgresActions()
        self.fuel_bootstrap_actions = FuelBootstrapCliActions()

    @property
    def fuel_web(self):
        """Return the cached FuelWebClient, creating it on first access."""
        client = self._fuel_web
        if client is None:
            client = FuelWebClient(self)
            self._fuel_web = client
        return client

    def __repr__(self):
        """Short debug representation including the admin node IP."""
        # Only resolve the IP if the web client was already created;
        # touching the fuel_web property here would trigger its creation.
        admin_ip = (self.fuel_web.admin_node_ip
                    if getattr(self, '_fuel_web') else None)
        return "[{klass}({obj_id}), ip:{ip}]".format(klass=type(self),
                                                     obj_id=hex(id(self)),
                                                     ip=admin_ip)

    @property
    def admin_node_ip(self):
        """IP address of the Fuel master node, proxied from fuel_web."""
        web_client = self.fuel_web
        return web_client.admin_node_ip

    @property
    def collector(self):
        """Client for the statistics collector REST API."""
        endpoint = 'api/v1/json'
        return CollectorClient(settings.ANALYTICS_IP, endpoint)

    @logwrap
    def add_syslog_server(self, cluster_id, port=5514):
        """Register the devops router as a syslog server for the cluster."""
        syslog_ip = self.d_env.router()
        self.fuel_web.add_syslog_server(cluster_id, syslog_ip, port)

    def bootstrap_nodes(self, devops_nodes, timeout=settings.BOOTSTRAP_TIMEOUT,
                        skip_timesync=False):
        """Lists registered nailgun nodes
        Start vms and wait until they are registered on nailgun.
        :rtype : List of registered nailgun nodes
        """
        # self.dhcrelay_check()

        for devops_node in devops_nodes:
            logger.info("Bootstrapping node: {}".format(devops_node.name))
            devops_node.start()
            # TODO(aglarendil): LP#1317213 temporary sleep
            # remove after better fix is applied
            time.sleep(5)

        node_names = [devops_node.name for devops_node in devops_nodes]
        with TimeStat("wait_for_nodes_to_start_and_register_in_nailgun"):
            wait(lambda: all(self.nailgun_nodes(devops_nodes)), 15, timeout,
                 timeout_msg='Bootstrap timeout for nodes: {}'
                             ''.format(node_names))

        # Make sure nailgun reports at least as many nodes as we started.
        wait_pass(
            lambda: checkers.validate_minimal_amount_nodes(
                nodes=self.nailgun_nodes(devops_nodes),
                expected_amount=len(devops_nodes)
            ),
            timeout=30)

        if not skip_timesync:
            self.sync_time()

        return self.nailgun_nodes(devops_nodes)

    def sync_time(self, nodes_names=None, skip_sync=False):
        """Synchronize clocks on the given nodes.

        Defaults to every active fuel_master/fuel_slave node in the
        devops environment when ``nodes_names`` is None.
        """
        if nodes_names is None:
            wanted_roles = ['fuel_master', 'fuel_slave']
            nodes_names = [
                node.name for node in self.d_env.get_nodes()
                if node.role in wanted_roles and
                node.driver.node_active(node)]
        logger.info("Please wait while time on nodes: {0} "
                    "will be synchronized"
                    .format(', '.join(sorted(nodes_names))))
        new_time = sync_time(self.d_env, nodes_names, skip_sync)
        for node_name, node_time in sorted(new_time.items()):
            logger.info("New time on '{0}' = {1}".format(node_name,
                                                         node_time))

    @logwrap
    def get_admin_node_ip(self):
        """Return the admin node's IP on the 'admin' network as a string."""
        admin_node = self.d_env.nodes().admin
        return str(admin_node.get_ip_address_by_network_name('admin'))

    @logwrap
    def get_ebtables(self, cluster_id, devops_nodes):
        """Build an Ebtables helper for the nodes' target devices and the
        cluster's VLANs."""
        target_devs = self.get_target_devs(devops_nodes)
        cluster_vlans = self.fuel_web.client.get_cluster_vlans(cluster_id)
        return Ebtables(target_devs, cluster_vlans)

    def get_keys(self, node, custom=None, build_images=None,
                 iso_connect_as='cdrom'):
        """Build the keystroke sequence used to drive the Fuel ISO boot
        prompt when installing the admin node.

        :param node: devops admin node (its admin-network IP is used)
        :param custom: accepted for interface compatibility; not used in
            this method (customisation is applied later by callers)
        :param build_images: truthy -> pass build_images=1 to the installer
        :param iso_connect_as: boot media, 'usb' or 'cdrom' (default)
        :return: str with console keystrokes including kernel parameters
        """
        params = {
            'device_label': settings.ISO_LABEL,
            'iface': iface_alias('eth0'),
            'ip': node.get_ip_address_by_network_name('admin'),
            'mask': self.d_env.get_network(name='admin').ip.netmask,
            'gw': self.d_env.router(),
            'hostname': ''.join((settings.FUEL_MASTER_HOSTNAME,
                                 settings.DNS_SUFFIX)),
            'nat_interface': '',
            'nameserver': settings.DNS,
            'showmenu': 'yes' if settings.SHOW_FUELMENU else 'no',
            'wait_for_external_config': 'yes',
            'build_images': '1' if build_images else '0',
            'MASTER_NODE_EXTRA_PACKAGES': settings.MASTER_NODE_EXTRA_PACKAGES
        }
        # TODO(akostrikov) add tests for menu items/kernel parameters
        # TODO(akostrikov) refactor it.
        if iso_connect_as == 'usb':
            keys = (
                "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                "<F12>\n"
                "2\n"
            )
        else:  # cdrom is default
            keys = (
                "<Wait>\n"
                "<Wait>\n"
                "<Wait>\n"
            )

        # Kernel command line: kickstart/repo from the labelled ISO,
        # static network config and Fuel-specific bootstrap flags.
        keys += (
            "<Esc>\n"
            "<Wait>\n"
            "vmlinuz initrd=initrd.img"
            " inst.ks=cdrom:LABEL=%(device_label)s:/ks.cfg"
            " inst.repo=cdrom:LABEL=%(device_label)s:/"
            " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
            ":%(iface)s:off::: nameserver=%(nameserver)s"
            " showmenu=%(showmenu)s\n"
            " wait_for_external_config=%(wait_for_external_config)s"
            " build_images=%(build_images)s\n"
            " MASTER_NODE_EXTRA_PACKAGES='%(MASTER_NODE_EXTRA_PACKAGES)s'\n"
            " <Enter>\n"
        ) % params
        return keys

    @staticmethod
    def get_target_devs(devops_nodes):
        """Collect the libvirt target device names of every interface on
        the given devops nodes.

        :param devops_nodes: iterable of devops node objects
        :return: flat list of ``interface.target_dev`` values, in node
            order then interface order (same order as before)
        """
        # One flat comprehension instead of the old nested
        # map(lambda ...) + double list comprehension.
        return [interface.target_dev
                for node in devops_nodes
                for interface in node.interfaces]

    @property
    def d_env(self):
        """Devops environment, resolved lazily.

        Tries to fetch an existing environment by name first; if the
        lookup fails, creates and defines a new one -- either from the
        template config passed to __init__ or from global settings.

        NOTE(review): the broad 'except Exception' treats any lookup
        failure as "environment does not exist" -- confirm this is
        intended.
        """
        if self._virt_env is None:
            if not self._config:
                try:
                    # Reuse an already defined environment if present.
                    return Environment.get(name=settings.ENV_NAME)
                except Exception:
                    self._virt_env = Environment.describe_environment(
                        boot_from=settings.ADMIN_BOOT_DEVICE)
                    self._virt_env.define()
            else:
                try:
                    return Environment.get(name=self._config[
                        'template']['devops_settings']['env_name'])
                except Exception:
                    self._virt_env = Environment.create_environment(
                        full_config=self._config)
                    self._virt_env.define()
        return self._virt_env

    def resume_environment(self):
        """Resume the suspended devops environment and make sure the Fuel
        admin node comes back up, restarting it once if necessary.
        """
        self.d_env.resume()
        admin = self.d_env.nodes().admin

        # SSH connections are stale after a suspend/resume cycle.
        self.ssh_manager.clean_all_connections()

        try:
            # NOTE(review): 'await' is a reserved keyword on Python >= 3.7,
            # so this attribute call only parses on older interpreters.
            admin.await('admin', timeout=30, by_port=8000)
        except Exception as e:
            # First wake-up failed: destroy and cold-start the admin node.
            logger.warning("From first time admin isn't reverted: "
                           "{0}".format(e))
            admin.destroy()
            logger.info('Admin node was destroyed. Wait 10 sec.')
            time.sleep(10)

            admin.start()
            logger.info('Admin node started second time.')
            self.d_env.nodes().admin.await('admin')
            self.set_admin_ssh_password()
            self.admin_actions.wait_for_fuel_ready(timeout=600)

            # set collector address in case of admin node destroy
            if settings.FUEL_STATS_ENABLED:
                self.nailgun_actions.set_collector_address(
                    settings.FUEL_STATS_HOST,
                    settings.FUEL_STATS_PORT,
                    settings.FUEL_STATS_SSL)
                # Restart statsenderd in order to apply new collector address
                self.nailgun_actions.force_fuel_stats_sending()
                self.fuel_web.client.send_fuel_stats(enabled=True)
                logger.info('Enabled sending of statistics to {0}:{1}'.format(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
                ))
        self.set_admin_ssh_password()
        self.admin_actions.wait_for_fuel_ready()

    def make_snapshot(self, snapshot_name, description="", is_make=False):
        """Suspend the environment and snapshot it (when enabled); resume
        it afterwards if FUEL_STATS_CHECK needs a running environment."""
        if is_make or settings.MAKE_SNAPSHOT:
            self.d_env.suspend()
            time.sleep(10)
            self.d_env.snapshot(snapshot_name, force=True,
                                description=description)
            revert_info(snapshot_name, self.get_admin_node_ip(), description)

        if settings.FUEL_STATS_CHECK:
            self.resume_environment()

    def nailgun_nodes(self, devops_nodes):
        """Map devops nodes to their nailgun node records."""
        lookup = self.fuel_web.get_nailgun_node_by_devops_node
        return [lookup(devops_node) for devops_node in devops_nodes]

    def check_slaves_are_ready(self):
        """Wait until every active slave is reported online by nailgun."""
        active_slaves = [node for node in self.d_env.nodes().slaves
                         if node.driver.node_active(node)]
        # Bug: 1455753
        time.sleep(30)

        self.fuel_web.wait_nodes_get_online_state(active_slaves,
                                                  timeout=60 * 6)
        return True

    def revert_snapshot(self, name, skip_timesync=False,
                        skip_slaves_check=False):
        """Revert the devops environment to the named snapshot and bring
        it back into a usable state.

        :param name: snapshot name
        :param skip_timesync: sync time only on the admin node if True
        :param skip_slaves_check: don't wait for slaves to come online
        :return: bool, False when the snapshot does not exist
        """
        if not self.d_env.has_snapshot(name):
            return False

        logger.info('We have snapshot with such name: {:s}'.format(name))

        logger.info("Reverting the snapshot '{0}' ....".format(name))
        self.d_env.revert(name)

        logger.info("Resuming the snapshot '{0}' ....".format(name))
        self.resume_environment()

        if not skip_timesync:
            self.sync_time()
        else:
            self.sync_time(['admin'])
        try:
            # Wait until the nailgun API answers again after the revert.
            with QuietLogger(upper_log_level=logging.CRITICAL):
                # TODO(astudenov): add timeout_msg
                wait_pass(
                    self.fuel_web.client.get_releases,
                    expected=(
                        exceptions.RetriableConnectionFailure,
                        exceptions.UnknownConnectionError),
                    timeout=300)
        except exceptions.Unauthorized:
            # Keystone password inside the snapshot differs from settings.
            self.set_admin_keystone_password()
            self.fuel_web.get_nailgun_version()

        if not skip_slaves_check:
            # TODO(astudenov): add timeout_msg
            wait_pass(lambda: self.check_slaves_are_ready(), timeout=60 * 6)
        return True

    def set_admin_ssh_password(self):
        """Make sure the admin node accepts the SSH credentials from
        settings, changing the password from the default if needed.

        NOTE(review): the '******' literals below look like redacted
        default credentials (scrubbed in this copy of the source) --
        restore the real defaults before relying on this fallback path.
        """
        new_login = settings.SSH_FUEL_CREDENTIALS['login']
        new_password = settings.SSH_FUEL_CREDENTIALS['password']
        try:
            # Cheap probe: any command succeeding means credentials work.
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='date'
            )
            logger.debug('Accessing admin node using SSH: SUCCESS')
        except Exception:
            logger.debug('Accessing admin node using SSH credentials:'
                         ' FAIL, trying to change password from default')
            # Reconnect with the default credentials ...
            self.ssh_manager.initialize(
                admin_ip=self.ssh_manager.admin_ip,
                admin_login='******',
                admin_password='******',
                slave_login=settings.SSH_SLAVE_CREDENTIALS['login'],
                slave_password=settings.SSH_SLAVE_CREDENTIALS['password']
            )
            # ... set the desired password via passwd ...
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='echo -e "{1}\\n{1}" | passwd {0}'.format(new_login,
                                                              new_password)
            )
            # ... and reconnect with the new credentials.
            self.ssh_manager.initialize(
                admin_ip=self.ssh_manager.admin_ip,
                admin_login=new_login,
                admin_password=new_password,
                slave_login=settings.SSH_SLAVE_CREDENTIALS['login'],
                slave_password=settings.SSH_SLAVE_CREDENTIALS['password']
            )
            self.ssh_manager.update_connection(
                ip=self.ssh_manager.admin_ip,
                login=new_login,
                password=new_password
            )
            logger.debug("Admin node password has changed.")
        logger.info("Admin node login name: '{0}' , password: '******'".
                    format(new_login, new_password))

    def set_admin_keystone_password(self):
        """Ensure the Fuel UI (keystone) credentials from settings work.

        If a probe API call fails, the password is reset via the fuel CLI
        and both the fuel client config and the Fuel settings YAML are
        updated with the credentials from settings.KEYSTONE_CREDS.
        """
        try:
            self.fuel_web.client.get_releases()
        # TODO(akostrikov) CENTOS7 except exceptions.Unauthorized:
        except Exception:
            # Fix: was a bare 'except:', which would also swallow
            # SystemExit and KeyboardInterrupt.
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='fuel user --newpass {0} --change-password'.format(
                    settings.KEYSTONE_CREDS['password'])
            )
            config_file = self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='ls -1 $HOME/.config/fuel/fuel_client.yaml')['stdout_str']

            # Keep the fuel CLI client config in sync with the new password.
            with YamlEditor(config_file, ip=self.admin_node_ip) as editor:
                editor.content["OS_USERNAME"] = \
                    settings.KEYSTONE_CREDS['username']
                editor.content["OS_PASSWORD"] = \
                    settings.KEYSTONE_CREDS['password']

            # And the master node's Fuel settings file as well.
            with YamlEditor(settings.FUEL_SETTINGS_YAML,
                            ip=self.admin_node_ip) as editor:
                editor.content["FUEL_ACCESS"]['user'] = \
                    settings.KEYSTONE_CREDS['username']
                editor.content["FUEL_ACCESS"]['password'] = \
                    settings.KEYSTONE_CREDS['password']

            logger.info(
                'New Fuel UI (keystone) username: "******", password: "******"'
                .format(settings.KEYSTONE_CREDS['username'],
                        settings.KEYSTONE_CREDS['password']))

    def setup_environment(self, custom=settings.CUSTOM_ENV,
                          build_images=settings.BUILD_IMAGES,
                          iso_connect_as=settings.ADMIN_BOOT_DEVICE,
                          security=settings.SECURITY_TEST):
        """Create the devops environment and install the Fuel master node.

        :param custom: apply custom packages/manifests before bootstrap
        :param build_images: forwarded to the installer (build_images=1)
        :param iso_connect_as: boot media for the ISO ('usb' or 'cdrom')
        :param security: additionally deploy a Nessus scanner node
        """
        # Create environment and start the Fuel master node
        admin = self.d_env.nodes().admin
        self.d_env.start([admin])

        def provision_admin(admin_node):
            """Type boot parameters and wait for installer/fuelmenu."""
            logger.info("Waiting for admin node to start up")
            wait(lambda: admin.driver.node_active(admin_node), 60,
                 timeout_msg='Admin node startup timeout')
            logger.info("Proceed with installation")
            # update network parameters at boot screen
            admin_node.send_keys(self.get_keys(
                admin_node,
                custom=custom,
                build_images=build_images,
                iso_connect_as=iso_connect_as))
            if settings.SHOW_FUELMENU:
                self.wait_for_fuelmenu()
            else:
                self.wait_for_provisioning()

        try:
            provision_admin(admin)
        except Exception as e:
            # One retry after resetting the node, see LP1587411.
            logger.info('Master node restart: LP1587411')
            logger.info('Exception is: {e}'.format(e=e))
            admin.reset()
            provision_admin(admin)

        self.set_admin_ssh_password()

        self.wait_for_external_config()
        if custom:
            self.setup_customisation()
        if security:
            nessus_node = NessusActions(self.d_env)
            nessus_node.add_nessus_node()
        # wait while installation complete

        self.admin_actions.modify_configs(self.d_env.router())
        if CUSTOM_FUEL_SETTING_YAML:
            self.admin_actions.update_fuel_setting_yaml(
                CUSTOM_FUEL_SETTING_YAML)
        self.kill_wait_for_external_config()
        self.wait_bootstrap()
        self.admin_actions.wait_for_fuel_ready()

    @logwrap
    def enable_force_https(self, admin_node_ip):
        """Enable forced HTTPS redirection for the Fuel UI.

        Appends force_https to astute.yaml, re-applies the nginx puppet
        manifest and waits until the nginx service is not 'dead'.

        :param admin_node_ip: IP of the Fuel master node
        """
        cmd = """
        echo -e '"SSL":\n  "force_https": "true"' >> /etc/fuel/astute.yaml
        """
        self.ssh_manager.execute_on_remote(admin_node_ip, cmd)
        cmd = "find / -name \"nginx_services.pp\""
        puppet_manifest = \
            self.ssh_manager.execute_on_remote(
                admin_node_ip, cmd)['stdout'][0].strip()
        cmd = 'puppet apply {0}'.format(puppet_manifest)
        self.ssh_manager.execute_on_remote(admin_node_ip, cmd)
        # Fix: raw string -- \s, \( and \w are awk regex escapes, not
        # Python escapes; the non-raw form emitted invalid-escape warnings
        # while producing the same bytes.
        cmd = r"""
        systemctl status nginx.service |
        awk 'match($0, /\s+Active:.*\((\w+)\)/, a) {print a[1]}'
        """
        wait(lambda: (
             self.ssh_manager.execute_on_remote(
                 admin_node_ip, cmd)['stdout'][0] != 'dead'), interval=10,
             timeout=30,
             timeout_msg='Nginx service is dead after trying to enable '
                         'it with the command: {}'.format(cmd))

    # pylint: disable=no-self-use
    @update_rpm_packages
    @upload_manifests
    def setup_customisation(self):
        """Install custom packages/manifests before master node bootstrap.

        The actual work is done by the update_rpm_packages and
        upload_manifests decorators; the body only logs the step.
        """
        logger.info('Installing custom packages/manifests '
                    'before master node bootstrap...')
    # pylint: enable=no-self-use

    @logwrap
    def wait_for_provisioning(self,
                              timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait until the admin node's SSH port (22) starts answering."""
        # TODO(astudenov): add timeout_msg
        wait_pass(
            lambda: tcp_ping_(
                self.d_env.nodes().admin.get_ip_address_by_network_name(
                    'admin'),
                22),
            timeout=timeout)

    @logwrap
    def wait_for_fuelmenu(self,
                          timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait until fuelmenu is dismissed and SSH becomes reachable."""

        def ssh_is_reachable():
            """Probe port 22; on failure press F8 to close fuelmenu."""
            admin_node = self.d_env.nodes().admin
            try:
                tcp_ping_(
                    admin_node.get_ip_address_by_network_name('admin'), 22)
            except Exception:
                #  send F8 trying to exit fuelmenu
                admin_node.send_keys("<F8>\n")
                return False
            return True

        wait(ssh_is_reachable, interval=30, timeout=timeout,
             timeout_msg="Fuelmenu hasn't appeared during allocated timeout")

    @logwrap
    def wait_for_external_config(self, timeout=120):
        """Wait for the wait_for_external_config stage on the master node.

        First waits (up to a fixed 600 s) for the lock file to appear,
        then waits up to ``timeout`` for the matching process to be alive.
        """
        lock_path = '/var/lock/wait_for_external_config'
        wait(lambda: self.ssh_manager.exists_on_remote(
            self.ssh_manager.admin_ip, lock_path),
            timeout=600,
            timeout_msg='wait_for_external_config lock file timeout '
                        'while bootstrapping the Fuel master node')

        wait(
            lambda: self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd='pkill -0 -f wait_for_external_config')['exit_code'] == 0,
            timeout=timeout,
            timeout_msg='wait_for_external_config process timeout '
                        'while bootstrapping the Fuel master node')

    @logwrap
    def kill_wait_for_external_config(self):
        """Kill the wait_for_external_config process and verify it died."""
        # pkill -0 returns 1 when nothing matches, i.e. the kill worked.
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='pkill -f "^wait_for_external_config"'
        )
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='pkill -0 -f "^wait_for_external_config"; [[ $? -eq 1 ]]'
        )

    def wait_bootstrap(self):
        """Wait for master node bootstrap to finish, verify it completed
        successfully, then check the bootstrap image."""
        logger.info("Waiting while bootstrapping is in progress")
        log_path = "/var/log/puppet/bootstrap_admin_node.log"
        bootstrap_timeout = float(settings.ADMIN_NODE_BOOTSTRAP_TIMEOUT)
        logger.info("Running bootstrap (timeout: {0})".format(
            bootstrap_timeout))

        def grep_log(pattern):
            """Exit code of grepping the bootstrap log for the pattern."""
            return self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd="grep '{0}' '{1:s}'".format(pattern, log_path)
            )['exit_code']

        with TimeStat("admin_node_bootsrap_time", is_uniq=True):
            wait(
                lambda: grep_log('Fuel node deployment') == 0,
                timeout=bootstrap_timeout,
                timeout_msg='Fuel master node bootstrap timeout, '
                            'please check the log {}'.format(log_path)
            )
        if grep_log('Fuel node deployment complete') != 0:
            raise Exception('Fuel node deployment failed.')
        self.bootstrap_image_check()

    def dhcrelay_check(self):
        """Verify that dhcpcheck discovers the master node's admin IP."""
        # CentOS 7 is pretty stable with admin iface.
        # TODO(akostrikov) refactor it.
        iface = iface_alias('eth0')
        command = ("dhcpcheck discover --ifaces {iface} "
                   "--repeat 3 --timeout 10").format(iface=iface)

        discovered = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=command
        )['stdout']

        assert_true(self.get_admin_node_ip() in "".join(discovered),
                    "dhcpcheck doesn't discover master ip")

    def bootstrap_image_check(self):
        """Ensure an Ubuntu-based bootstrap image is built and active."""
        fuel_settings = self.admin_actions.get_fuel_settings()
        flavor = fuel_settings['BOOTSTRAP']['flavor']
        if flavor.lower() != 'ubuntu':
            logger.warning('Default image for bootstrap '
                           'is not based on Ubuntu!')
            return

        listing = self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='fuel-bootstrap --quiet list'
        )['stdout']
        has_active_image = any('active' in line for line in listing)
        assert_true(has_active_image,
                    'Ubuntu bootstrap image wasn\'t built and activated! '
                    'See logs in /var/log/fuel-bootstrap-image-build.log '
                    'for details.')

    def admin_install_pkg(self, pkg_name):
        """Install a package <pkg_name> on the admin node.

        :param pkg_name: name of the RPM package to install
        :return: exit code of the last executed command (0 when installed
            or already present)
        """
        # Bug fix: the query command had a stray trailing quote
        # ("rpm -q {0}'") which made the shell command malformed, so the
        # package always looked missing and was reinstalled every time.
        remote_status = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd="rpm -q {0}".format(pkg_name)
        )
        if remote_status['exit_code'] == 0:
            logger.info("Package '{0}' already installed.".format(pkg_name))
        else:
            logger.info("Installing package '{0}' ...".format(pkg_name))
            remote_status = self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd="yum -y install {0}".format(pkg_name)
            )
            logger.info("Installation of the package '{0}' has been"
                        " completed with exit code {1}"
                        .format(pkg_name, remote_status['exit_code']))
        return remote_status['exit_code']

    def admin_run_service(self, service_name):
        """Start a service <service_name> on the admin node"""
        admin_ip = self.ssh_manager.admin_ip
        self.ssh_manager.execute(
            ip=admin_ip,
            cmd="service {0} start".format(service_name)
        )
        remote_status = self.ssh_manager.execute(
            ip=admin_ip,
            cmd="service {0} status".format(service_name)
        )
        is_running = any('running...' in line
                         for line in remote_status['stdout'])
        if is_running:
            logger.info("Service '{0}' is running".format(service_name))
        else:
            logger.info("Service '{0}' failed to start"
                        " with exit code {1} :\n{2}"
                        .format(service_name,
                                remote_status['exit_code'],
                                remote_status['stdout']))

    def admin_install_updates(self):
        """Update packages using yum and install updates via
        update-master-node.sh tool"""
        logger.info('Searching for updates..')
        update_command = 'yum clean expire-cache && ' \
                         'yum update -y 2>>/var/log/yum-update-error.log'

        logger.info('Performing yum clean and update commands')
        update_result = self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd=update_command,
            err_msg='Packages update failed, inspect logs for details')

        logger.info('Packages were updated successfully')

        yum_output = update_result['stdout_str']
        # Check if any packets were updated and update was successful
        upgraded = re.search(r'Upgrade\s+(\d+)\s+Package', yum_output)
        # In case of package replacement, the new one is marked as
        # installed and the old one as removed
        installed = re.search(r'Install\s+(\d+)\s+Package', yum_output)
        completed = re.search(r'Complete!', yum_output)
        no_updates = re.search("No Packages marked for Update", yum_output)

        if no_updates or not completed or not (upgraded or installed):
            logger.warning('No updates were found or update was incomplete.')
            return

        updates_count = sum(int(match.group(1))
                            for match in (upgraded, installed) if match)
        logger.info('{0} package(s) were updated'.format(updates_count))

        logger.info('Applying updates via update-master-node.sh')
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='/usr/share/fuel-utils/update-master-node.sh',
            err_msg='Update failed, inspect logs for details',
        )
        logger.info('Update successful')

    def modify_resolv_conf(self, nameservers=None, merge=True):
        """Modify /etc/resolv.conf on the Fuel master node and return its
        original content.

        * adds 'nameservers' at start of resolv.conf if merge=True
        * replaces resolv.conf with 'nameservers' if merge=False

        Only lines containing 'search', 'domain' or 'nameserver' are kept.

        :param nameservers: list of resolver lines to put first
        :param merge: keep the existing resolv.conf entries after the new
            ones when True
        :return: list of the original resolv.conf lines
        """
        if nameservers is None:
            nameservers = []

        with self.ssh_manager.open_on_remote(
                ip=self.ssh_manager.admin_ip,
                path='/etc/resolv.conf',
        ) as f:
            resolv_conf = f.readlines()

        # Bug fix: work on a copy -- the old code extended the caller's
        # 'nameservers' list in place as a side effect.
        entries = list(nameservers)
        if merge:
            entries.extend(resolv_conf)
        resolv_keys = ['search', 'domain', 'nameserver']
        resolv_new = "".join(
            '{0}\n'.format(entry) for entry in entries
            if any(key in entry for key in resolv_keys))
        with self.ssh_manager.open_on_remote(
                ip=self.ssh_manager.admin_ip,
                path='/etc/resolv.conf',
                mode='w'
        ) as f:
            f.write(resolv_new)

        return resolv_conf

    @logwrap
    def describe_other_admin_interfaces(self, admin):
        """Configure every extra admin-* interface on the master node and
        run 'cobbler sync' afterwards if any interface was configured."""
        configured_iface = None
        network_names = [iface.network.name for iface in admin.interfaces]
        for index, network_name in enumerate(network_names):
            if 'admin' in network_name and 'admin' != network_name:
                # This will be replaced with actual interface labels
                # form fuel-devops
                configured_iface = 'enp0s' + str(index + 3)
                logger.info("Describe Fuel admin node interface {0} for "
                            "network {1}".format(configured_iface,
                                                 network_name))
                self.describe_admin_interface(configured_iface, network_name)

        if configured_iface:
            return self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd="cobbler sync")

    @logwrap
    def describe_admin_interface(self, admin_if, network_name):
        """Assign the admin node's IP from <network_name> to interface
        <admin_if> on the master node and configure DHCP/firewall for it.

        :param admin_if: interface name on the master node (e.g. enp0s4)
        :param network_name: devops network to take addressing from
        """
        admin_net_object = self.d_env.get_network(name=network_name)
        admin_network = admin_net_object.ip.network
        admin_netmask = admin_net_object.ip.netmask
        admin_ip = str(self.d_env.nodes(
        ).admin.get_ip_address_by_network_name(network_name))
        logger.info(('Parameters for admin interface configuration: '
                     'Network - {0}, Netmask - {1}, Interface - {2}, '
                     'IP Address - {3}').format(admin_network,
                                                admin_netmask,
                                                admin_if,
                                                admin_ip))
        # ifcfg file body; the literal \n sequences are expanded into
        # newlines by 'echo -e' on the master node.
        add_admin_ip = ('DEVICE={0}\\n'
                        'ONBOOT=yes\\n'
                        'NM_CONTROLLED=no\\n'
                        'USERCTL=no\\n'
                        'PEERDNS=no\\n'
                        'BOOTPROTO=static\\n'
                        'IPADDR={1}\\n'
                        'NETMASK={2}\\n').format(admin_if,
                                                 admin_ip,
                                                 admin_netmask)
        # Write the config, bring the interface up and verify the IP stuck.
        cmd = ('echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};'
               'ifup {1}; ip -o -4 a s {1} | grep -w {2}').format(
            add_admin_ip, admin_if, admin_ip)
        logger.debug('Trying to assign {0} IP to the {1} on master node...'.
                     format(admin_ip, admin_if))

        result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=cmd
        )
        assert_equal(result['exit_code'], 0, ('Failed to assign second admin '
                     'IP address on master node: {0}').format(result))
        logger.debug('Done: {0}'.format(result['stdout']))

        # TODO for ssh manager
        multiple_networks_hacks.configure_second_admin_dhcp(
            self.ssh_manager.admin_ip,
            admin_if
        )
        multiple_networks_hacks.configure_second_admin_firewall(
            self.ssh_manager.admin_ip,
            admin_network,
            admin_netmask,
            admin_if,
            self.get_admin_node_ip()
        )

    @logwrap
    def get_masternode_uuid(self):
        """Read the master node UID from the nailgun Postgres database."""
        sql = ("select master_node_uid from "
               "master_node_settings limit 1;")
        return self.postgres_actions.run_query(db='nailgun', query=sql)
Beispiel #41
0
def replace_fuel_nailgun_rpm():
    """
    Replace fuel_nailgun*.rpm from review

    Uploads the rpm built from a review to the master node and, when it
    differs from the installed one, stops nailgun-related services, drops
    the nailgun DB, swaps the package and re-applies the nailgun puppet
    manifest. Requires settings.UPDATE_FUEL to be set.
    """
    logger.info("Patching fuel-nailgun")
    ssh = SSHManager()
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    pack_path = '/var/www/nailgun/fuel-nailgun/'

    full_pack_path = os.path.join(pack_path, 'fuel-nailgun*.noarch.rpm')
    logger.info('Package path {0}'.format(full_pack_path))
    ssh.upload_to_remote(ip=ssh.admin_ip,
                         source=settings.UPDATE_FUEL_PATH.rstrip('/'),
                         target=pack_path)

    # Check old fuel-nailgun package
    cmd = "rpm -q fuel-nailgun"

    old_package = ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)['stdout_str']
    logger.info('Current package version of '
                'fuel-nailgun: {0}'.format(old_package))

    cmd = "rpm -qp {0}".format(full_pack_path)
    new_package = ssh.execute_on_remote(ip=ssh.admin_ip, cmd=cmd)['stdout_str']
    logger.info("Updating package {0} with {1}".format(old_package,
                                                       new_package))

    # Nothing to do when the review package is already in place.
    if old_package == new_package:
        logger.debug('Looks like package from review '
                     'was installed during setups of master node')
        return

    # stop services
    service_list = ['assassind', 'receiverd', 'nailgun', 'statsenderd']
    for service in service_list:
        ssh.execute_on_remote(ip=ssh.admin_ip,
                              cmd='systemctl stop {0}'.format(service))
    logger.info('statistic services {0}'.format(get_oswl_services_names()))
    # stop statistic services
    for service in get_oswl_services_names():
        ssh.execute_on_remote(ip=ssh.admin_ip,
                              cmd='systemctl stop {0}'.format(service))

    # Drop nailgun db manage.py dropdb
    cmd = 'manage.py dropdb'
    ssh.execute_on_remote(ssh.admin_ip, cmd)

    # Delete package
    logger.info("Delete package {0}".format(old_package))
    cmd = "rpm -e fuel-nailgun"
    ssh.execute_on_remote(ssh.admin_ip, cmd)

    logger.info("Install package {0}".format(new_package))

    cmd = "rpm -Uvh --oldpackage {0}".format(full_pack_path)

    ssh.execute_on_remote(ssh.admin_ip, cmd)

    # Fail loudly if rpm did not actually switch the package.
    cmd = "rpm -q fuel-nailgun"
    installed_package = ssh.execute_on_remote(ssh.admin_ip, cmd)['stdout_str']

    assert_equal(installed_package, new_package,
                 "The new package {0} was not installed".format(new_package))

    # Re-apply the nailgun manifest and sync deployment tasks so the new
    # code is picked up.
    cmd = ('puppet apply --debug '
           '/etc/puppet/modules/fuel/examples/nailgun.pp')
    ssh.execute_on_remote(ssh.admin_ip, cmd)
    cmd_sync = 'fuel release --sync-deployment-tasks --dir /etc/puppet/'
    ssh.execute_on_remote(ssh.admin_ip, cmd=cmd_sync)
Beispiel #42
0
def update_ostf():
    """Replace the fuel-ostf package on the master node from the review.

    Uploads ``fuel-ostf*.noarch.rpm`` from settings.UPDATE_FUEL_PATH,
    returns early when the installed package already matches, otherwise
    stops the ostf service, swaps the rpm and waits until the service
    answers again (systemd state and HTTP 401 from the API port).
    """
    logger.info("Uploading new package from {0}".format(
        settings.UPDATE_FUEL_PATH))
    ssh_manager = SSHManager()
    admin_ip = ssh_manager.admin_ip
    pack_path = '/var/www/nailgun/fuel-ostf/'
    full_pack_path = os.path.join(pack_path, 'fuel-ostf*.noarch.rpm')
    ssh_manager.upload_to_remote(
        admin_ip,
        source=settings.UPDATE_FUEL_PATH.rstrip('/'), target=pack_path)

    # Compare the installed fuel-ostf package with the uploaded one.
    old_package = ssh_manager.execute_on_remote(
        admin_ip, cmd="rpm -q fuel-ostf")['stdout_str']
    logger.info(
        'Current package version of '
        'fuel-ostf: {0}'.format(old_package))

    new_package = ssh_manager.execute_on_remote(
        admin_ip, cmd="rpm -qp {0}".format(full_pack_path))['stdout_str']
    logger.info('Package from review {0}'.format(new_package))

    if old_package == new_package:
        logger.info('Package {0} is installed'.format(new_package))
        return

    # Stop OSTF and wait until systemd reports it dead (exit code 3).
    ssh_manager.execute_on_remote(admin_ip, cmd="service ostf stop")
    helpers.wait(lambda: "dead" in ssh_manager.execute_on_remote(
        admin_ip, cmd="service ostf status",
        raise_on_assert=False,
        assert_ec_equal=[3])['stdout_str'], timeout=60)
    logger.info("OSTF status: inactive")

    # Swap the package and verify the installed version.
    ssh_manager.execute_on_remote(admin_ip, cmd="rpm -e fuel-ostf")
    ssh_manager.execute_on_remote(
        admin_ip, cmd="rpm -Uvh --oldpackage {0}".format(full_pack_path))
    installed_package = ssh_manager.execute_on_remote(
        admin_ip, cmd="rpm -q fuel-ostf")['stdout_str']

    assert_equal(
        installed_package, new_package,
        "The new package {0} was not installed. Actual {1}".format(
            new_package, installed_package))

    # Bring the service back: systemd must report 'running' and the API
    # port must answer (401 means nginx/ostf is up but unauthenticated).
    ssh_manager.execute_on_remote(admin_ip, cmd="service ostf start")
    helpers.wait(
        lambda: "running" in
        ssh_manager.execute_on_remote(
            admin_ip, cmd="service ostf status")['stdout_str'],
        timeout=60)
    helpers.wait(
        lambda: "401" in ssh_manager.execute_on_remote(
            admin_ip,
            cmd="curl -s -o /dev/null -w '%{http_code}' "
                "http://127.0.0.1:8777",
            raise_on_assert=False)['stdout_str'],
        timeout=60)
    logger.info("OSTF status: RUNNING")
Beispiel #43
0
def check_package_version_injected_in_bootstraps(
        package,
        cluster_id=None,
        ironic=None):
    """Verify the reviewed deb *package* is baked into the bootstrap image.

    Unpacks the active (or per-cluster ironic) bootstrap squashfs on the
    master node and compares the package version inside the image with
    the version of the deb uploaded from the review.

    :param package: str, deb package name to check
    :param cluster_id: int, cluster id (only used with ironic bootstrap)
    :param ironic: bool-ish, check the ironic bootstrap when truthy
    """
    ssh_manager = SSHManager()
    pack_path = '/var/www/nailgun/{}/'.format(package)
    try:
        ssh_manager.upload_to_remote(
            ip=ssh_manager.admin_ip,
            source=settings.UPDATE_FUEL_PATH.rstrip('/'),
            target=pack_path)
    except Exception:
        logger.exception("Could not upload package")
        raise

    # Step 1 - unpack active bootstrap
    logger.info("unpack active bootstrap")

    bootstrap = (
        "/var/www/nailgun/bootstrap/ironic/{}".format(cluster_id)
        if ironic
        else "/var/www/nailgun/bootstraps/active_bootstrap")
    bootstrap_var = "/var/root.squashfs"

    ssh_manager.execute_on_remote(
        ip=ssh_manager.admin_ip,
        cmd="unsquashfs -d {} {}/root.squashfs".format(
            bootstrap_var, bootstrap))

    # Step 2 - check package version
    logger.info(
        "check package {} version injected in ubuntu bootstrap".format(
            package))

    # Version of the uploaded deb, taken from its file name.
    package_from_review = ssh_manager.execute_on_remote(
        ip=ssh_manager.admin_ip,
        cmd="ls {}|grep {} |grep deb |cut -f 2 -d '_'".format(
            pack_path, package))['stdout_str']
    logger.info("package from review is {}".format(package_from_review))

    # Version installed inside the unpacked image (dpkg run via chroot).
    awk_pattern = "awk '{print $2}'"
    installed_package = ssh_manager.execute_on_remote(
        ip=ssh_manager.admin_ip,
        cmd="chroot {}/ /bin/bash -c \"dpkg -s {}\"|grep Version|{}".format(
            bootstrap_var, package, awk_pattern))['stdout_str']
    logger.info("injected package is {}".format(installed_package))

    assert_equal(installed_package, package_from_review,
                 "The new package {0} wasn't injected in bootstrap".format(
                     package_from_review))

    # Step 3 - remove unpacked bootstrap
    ssh_manager.execute_on_remote(
        ip=ssh_manager.admin_ip,
        cmd="rm -rf {}".format(bootstrap_var))
Beispiel #44
0
def replace_fuel_nailgun_rpm():
    """Install the fuel-nailgun rpm from the review on the master node.

    Requires settings.UPDATE_FUEL. Uploads the package and, when it
    differs from the installed one, stops nailgun services, drops the
    nailgun DB, swaps the rpm and re-applies the nailgun puppet manifest.

    :raises exceptions.FuelQAVariableNotSet: when UPDATE_FUEL is off
    """
    logger.info("Patching fuel-nailgun")
    ssh_manager = SSHManager()
    if not settings.UPDATE_FUEL:
        raise exceptions.FuelQAVariableNotSet('UPDATE_FUEL', 'True')
    admin_ip = ssh_manager.admin_ip
    pack_path = '/var/www/nailgun/fuel-nailgun/'
    full_pack_path = os.path.join(pack_path,
                                  'fuel-nailgun*.noarch.rpm')
    logger.info('Package path {0}'.format(full_pack_path))
    ssh_manager.upload_to_remote(
        ip=admin_ip,
        source=settings.UPDATE_FUEL_PATH.rstrip('/'), target=pack_path)

    # Compare the installed package with the uploaded one.
    old_package = ssh_manager.execute_on_remote(
        ip=admin_ip, cmd="rpm -q fuel-nailgun")['stdout_str']
    logger.info(
        'Current package version of '
        'fuel-nailgun: {0}'.format(old_package))

    new_package = ssh_manager.execute_on_remote(
        ip=admin_ip,
        cmd="rpm -qp {0}".format(full_pack_path))['stdout_str']
    logger.info("Updating package {0} with {1}".format(
        old_package, new_package))

    if old_package == new_package:
        logger.debug('Looks like package from review '
                     'was installed during setups of master node')
        return

    # Stop nailgun and statistic services before touching the DB.
    for service in ('assassind', 'receiverd', 'nailgun', 'statsenderd'):
        ssh_manager.execute_on_remote(
            ip=admin_ip, cmd='systemctl stop {0}'.format(service))
    logger.info('statistic services {0}'.format(get_oswl_services_names()))
    for service in get_oswl_services_names():
        ssh_manager.execute_on_remote(
            ip=admin_ip, cmd='systemctl stop {0}'.format(service))

    # Drop the nailgun database, then replace the package.
    ssh_manager.execute_on_remote(admin_ip, 'manage.py dropdb')

    logger.info("Delete package {0}".format(old_package))
    ssh_manager.execute_on_remote(admin_ip, "rpm -e fuel-nailgun")

    logger.info("Install package {0}".format(new_package))
    ssh_manager.execute_on_remote(
        admin_ip, "rpm -Uvh --oldpackage {0}".format(full_pack_path))

    installed_package = ssh_manager.execute_on_remote(
        admin_ip, "rpm -q fuel-nailgun")['stdout_str']
    assert_equal(installed_package, new_package,
                 "The new package {0} was not installed".format(new_package))

    # Re-apply the nailgun manifest and sync deployment tasks.
    ssh_manager.execute_on_remote(
        admin_ip,
        'puppet apply --debug '
        '/etc/puppet/modules/fuel/examples/nailgun.pp')
    ssh_manager.execute_on_remote(
        admin_ip,
        cmd='fuel release --sync-deployment-tasks --dir /etc/puppet/')
Beispiel #45
0
class EnvironmentModel(object):
    """EnvironmentModel."""  # TODO documentation

    def __init__(self, config=None):
        """Initialize SSH access to the admin node and the action helpers.

        :param config: dict, optional devops template; when None the
            environment is looked up (or created) from settings.ENV_NAME
            via the d_env property.
        """
        # _virt_env/_fuel_web may already be set when __init__ runs again
        # on an existing instance, so only default them when missing.
        if not hasattr(self, "_virt_env"):
            self._virt_env = None
        if not hasattr(self, "_fuel_web"):
            self._fuel_web = None
        self._config = config
        # Shared SSH manager, pointed at the admin node with the default
        # credentials from settings.
        self.ssh_manager = SSHManager()
        self.ssh_manager.initialize(
            self.get_admin_node_ip(),
            login=settings.SSH_CREDENTIALS['login'],
            password=settings.SSH_CREDENTIALS['password'])
        self.admin_actions = AdminActions()
        self.base_actions = BaseActions()
        self.cobbler_actions = CobblerActions()
        self.nailgun_actions = NailgunActions()
        self.postgres_actions = PostgresActions()
        self.fuel_bootstrap_actions = FuelBootstrapCliActions()

    @property
    def fuel_web(self):
        """FuelWebClient for this environment, created on first access."""
        if self._fuel_web is None:
            self._fuel_web = FuelWebClient(self)
        return self._fuel_web

    def __repr__(self):
        """Show class, object id and the admin IP when fuel_web exists."""
        if getattr(self, '_fuel_web'):
            ip = self.fuel_web.admin_node_ip
        else:
            ip = None
        return "[{klass}({obj_id}), ip:{ip}]".format(
            klass=type(self), obj_id=hex(id(self)), ip=ip)

    @property
    def admin_node_ip(self):
        # Delegates to FuelWebClient; compare get_admin_node_ip(), which
        # resolves the IP from the devops environment instead.
        return self.fuel_web.admin_node_ip

    @property
    def collector(self):
        """Client for the statistics collector API on the analytics host."""
        return CollectorClient(settings.ANALYTICS_IP, 'api/v1/json')

    @logwrap
    def add_syslog_server(self, cluster_id, port=5514):
        """Register the devops router as remote syslog for the cluster.

        :param cluster_id: int, nailgun cluster id
        :param port: int, syslog port (default 5514)
        """
        self.fuel_web.add_syslog_server(cluster_id, self.d_env.router(), port)

    def bootstrap_nodes(self,
                        devops_nodes,
                        timeout=settings.BOOTSTRAP_TIMEOUT,
                        skip_timesync=False):
        """Start devops VMs and wait until nailgun registers all of them.

        :param devops_nodes: list of devops node objects to start
        :param timeout: int, seconds to wait for nailgun registration
        :param skip_timesync: bool, skip node time sync when True
        :rtype : List of registered nailgun nodes
        """
        # self.dhcrelay_check()

        for node in devops_nodes:
            logger.info("Bootstrapping node: {}".format(node.name))
            node.start()
            # TODO(aglarendil): LP#1317213 temporary sleep
            # remove after better fix is applied
            time.sleep(5)

        with TimeStat("wait_for_nodes_to_start_and_register_in_nailgun"):
            wait(lambda: all(self.nailgun_nodes(devops_nodes)), 15, timeout)

        if not skip_timesync:
            self.sync_time()
        return self.nailgun_nodes(devops_nodes)

    def sync_time(self, nodes_names=None, skip_sync=False):
        """Synchronize time on the given nodes (all active ones by default).

        :param nodes_names: list of devops node names; when None, every
            active fuel_master/fuel_slave node is synced
        :param skip_sync: bool, forwarded to the devops sync_time helper
        """
        if nodes_names is None:
            active_roles = ('fuel_master', 'fuel_slave')
            nodes_names = [
                node.name for node in self.d_env.get_nodes()
                if node.role in active_roles and
                node.driver.node_active(node)
            ]
        logger.info("Please wait while time on nodes: {0} "
                    "will be synchronized".format(
                        ', '.join(sorted(nodes_names))))
        # Module-level sync_time helper, not this method.
        synced = sync_time(self.d_env, nodes_names, skip_sync)
        for node_name in sorted(synced):
            logger.info("New time on '{0}' = {1}".format(
                node_name, synced[node_name]))

    @logwrap
    def get_admin_node_ip(self):
        """Resolve the admin node IP from the devops admin network."""
        return str(self.d_env.nodes().admin.get_ip_address_by_network_name(
            self.d_env.admin_net))

    @logwrap
    def get_ebtables(self, cluster_id, devops_nodes):
        """Build an Ebtables helper for the nodes' target devices.

        :param cluster_id: int, nailgun cluster id (source of vlans)
        :param devops_nodes: list of devops node objects
        """
        return Ebtables(self.get_target_devs(devops_nodes),
                        self.fuel_web.client.get_cluster_vlans(cluster_id))

    def get_keys(self,
                 node,
                 custom=None,
                 build_images=None,
                 iso_connect_as='cdrom'):
        """Build the keystroke sequence sent to the ISO boot prompt.

        :param node: devops admin node object (supplies its admin-net IP)
        :param custom: accepted for caller compatibility; not used in
            this method body
        :param build_images: bool-ish, becomes the build_images=1/0
            kernel argument
        :param iso_connect_as: str, 'usb' or 'cdrom' (default)
        :return: str, console keys including the kernel command line
        """
        # Values substituted into the kernel command line below.
        params = {
            'device_label':
            settings.ISO_LABEL,
            'iface':
            iface_alias('eth0'),
            'ip':
            node.get_ip_address_by_network_name(self.d_env.admin_net),
            'mask':
            self.d_env.get_network(name=self.d_env.admin_net).ip.netmask,
            'gw':
            self.d_env.router(),
            'hostname':
            ''.join((settings.FUEL_MASTER_HOSTNAME, settings.DNS_SUFFIX)),
            'nat_interface':
            self.d_env.nat_interface,
            'nameserver':
            settings.DNS,
            'showmenu':
            'yes' if settings.SHOW_FUELMENU else 'no',
            'wait_for_external_config':
            'yes',
            'build_images':
            '1' if build_images else '0'
        }
        # TODO(akostrikov) add tests for menu items/kernel parameters
        # TODO(akostrikov) refactor it.
        if iso_connect_as == 'usb':
            keys = (
                "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                "<F12>\n"
                "2\n")
        else:  # cdrom is default
            keys = ("<Wait>\n" "<Wait>\n" "<Wait>\n")

        # Escape the boot menu and type the full kernel command line.
        keys += ("<Esc>\n"
                 "<Wait>\n"
                 "vmlinuz initrd=initrd.img"
                 " inst.ks=cdrom:LABEL=%(device_label)s:/ks.cfg"
                 " inst.repo=cdrom:LABEL=%(device_label)s:/"
                 " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
                 ":%(iface)s:off::: nameserver=%(nameserver)s"
                 " showmenu=%(showmenu)s\n"
                 " wait_for_external_config=%(wait_for_external_config)s"
                 " build_images=%(build_images)s\n"
                 " <Enter>\n") % params
        return keys

    @staticmethod
    def get_target_devs(devops_nodes):
        """Collect target_dev of every interface of every devops node."""
        return [
            iface.target_dev
            for devops_node in devops_nodes
            for iface in devops_node.interfaces
        ]

    @property
    def d_env(self):
        """Devops environment: fetch an existing one by name or create it.

        NOTE: an existing environment found via Environment.get() is
        returned directly and NOT cached in self._virt_env; only newly
        created environments are cached and define()d.
        """
        if self._virt_env is None:
            if not self._config:
                try:
                    return Environment.get(name=settings.ENV_NAME)
                except Exception:
                    # Not found - describe and define a fresh environment.
                    self._virt_env = Environment.describe_environment(
                        boot_from=settings.ADMIN_BOOT_DEVICE)
                    self._virt_env.define()
            else:
                try:
                    return Environment.get(name=self._config['template']
                                           ['devops_settings']['env_name'])
                except Exception:
                    # Not found - create from the provided template config.
                    self._virt_env = Environment.create_environment(
                        full_config=self._config)
                    self._virt_env.define()
        return self._virt_env

    def resume_environment(self):
        """Resume a suspended environment and wait for the admin node.

        If the admin node does not answer on port 8000 shortly after the
        resume, it is destroyed and cold-booted once, after which Fuel
        readiness is awaited and (when enabled) statistics collection is
        re-configured.
        """
        self.d_env.resume()
        admin = self.d_env.nodes().admin

        # Connections from before the suspend are stale.
        self.ssh_manager.clean_all_connections()

        # BUG FIX: devops exposes the wait-for-node method under the name
        # 'await', which is a reserved keyword on Python 3.7+; calling it
        # via getattr keeps this module importable there.
        try:
            getattr(admin, 'await')(self.d_env.admin_net, timeout=30,
                                    by_port=8000)
        except Exception as e:
            logger.warning("From first time admin isn't reverted: "
                           "{0}".format(e))
            admin.destroy()
            logger.info('Admin node was destroyed. Wait 10 sec.')
            time.sleep(10)

            admin.start()
            logger.info('Admin node started second time.')
            getattr(self.d_env.nodes().admin, 'await')(self.d_env.admin_net)
            self.set_admin_ssh_password()
            self.admin_actions.wait_for_fuel_ready(timeout=600)

            # set collector address in case of admin node destroy
            if settings.FUEL_STATS_ENABLED:
                self.nailgun_actions.set_collector_address(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT,
                    settings.FUEL_STATS_SSL)
                # Restart statsenderd in order to apply new collector address
                self.nailgun_actions.force_fuel_stats_sending()
                self.fuel_web.client.send_fuel_stats(enabled=True)
                logger.info('Enabled sending of statistics to {0}:{1}'.format(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT))
        self.set_admin_ssh_password()
        self.admin_actions.wait_for_fuel_ready()

    def make_snapshot(self, snapshot_name, description="", is_make=False):
        """Suspend the environment and snapshot it (when enabled).

        :param snapshot_name: str, name of the snapshot to create
        :param description: str, free-form snapshot description
        :param is_make: bool, force the snapshot even when
            settings.MAKE_SNAPSHOT is off
        """
        if is_make or settings.MAKE_SNAPSHOT:
            self.d_env.suspend()
            time.sleep(10)
            self.d_env.snapshot(
                snapshot_name, force=True, description=description)
            revert_info(snapshot_name, self.get_admin_node_ip(), description)

        # FUEL_STATS_CHECK needs a live environment afterwards.
        if settings.FUEL_STATS_CHECK:
            self.resume_environment()

    def nailgun_nodes(self, devops_nodes):
        """Map devops nodes to their nailgun node records."""
        lookup = self.fuel_web.get_nailgun_node_by_devops_node
        return [lookup(devops_node) for devops_node in devops_nodes]

    def check_slaves_are_ready(self):
        """Wait until every active slave node is online in nailgun.

        :return: True when all active slaves report online
        :raises TimeoutError: when a node stays offline past the timeout
        """
        devops_nodes = [
            node for node in self.d_env.nodes().slaves
            if node.driver.node_active(node)
        ]
        # Bug: 1455753
        time.sleep(30)

        for node in devops_nodes:
            try:
                # The lambda's late binding of 'node' is safe here: wait()
                # runs and finishes within the same loop iteration.
                wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
                    node)['online'],
                     timeout=60 * 6)
            except TimeoutError:
                raise TimeoutError("Node {0} does not become online".format(
                    node.name))
        return True

    def revert_snapshot(self,
                        name,
                        skip_timesync=False,
                        skip_slaves_check=False):
        """Revert the devops snapshot *name* and bring the env back up.

        :param name: str, snapshot name
        :param skip_timesync: bool, skip node time sync when True
        :param skip_slaves_check: bool, skip waiting for online slaves
        :return: bool, False when no such snapshot exists, True otherwise
        """
        if not self.d_env.has_snapshot(name):
            return False

        logger.info('We have snapshot with such name: {:s}'.format(name))

        logger.info("Reverting the snapshot '{0}' ....".format(name))
        self.d_env.revert(name)

        logger.info("Resuming the snapshot '{0}' ....".format(name))
        self.resume_environment()

        if not skip_timesync:
            self.sync_time()
        # Wait for the nailgun API to answer; Unauthorized means the
        # keystone password must be reset before retrying.
        try:
            _wait(self.fuel_web.client.get_releases,
                  expected=EnvironmentError,
                  timeout=300)
        except exceptions.Unauthorized:
            self.set_admin_keystone_password()
            self.fuel_web.get_nailgun_version()

        if not skip_slaves_check:
            _wait(lambda: self.check_slaves_are_ready(), timeout=60 * 6)
        return True

    def set_admin_ssh_password(self):
        """Ensure SSH access to the admin node with settings credentials.

        Probes with a trivial command first; on failure reconnects with
        the fallback credentials and changes the password via ``passwd``.
        """
        new_login = settings.SSH_CREDENTIALS['login']
        new_password = settings.SSH_CREDENTIALS['password']
        try:
            self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip,
                                               cmd='date')
            logger.debug('Accessing admin node using SSH: SUCCESS')
        except Exception:
            logger.debug('Accessing admin node using SSH credentials:'
                         ' FAIL, trying to change password from default')
            # NOTE(review): the '******' literals below look like redacted
            # default credentials - confirm the intended values before
            # relying on this fallback path.
            self.ssh_manager.initialize(admin_ip=self.ssh_manager.admin_ip,
                                        login='******',
                                        password='******')
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='echo -e "{1}\\n{1}" | passwd {0}'.format(
                    new_login, new_password))
            self.ssh_manager.initialize(admin_ip=self.ssh_manager.admin_ip,
                                        login=new_login,
                                        password=new_password)
            self.ssh_manager.update_connection(ip=self.ssh_manager.admin_ip,
                                               login=new_login,
                                               password=new_password)
            logger.debug("Admin node password has changed.")
        # NOTE(review): this format string only interpolates {0}; the
        # password argument is unused (looks redacted) - verify intent.
        logger.info("Admin node login name: '{0}' , password: '******'".format(
            new_login, new_password))

    def set_admin_keystone_password(self):
        """Reset the Fuel UI (keystone) password when API access fails.

        Probes the nailgun API; on failure runs ``fuel user
        --change-password`` on the admin node with the password from
        settings.KEYSTONE_CREDS.
        """
        try:
            self.fuel_web.client.get_releases()
        # TODO(akostrikov) CENTOS7 except exceptions.Unauthorized:
        # BUG FIX: was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        except Exception:
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='fuel user --newpass {0} --change-password'.format(
                    settings.KEYSTONE_CREDS['password']))
            # BUG FIX: the message had no placeholders, so the .format()
            # arguments were silently dropped from the log line.
            logger.info(
                'New Fuel UI (keystone) username: "{0}", password: "{1}"'.
                format(settings.KEYSTONE_CREDS['username'],
                       settings.KEYSTONE_CREDS['password']))

    def insert_cdrom_tray(self):
        """Close the admin node's virtual cdrom tray via ``virsh edit``.

        Drops ``tray='open'`` from the domain XML with a sed-based EDITOR.
        Rude workaround until fuel-devops supports this natively.
        """
        admin_name = "{}_{}".format(
            settings.ENV_NAME, self.d_env.nodes().admin.name)
        max_len = 80
        if len(admin_name) > max_len:
            # NOTE(review): str(hash(...)) is randomized per process on
            # Python 3 unless PYTHONHASHSEED is fixed - confirm intent.
            admin_name = (str(hash(admin_name)) + admin_name)[:max_len]

        cmd = """EDITOR="sed -i s/tray=\\'open\\'//" virsh edit {}""".format(
            admin_name)
        subprocess.check_call(cmd, shell=True)

    def reinstall_master_node(self):
        """Erase boot sector and run setup_environment"""
        with self.d_env.get_admin_remote() as remote:
            # Wipe /boot so the node can no longer boot from disk.
            erase_data_from_hdd(remote, mount_point='/boot')
            remote.execute("/sbin/shutdown")
        self.d_env.nodes().admin.destroy()
        # Re-attach the ISO so the next boot starts the installer.
        self.insert_cdrom_tray()
        self.setup_environment()

    def setup_environment(self,
                          custom=settings.CUSTOM_ENV,
                          build_images=settings.BUILD_IMAGES,
                          iso_connect_as=settings.ADMIN_BOOT_DEVICE,
                          security=settings.SECURITY_TEST,
                          force_ssl=settings.FORCE_HTTPS_MASTER_NODE):
        """Create the devops environment and install the Fuel master node.

        :param custom: bool, run setup_customisation before bootstrap
        :param build_images: bool, pass build_images=1 to the installer
        :param iso_connect_as: str, 'usb' or 'cdrom' boot device
        :param security: bool, additionally deploy a Nessus node
        :param force_ssl: bool, enforce HTTPS for the Fuel UI
        """
        # Create environment and start the Fuel master node
        admin = self.d_env.nodes().admin
        self.d_env.start([admin])

        logger.info("Waiting for admin node to start up")
        wait(lambda: admin.driver.node_active(admin), 60)
        logger.info("Proceed with installation")
        # update network parameters at boot screen
        admin.send_keys(
            self.get_keys(admin,
                          custom=custom,
                          build_images=build_images,
                          iso_connect_as=iso_connect_as))
        if settings.SHOW_FUELMENU:
            self.wait_for_fuelmenu()
        else:
            self.wait_for_provisioning()

        self.set_admin_ssh_password()

        self.wait_for_external_config()
        if custom:
            self.setup_customisation()
        if security:
            nessus_node = NessusActions(self.d_env)
            nessus_node.add_nessus_node()
        # wait while installation complete

        self.admin_actions.modify_configs(self.d_env.router())
        self.kill_wait_for_external_config()
        self.wait_bootstrap()

        if settings.UPDATE_FUEL:
            # Update Ubuntu packages
            self.admin_actions.upload_packages(
                local_packages_dir=settings.UPDATE_FUEL_PATH,
                centos_repo_path=None,
                ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU)

        self.admin_actions.wait_for_fuel_ready()
        time.sleep(10)
        self.set_admin_keystone_password()
        self.sync_time(['admin'])
        if settings.UPDATE_MASTER:
            if settings.UPDATE_FUEL_MIRROR:
                # Register each extra mirror as a top-priority yum repo.
                for i, url in enumerate(settings.UPDATE_FUEL_MIRROR):
                    conf_file = '/etc/yum.repos.d/temporary-{}.repo'.format(i)
                    cmd = ("echo -e"
                           " '[temporary-{0}]\nname="
                           "temporary-{0}\nbaseurl={1}/"
                           "\ngpgcheck=0\npriority="
                           "1' > {2}").format(i, url, conf_file)

                    self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                             cmd=cmd)
            self.admin_install_updates()
        if settings.MULTIPLE_NETWORKS:
            self.describe_other_admin_interfaces(admin)
        self.nailgun_actions.set_collector_address(settings.FUEL_STATS_HOST,
                                                   settings.FUEL_STATS_PORT,
                                                   settings.FUEL_STATS_SSL)
        # Restart statsenderd to apply settings(Collector address)
        self.nailgun_actions.force_fuel_stats_sending()
        if settings.FUEL_STATS_ENABLED:
            self.fuel_web.client.send_fuel_stats(enabled=True)
            logger.info('Enabled sending of statistics to {0}:{1}'.format(
                settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT))
        if settings.PATCHING_DISABLE_UPDATES:
            cmd = "find /etc/yum.repos.d/ -type f -regextype posix-egrep" \
                  " -regex '.*/mos[0-9,\.]+\-(updates|security).repo' | " \
                  "xargs -n1 -i sed '$aenabled=0' -i {}"
            self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip,
                                               cmd=cmd)
        if settings.DISABLE_OFFLOADING:
            logger.info('========================================'
                        'Applying workaround for bug #1526544'
                        '========================================')
            # Disable TSO offloading for every network interface
            # that is not virtual (loopback, bridges, etc)
            ifup_local = (
                """#!/bin/bash\n"""
                """if [[ -z "${1}" ]]; then\n"""
                """  exit\n"""
                """fi\n"""
                """devpath=$(readlink -m /sys/class/net/${1})\n"""
                """if [[ "${devpath}" == /sys/devices/virtual/* ]]; then\n"""
                """  exit\n"""
                """fi\n"""
                """ethtool -K ${1} tso off\n""")
            cmd = ("echo -e '{0}' | sudo tee /sbin/ifup-local;"
                   "sudo chmod +x /sbin/ifup-local;").format(ifup_local)
            self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip,
                                               cmd=cmd)
            cmd = ('for ifname in $(ls /sys/class/net); do '
                   'sudo /sbin/ifup-local ${ifname}; done')
            self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip,
                                               cmd=cmd)
            # Log interface settings
            cmd = ('for ifname in $(ls /sys/class/net); do '
                   '([[ $(readlink -e /sys/class/net/${ifname}) == '
                   '/sys/devices/virtual/* ]] '
                   '|| ethtool -k ${ifname}); done')
            result = self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip, cmd=cmd)
            logger.debug('Offloading settings:\n{0}\n'.format(''.join(
                result['stdout'])))
        # BUG FIX: this check was nested under DISABLE_OFFLOADING, so
        # HTTPS enforcement was silently skipped unless the offloading
        # workaround was also enabled; force_ssl is independent of it.
        if force_ssl:
            self.enable_force_https(self.ssh_manager.admin_ip)

    @logwrap
    def enable_force_https(self, admin_node_ip):
        """Turn on forced HTTPS for the Fuel UI on the master node.

        Appends force_https to astute.yaml, re-applies the nginx puppet
        manifest and waits until nginx is no longer reported 'dead'.

        :param admin_node_ip: str, IP of the admin node
        """
        cmd = """
        echo -e '"SSL":\n  "force_https": "true"' >> /etc/fuel/astute.yaml
        """
        self.ssh_manager.execute_on_remote(admin_node_ip, cmd)
        cmd = "find / -name \"nginx_services.pp\""
        puppet_manifest = \
            self.ssh_manager.execute_on_remote(
                admin_node_ip, cmd)['stdout'][0].strip()
        cmd = 'puppet apply {0}'.format(puppet_manifest)
        self.ssh_manager.execute_on_remote(admin_node_ip, cmd)
        # awk extracts the Active state word (e.g. 'running'/'dead')
        # from the systemctl status output.
        cmd = """
        systemctl status nginx.service |
        awk 'match($0, /\s+Active:.*\((\w+)\)/, a) {print a[1]}'
        """
        wait(lambda: (self.ssh_manager.execute_on_remote(admin_node_ip, cmd)[
            'stdout'][0] != 'dead'),
             interval=10,
             timeout=30)

    # pylint: disable=no-self-use
    @update_rpm_packages
    @upload_manifests
    def setup_customisation(self):
        """Install custom packages/manifests before master bootstrap.

        The actual work happens in the update_rpm_packages and
        upload_manifests decorators; the body only logs.
        """
        logger.info('Installing custom packages/manifests '
                    'before master node bootstrap...')

    # pylint: enable=no-self-use

    @logwrap
    def wait_for_provisioning(self,
                              timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait until the admin node's SSH port (22) answers.

        :param timeout: int, seconds to keep polling
        """
        _wait(lambda: _tcp_ping(
            self.d_env.nodes().admin.get_ip_address_by_network_name(
                self.d_env.admin_net), 22),
              timeout=timeout)

    @logwrap
    def wait_for_fuelmenu(self,
                          timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait for fuelmenu to be closed and SSH to become reachable.

        While the admin node's SSH port does not answer, fuelmenu is
        assumed to still be open, so <F8> is sent to exit it.

        :param timeout: int, seconds to keep polling
        """
        def _ssh_reachable():
            """Probe SSH; on failure send <F8> to try closing fuelmenu."""
            admin_node = self.d_env.nodes().admin
            try:
                _tcp_ping(
                    admin_node.get_ip_address_by_network_name(
                        self.d_env.admin_net), 22)
            except Exception:
                admin_node.send_keys("<F8>\n")
                return False
            return True

        wait(_ssh_reachable,
             interval=30,
             timeout=timeout,
             timeout_msg="Fuelmenu hasn't appeared during allocated timeout")

    @logwrap
    def wait_for_external_config(self, timeout=120):
        """Wait until the wait_for_external_config process is running.

        :param timeout: int, seconds to keep polling
        """
        def _process_alive():
            # pkill -0 probes for the process without signalling it.
            probe = self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd='pkill -0 -f wait_for_external_config')
            return probe['exit_code'] == 0

        wait(_process_alive, timeout=timeout)

    @logwrap
    def kill_wait_for_external_config(self):
        """Kill the wait_for_external_config process on the admin node."""
        kill_cmd = 'pkill -f "^wait_for_external_config"'
        # pkill -0 only probes; exit code 1 means nothing matched, which
        # confirms the process is gone after the kill.
        check_cmd = 'pkill -0 -f "^wait_for_external_config"; [[ $? -eq 1 ]]'
        self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip,
                                           cmd=kill_cmd)
        self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip,
                                           cmd=check_cmd)

    def wait_bootstrap(self):
        """Wait for master node bootstrap to finish and verify success.

        :raises Exception: when the bootstrap log lacks the completion
            marker after the deployment line appeared.
        """
        logger.info("Waiting while bootstrapping is in progress")
        log_path = "/var/log/puppet/bootstrap_admin_node.log"
        logger.info("Running bootstrap (timeout: {0})".format(
            float(settings.ADMIN_NODE_BOOTSTRAP_TIMEOUT)))
        with TimeStat("admin_node_bootsrap_time", is_uniq=True):
            # First wait for any 'Fuel node deployment' line to appear.
            wait(lambda: self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd="grep 'Fuel node deployment' '{:s}'".format(log_path))[
                    'exit_code'] == 0,
                 timeout=(float(settings.ADMIN_NODE_BOOTSTRAP_TIMEOUT)))
        # Then require the explicit 'complete' marker; its absence means
        # the deployment ended with an error.
        result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd="grep 'Fuel node deployment "
            "complete' '{:s}'".format(log_path))['exit_code']
        if result != 0:
            raise Exception('Fuel node deployment failed.')
        self.bootstrap_image_check()

    def dhcrelay_check(self):
        """Check that dhcpcheck discovers the master node's admin IP."""
        # CentOS 7 is pretty stable with admin iface.
        # TODO(akostrikov) refactor it.
        discover_cmd = "dhcpcheck discover " \
                       "--ifaces {iface} " \
                       "--repeat 3 " \
                       "--timeout 10".format(iface=iface_alias('eth0'))

        stdout = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip, cmd=discover_cmd)['stdout']

        assert_true(self.get_admin_node_ip() in "".join(stdout),
                    "dhcpcheck doesn't discover master ip")

    def bootstrap_image_check(self):
        """Ensure an Ubuntu bootstrap image is built and activated."""
        fuel_settings = self.admin_actions.get_fuel_settings()
        flavor = fuel_settings['BOOTSTRAP']['flavor']
        if flavor.lower() != 'ubuntu':
            logger.warning('Default image for bootstrap '
                           'is not based on Ubuntu!')
            return

        listing = self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='fuel-bootstrap --quiet list')['stdout']
        has_active = any('active' in line for line in listing)
        assert_true(
            has_active,
            'Ubuntu bootstrap image wasn\'t built and activated! '
            'See logs in /var/log/fuel-bootstrap-image-build.log '
            'for details.')

    def admin_install_pkg(self, pkg_name):
        """Install a package <pkg_name> on the admin node.

        :param pkg_name: str, rpm package name
        :return: int, exit code of the rpm query (already installed) or
            of the yum install
        """
        # BUG FIX: the query used to be "rpm -q {0}'" with a stray
        # trailing quote, which corrupted the remote shell command.
        remote_status = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip, cmd="rpm -q {0}".format(pkg_name))
        if remote_status['exit_code'] == 0:
            logger.info("Package '{0}' already installed.".format(pkg_name))
        else:
            logger.info("Installing package '{0}' ...".format(pkg_name))
            remote_status = self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd="yum -y install {0}".format(pkg_name))
            logger.info("Installation of the package '{0}' has been"
                        " completed with exit code {1}".format(
                            pkg_name, remote_status['exit_code']))
        return remote_status['exit_code']

    def admin_run_service(self, service_name):
        """Start service <service_name> on the admin node and log status.

        :param service_name: str, name of the service to start
        """
        admin_ip = self.ssh_manager.admin_ip
        self.ssh_manager.execute(
            ip=admin_ip, cmd="service {0} start".format(service_name))
        remote_status = self.ssh_manager.execute(
            ip=admin_ip, cmd="service {0} status".format(service_name))
        is_running = any(
            'running...' in line for line in remote_status['stdout'])
        if is_running:
            logger.info("Service '{0}' is running".format(service_name))
        else:
            logger.info("Service '{0}' failed to start"
                        " with exit code {1} :\n{2}".format(
                            service_name, remote_status['exit_code'],
                            remote_status['stdout']))

    # Execute yum updates
    # If updates installed,
    # then `bootstrap_admin_node.sh;`
    def admin_install_updates(self):
        """Run 'yum update' on the admin node; re-bootstrap if updated.

        Raises AssertionError when 'yum update' or the subsequent
        bootstrap script exits with a non-zero code.
        """
        logger.info('Searching for updates..')
        update_command = 'yum clean expire-cache; yum update -y'

        update_result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                                 cmd=update_command)

        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(update_result, update_command))
        assert_equal(int(update_result['exit_code']), 0,
                     'Packages update failed, '
                     'inspect logs for details')

        # Check if any packets were updated and update was successful.
        # Fixes over the original:
        # * match variables are initialized, so an empty stdout no longer
        #   raises NameError at the check below;
        # * 'Complete!' / 'No Packages...' matches are accumulated instead
        #   of being overwritten by every subsequent (non-matching) line;
        # * regexes use raw strings to avoid invalid escape sequences.
        match_updated_count = None
        match_complete_message = None
        match_no_updates = None
        updates_count = None
        for str_line in update_result['stdout']:
            found_count = re.search(r"Upgrade(?:\s*)(\d+).*Package", str_line)
            if found_count:
                match_updated_count = found_count
                updates_count = found_count.group(1)
            match_complete_message = (
                match_complete_message or
                re.search(r"(Complete!)", str_line))
            match_no_updates = (
                match_no_updates or
                re.search(r"No Packages marked for Update", str_line))

        if (not match_updated_count or match_no_updates)\
                and not match_complete_message:
            logger.warning('No updates were found or update was incomplete.')
            return
        logger.info('{0} packet(s) were updated'.format(updates_count))

        cmd = 'bootstrap_admin_node.sh;'

        result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                          cmd=cmd)
        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(result, cmd))
        assert_equal(int(result['exit_code']), 0, 'bootstrap failed, '
                     'inspect logs for details')

    # Modifies a resolv.conf on the Fuel master node and returns
    # its original content.
    # * adds 'nameservers' at start of resolv.conf if merge=True
    # * replaces resolv.conf with 'nameservers' if merge=False
    def modify_resolv_conf(self, nameservers=None, merge=True):
        """Rewrite /etc/resolv.conf on the master node.

        :param nameservers: list of resolv.conf lines to put first
        :param merge: bool, keep the existing resolv.conf content after
            the supplied lines when True
        :return: list, original content of /etc/resolv.conf
        """
        if nameservers is None:
            nameservers = []
        else:
            # Fix: copy to avoid mutating the caller's list via extend()
            nameservers = list(nameservers)

        resolv_conf = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                               cmd='cat /etc/resolv.conf')
        assert_equal(
            0, resolv_conf['exit_code'],
            'Executing "{0}" on the admin node has failed with: {1}'.format(
                'cat /etc/resolv.conf', resolv_conf['stderr']))
        if merge:
            nameservers.extend(resolv_conf['stdout'])
        # keep only lines that actually configure the resolver
        resolv_keys = ['search', 'domain', 'nameserver']
        resolv_new = "".join('{0}\n'.format(ns) for ns in nameservers
                             if any(x in ns for x in resolv_keys))
        logger.debug('echo "{0}" > /etc/resolv.conf'.format(resolv_new))
        echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
        echo_result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                               cmd=echo_cmd)
        assert_equal(
            0, echo_result['exit_code'],
            'Executing "{0}" on the admin node has failed with: {1}'.format(
                echo_cmd, echo_result['stderr']))
        return resolv_conf['stdout']

    @staticmethod
    @logwrap
    def execute_remote_cmd(remote, cmd, exit_code=0):
        """Run *cmd* on *remote*, assert the exit code, return stdout.

        :param remote: remote connection object with an execute() method
        :param cmd: str, shell command to run
        :param exit_code: int, expected exit code (default 0)
        :return: list, stdout lines of the command
        """
        run_result = remote.execute(cmd)
        assert_equal(
            run_result['exit_code'], exit_code,
            'Failed to execute "{0}" on remote host: {1}'.format(
                cmd, run_result))
        return run_result['stdout']

    @logwrap
    def describe_other_admin_interfaces(self, admin):
        """Configure every extra 'admin*' network interface on the master
        node, then run 'cobbler sync' if anything was configured.

        :param admin: devops admin node object
        """
        iface_name = None
        network_names = [iface.network.name for iface in admin.interfaces]
        for index, network_name in enumerate(network_names):
            if 'admin' in network_name and network_name != 'admin':
                # This will be replaced with actual interface labels
                # form fuel-devops
                iface_name = 'enp0s' + str(index + 3)
                logger.info("Describe Fuel admin node interface {0} for "
                            "network {1}".format(iface_name, network_name))
                self.describe_admin_interface(iface_name, network_name)

        if iface_name:
            return self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                            cmd="cobbler sync")

    @logwrap
    def describe_admin_interface(self, admin_if, network_name):
        """Assign a static IP to interface *admin_if* on the master node.

        Writes an ifcfg file for the interface, brings it up, verifies the
        address got assigned, then applies the DHCP/firewall hacks for the
        additional admin network.

        :param admin_if: str, interface name on the master node
        :param network_name: str, devops network to take IP/netmask from
        """
        admin_net_object = self.d_env.get_network(name=network_name)
        admin_network = admin_net_object.ip.network
        admin_netmask = admin_net_object.ip.netmask
        admin_ip = str(self.d_env.nodes().admin.get_ip_address_by_network_name(
            network_name))
        logger.info(('Parameters for admin interface configuration: '
                     'Network - {0}, Netmask - {1}, Interface - {2}, '
                     'IP Address - {3}').format(admin_network, admin_netmask,
                                                admin_if, admin_ip))
        # ifcfg file body; '\n' is doubly escaped because it is expanded
        # by 'echo -e' on the remote side
        add_admin_ip = ('DEVICE={0}\\n'
                        'ONBOOT=yes\\n'
                        'NM_CONTROLLED=no\\n'
                        'USERCTL=no\\n'
                        'PEERDNS=no\\n'
                        'BOOTPROTO=static\\n'
                        'IPADDR={1}\\n'
                        'NETMASK={2}\\n').format(admin_if, admin_ip,
                                                 admin_netmask)
        # write the config, bring the iface up, and grep the assigned IP
        # so a failed assignment yields a non-zero exit code
        cmd = ('echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};'
               'ifup {1}; ip -o -4 a s {1} | grep -w {2}').format(
                   add_admin_ip, admin_if, admin_ip)
        logger.debug(
            'Trying to assign {0} IP to the {1} on master node...'.format(
                admin_ip, admin_if))

        result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                          cmd=cmd)
        assert_equal(result['exit_code'], 0,
                     ('Failed to assign second admin '
                      'IP address on master node: {0}').format(result))
        logger.debug('Done: {0}'.format(result['stdout']))

        # TODO for ssh manager
        multiple_networks_hacks.configure_second_admin_dhcp(
            self.ssh_manager.admin_ip, admin_if)
        multiple_networks_hacks.configure_second_admin_firewall(
            self.ssh_manager.admin_ip, admin_network, admin_netmask, admin_if,
            self.get_admin_node_ip())

    @logwrap
    def get_masternode_uuid(self):
        """Fetch the master node UID from the nailgun database."""
        query = "select master_node_uid from master_node_settings limit 1;"
        return self.postgres_actions.run_query(db='nailgun', query=query)
# Beispiel #46  (paste-site separator, commented out so the file parses)
# 0
class EnvironmentModel(object):
    """EnvironmentModel."""  # TODO documentation

    _instance = None

    def __new__(cls, *args, **kwargs):
        """Return the process-wide singleton instance.

        Fix: extra constructor arguments are no longer forwarded to
        object.__new__() — on Python 3 that raises TypeError when the
        class is instantiated with arguments (e.g. a config). They still
        reach __init__ through the normal instantiation protocol.
        """
        if not cls._instance:
            cls._instance = super(EnvironmentModel, cls).__new__(cls)
        return cls._instance

    def __init__(self, config=None):
        """Initialize SSH access to the admin node and action helpers.

        Because the class is a singleton, __init__ can run again on the
        same instance; the hasattr() guards keep cached values intact.

        :param config: dict, optional devops environment template
        """
        if not hasattr(self, "_virt_env"):
            self._virt_env = None
        if not hasattr(self, "_fuel_web"):
            self._fuel_web = None
        self._config = config
        self.ssh_manager = SSHManager()
        self.ssh_manager.initialize(
            self.get_admin_node_ip(),
            login=settings.SSH_CREDENTIALS['login'],
            password=settings.SSH_CREDENTIALS['password']
        )
        # action helper singletons used across the test suite
        self.admin_actions = AdminActions()
        self.base_actions = BaseActions()
        self.cobbler_actions = CobblerActions()
        self.docker_actions = DockerActions()
        self.nailgun_actions = NailgunActions()
        self.postgres_actions = PostgresActions()
        self.fuel_bootstrap_actions = FuelBootstrapCliActions()

    @property
    def fuel_web(self):
        """Lazily create and cache the FuelWebClient for this environment."""
        client = self._fuel_web
        if client is None:
            client = FuelWebClient(self)
            self._fuel_web = client
        return client

    def __repr__(self):
        """Short representation with the admin IP when it is known."""
        # only resolve the IP if the fuel_web client already exists, so
        # repr() itself never triggers client creation
        if getattr(self, '_fuel_web'):
            ip = self.fuel_web.admin_node_ip
        else:
            ip = None
        return "[{klass}({obj_id}), ip:{ip}]".format(
            klass=type(self), obj_id=hex(id(self)), ip=ip)

    @property
    def admin_node_ip(self):
        """IP address of the Fuel admin node (delegates to fuel_web)."""
        return self.fuel_web.admin_node_ip

    @property
    def collector(self):
        """A new CollectorClient for the analytics endpoint on each access."""
        return CollectorClient(settings.ANALYTICS_IP, 'api/v1/json')

    @logwrap
    def add_syslog_server(self, cluster_id, port=5514):
        """Register the devops router as syslog server for the cluster.

        :param cluster_id: int, cluster to configure
        :param port: int, syslog UDP/TCP port (default 5514)
        """
        self.fuel_web.add_syslog_server(
            cluster_id, self.d_env.router(), port)

    def bootstrap_nodes(self, devops_nodes, timeout=900, skip_timesync=False):
        """Start the given devops VMs and wait for nailgun registration.

        :param devops_nodes: list of devops node objects to boot
        :param timeout: int, seconds to wait for all nodes to register
        :param skip_timesync: bool, skip NTP synchronization when True
        :rtype : List of registered nailgun nodes
        """
        # self.dhcrelay_check()

        for node in devops_nodes:
            logger.info("Bootstrapping node: {}".format(node.name))
            node.start()
            # TODO(aglarendil): LP#1317213 temporary sleep
            # remove after better fix is applied
            time.sleep(5)

        if not MASTER_IS_CENTOS7:
            # TimeStat records how long registration took for reporting
            with TimeStat("wait_for_nodes_to_start_and_register_in_nailgun"):
                wait(
                    lambda: all(self.nailgun_nodes(devops_nodes)),
                    15,
                    timeout)
        else:
            wait(lambda: all(self.nailgun_nodes(devops_nodes)), 15, timeout)

        if not skip_timesync:
            self.sync_time([node for node in self.nailgun_nodes(devops_nodes)])

        return self.nailgun_nodes(devops_nodes)

    @logwrap
    def get_admin_node_ip(self):
        """Return the admin node IP on the admin network as a string."""
        admin_node = self.d_env.nodes().admin
        return str(
            admin_node.get_ip_address_by_network_name(self.d_env.admin_net))

    @logwrap
    def get_ebtables(self, cluster_id, devops_nodes):
        """Build an Ebtables helper over node devices and cluster VLANs.

        :param cluster_id: int, cluster whose VLANs to use
        :param devops_nodes: list of devops node objects
        """
        return Ebtables(self.get_target_devs(devops_nodes),
                        self.fuel_web.client.get_cluster_vlans(cluster_id))

    def get_keys(self, node, custom=None, build_images=None,
                 iso_connect_as='cdrom'):
        """Build the boot-prompt keystroke sequence for the master node.

        Produces the virtual-keyboard input (kernel command line) sent to
        the admin VM at the ISO boot screen, varying by boot device
        (usb/cdrom) and master OS (CentOS 7 uses ip=...:iface:off syntax).

        :param node: devops admin node object
        :param custom: unused here — NOTE(review): accepted but not read
            in this method; confirm whether callers rely on it
        :param build_images: truthy -> pass build_images=1 to the installer
        :param iso_connect_as: 'usb' or 'cdrom' (default)
        :return: str, keystroke sequence for node.send_keys()
        """
        params = {
            'ks': 'hd:LABEL="Mirantis_Fuel":/ks.cfg' if iso_connect_as == 'usb'
            else 'cdrom:/ks.cfg',
            'repo': 'hd:LABEL="Mirantis_Fuel":/',  # only required for USB boot
            'ip': node.get_ip_address_by_network_name(
                self.d_env.admin_net),
            'mask': self.d_env.get_network(
                name=self.d_env.admin_net).ip.netmask,
            'gw': self.d_env.router(),
            'hostname': ''.join((settings.FUEL_MASTER_HOSTNAME,
                                 settings.DNS_SUFFIX)),
            'nat_interface': self.d_env.nat_interface,
            'dns1': settings.DNS,
            'showmenu': 'no',
            'wait_for_external_config': 'yes',
            'build_images': '1' if build_images else '0'
        }
        if iso_connect_as == 'usb':
            keys = (
                "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                "<F12>\n"
                "2\n"
                "<Esc><Enter>\n"
                "<Wait>\n"
                "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                " repo=%(repo)s\n"
                " ip=%(ip)s\n"
                " netmask=%(mask)s\n"
                " gw=%(gw)s\n"
                " dns1=%(dns1)s\n"
                " hostname=%(hostname)s\n"
                " dhcp_interface=%(nat_interface)s\n"
                " showmenu=%(showmenu)s\n"
                " wait_for_external_config=%(wait_for_external_config)s\n"
                " build_images=%(build_images)s\n"
                " <Enter>\n"
            ) % params
        else:  # cdrom case is default
            keys = (
                "<Wait>\n"
                "<Wait>\n"
                "<Wait>\n"
                "<Esc>\n"
                "<Wait>\n"
                "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                " ip=%(ip)s\n"
                " netmask=%(mask)s\n"
                " gw=%(gw)s\n"
                " dns1=%(dns1)s\n"
                " hostname=%(hostname)s\n"
                " dhcp_interface=%(nat_interface)s\n"
                " showmenu=%(showmenu)s\n"
                " wait_for_external_config=%(wait_for_external_config)s\n"
                " build_images=%(build_images)s\n"
                " <Enter>\n"
            ) % params
        if MASTER_IS_CENTOS7:
            # CentOS 7 is pretty stable with admin iface.
            # TODO(akostrikov) add tests for menu items/kernel parameters
            # TODO(akostrikov) refactor it.
            iface = 'enp0s3'
            if iso_connect_as == 'usb':
                keys = (
                    "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                    "<F12>\n"
                    "2\n"
                    "<Esc><Enter>\n"
                    "<Wait>\n"
                    "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                    " repo=%(repo)s\n"
                    " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
                    ":{iface}:off::: dns1=%(dns1)s"
                    " showmenu=%(showmenu)s\n"
                    " wait_for_external_config=%(wait_for_external_config)s\n"
                    " build_images=%(build_images)s\n"
                    " <Enter>\n".format(iface=iface)
                ) % params
            else:  # cdrom case is default
                keys = (
                    "<Wait>\n"
                    "<Wait>\n"
                    "<Wait>\n"
                    "<Esc>\n"
                    "<Wait>\n"
                    "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                    " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
                    ":{iface}:off::: dns1=%(dns1)s"
                    " showmenu=%(showmenu)s\n"
                    " wait_for_external_config=%(wait_for_external_config)s\n"
                    " build_images=%(build_images)s\n"
                    " <Enter>\n".format(iface=iface)
                ) % params
        return keys

    def get_target_devs(self, devops_nodes):
        """Collect target_dev names of every interface of the given nodes.

        :param devops_nodes: iterable of devops node objects
        :return: flat list of target_dev values, in node/interface order
        """
        return [interface.target_dev
                for node in devops_nodes
                for interface in node.interfaces]

    @property
    def d_env(self):
        """Devops environment: fetch an existing one or define a new one.

        NOTE(review): when Environment.get() succeeds, the result is
        returned directly without being cached in self._virt_env, so it
        is re-fetched on every access — confirm this is intended.
        """
        if self._virt_env is None:
            if not self._config:
                try:
                    return Environment.get(name=settings.ENV_NAME)
                except Exception:
                    # no existing env with that name -> describe and define
                    self._virt_env = Environment.describe_environment(
                        boot_from=settings.ADMIN_BOOT_DEVICE)
                    self._virt_env.define()
            else:
                try:
                    return Environment.get(name=self._config[
                        'template']['devops_settings']['env_name'])
                except Exception:
                    # create from the supplied template configuration
                    self._virt_env = Environment.create_environment(
                        full_config=self._config)
                    self._virt_env.define()
        return self._virt_env

    def resume_environment(self):
        """Resume the devops env and make sure the admin node comes back.

        If the admin node does not answer on port 8000 shortly after
        resume, it is destroyed and cold-booted, then SSH credentials and
        (optionally) stats-collector settings are restored.
        """
        # NOTE(review): 'await' as a method name is a reserved word on
        # Python 3.7+; this code targets Python 2 / early 3.
        self.d_env.resume()
        admin = self.d_env.nodes().admin

        try:
            admin.await(self.d_env.admin_net, timeout=30, by_port=8000)
        except Exception as e:
            logger.warning("From first time admin isn't reverted: "
                           "{0}".format(e))
            admin.destroy()
            logger.info('Admin node was destroyed. Wait 10 sec.')
            time.sleep(10)

            admin.start()
            logger.info('Admin node started second time.')
            self.d_env.nodes().admin.await(self.d_env.admin_net)
            self.set_admin_ssh_password()
            self.docker_actions.wait_for_ready_containers(timeout=600)

            # set collector address in case of admin node destroy
            if settings.FUEL_STATS_ENABLED:
                self.nailgun_actions.set_collector_address(
                    settings.FUEL_STATS_HOST,
                    settings.FUEL_STATS_PORT,
                    settings.FUEL_STATS_SSL)
                # Restart statsenderd in order to apply new collector address
                self.nailgun_actions.force_fuel_stats_sending()
                self.fuel_web.client.send_fuel_stats(enabled=True)
                logger.info('Enabled sending of statistics to {0}:{1}'.format(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
                ))
        self.set_admin_ssh_password()
        self.docker_actions.wait_for_ready_containers()

    def make_snapshot(self, snapshot_name, description="", is_make=False):
        """Suspend the environment and take a named snapshot.

        A snapshot is taken only when settings.MAKE_SNAPSHOT is set or
        is_make=True. When FUEL_STATS_CHECK is set, the environment is
        resumed afterwards so stats checks can continue.

        :param snapshot_name: str, snapshot name
        :param description: str, snapshot description
        :param is_make: bool, force taking the snapshot
        """
        if settings.MAKE_SNAPSHOT or is_make:
            self.d_env.suspend(verbose=False)
            time.sleep(10)

            self.d_env.snapshot(snapshot_name, force=True,
                                description=description)
            revert_info(snapshot_name, self.get_admin_node_ip(), description)

        if settings.FUEL_STATS_CHECK:
            self.resume_environment()

    def nailgun_nodes(self, devops_nodes):
        """Map each devops node to its nailgun node record (lazy map)."""
        return map(self.fuel_web.get_nailgun_node_by_devops_node,
                   devops_nodes)

    def check_slaves_are_ready(self):
        """Wait until every active slave is reported online by nailgun.

        :return: True when all active slaves are online
        :raises TimeoutError: if a node stays offline for 6 minutes
        """
        active_slaves = [node for node in self.d_env.nodes().slaves
                         if node.driver.node_active(node)]
        # Bug: 1455753
        time.sleep(30)

        for node in active_slaves:
            try:
                wait(lambda:
                     self.fuel_web.get_nailgun_node_by_devops_node(
                         node)['online'], timeout=60 * 6)
            except TimeoutError:
                raise TimeoutError(
                    "Node {0} does not become online".format(node.name))
        return True

    def revert_snapshot(self, name, skip_timesync=False):
        """Revert the environment to a snapshot and bring it back online.

        :param name: str, snapshot name
        :param skip_timesync: bool, skip NTP sync of active slaves
        :return: bool, False if the snapshot does not exist, True on success
        """
        if not self.d_env.has_snapshot(name):
            return False

        logger.info('We have snapshot with such name: %s' % name)

        logger.info("Reverting the snapshot '{0}' ....".format(name))
        self.d_env.revert(name)

        logger.info("Resuming the snapshot '{0}' ....".format(name))
        self.resume_environment()

        if not skip_timesync:
            nailgun_nodes = [self.fuel_web.get_nailgun_node_by_name(node.name)
                             for node in self.d_env.nodes().slaves
                             if node.driver.node_active(node)]
            self.sync_time(nailgun_nodes)

        # re-authenticate if the keystone token/password became invalid
        try:
            _wait(self.fuel_web.client.get_releases,
                  expected=EnvironmentError, timeout=300)
        except exceptions.Unauthorized:
            self.set_admin_keystone_password()
            self.fuel_web.get_nailgun_version()

        _wait(lambda: self.check_slaves_are_ready(), timeout=60 * 6)
        return True

    def set_admin_ssh_password(self):
        """Ensure the admin node is reachable with configured SSH creds.

        Tries the configured credentials first; if that fails, falls back
        to default credentials and changes the password to the configured
        one.

        NOTE(review): the '******' literals below look like credentials
        redacted by the code-hosting site — restore the original default
        login/password before using this code.
        """
        new_login = settings.SSH_CREDENTIALS['login']
        new_password = settings.SSH_CREDENTIALS['password']
        try:
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='date'
            )
            logger.debug('Accessing admin node using SSH: SUCCESS')
        except Exception:
            logger.debug('Accessing admin node using SSH credentials:'
                         ' FAIL, trying to change password from default')
            self.ssh_manager.update_connection(
                ip=self.ssh_manager.admin_ip,
                login='******',
                password='******'
            )
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='echo -e "{1}\\n{1}" | passwd {0}'.format(new_login,
                                                              new_password)
            )
            self.ssh_manager.update_connection(
                ip=self.ssh_manager.admin_ip,
                login=new_login,
                password=new_password
            )
            logger.debug("Admin node password has changed.")
        logger.info("Admin node login name: '{0}' , password: '******'".
                    format(new_login, new_password))

    def set_admin_keystone_password(self):
        """Reset the Fuel UI (keystone) password if API auth fails.

        Probes the API with get_releases(); on failure, changes the Fuel
        user password to the configured one via the fuel CLI.
        """
        try:
            self.fuel_web.client.get_releases()
        # TODO(akostrikov) CENTOS7 except exceptions.Unauthorized:
        # Fix: a bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
        # narrowed to Exception (intent per the TODO is Unauthorized).
        except Exception:
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='fuel user --newpass {0} --change-password'.format(
                    settings.KEYSTONE_CREDS['password'])
            )
            # NOTE(review): the '"******"' literals below look like
            # format placeholders redacted by the code-hosting site.
            logger.info(
                'New Fuel UI (keystone) username: "******", password: "******"'
                .format(settings.KEYSTONE_CREDS['username'],
                        settings.KEYSTONE_CREDS['password']))

    def setup_environment(self, custom=settings.CUSTOM_ENV,
                          build_images=settings.BUILD_IMAGES,
                          iso_connect_as=settings.ADMIN_BOOT_DEVICE,
                          security=settings.SECURITY_TEST):
        """Install the Fuel master node from ISO and finish its setup.

        Boots the admin VM from the ISO (usb or cdrom), types the kernel
        parameters at the boot prompt, waits for provisioning/bootstrap,
        then applies optional updates, extra networks and stats settings.

        :param custom: bool, install custom packages/manifests first
        :param build_images: bool, build bootstrap images during install
        :param iso_connect_as: 'usb' or 'cdrom'
        :param security: bool, attach a Nessus scan node
        """
        # start admin node
        admin = self.d_env.nodes().admin
        if iso_connect_as == 'usb':
            admin.disk_devices.get(device='disk',
                                   bus='usb').volume.upload(settings.ISO_PATH)
        else:  # cdrom is default
            admin.disk_devices.get(
                device='cdrom').volume.upload(settings.ISO_PATH)
        self.d_env.start(self.d_env.nodes().admins)
        logger.info("Waiting for admin node to start up")
        wait(lambda: admin.driver.node_active(admin), 60)
        logger.info("Proceed with installation")
        # update network parameters at boot screen
        admin.send_keys(self.get_keys(admin, custom=custom,
                                      build_images=build_images,
                                      iso_connect_as=iso_connect_as))
        self.wait_for_provisioning()
        self.set_admin_ssh_password()
        self.wait_for_external_config()
        if custom:
            self.setup_customisation()
        if security:
            nessus_node = NessusActions(self.d_env)
            nessus_node.add_nessus_node()
        # wait while installation complete

        self.admin_actions.modify_configs(self.d_env.router())
        self.kill_wait_for_external_config()
        self.wait_bootstrap()

        if settings.UPDATE_FUEL:
            # Update Ubuntu packages
            self.admin_actions.upload_packages(
                local_packages_dir=settings.UPDATE_FUEL_PATH,
                centos_repo_path=None,
                ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU)

        self.docker_actions.wait_for_ready_containers()
        time.sleep(10)
        self.set_admin_keystone_password()
        if not MASTER_IS_CENTOS7:
            self.sync_time()
        if settings.UPDATE_MASTER:
            if settings.UPDATE_FUEL_MIRROR:
                # register each extra mirror as a temporary yum repo
                for i, url in enumerate(settings.UPDATE_FUEL_MIRROR):
                    conf_file = '/etc/yum.repos.d/temporary-{}.repo'.format(i)
                    cmd = ("echo -e"
                           " '[temporary-{0}]\nname="
                           "temporary-{0}\nbaseurl={1}/"
                           "\ngpgcheck=0\npriority="
                           "1' > {2}").format(i, url, conf_file)

                    self.ssh_manager.execute(
                        ip=self.ssh_manager.admin_ip,
                        cmd=cmd
                    )
            self.admin_install_updates()
        if settings.MULTIPLE_NETWORKS:
            self.describe_second_admin_interface()
        if not MASTER_IS_CENTOS7:
            self.nailgun_actions.set_collector_address(
                settings.FUEL_STATS_HOST,
                settings.FUEL_STATS_PORT,
                settings.FUEL_STATS_SSL)
            # Restart statsenderd to apply settings(Collector address)
            self.nailgun_actions.force_fuel_stats_sending()
        if settings.FUEL_STATS_ENABLED and not MASTER_IS_CENTOS7:
            self.fuel_web.client.send_fuel_stats(enabled=True)
            logger.info('Enabled sending of statistics to {0}:{1}'.format(
                settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
            ))
        if settings.PATCHING_DISABLE_UPDATES:
            # disable mosX.Y-updates/-security repos on the master node
            cmd = "find /etc/yum.repos.d/ -type f -regextype posix-egrep" \
                  " -regex '.*/mos[0-9,\.]+\-(updates|security).repo' | " \
                  "xargs -n1 -i sed '$aenabled=0' -i {}"
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd=cmd
            )

    @update_rpm_packages
    @upload_manifests
    def setup_customisation(self):
        """Install custom rpm packages/manifests (work done by decorators)."""
        logger.info('Installing custom packages/manifests '
                    'before master node bootstrap...')

    @logwrap
    def wait_for_provisioning(self,
                              timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait until TCP port 22 (ssh) of the admin node starts answering."""
        _wait(lambda: _tcp_ping(
            self.d_env.nodes(
            ).admin.get_ip_address_by_network_name
            (self.d_env.admin_net), 22), timeout=timeout)

    @logwrap
    def wait_for_external_config(self, timeout=120):
        """Wait until the wait_for_external_config process is running.

        On CentOS 7 masters only a single probe is issued; otherwise the
        probe is repeated until it succeeds or *timeout* expires.
        """
        check_cmd = 'pkill -0 -f wait_for_external_config'

        def _probe():
            # pkill -0 exits 0 iff a matching process exists
            return self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip, cmd=check_cmd)

        if MASTER_IS_CENTOS7:
            _probe()
        else:
            wait(lambda: _probe()['exit_code'] == 0, timeout=timeout)

    @logwrap
    def kill_wait_for_external_config(self):
        """Terminate wait_for_external_config and verify it is gone."""
        admin_ip = self.ssh_manager.admin_ip
        # kill the waiting process
        self.ssh_manager.execute_on_remote(
            ip=admin_ip,
            cmd='pkill -f "^wait_for_external_config"')
        # pkill -0 exits 1 when nothing matches, so this command
        # succeeds only once the process has actually terminated
        self.ssh_manager.execute_on_remote(
            ip=admin_ip,
            cmd='pkill -0 -f "^wait_for_external_config"; [[ $? -eq 1 ]]')

    @retry(count=3, delay=60)
    def sync_time(self, nailgun_nodes=None):
        """Synchronize time: admin node first, then controllers, then rest.

        :param nailgun_nodes: list of nailgun node dicts to sync (the
            admin node is always synced)
        """
        # with @retry, failure on any step of time synchronization causes
        # restart the time synchronization starting from the admin node
        if nailgun_nodes is None:
            nailgun_nodes = []
        controller_nodes = [
            n for n in nailgun_nodes if "controller" in n['roles']]
        other_nodes = [
            n for n in nailgun_nodes if "controller" not in n['roles']]

        # 1. The first time source for the environment: admin node
        logger.info("Synchronizing time on Fuel admin node")
        with GroupNtpSync(self, sync_admin_node=True) as g_ntp:
            g_ntp.do_sync_time()

        # 2. Controllers should be synchronized before providing time to others
        if controller_nodes:
            logger.info("Synchronizing time on all controllers")
            with GroupNtpSync(self, nailgun_nodes=controller_nodes) as g_ntp:
                g_ntp.do_sync_time()

        # 3. Synchronize time on all the rest nodes
        if other_nodes:
            logger.info("Synchronizing time on other active nodes")
            with GroupNtpSync(self, nailgun_nodes=other_nodes) as g_ntp:
                g_ntp.do_sync_time()

    def wait_bootstrap(self):
        """Wait for master-node puppet bootstrap to finish successfully.

        Polls the bootstrap log for a 'Fuel node deployment' line, then
        requires the 'complete' variant; raises Exception otherwise.
        """
        logger.info("Waiting while bootstrapping is in progress")
        log_path = "/var/log/puppet/bootstrap_admin_node.log"
        logger.info("Puppet timeout set in {0}".format(
            float(settings.PUPPET_TIMEOUT)))
        with self.d_env.get_admin_remote() as admin_remote:
            # grep exit code 0 means the marker line appeared in the log
            wait(
                lambda: not
                admin_remote.execute(
                    "grep 'Fuel node deployment' '%s'" % log_path
                )['exit_code'],
                timeout=(float(settings.PUPPET_TIMEOUT))
            )
            result = admin_remote.execute(
                "grep 'Fuel node deployment "
                "complete' '%s'" % log_path)['exit_code']
        if result != 0:
            raise Exception('Fuel node deployment failed.')
        self.bootstrap_image_check()

    def dhcrelay_check(self):
        """Check that dhcpcheck discovers the master node IP."""
        # CentOS 7 is pretty stable with admin iface.
        # TODO(akostrikov) refactor it.
        command = ("dhcpcheck discover "
                   "--ifaces {iface} "
                   "--repeat 3 "
                   "--timeout 10".format(iface='enp0s3'))

        out = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=command
        )['stdout']

        discovered = "".join(out)
        assert_true(self.get_admin_node_ip() in discovered,
                    "dhcpcheck doesn't discover master ip")

    def bootstrap_image_check(self):
        """Verify the Ubuntu bootstrap image was built and activated.

        Skips the check (with a warning) when the configured bootstrap
        flavor is not Ubuntu.
        """
        fuel_settings = self.admin_actions.get_fuel_settings()
        flavor = fuel_settings['BOOTSTRAP']['flavor']
        if flavor.lower() != 'ubuntu':
            logger.warning('Default image for bootstrap '
                           'is not based on Ubuntu!')
            return

        image_listing = self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='fuel-bootstrap --quiet list'
        )['stdout']
        # some image in the listing must be flagged 'active'
        active_found = any('active' in line for line in image_listing)
        assert_true(active_found,
                    'Ubuntu bootstrap image wasn\'t built and activated! '
                    'See logs in /var/log/fuel-bootstrap-image-build.log '
                    'for details.')

    def admin_install_pkg(self, pkg_name):
        """Install a package <pkg_name> on the admin node.

        :param pkg_name: str, name of the RPM package
        :return: int, exit code of the last executed command
        """
        # Fix: the query previously contained a stray trailing quote
        # ("rpm -q {0}'") which corrupted the shell command.
        remote_status = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd="rpm -q {0}".format(pkg_name)
        )
        if remote_status['exit_code'] == 0:
            logger.info("Package '{0}' already installed.".format(pkg_name))
        else:
            logger.info("Installing package '{0}' ...".format(pkg_name))
            remote_status = self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd="yum -y install {0}".format(pkg_name)
            )
            logger.info("Installation of the package '{0}' has been"
                        " completed with exit code {1}"
                        .format(pkg_name, remote_status['exit_code']))
        return remote_status['exit_code']

    def admin_run_service(self, service_name):
        """Start a service <service_name> on the admin node"""

        admin_ip = self.ssh_manager.admin_ip
        self.ssh_manager.execute(
            ip=admin_ip,
            cmd="service {0} start".format(service_name)
        )
        status_result = self.ssh_manager.execute(
            ip=admin_ip,
            cmd="service {0} status".format(service_name)
        )
        # a running SysV service reports a 'running...' line in status
        if any('running...' in line for line in status_result['stdout']):
            logger.info("Service '{0}' is running".format(service_name))
        else:
            logger.info("Service '{0}' failed to start"
                        " with exit code {1} :\n{2}"
                        .format(service_name,
                                status_result['exit_code'],
                                status_result['stdout']))

    # Execute yum updates
    # If updates installed,
    # then `dockerctl destroy all; bootstrap_admin_node.sh;`
    def admin_install_updates(self):
        """Run yum update on the admin node; re-bootstrap it if any
        packages were actually updated.

        :raises AssertionError: if the update or the re-bootstrap command
            exits with a non-zero code
        """
        logger.info('Searching for updates..')
        update_command = 'yum clean expire-cache; yum update -y'

        update_result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=update_command
        )

        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(update_result, update_command))
        assert_equal(int(update_result['exit_code']), 0,
                     'Packages update failed, '
                     'inspect logs for details')

        # Check if any packets were updated and update was successful.
        # Accumulate matches over the whole output instead of keeping only
        # the state of the last line: the original code left the match_*
        # names unbound on empty output (NameError) and effectively
        # inspected only the final stdout line.
        updates_count = None
        match_complete_message = False
        match_no_updates = False
        for str_line in update_result['stdout']:
            match_updated_count = re.search(r"Upgrade(?:\s*)(\d+).*Package",
                                            str_line)
            if match_updated_count:
                updates_count = match_updated_count.group(1)
            if re.search(r"(Complete!)", str_line):
                match_complete_message = True
            if re.search(r"No Packages marked for Update", str_line):
                match_no_updates = True

        if (updates_count is None or match_no_updates)\
                and not match_complete_message:
            logger.warning('No updates were found or update was incomplete.')
            return
        logger.info('{0} packet(s) were updated'.format(updates_count))

        cmd = 'dockerctl destroy all; bootstrap_admin_node.sh;'

        result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=cmd
        )
        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(result, cmd))
        assert_equal(int(result['exit_code']), 0,
                     'bootstrap failed, '
                     'inspect logs for details')

    def modify_resolv_conf(self, nameservers=None, merge=True):
        """Modify /etc/resolv.conf on the Fuel master node.

        * adds 'nameservers' at the start of resolv.conf if merge=True
        * replaces resolv.conf with 'nameservers' if merge=False

        :param nameservers: list of resolv.conf lines to put first
        :param merge: bool, keep the current contents after the new lines
        :return: list, original contents of /etc/resolv.conf
        """
        if nameservers is None:
            nameservers = []

        resolv_conf = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd='cat /etc/resolv.conf'
        )
        assert_equal(0, resolv_conf['exit_code'],
                     'Executing "{0}" on the admin node has failed with: {1}'
                     .format('cat /etc/resolv.conf', resolv_conf['stderr']))
        if merge:
            nameservers.extend(resolv_conf['stdout'])

        # Keep only lines that look like real resolver directives.
        resolv_keys = ['search', 'domain', 'nameserver']
        kept = [line for line in nameservers
                if any(key in line for key in resolv_keys)]
        resolv_new = "".join('{0}\n'.format(line) for line in kept)

        echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
        logger.debug(echo_cmd)
        echo_result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=echo_cmd
        )
        assert_equal(0, echo_result['exit_code'],
                     'Executing "{0}" on the admin node has failed with: {1}'
                     .format(echo_cmd, echo_result['stderr']))
        return resolv_conf['stdout']

    @logwrap
    def execute_remote_cmd(self, remote, cmd, exit_code=0):
        """Run *cmd* on *remote*, assert its exit code, return stdout.

        :param remote: remote client exposing execute()
        :param cmd: str, shell command to run
        :param exit_code: int, expected exit code (default 0)
        :return: list, stdout lines of the command
        """
        result = remote.execute(cmd)
        actual_code = result['exit_code']
        assert_equal(actual_code, exit_code,
                     'Failed to execute "{0}" on remote host: {1}'.
                     format(cmd, result))
        return result['stdout']

    @logwrap
    def describe_second_admin_interface(self):
        """Configure the second admin interface on the Fuel master node.

        Writes an ifcfg file for the interface attached to admin_net2,
        brings it up and verifies the address, then configures DHCP and
        firewall rules for the second admin network.
        """
        admin_net2_object = self.d_env.get_network(name=self.d_env.admin_net2)
        second_admin_network = admin_net2_object.ip.network
        second_admin_netmask = admin_net2_object.ip.netmask
        second_admin_if = settings.INTERFACES.get(self.d_env.admin_net2)
        second_admin_ip = str(self.d_env.nodes(
        ).admin.get_ip_address_by_network_name(self.d_env.admin_net2))
        logger.info(('Parameters for second admin interface configuration: '
                     'Network - {0}, Netmask - {1}, Interface - {2}, '
                     'IP Address - {3}').format(second_admin_network,
                                                second_admin_netmask,
                                                second_admin_if,
                                                second_admin_ip))
        # '\\n' stays escaped in the Python string because the remote
        # 'echo -e' below is what expands it into real newlines.
        add_second_admin_ip = ('DEVICE={0}\\n'
                               'ONBOOT=yes\\n'
                               'NM_CONTROLLED=no\\n'
                               'USERCTL=no\\n'
                               'PEERDNS=no\\n'
                               'BOOTPROTO=static\\n'
                               'IPADDR={1}\\n'
                               'NETMASK={2}\\n').format(second_admin_if,
                                                        second_admin_ip,
                                                        second_admin_netmask)
        # Write the ifcfg file, bring the interface up, then grep for the
        # expected IP so a misconfiguration yields a non-zero exit code.
        cmd = ('echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};'
               'ifup {1}; ip -o -4 a s {1} | grep -w {2}').format(
            add_second_admin_ip, second_admin_if, second_admin_ip)
        logger.debug('Trying to assign {0} IP to the {1} on master node...'.
                     format(second_admin_ip, second_admin_if))

        result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=cmd
        )
        assert_equal(result['exit_code'], 0, ('Failed to assign second admin '
                     'IP address on master node: {0}').format(result))
        logger.debug('Done: {0}'.format(result['stdout']))

        # TODO for ssh manager
        multiple_networks_hacks.configure_second_admin_dhcp(
            self.ssh_manager.admin_ip,
            second_admin_if
        )
        multiple_networks_hacks.configure_second_admin_firewall(
            self.ssh_manager.admin_ip,
            second_admin_network,
            second_admin_netmask,
            second_admin_if,
            self.get_admin_node_ip()
        )

    @logwrap
    def get_masternode_uuid(self):
        """Fetch the master node UID from the nailgun database."""
        query = "select master_node_uid from master_node_settings limit 1;"
        return self.postgres_actions.run_query(db='nailgun', query=query)
Beispiel #47
0
class EnvironmentModel(object):
    """EnvironmentModel."""  # TODO documentation

    def __init__(self, config=None):
        """Set up SSH access to the admin node and the action helpers.

        :param config: dict, optional devops environment template; when
            None the environment named in settings is used
        """
        # hasattr guards keep lazily created attributes intact if
        # __init__ runs more than once on the same object.
        if not hasattr(self, "_virt_env"):
            self._virt_env = None
        if not hasattr(self, "_fuel_web"):
            self._fuel_web = None
        self._config = config
        # Point the shared SSHManager at the admin node, with separate
        # credentials for the master and the slave nodes.
        self.ssh_manager = SSHManager()
        self.ssh_manager.initialize(
            self.get_admin_node_ip(),
            admin_login=settings.SSH_FUEL_CREDENTIALS['login'],
            admin_password=settings.SSH_FUEL_CREDENTIALS['password'],
            slave_login=settings.SSH_SLAVE_CREDENTIALS['login'],
            slave_password=settings.SSH_SLAVE_CREDENTIALS['password']
        )
        self.admin_actions = AdminActions()
        self.base_actions = BaseActions()
        self.cobbler_actions = CobblerActions()
        self.nailgun_actions = NailgunActions()
        self.postgres_actions = PostgresActions()
        self.fuel_bootstrap_actions = FuelBootstrapCliActions()

    @property
    def fuel_web(self):
        """Lazily constructed FuelWebClient bound to this environment."""
        if self._fuel_web is not None:
            return self._fuel_web
        self._fuel_web = FuelWebClient(self)
        return self._fuel_web

    def __repr__(self):
        """Show class, object id and admin IP (if fuel_web was created)."""
        ip = self.fuel_web.admin_node_ip if getattr(self, '_fuel_web') \
            else None
        return "[{klass}({obj_id}), ip:{ip}]".format(klass=type(self),
                                                     obj_id=hex(id(self)),
                                                     ip=ip)

    @property
    def admin_node_ip(self):
        # Delegates to FuelWebClient, which tracks the current admin IP.
        return self.fuel_web.admin_node_ip

    @property
    def collector(self):
        """Client for the Fuel stats collector REST API."""
        return CollectorClient(settings.ANALYTICS_IP, 'api/v1/json')

    @logwrap
    def add_syslog_server(self, cluster_id, port=5514):
        """Point cluster syslog at the devops router on the given port."""
        self.fuel_web.add_syslog_server(
            cluster_id, self.d_env.router(), port)

    def bootstrap_nodes(self, devops_nodes, timeout=settings.BOOTSTRAP_TIMEOUT,
                        skip_timesync=False):
        """Start vms and wait until they are registered on nailgun.

        :param devops_nodes: list of devops nodes to boot
        :param timeout: int, seconds to wait for nailgun registration
        :param skip_timesync: bool, skip the cluster clock sync when True
        :rtype : List of registered nailgun nodes
        """
        # self.dhcrelay_check()

        for node in devops_nodes:
            logger.info("Bootstrapping node: {}".format(node.name))
            node.start()
            # TODO(aglarendil): LP#1317213 temporary sleep
            # remove after better fix is applied
            time.sleep(5)

        with TimeStat("wait_for_nodes_to_start_and_register_in_nailgun"):
            wait(lambda: all(self.nailgun_nodes(devops_nodes)), 15, timeout)

        if not skip_timesync:
            self.sync_time()
        return self.nailgun_nodes(devops_nodes)

    def sync_time(self, nodes_names=None, skip_sync=False):
        """Synchronize wall-clock time on the given devops nodes.

        :param nodes_names: list of node names; defaults to every active
            fuel_master / fuel_slave node in the environment
        :param skip_sync: bool, forwarded to the module-level sync_time
            helper (which this method intentionally shadows by name)
        """
        if nodes_names is None:
            roles = ['fuel_master', 'fuel_slave']
            nodes_names = [node.name for node in self.d_env.get_nodes()
                           if node.role in roles and
                           node.driver.node_active(node)]
        logger.info("Please wait while time on nodes: {0} "
                    "will be synchronized"
                    .format(', '.join(sorted(nodes_names))))
        # Calls the module-level sync_time() helper, not this method.
        new_time = sync_time(self.d_env, nodes_names, skip_sync)
        for name in sorted(new_time):
            logger.info("New time on '{0}' = {1}".format(name, new_time[name]))

    @logwrap
    def get_admin_node_ip(self):
        """Return the admin node IP on the admin network as a string."""
        return str(
            self.d_env.nodes(
            ).admin.get_ip_address_by_network_name(
                self.d_env.admin_net))

    @logwrap
    def get_ebtables(self, cluster_id, devops_nodes):
        """Build an Ebtables helper for the nodes' target devices and
        the cluster's VLANs."""
        return Ebtables(self.get_target_devs(devops_nodes),
                        self.fuel_web.client.get_cluster_vlans(cluster_id))

    def get_keys(self, node, custom=None, build_images=None,
                 iso_connect_as='cdrom'):
        """Build the console keystroke sequence for the ISO bootloader.

        :param node: devops admin node (source of the admin-net IP)
        :param custom: accepted for interface compatibility; not used in
            this method's body
        :param build_images: truthy -> kernel param build_images=1, else 0
        :param iso_connect_as: 'usb' or 'cdrom' boot media selection
        :return: str, keys to send to the VM console
        """
        params = {
            'device_label': settings.ISO_LABEL,
            'iface': iface_alias('eth0'),
            'ip': node.get_ip_address_by_network_name(
                self.d_env.admin_net),
            'mask': self.d_env.get_network(
                name=self.d_env.admin_net).ip.netmask,
            'gw': self.d_env.router(),
            'hostname': ''.join((settings.FUEL_MASTER_HOSTNAME,
                                 settings.DNS_SUFFIX)),
            'nat_interface': self.d_env.nat_interface,
            'nameserver': settings.DNS,
            'showmenu': 'yes' if settings.SHOW_FUELMENU else 'no',
            'wait_for_external_config': 'yes',
            'build_images': '1' if build_images else '0',
            'MASTER_NODE_EXTRA_PACKAGES': settings.MASTER_NODE_EXTRA_PACKAGES
        }
        # TODO(akostrikov) add tests for menu items/kernel parameters
        # TODO(akostrikov) refactor it.
        if iso_connect_as == 'usb':
            keys = (
                "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                "<F12>\n"
                "2\n"
            )
        else:  # cdrom is default
            keys = (
                "<Wait>\n"
                "<Wait>\n"
                "<Wait>\n"
            )

        # %-interpolated so literal kernel cmdline braces are untouched.
        keys += (
            "<Esc>\n"
            "<Wait>\n"
            "vmlinuz initrd=initrd.img"
            " inst.ks=cdrom:LABEL=%(device_label)s:/ks.cfg"
            " inst.repo=cdrom:LABEL=%(device_label)s:/"
            " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
            ":%(iface)s:off::: nameserver=%(nameserver)s"
            " showmenu=%(showmenu)s\n"
            " wait_for_external_config=%(wait_for_external_config)s"
            " build_images=%(build_images)s\n"
            " MASTER_NODE_EXTRA_PACKAGES='%(MASTER_NODE_EXTRA_PACKAGES)s'\n"
            " <Enter>\n"
        ) % params
        return keys

    @staticmethod
    def get_target_devs(devops_nodes):
        """Collect target_dev of every interface of the given nodes.

        :param devops_nodes: iterable of devops nodes
        :return: flat list of interface target devices, in node order
        """
        # Flat nested comprehension replaces the original map/lambda plus
        # intermediate nested-list flattening (same order, same result).
        return [interface.target_dev
                for node in devops_nodes
                for interface in node.interfaces]

    @property
    def d_env(self):
        """Lazily resolved devops environment.

        Tries to fetch an existing environment by name first; on failure
        creates (and defines) a new one. NOTE: when the lookup succeeds
        the environment is returned directly and NOT cached in
        self._virt_env, so the lookup repeats on every access.
        """
        if self._virt_env is None:
            if not self._config:
                try:
                    return Environment.get(name=settings.ENV_NAME)
                except Exception:
                    self._virt_env = Environment.describe_environment(
                        boot_from=settings.ADMIN_BOOT_DEVICE)
                    self._virt_env.define()
            else:
                try:
                    return Environment.get(name=self._config[
                        'template']['devops_settings']['env_name'])
                except Exception:
                    self._virt_env = Environment.create_environment(
                        full_config=self._config)
                    self._virt_env.define()
        return self._virt_env

    def resume_environment(self):
        """Resume a suspended environment and make sure Fuel is usable.

        If the admin node does not come back after resume it is destroyed
        and cold-booted; when stats are enabled the collector address is
        re-applied afterwards.
        """
        self.d_env.resume()
        admin = self.d_env.nodes().admin

        # SSH sessions opened before the suspend are stale now.
        self.ssh_manager.clean_all_connections()

        try:
            admin.await(self.d_env.admin_net, timeout=30, by_port=8000)
        except Exception as e:
            logger.warning("From first time admin isn't reverted: "
                           "{0}".format(e))
            admin.destroy()
            logger.info('Admin node was destroyed. Wait 10 sec.')
            time.sleep(10)

            admin.start()
            logger.info('Admin node started second time.')
            self.d_env.nodes().admin.await(self.d_env.admin_net)
            self.set_admin_ssh_password()
            self.admin_actions.wait_for_fuel_ready(timeout=600)

            # set collector address in case of admin node destroy
            if settings.FUEL_STATS_ENABLED:
                self.nailgun_actions.set_collector_address(
                    settings.FUEL_STATS_HOST,
                    settings.FUEL_STATS_PORT,
                    settings.FUEL_STATS_SSL)
                # Restart statsenderd in order to apply new collector address
                self.nailgun_actions.force_fuel_stats_sending()
                self.fuel_web.client.send_fuel_stats(enabled=True)
                logger.info('Enabled sending of statistics to {0}:{1}'.format(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
                ))
        self.set_admin_ssh_password()
        self.admin_actions.wait_for_fuel_ready()

    def make_snapshot(self, snapshot_name, description="", is_make=False):
        """Suspend the environment and snapshot it (when enabled).

        :param snapshot_name: str, name for the devops snapshot
        :param description: str, free-form snapshot description
        :param is_make: bool, force a snapshot even if MAKE_SNAPSHOT is off
        """
        if is_make or settings.MAKE_SNAPSHOT:
            self.d_env.suspend()
            time.sleep(10)
            self.d_env.snapshot(snapshot_name, force=True,
                                description=description)
            revert_info(snapshot_name, self.get_admin_node_ip(), description)

        if settings.FUEL_STATS_CHECK:
            self.resume_environment()

    def nailgun_nodes(self, devops_nodes):
        """Map devops nodes to their nailgun node records."""
        get_node = self.fuel_web.get_nailgun_node_by_devops_node
        return [get_node(devops_node) for devops_node in devops_nodes]

    def check_slaves_are_ready(self):
        """Wait until every active slave is reported online by nailgun.

        :return: True when all active slaves are online
        :raises TimeoutError: if any node stays offline for 6 minutes
        """
        devops_nodes = [node for node in self.d_env.nodes().slaves
                        if node.driver.node_active(node)]
        # Bug: 1455753
        time.sleep(30)

        for node in devops_nodes:
            try:
                wait(lambda:
                     self.fuel_web.get_nailgun_node_by_devops_node(
                         node)['online'], timeout=60 * 6)
            except TimeoutError:
                raise TimeoutError(
                    "Node {0} does not become online".format(node.name))
        return True

    def revert_snapshot(self, name, skip_timesync=False,
                        skip_slaves_check=False):
        """Revert the environment to snapshot *name* and resume it.

        :param name: str, snapshot name
        :param skip_timesync: bool, skip node clock sync when True
        :param skip_slaves_check: bool, skip waiting for online slaves
        :return: False if the snapshot does not exist, True on success
        """
        if not self.d_env.has_snapshot(name):
            return False

        logger.info('We have snapshot with such name: {:s}'.format(name))

        logger.info("Reverting the snapshot '{0}' ....".format(name))
        self.d_env.revert(name)

        logger.info("Resuming the snapshot '{0}' ....".format(name))
        self.resume_environment()

        if not skip_timesync:
            self.sync_time()
        # Wait until the API answers; a 401 means the keystone password
        # must be (re)set before continuing.
        try:
            _wait(self.fuel_web.client.get_releases,
                  expected=EnvironmentError, timeout=300)
        except exceptions.Unauthorized:
            self.set_admin_keystone_password()
            self.fuel_web.get_nailgun_version()

        if not skip_slaves_check:
            _wait(lambda: self.check_slaves_are_ready(), timeout=60 * 6)
        return True

    def set_admin_ssh_password(self):
        """Ensure SSH to the admin node works with configured credentials.

        If login with the configured credentials fails, fall back to the
        default ones, change the password on the node, then re-point
        SSHManager at the new credentials.
        """
        new_login = settings.SSH_FUEL_CREDENTIALS['login']
        new_password = settings.SSH_FUEL_CREDENTIALS['password']
        try:
            # Cheap probe command to verify the current credentials.
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='date'
            )
            logger.debug('Accessing admin node using SSH: SUCCESS')
        except Exception:
            logger.debug('Accessing admin node using SSH credentials:'
                         ' FAIL, trying to change password from default')
            # NOTE: credential literals below look scrubbed ('******') in
            # this copy of the source -- verify against the original repo.
            self.ssh_manager.initialize(
                admin_ip=self.ssh_manager.admin_ip,
                admin_login='******',
                admin_password='******',
                slave_login=settings.SSH_SLAVE_CREDENTIALS['login'],
                slave_password=settings.SSH_SLAVE_CREDENTIALS['password']
            )
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='echo -e "{1}\\n{1}" | passwd {0}'.format(new_login,
                                                              new_password)
            )
            self.ssh_manager.initialize(
                admin_ip=self.ssh_manager.admin_ip,
                admin_login=new_login,
                admin_password=new_password,
                slave_login=settings.SSH_SLAVE_CREDENTIALS['login'],
                slave_password=settings.SSH_SLAVE_CREDENTIALS['password']
            )
            self.ssh_manager.update_connection(
                ip=self.ssh_manager.admin_ip,
                login=new_login,
                password=new_password
            )
            logger.debug("Admin node password has changed.")
        logger.info("Admin node login name: '{0}' , password: '******'".
                    format(new_login, new_password))

    def set_admin_keystone_password(self):
        """Ensure Fuel API access by (re)setting the keystone password.

        If an API call succeeds the password is already valid; otherwise
        reset it via the fuel CLI to the configured credentials.
        """
        try:
            self.fuel_web.client.get_releases()
        # TODO(akostrikov) CENTOS7 except exceptions.Unauthorized:
        except Exception:
            # 'except Exception' instead of the original bare 'except:'
            # so KeyboardInterrupt/SystemExit are not swallowed here.
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='fuel user --newpass {0} --change-password'.format(
                    settings.KEYSTONE_CREDS['password'])
            )
            logger.info(
                'New Fuel UI (keystone) username: "******", password: "******"'
                .format(settings.KEYSTONE_CREDS['username'],
                        settings.KEYSTONE_CREDS['password']))

    def insert_cdrom_tray(self):
        """Close the admin VM's CD-ROM tray by editing its libvirt XML.

        This is very rude implementation and it SHOULD be changes after
        implementation this feature in fuel-devops.
        """
        # Truncate overly long domain names to 80 chars, prefixing a hash
        # of the full name -- presumably to match how the domain was
        # originally named; TODO confirm against fuel-devops.
        name = "{}_{}".format(settings.ENV_NAME, self.d_env.nodes().admin.name)
        name_size = 80
        if len(name) > name_size:
            hash_str = str(hash(name))
            name = (hash_str + name)[:name_size]

        # NOTE(review): shell=True with an interpolated name -- inputs come
        # from local settings, not untrusted data, but keep it that way.
        cmd = """EDITOR="sed -i s/tray=\\'open\\'//" virsh edit {}""".format(
            name)
        subprocess.check_call(cmd, shell=True)

    def setup_environment(self, custom=settings.CUSTOM_ENV,
                          build_images=settings.BUILD_IMAGES,
                          iso_connect_as=settings.ADMIN_BOOT_DEVICE,
                          security=settings.SECURITY_TEST):
        """Create the environment and install the Fuel master node.

        :param custom: bool, run the customisation hooks before bootstrap
        :param build_images: passed through to the boot kernel parameters
        :param iso_connect_as: 'usb' or 'cdrom' boot media of the ISO
        :param security: bool, additionally deploy a Nessus node
        """
        # Create environment and start the Fuel master node
        admin = self.d_env.nodes().admin
        self.d_env.start([admin])

        logger.info("Waiting for admin node to start up")
        wait(lambda: admin.driver.node_active(admin), 60)
        logger.info("Proceed with installation")
        # update network parameters at boot screen
        admin.send_keys(self.get_keys(admin, custom=custom,
                                      build_images=build_images,
                                      iso_connect_as=iso_connect_as))
        if settings.SHOW_FUELMENU:
            self.wait_for_fuelmenu()
        else:
            self.wait_for_provisioning()

        self.set_admin_ssh_password()

        # The installer pauses on the wait_for_external_config lock so we
        # can inject customisation before bootstrap continues.
        self.wait_for_external_config()
        if custom:
            self.setup_customisation()
        if security:
            nessus_node = NessusActions(self.d_env)
            nessus_node.add_nessus_node()
        # wait while installation complete

        self.admin_actions.modify_configs(self.d_env.router())
        self.kill_wait_for_external_config()
        self.wait_bootstrap()
        self.admin_actions.wait_for_fuel_ready()

    @logwrap
    def enable_force_https(self, admin_node_ip):
        """Force HTTPS in the Fuel nginx config and re-apply the manifest.

        :param admin_node_ip: str, IP of the admin node to reconfigure
        """
        cmd = """
        echo -e '"SSL":\n  "force_https": "true"' >> /etc/fuel/astute.yaml
        """
        self.ssh_manager.execute_on_remote(admin_node_ip, cmd)
        cmd = "find / -name \"nginx_services.pp\""
        puppet_manifest = \
            self.ssh_manager.execute_on_remote(
                admin_node_ip, cmd)['stdout'][0].strip()
        cmd = 'puppet apply {0}'.format(puppet_manifest)
        self.ssh_manager.execute_on_remote(admin_node_ip, cmd)
        # Extract the Active state word from systemctl output and wait
        # until nginx is no longer 'dead' after the puppet re-apply.
        cmd = """
        systemctl status nginx.service |
        awk 'match($0, /\s+Active:.*\((\w+)\)/, a) {print a[1]}'
        """
        wait(lambda: (
             self.ssh_manager.execute_on_remote(
                 admin_node_ip, cmd)['stdout'][0] != 'dead'), interval=10,
             timeout=30)

    # pylint: disable=no-self-use
    @update_rpm_packages
    @upload_manifests
    def setup_customisation(self):
        """Customisation hook: the decorators do the actual work of
        uploading custom RPMs/manifests before master node bootstrap."""
        logger.info('Installing custom packages/manifests '
                    'before master node bootstrap...')
    # pylint: enable=no-self-use

    @logwrap
    def wait_for_provisioning(self,
                              timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait until the admin node answers on SSH port 22."""
        _wait(lambda: _tcp_ping(
            self.d_env.nodes(
            ).admin.get_ip_address_by_network_name
            (self.d_env.admin_net), 22), timeout=timeout)

    @logwrap
    def wait_for_fuelmenu(self,
                          timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Dismiss fuelmenu (F8) until SSH on the admin node is reachable."""

        def check_ssh_connection():
            """Try to close fuelmenu and check ssh connection"""
            try:
                _tcp_ping(
                    self.d_env.nodes(
                    ).admin.get_ip_address_by_network_name
                    (self.d_env.admin_net), 22)
            except Exception:
                #  send F8 trying to exit fuelmenu
                self.d_env.nodes().admin.send_keys("<F8>\n")
                return False
            return True

        wait(check_ssh_connection, interval=30, timeout=timeout,
             timeout_msg="Fuelmenu hasn't appeared during allocated timeout")

    @logwrap
    def wait_for_external_config(self, timeout=120):
        """Wait for the wait_for_external_config stage on the admin node.

        NOTE(review): the lock-file wait below uses a hard-coded 600 s;
        the 'timeout' argument only applies to the process check --
        confirm whether that asymmetry is intentional.
        """

        wait(lambda: self.ssh_manager.exists_on_remote(
            self.ssh_manager.admin_ip,
            '/var/lock/wait_for_external_config'),
            timeout=600)

        # 'pkill -0' only tests whether a matching process exists.
        check_cmd = 'pkill -0 -f wait_for_external_config'

        wait(
            lambda: self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd=check_cmd)['exit_code'] == 0, timeout=timeout)

    @logwrap
    def kill_wait_for_external_config(self):
        """Kill the wait_for_external_config helper and verify it is gone."""
        kill_cmd = 'pkill -f "^wait_for_external_config"'
        # 'pkill -0' returning 1 means no process matched; the bash test
        # turns that into exit code 0 so the remote call succeeds.
        check_cmd = 'pkill -0 -f "^wait_for_external_config"; [[ $? -eq 1 ]]'
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd=kill_cmd
        )
        self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd=check_cmd
        )

    def wait_bootstrap(self):
        """Wait for admin node bootstrap to finish and verify it succeeded.

        :raises Exception: if the bootstrap log never reports
            'Fuel node deployment complete'
        """
        logger.info("Waiting while bootstrapping is in progress")
        log_path = "/var/log/puppet/bootstrap_admin_node.log"
        logger.info("Running bootstrap (timeout: {0})".format(
            float(settings.ADMIN_NODE_BOOTSTRAP_TIMEOUT)))
        with TimeStat("admin_node_bootsrap_time", is_uniq=True):
            wait(
                lambda: self.ssh_manager.execute(
                    ip=self.ssh_manager.admin_ip,
                    cmd="grep 'Fuel node deployment' '{:s}'".format(log_path)
                )['exit_code'] == 0,
                timeout=(float(settings.ADMIN_NODE_BOOTSTRAP_TIMEOUT))
            )
        # Now distinguish plain 'deployment' lines from the final
        # 'deployment complete' success marker.
        result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd="grep 'Fuel node deployment "
            "complete' '{:s}'".format(log_path))['exit_code']
        if result != 0:
            raise Exception('Fuel node deployment failed.')
        self.bootstrap_image_check()

    def dhcrelay_check(self):
        """Verify dhcpcheck discovers the master node on the admin iface."""
        # CentOS 7 is pretty stable with admin iface.
        # TODO(akostrikov) refactor it.
        iface = iface_alias('eth0')
        command = "dhcpcheck discover " \
                  "--ifaces {iface} " \
                  "--repeat 3 " \
                  "--timeout 10".format(iface=iface)

        discovery = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=command
        )

        assert_true(self.get_admin_node_ip() in "".join(discovery['stdout']),
                    "dhcpcheck doesn't discover master ip")

    def bootstrap_image_check(self):
        """Check that an Ubuntu bootstrap image is built and activated."""
        fuel_settings = self.admin_actions.get_fuel_settings()
        flavor = fuel_settings['BOOTSTRAP']['flavor']
        if flavor.lower() != 'ubuntu':
            logger.warning('Default image for bootstrap '
                           'is not based on Ubuntu!')
            return

        listing = self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='fuel-bootstrap --quiet list'
        )['stdout']
        has_active_image = any('active' in line for line in listing)
        assert_true(has_active_image,
                    'Ubuntu bootstrap image wasn\'t built and activated! '
                    'See logs in /var/log/fuel-bootstrap-image-build.log '
                    'for details.')

    def admin_install_pkg(self, pkg_name):
        """Install a package <pkg_name> on the admin node.

        :param pkg_name: str, name of the RPM package to install
        :return: int, exit code of the last executed command (0 on success)
        """
        # NOTE: the original query was "rpm -q {0}'" with a stray trailing
        # quote, so the installed-check always failed and the package was
        # always reinstalled.
        remote_status = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd="rpm -q {0}".format(pkg_name)
        )
        if remote_status['exit_code'] == 0:
            logger.info("Package '{0}' already installed.".format(pkg_name))
        else:
            logger.info("Installing package '{0}' ...".format(pkg_name))
            remote_status = self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd="yum -y install {0}".format(pkg_name)
            )
            logger.info("Installation of the package '{0}' has been"
                        " completed with exit code {1}"
                        .format(pkg_name, remote_status['exit_code']))
        return remote_status['exit_code']

    def admin_run_service(self, service_name):
        """Start a service <service_name> on the admin node and report
        whether it is running afterwards.

        :param service_name: str, name of the system service to start
        """
        admin_ip = self.ssh_manager.admin_ip
        self.ssh_manager.execute(
            ip=admin_ip,
            cmd="service {0} start".format(service_name)
        )
        status = self.ssh_manager.execute(
            ip=admin_ip,
            cmd="service {0} status".format(service_name)
        )
        if any('running...' in line for line in status['stdout']):
            logger.info("Service '{0}' is running".format(service_name))
        else:
            logger.info("Service '{0}' failed to start"
                        " with exit code {1} :\n{2}"
                        .format(service_name,
                                status['exit_code'],
                                status['stdout']))

    # Execute yum updates
    # If updates installed,
    # then `bootstrap_admin_node.sh;`
    def admin_install_updates(self):
        """Run yum update on the admin node; re-bootstrap it if any
        packages were actually updated.

        :raises AssertionError: if the update or the re-bootstrap command
            exits with a non-zero code
        """
        logger.info('Searching for updates..')
        update_command = 'yum clean expire-cache; yum update -y'

        update_result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=update_command
        )

        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(update_result, update_command))
        assert_equal(int(update_result['exit_code']), 0,
                     'Packages update failed, '
                     'inspect logs for details')

        # Check if any packets were updated and update was successful.
        # Accumulate matches over the whole output instead of keeping only
        # the state of the last line: the original code left the match_*
        # names unbound on empty output (NameError) and effectively
        # inspected only the final stdout line.
        updates_count = None
        match_complete_message = False
        match_no_updates = False
        for str_line in update_result['stdout']:
            match_updated_count = re.search(r"Upgrade(?:\s*)(\d+).*Package",
                                            str_line)
            if match_updated_count:
                updates_count = match_updated_count.group(1)
            if re.search(r"(Complete!)", str_line):
                match_complete_message = True
            if re.search(r"No Packages marked for Update", str_line):
                match_no_updates = True

        if (updates_count is None or match_no_updates)\
                and not match_complete_message:
            logger.warning('No updates were found or update was incomplete.')
            return
        logger.info('{0} packet(s) were updated'.format(updates_count))

        cmd = 'bootstrap_admin_node.sh;'

        result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=cmd
        )
        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(result, cmd))
        assert_equal(int(result['exit_code']), 0,
                     'bootstrap failed, '
                     'inspect logs for details')

    def modify_resolv_conf(self, nameservers=None, merge=True):
        """Modify /etc/resolv.conf on the Fuel master node.

        * adds 'nameservers' at the start of resolv.conf if merge=True
        * replaces resolv.conf with 'nameservers' if merge=False

        :param nameservers: list of resolv.conf lines to put first
        :param merge: bool, keep the current contents after the new lines
        :return: list, original contents of /etc/resolv.conf
        """
        if nameservers is None:
            nameservers = []

        resolv_conf = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd='cat /etc/resolv.conf'
        )
        assert_equal(0, resolv_conf['exit_code'],
                     'Executing "{0}" on the admin node has failed with: {1}'
                     .format('cat /etc/resolv.conf', resolv_conf['stderr']))
        if merge:
            nameservers.extend(resolv_conf['stdout'])

        # Keep only lines that look like real resolver directives.
        resolv_keys = ['search', 'domain', 'nameserver']
        kept = [line for line in nameservers
                if any(key in line for key in resolv_keys)]
        resolv_new = "".join('{0}\n'.format(line) for line in kept)

        echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
        logger.debug(echo_cmd)
        echo_result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=echo_cmd
        )
        assert_equal(0, echo_result['exit_code'],
                     'Executing "{0}" on the admin node has failed with: {1}'
                     .format(echo_cmd, echo_result['stderr']))
        return resolv_conf['stdout']

    @staticmethod
    @logwrap
    def execute_remote_cmd(remote, cmd, exit_code=0):
        """Run *cmd* on *remote*, assert its exit code, return stdout.

        :param remote: remote client exposing execute()
        :param cmd: str, shell command to run
        :param exit_code: int, expected exit code (default 0)
        :return: list, stdout lines of the command
        """
        result = remote.execute(cmd)
        actual_code = result['exit_code']
        assert_equal(actual_code, exit_code,
                     'Failed to execute "{0}" on remote host: {1}'.
                     format(cmd, result))
        return result['stdout']

    @logwrap
    def describe_other_admin_interfaces(self, admin):
        """Configure extra admin interfaces (admin2, ...) on the master.

        :param admin: devops admin node whose interfaces are inspected
        :return: result dict of 'cobbler sync' when at least one extra
            admin network was configured, otherwise None
        """
        admin_networks = [iface.network.name for iface in admin.interfaces]
        iface_name = None
        for i, network_name in enumerate(admin_networks):
            # Every network containing 'admin' except the primary one.
            if 'admin' in network_name and 'admin' != network_name:
                # This will be replaced with actual interface labels
                # form fuel-devops
                iface_name = 'enp0s' + str(i + 3)
                logger.info("Describe Fuel admin node interface {0} for "
                            "network {1}".format(iface_name, network_name))
                self.describe_admin_interface(iface_name, network_name)

        if iface_name:
            return self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd="cobbler sync")

    @logwrap
    def describe_admin_interface(self, admin_if, network_name):
        """Configure one extra admin interface on the Fuel master node.

        :param admin_if: str, interface name on the master node
        :param network_name: str, devops network the interface attaches to
        """
        admin_net_object = self.d_env.get_network(name=network_name)
        admin_network = admin_net_object.ip.network
        admin_netmask = admin_net_object.ip.netmask
        admin_ip = str(self.d_env.nodes(
        ).admin.get_ip_address_by_network_name(network_name))
        logger.info(('Parameters for admin interface configuration: '
                     'Network - {0}, Netmask - {1}, Interface - {2}, '
                     'IP Address - {3}').format(admin_network,
                                                admin_netmask,
                                                admin_if,
                                                admin_ip))
        # '\\n' stays escaped here; the remote 'echo -e' expands it.
        add_admin_ip = ('DEVICE={0}\\n'
                        'ONBOOT=yes\\n'
                        'NM_CONTROLLED=no\\n'
                        'USERCTL=no\\n'
                        'PEERDNS=no\\n'
                        'BOOTPROTO=static\\n'
                        'IPADDR={1}\\n'
                        'NETMASK={2}\\n').format(admin_if,
                                                 admin_ip,
                                                 admin_netmask)
        # Write the ifcfg file, bring the interface up, then grep for the
        # expected IP so a misconfiguration yields a non-zero exit code.
        cmd = ('echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};'
               'ifup {1}; ip -o -4 a s {1} | grep -w {2}').format(
            add_admin_ip, admin_if, admin_ip)
        logger.debug('Trying to assign {0} IP to the {1} on master node...'.
                     format(admin_ip, admin_if))

        result = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip,
            cmd=cmd
        )
        assert_equal(result['exit_code'], 0, ('Failed to assign second admin '
                     'IP address on master node: {0}').format(result))
        logger.debug('Done: {0}'.format(result['stdout']))

        # TODO for ssh manager
        multiple_networks_hacks.configure_second_admin_dhcp(
            self.ssh_manager.admin_ip,
            admin_if
        )
        multiple_networks_hacks.configure_second_admin_firewall(
            self.ssh_manager.admin_ip,
            admin_network,
            admin_netmask,
            admin_if,
            self.get_admin_node_ip()
        )

    @logwrap
    def get_masternode_uuid(self):
        """Fetch the master node UID from the nailgun database."""
        query = "select master_node_uid from master_node_settings limit 1;"
        return self.postgres_actions.run_query(db='nailgun', query=query)
Beispiel #48
0
class EnvironmentModel(object):
    """EnvironmentModel."""  # TODO documentation

    _instance = None

    def __new__(cls, *args, **kwargs):
        """Return the process-wide singleton instance.

        ``object.__new__`` must be called without the extra arguments:
        forwarding ``*args``/``**kwargs`` raises TypeError on Python 3
        when ``__new__`` is overridden. The constructor arguments are
        still delivered to ``__init__`` by the normal call protocol.
        """
        if not cls._instance:
            cls._instance = super(EnvironmentModel, cls).__new__(cls)
        return cls._instance

    def __init__(self, config=None):
        """Set up SSH access to the admin node and all action helpers.

        :param config: dict, optional devops template configuration,
            consumed later by the :meth:`d_env` property
        """
        # Guarded initialization: __init__ runs on every construction of
        # the singleton, so do not wipe already-created lazy members.
        if not hasattr(self, "_virt_env"):
            self._virt_env = None
        if not hasattr(self, "_fuel_web"):
            self._fuel_web = None
        self._config = config
        # Establish the admin SSH connection first; the action helpers
        # below presumably rely on it (SSHManager looks like a
        # singleton) -- confirm against their implementations.
        self.ssh_manager = SSHManager()
        self.ssh_manager.initialize(
            self.get_admin_node_ip(),
            login=settings.SSH_CREDENTIALS['login'],
            password=settings.SSH_CREDENTIALS['password'])
        self.admin_actions = AdminActions()
        self.base_actions = BaseActions()
        self.cobbler_actions = CobblerActions()
        self.docker_actions = DockerActions()
        self.nailgun_actions = NailgunActions()
        self.postgres_actions = PostgresActions()
        self.fuel_bootstrap_actions = FuelBootstrapCliActions()

    @property
    def fuel_web(self):
        """Lazily construct and cache the FuelWebClient for this env."""
        if self._fuel_web is not None:
            return self._fuel_web
        self._fuel_web = FuelWebClient(self)
        return self._fuel_web

    def __repr__(self):
        """Show class, object id and (when fuel_web exists) admin IP."""
        admin_ip = self.fuel_web.admin_node_ip if getattr(
            self, '_fuel_web') else None
        return "[{klass}({obj_id}), ip:{ip}]".format(
            klass=type(self), obj_id=hex(id(self)), ip=admin_ip)

    @property
    def admin_node_ip(self):
        """IP address of the Fuel admin node, as known by fuel_web."""
        web_client = self.fuel_web
        return web_client.admin_node_ip

    @property
    def collector(self):
        """Client for the external statistics collector service."""
        analytics_ip = settings.ANALYTICS_IP
        return CollectorClient(analytics_ip, 'api/v1/json')

    @logwrap
    def add_syslog_server(self, cluster_id, port=5514):
        """Register the devops router as a syslog server for the cluster."""
        router_ip = self.d_env.router()
        self.fuel_web.add_syslog_server(cluster_id, router_ip, port)

    def bootstrap_nodes(self, devops_nodes, timeout=900, skip_timesync=False):
        """Start devops VMs and wait until nailgun registers them.

        :param devops_nodes: iterable of devops node objects to boot
        :param timeout: int, seconds to wait for nailgun registration
        :param skip_timesync: bool, skip NTP sync of the new nodes
        :rtype : List of registered nailgun nodes
        """
        # self.dhcrelay_check()

        for devops_node in devops_nodes:
            logger.info("Bootstrapping node: {}".format(devops_node.name))
            devops_node.start()
            # TODO(aglarendil): LP#1317213 temporary sleep
            # remove after better fix is applied
            time.sleep(5)

        def all_registered():
            return all(self.nailgun_nodes(devops_nodes))

        if MASTER_IS_CENTOS7:
            wait(all_registered, 15, timeout)
        else:
            with TimeStat("wait_for_nodes_to_start_and_register_in_nailgun"):
                wait(all_registered, 15, timeout)

        if not skip_timesync:
            self.sync_time(list(self.nailgun_nodes(devops_nodes)))

        return self.nailgun_nodes(devops_nodes)

    @logwrap
    def get_admin_node_ip(self):
        """Admin node IP on the admin network, as a string."""
        admin_node = self.d_env.nodes().admin
        return str(admin_node.get_ip_address_by_network_name(
            self.d_env.admin_net))

    @logwrap
    def get_ebtables(self, cluster_id, devops_nodes):
        """Build an Ebtables helper for the nodes' target devices."""
        cluster_vlans = self.fuel_web.client.get_cluster_vlans(cluster_id)
        return Ebtables(self.get_target_devs(devops_nodes), cluster_vlans)

    def get_keys(self,
                 node,
                 custom=None,
                 build_images=None,
                 iso_connect_as='cdrom'):
        """Build the bootloader keystroke sequence for admin node install.

        :param node: devops admin node, used to resolve its admin-net IP
        :param custom: not used in this method; accepted for interface
            compatibility with callers
        :param build_images: truthy -> installer gets build_images=1
        :param iso_connect_as: 'usb' or 'cdrom' (default); changes both
            the kickstart location and the boot-menu navigation keys
        :return: str, console keystrokes to send to the VM
        """
        params = {
            'ks':
            'hd:LABEL="Mirantis_Fuel":/ks.cfg'
            if iso_connect_as == 'usb' else 'cdrom:/ks.cfg',
            'repo':
            'hd:LABEL="Mirantis_Fuel":/',  # only required for USB boot
            'ip':
            node.get_ip_address_by_network_name(self.d_env.admin_net),
            'mask':
            self.d_env.get_network(name=self.d_env.admin_net).ip.netmask,
            'gw':
            self.d_env.router(),
            'hostname':
            ''.join((settings.FUEL_MASTER_HOSTNAME, settings.DNS_SUFFIX)),
            'nat_interface':
            self.d_env.nat_interface,
            'dns1':
            settings.DNS,
            'showmenu':
            'no',
            'wait_for_external_config':
            'yes',
            'build_images':
            '1' if build_images else '0'
        }
        # USB boot needs extra menu navigation (F12, entry "2") and the
        # repo= argument pointing at the labeled USB volume.
        if iso_connect_as == 'usb':
            keys = (
                "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                "<F12>\n"
                "2\n"
                "<Esc><Enter>\n"
                "<Wait>\n"
                "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                " repo=%(repo)s\n"
                " ip=%(ip)s\n"
                " netmask=%(mask)s\n"
                " gw=%(gw)s\n"
                " dns1=%(dns1)s\n"
                " hostname=%(hostname)s\n"
                " dhcp_interface=%(nat_interface)s\n"
                " showmenu=%(showmenu)s\n"
                " wait_for_external_config=%(wait_for_external_config)s\n"
                " build_images=%(build_images)s\n"
                " <Enter>\n") % params
        else:  # cdrom case is default
            keys = ("<Wait>\n"
                    "<Wait>\n"
                    "<Wait>\n"
                    "<Esc>\n"
                    "<Wait>\n"
                    "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                    " ip=%(ip)s\n"
                    " netmask=%(mask)s\n"
                    " gw=%(gw)s\n"
                    " dns1=%(dns1)s\n"
                    " hostname=%(hostname)s\n"
                    " dhcp_interface=%(nat_interface)s\n"
                    " showmenu=%(showmenu)s\n"
                    " wait_for_external_config=%(wait_for_external_config)s\n"
                    " build_images=%(build_images)s\n"
                    " <Enter>\n") % params
        if MASTER_IS_CENTOS7:
            # CentOS 7 is pretty stable with admin iface.
            # TODO(akostrikov) add tests for menu items/kernel parameters
            # TODO(akostrikov) refactor it.
            # NOTE(review): the single "ip=addr::gw:mask:hostname:iface:off"
            # argument looks like dracut-style network syntax -- confirm.
            iface = 'enp0s3'
            if iso_connect_as == 'usb':
                keys = (
                    "<Wait>\n"  # USB boot uses boot_menu=yes for master node
                    "<F12>\n"
                    "2\n"
                    "<Esc><Enter>\n"
                    "<Wait>\n"
                    "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                    " repo=%(repo)s\n"
                    " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
                    ":{iface}:off::: dns1=%(dns1)s"
                    " showmenu=%(showmenu)s\n"
                    " wait_for_external_config=%(wait_for_external_config)s\n"
                    " build_images=%(build_images)s\n"
                    " <Enter>\n".format(iface=iface)) % params
            else:  # cdrom case is default
                keys = (
                    "<Wait>\n"
                    "<Wait>\n"
                    "<Wait>\n"
                    "<Esc>\n"
                    "<Wait>\n"
                    "vmlinuz initrd=initrd.img ks=%(ks)s\n"
                    " ip=%(ip)s::%(gw)s:%(mask)s:%(hostname)s"
                    ":{iface}:off::: dns1=%(dns1)s"
                    " showmenu=%(showmenu)s\n"
                    " wait_for_external_config=%(wait_for_external_config)s\n"
                    " build_images=%(build_images)s\n"
                    " <Enter>\n".format(iface=iface)) % params
        return keys

    def get_target_devs(self, devops_nodes):
        """Collect target_dev names of every interface of every node."""
        target_devs = []
        for devops_node in devops_nodes:
            for iface in devops_node.interfaces:
                target_devs.append(iface.target_dev)
        return target_devs

    @property
    def d_env(self):
        """Devops environment: fetch an existing one or define a new one.

        NOTE(review): when Environment.get() succeeds the result is
        returned directly and never cached in self._virt_env, so every
        access repeats the lookup -- confirm this is intended.
        """
        if self._virt_env is None:
            if not self._config:
                try:
                    return Environment.get(name=settings.ENV_NAME)
                except Exception:
                    # No such environment yet -- describe and define one
                    # booting from the configured admin device.
                    self._virt_env = Environment.describe_environment(
                        boot_from=settings.ADMIN_BOOT_DEVICE)
                    self._virt_env.define()
            else:
                try:
                    return Environment.get(name=self._config['template']
                                           ['devops_settings']['env_name'])
                except Exception:
                    # The templated environment is missing -- create it
                    # from the full config instead.
                    self._virt_env = Environment.create_environment(
                        full_config=self._config)
                    self._virt_env.define()
        return self._virt_env

    def resume_environment(self):
        """Resume the devops environment and wait for the admin node.

        If the admin node does not answer on the first attempt it is
        destroyed and started again; afterwards the SSH password is
        (re)set, containers are awaited, and (when FUEL_STATS_ENABLED)
        the collector address is restored since the restart loses it.
        """
        self.d_env.resume()
        admin = self.d_env.nodes().admin

        try:
            # 'await' became a reserved keyword in Python 3 (PEP 492);
            # resolve the devops node method via getattr so this module
            # stays parseable on modern interpreters.
            getattr(admin, 'await')(self.d_env.admin_net, timeout=30,
                                    by_port=8000)
        except Exception as e:
            logger.warning("From first time admin isn't reverted: "
                           "{0}".format(e))
            admin.destroy()
            logger.info('Admin node was destroyed. Wait 10 sec.')
            time.sleep(10)

            admin.start()
            logger.info('Admin node started second time.')
            getattr(self.d_env.nodes().admin, 'await')(self.d_env.admin_net)
            self.set_admin_ssh_password()
            self.docker_actions.wait_for_ready_containers(timeout=600)

            # set collector address in case of admin node destroy
            if settings.FUEL_STATS_ENABLED:
                self.nailgun_actions.set_collector_address(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT,
                    settings.FUEL_STATS_SSL)
                # Restart statsenderd in order to apply new collector address
                self.nailgun_actions.force_fuel_stats_sending()
                self.fuel_web.client.send_fuel_stats(enabled=True)
                logger.info('Enabled sending of statistics to {0}:{1}'.format(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT))
        self.set_admin_ssh_password()
        self.docker_actions.wait_for_ready_containers()

    def make_snapshot(self, snapshot_name, description="", is_make=False):
        """Suspend the env and take a named snapshot (when enabled)."""
        if settings.MAKE_SNAPSHOT or is_make:
            self.d_env.suspend(verbose=False)
            time.sleep(10)
            self.d_env.snapshot(snapshot_name, force=True,
                                description=description)
            revert_info(snapshot_name, self.get_admin_node_ip(), description)

        # Statistics checks need a running environment afterwards.
        if settings.FUEL_STATS_CHECK:
            self.resume_environment()

    def nailgun_nodes(self, devops_nodes):
        """Map devops nodes to their nailgun counterparts.

        :param devops_nodes: iterable of devops node objects
        :return: list of nailgun nodes (a real list rather than the
            lazy ``map`` object the old code produced, so callers can
            iterate the result repeatedly on Python 3 as well)
        """
        return [self.fuel_web.get_nailgun_node_by_devops_node(node)
                for node in devops_nodes]

    def check_slaves_are_ready(self):
        """Wait until every active slave is reported online by nailgun."""
        active_slaves = [
            slave for slave in self.d_env.nodes().slaves
            if slave.driver.node_active(slave)
        ]
        # Bug: 1455753
        time.sleep(30)

        for slave in active_slaves:
            try:
                wait(lambda: self.fuel_web.get_nailgun_node_by_devops_node(
                    slave)['online'],
                     timeout=60 * 6)
            except TimeoutError:
                raise TimeoutError("Node {0} does not become online".format(
                    slave.name))
        return True

    def revert_snapshot(self, name, skip_timesync=False):
        """Revert the devops environment to snapshot *name* and resume it.

        :param name: str, snapshot name
        :param skip_timesync: bool, skip NTP sync on active slaves
        :return: True when the snapshot existed and was reverted,
            False when there is no such snapshot
        """
        if not self.d_env.has_snapshot(name):
            return False

        logger.info('We have snapshot with such name: %s' % name)

        logger.info("Reverting the snapshot '{0}' ....".format(name))
        self.d_env.revert(name)

        logger.info("Resuming the snapshot '{0}' ....".format(name))
        self.resume_environment()

        if not skip_timesync:
            nailgun_nodes = [
                self.fuel_web.get_nailgun_node_by_name(node.name)
                for node in self.d_env.nodes().slaves
                if node.driver.node_active(node)
            ]
            self.sync_time(nailgun_nodes)

        try:
            # Poll the releases API until nailgun answers; the 'expected'
            # exception is presumably retried by _wait -- confirm against
            # the helper's implementation.
            _wait(self.fuel_web.client.get_releases,
                  expected=EnvironmentError,
                  timeout=300)
        except exceptions.Unauthorized:
            # Snapshot carries different keystone credentials than the
            # current settings -- reset the password and retry the API.
            self.set_admin_keystone_password()
            self.fuel_web.get_nailgun_version()

        _wait(lambda: self.check_slaves_are_ready(), timeout=60 * 6)
        return True

    def set_admin_ssh_password(self):
        """Ensure SSH access to the admin node with configured credentials.

        Tries the configured credentials first; on failure falls back to
        default credentials and changes the password remotely.
        """
        new_login = settings.SSH_CREDENTIALS['login']
        new_password = settings.SSH_CREDENTIALS['password']
        try:
            self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip,
                                               cmd='date')
            logger.debug('Accessing admin node using SSH: SUCCESS')
        except Exception:
            logger.debug('Accessing admin node using SSH credentials:'
                         ' FAIL, trying to change password from default')
            # NOTE(review): the '******' literals below look like redacted
            # default credentials -- confirm the real values against the
            # project history before relying on this fallback.
            self.ssh_manager.update_connection(ip=self.ssh_manager.admin_ip,
                                               login='******',
                                               password='******')
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='echo -e "{1}\\n{1}" | passwd {0}'.format(
                    new_login, new_password))
            self.ssh_manager.update_connection(ip=self.ssh_manager.admin_ip,
                                               login=new_login,
                                               password=new_password)
            logger.debug("Admin node password has changed.")
        # NOTE(review): the '{0}'/'******' mix below also looks redacted;
        # only new_login is actually interpolated into the message.
        logger.info("Admin node login name: '{0}' , password: '******'".format(
            new_login, new_password))

    def set_admin_keystone_password(self):
        """Reset the Fuel UI (keystone) password if API auth fails.

        Probes the nailgun releases API; on any failure resets the
        keystone password to the configured credentials via the fuel CLI.
        """
        try:
            self.fuel_web.client.get_releases()
        # TODO(akostrikov) CENTOS7 except exceptions.Unauthorized:
        except Exception:
            # A bare 'except:' here would also swallow SystemExit and
            # KeyboardInterrupt; Exception is the widest sane net.
            self.ssh_manager.execute_on_remote(
                ip=self.ssh_manager.admin_ip,
                cmd='fuel user --newpass {0} --change-password'.format(
                    settings.KEYSTONE_CREDS['password']))
            # NOTE(review): the literal below looks like redacted
            # placeholders ('******'); the format args are ignored.
            logger.info(
                'New Fuel UI (keystone) username: "******", password: "******"'.
                format(settings.KEYSTONE_CREDS['username'],
                       settings.KEYSTONE_CREDS['password']))

    def setup_environment(self,
                          custom=settings.CUSTOM_ENV,
                          build_images=settings.BUILD_IMAGES,
                          iso_connect_as=settings.ADMIN_BOOT_DEVICE,
                          security=settings.SECURITY_TEST):
        """Install the Fuel master node from ISO and finish its bootstrap.

        :param custom: truthy -> run custom packages/manifests setup
        :param build_images: forwarded to the installer boot parameters
        :param iso_connect_as: 'usb' or 'cdrom', how the ISO is attached
        :param security: truthy -> additionally deploy a Nessus node
        """
        # start admin node
        admin = self.d_env.nodes().admin
        if iso_connect_as == 'usb':
            admin.disk_devices.get(device='disk',
                                   bus='usb').volume.upload(settings.ISO_PATH)
        else:  # cdrom is default
            admin.disk_devices.get(device='cdrom').volume.upload(
                settings.ISO_PATH)
        self.d_env.start(self.d_env.nodes().admins)
        logger.info("Waiting for admin node to start up")
        wait(lambda: admin.driver.node_active(admin), 60)
        logger.info("Proceed with installation")
        # update network parameters at boot screen
        admin.send_keys(
            self.get_keys(admin,
                          custom=custom,
                          build_images=build_images,
                          iso_connect_as=iso_connect_as))
        self.wait_for_provisioning()
        self.set_admin_ssh_password()
        self.wait_for_external_config()
        if custom:
            self.setup_customisation()
        if security:
            nessus_node = NessusActions(self.d_env)
            nessus_node.add_nessus_node()
        # wait while installation complete

        self.admin_actions.modify_configs(self.d_env.router())
        self.kill_wait_for_external_config()
        self.wait_bootstrap()

        if settings.UPDATE_FUEL:
            # Update Ubuntu packages
            self.admin_actions.upload_packages(
                local_packages_dir=settings.UPDATE_FUEL_PATH,
                centos_repo_path=None,
                ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU)

        self.docker_actions.wait_for_ready_containers()
        time.sleep(10)
        self.set_admin_keystone_password()
        if not MASTER_IS_CENTOS7:
            self.sync_time()
        if settings.UPDATE_MASTER:
            # Write one temporary yum repo file per configured mirror,
            # then run the update/re-bootstrap cycle.
            if settings.UPDATE_FUEL_MIRROR:
                for i, url in enumerate(settings.UPDATE_FUEL_MIRROR):
                    conf_file = '/etc/yum.repos.d/temporary-{}.repo'.format(i)
                    cmd = ("echo -e"
                           " '[temporary-{0}]\nname="
                           "temporary-{0}\nbaseurl={1}/"
                           "\ngpgcheck=0\npriority="
                           "1' > {2}").format(i, url, conf_file)

                    self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                             cmd=cmd)
            self.admin_install_updates()
        if settings.MULTIPLE_NETWORKS:
            self.describe_second_admin_interface()
        if not MASTER_IS_CENTOS7:
            self.nailgun_actions.set_collector_address(
                settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT,
                settings.FUEL_STATS_SSL)
            # Restart statsenderd to apply settings(Collector address)
            self.nailgun_actions.force_fuel_stats_sending()
        if settings.FUEL_STATS_ENABLED and not MASTER_IS_CENTOS7:
            self.fuel_web.client.send_fuel_stats(enabled=True)
            logger.info('Enabled sending of statistics to {0}:{1}'.format(
                settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT))
        if settings.PATCHING_DISABLE_UPDATES:
            # Append 'enabled=0' to every mosX-updates/-security repo file.
            cmd = "find /etc/yum.repos.d/ -type f -regextype posix-egrep" \
                  " -regex '.*/mos[0-9,\.]+\-(updates|security).repo' | " \
                  "xargs -n1 -i sed '$aenabled=0' -i {}"
            self.ssh_manager.execute_on_remote(ip=self.ssh_manager.admin_ip,
                                               cmd=cmd)

    @update_rpm_packages
    @upload_manifests
    def setup_customisation(self):
        """Install custom packages/manifests before master bootstrap.

        The body only logs; the real work is presumably performed by the
        @update_rpm_packages and @upload_manifests decorators -- confirm
        against their definitions.
        """
        logger.info('Installing custom packages/manifests '
                    'before master node bootstrap...')

    @logwrap
    def wait_for_provisioning(self,
                              timeout=settings.WAIT_FOR_PROVISIONING_TIMEOUT):
        """Wait until TCP port 22 answers on the admin node."""
        def ssh_port_answers():
            admin_ip = self.d_env.nodes(
            ).admin.get_ip_address_by_network_name(self.d_env.admin_net)
            return _tcp_ping(admin_ip, 22)

        _wait(ssh_port_answers, timeout=timeout)

    @logwrap
    def wait_for_external_config(self, timeout=120):
        """Wait for the wait_for_external_config process on the admin node."""
        check_cmd = 'pkill -0 -f wait_for_external_config'

        def process_is_running():
            return self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd=check_cmd)['exit_code'] == 0

        if MASTER_IS_CENTOS7:
            # Single probe only; no polling on CentOS 7.
            self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                     cmd=check_cmd)
        else:
            wait(process_is_running, timeout=timeout)

    @logwrap
    def kill_wait_for_external_config(self):
        """Kill wait_for_external_config and verify nothing is left."""
        admin_ip = self.ssh_manager.admin_ip
        self.ssh_manager.execute_on_remote(
            ip=admin_ip, cmd='pkill -f "^wait_for_external_config"')
        # pkill -0 exits 1 when no process matches, i.e. the kill worked.
        self.ssh_manager.execute_on_remote(
            ip=admin_ip,
            cmd='pkill -0 -f "^wait_for_external_config"; [[ $? -eq 1 ]]')

    @retry(count=3, delay=60)
    def sync_time(self, nailgun_nodes=None):
        """Synchronize time: admin node, then controllers, then the rest.

        With @retry, a failure on any step restarts the whole
        synchronization starting from the admin node.
        """
        if nailgun_nodes is None:
            nailgun_nodes = []
        controllers = []
        others = []
        for node in nailgun_nodes:
            if "controller" in node['roles']:
                controllers.append(node)
            else:
                others.append(node)

        # 1. The admin node is the first time source for the environment.
        logger.info("Synchronizing time on Fuel admin node")
        with GroupNtpSync(self, sync_admin_node=True) as g_ntp:
            g_ntp.do_sync_time()

        # 2. Controllers must be in sync before serving time to others.
        if controllers:
            logger.info("Synchronizing time on all controllers")
            with GroupNtpSync(self, nailgun_nodes=controllers) as g_ntp:
                g_ntp.do_sync_time()

        # 3. Everything else follows.
        if others:
            logger.info("Synchronizing time on other active nodes")
            with GroupNtpSync(self, nailgun_nodes=others) as g_ntp:
                g_ntp.do_sync_time()

    def wait_bootstrap(self):
        """Block until puppet finishes deploying the Fuel admin node."""
        logger.info("Waiting while bootstrapping is in progress")
        log_path = "/var/log/puppet/bootstrap_admin_node.log"
        puppet_timeout = float(settings.PUPPET_TIMEOUT)
        logger.info("Puppet timeout set in {0}".format(puppet_timeout))
        with self.d_env.get_admin_remote() as admin_remote:
            wait(lambda: not admin_remote.execute(
                "grep 'Fuel node deployment' '%s'" % log_path)['exit_code'],
                 timeout=puppet_timeout)
            completed = admin_remote.execute("grep 'Fuel node deployment "
                                             "complete' '%s'" %
                                             log_path)['exit_code']
        if completed != 0:
            raise Exception('Fuel node deployment failed.')
        self.bootstrap_image_check()

    def dhcrelay_check(self):
        """Verify the master node answers DHCP discover on the admin iface."""
        # CentOS 7 is pretty stable with admin iface.
        # TODO(akostrikov) refactor it.
        command = ("dhcpcheck discover --ifaces {iface} "
                   "--repeat 3 --timeout 10".format(iface='enp0s3'))

        out = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                       cmd=command)['stdout']

        assert_true(self.get_admin_node_ip() in "".join(out),
                    "dhcpcheck doesn't discover master ip")

    def bootstrap_image_check(self):
        """Ensure an Ubuntu bootstrap image was built and activated."""
        fuel_settings = self.admin_actions.get_fuel_settings()
        flavor = fuel_settings['BOOTSTRAP']['flavor']
        if flavor.lower() != 'ubuntu':
            logger.warning('Default image for bootstrap '
                           'is not based on Ubuntu!')
            return

        listing = self.ssh_manager.execute_on_remote(
            ip=self.ssh_manager.admin_ip,
            cmd='fuel-bootstrap --quiet list')['stdout']
        has_active_image = any('active' in line for line in listing)
        assert_true(
            has_active_image,
            'Ubuntu bootstrap image wasn\'t built and activated! '
            'See logs in /var/log/fuel-bootstrap-image-build.log '
            'for details.')

    def admin_install_pkg(self, pkg_name):
        """Install a package <pkg_name> on the admin node.

        :param pkg_name: str, RPM package name
        :return: int, exit code of the last executed remote command
        """
        # The query previously carried a stray trailing quote
        # ("rpm -q {0}'"), which made the installed-check always fail.
        remote_status = self.ssh_manager.execute(
            ip=self.ssh_manager.admin_ip, cmd="rpm -q {0}".format(pkg_name))
        if remote_status['exit_code'] == 0:
            logger.info("Package '{0}' already installed.".format(pkg_name))
        else:
            logger.info("Installing package '{0}' ...".format(pkg_name))
            remote_status = self.ssh_manager.execute(
                ip=self.ssh_manager.admin_ip,
                cmd="yum -y install {0}".format(pkg_name))
            logger.info("Installation of the package '{0}' has been"
                        " completed with exit code {1}".format(
                            pkg_name, remote_status['exit_code']))
        return remote_status['exit_code']

    def admin_run_service(self, service_name):
        """Start a service <service_name> on the admin node"""
        admin_ip = self.ssh_manager.admin_ip
        self.ssh_manager.execute(
            ip=admin_ip, cmd="service {0} start".format(service_name))
        remote_status = self.ssh_manager.execute(
            ip=admin_ip, cmd="service {0} status".format(service_name))
        service_is_up = any(
            'running...' in line for line in remote_status['stdout'])
        if service_is_up:
            logger.info("Service '{0}' is running".format(service_name))
        else:
            logger.info("Service '{0}' failed to start"
                        " with exit code {1} :\n{2}".format(
                            service_name, remote_status['exit_code'],
                            remote_status['stdout']))

    # Execute yum updates
    # If updates installed,
    # then `dockerctl destroy all; bootstrap_admin_node.sh;`
    def admin_install_updates(self):
        """Run yum update on the admin node; re-bootstrap if it updated.

        Raises AssertionError when the update or re-bootstrap fails.
        """
        logger.info('Searching for updates..')
        update_command = 'yum clean expire-cache; yum update -y'

        update_result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                                 cmd=update_command)

        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(update_result, update_command))
        assert_equal(int(update_result['exit_code']), 0,
                     'Packages update failed, '
                     'inspect logs for details')

        # Check if any packets were updated and update was successful.
        # Initialize the match results so an empty stdout cannot raise a
        # NameError below (previously they were only bound inside the loop;
        # note the checks still reflect only the LAST stdout line, as before).
        updates_count = None
        match_updated_count = None
        match_complete_message = None
        match_no_updates = None
        for str_line in update_result['stdout']:
            match_updated_count = re.search(r"Upgrade(?:\s*)(\d+).*Package",
                                            str_line)
            if match_updated_count:
                updates_count = match_updated_count.group(1)
            match_complete_message = re.search(r"(Complete!)", str_line)
            match_no_updates = re.search("No Packages marked for Update",
                                         str_line)

        if (not match_updated_count or match_no_updates)\
                and not match_complete_message:
            logger.warning('No updates were found or update was incomplete.')
            return
        logger.info('{0} packet(s) were updated'.format(updates_count))

        cmd = 'dockerctl destroy all; bootstrap_admin_node.sh;'

        result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                          cmd=cmd)
        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(result, cmd))
        assert_equal(int(result['exit_code']), 0, 'bootstrap failed, '
                     'inspect logs for details')

    def modify_resolv_conf(self, nameservers=None, merge=True):
        """Modify /etc/resolv.conf on the Fuel master node.

        * adds 'nameservers' at start of resolv.conf if merge=True
        * replaces resolv.conf with 'nameservers' if merge=False

        :param nameservers: list of str resolv.conf entries
        :param merge: bool, keep the existing entries after the new ones
        :return: list of str, original content of /etc/resolv.conf
        """
        if nameservers is None:
            nameservers = []

        resolv_conf = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                               cmd='cat /etc/resolv.conf')
        assert_equal(
            0, resolv_conf['exit_code'],
            'Executing "{0}" on the admin node has failed with: {1}'.format(
                'cat /etc/resolv.conf', resolv_conf['stderr']))
        if merge:
            nameservers.extend(resolv_conf['stdout'])
        # Keep only lines that look like real resolv.conf directives.
        resolv_keys = ['search', 'domain', 'nameserver']
        valid_entries = [entry for entry in nameservers
                         if any(key in entry for key in resolv_keys)]
        resolv_new = "".join('{0}\n'.format(entry) for entry in valid_entries)
        echo_cmd = 'echo "{0}" > /etc/resolv.conf'.format(resolv_new)
        logger.debug(echo_cmd)
        echo_result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                               cmd=echo_cmd)
        assert_equal(
            0, echo_result['exit_code'],
            'Executing "{0}" on the admin node has failed with: {1}'.format(
                echo_cmd, echo_result['stderr']))
        return resolv_conf['stdout']

    @logwrap
    def execute_remote_cmd(self, remote, cmd, exit_code=0):
        """Run cmd on remote, assert the expected exit code, return stdout."""
        result = remote.execute(cmd)
        actual_code = result['exit_code']
        assert_equal(
            actual_code, exit_code,
            'Failed to execute "{0}" on remote host: {1}'.format(cmd, result))
        return result['stdout']

    @logwrap
    def describe_second_admin_interface(self):
        """Configure the second admin interface on the master node.

        Writes an ifcfg file for the second admin network, brings the
        interface up, verifies the address got assigned, then sets up
        DHCP and firewall rules for that network.
        """
        admin_net2_object = self.d_env.get_network(name=self.d_env.admin_net2)
        second_admin_network = admin_net2_object.ip.network
        second_admin_netmask = admin_net2_object.ip.netmask
        second_admin_if = settings.INTERFACES.get(self.d_env.admin_net2)
        second_admin_ip = str(
            self.d_env.nodes().admin.get_ip_address_by_network_name(
                self.d_env.admin_net2))
        logger.info(
            ('Parameters for second admin interface configuration: '
             'Network - {0}, Netmask - {1}, Interface - {2}, '
             'IP Address - {3}').format(second_admin_network,
                                        second_admin_netmask, second_admin_if,
                                        second_admin_ip))
        # The '\\n' sequences are expanded by 'echo -e' on the master node.
        add_second_admin_ip = ('DEVICE={0}\\n'
                               'ONBOOT=yes\\n'
                               'NM_CONTROLLED=no\\n'
                               'USERCTL=no\\n'
                               'PEERDNS=no\\n'
                               'BOOTPROTO=static\\n'
                               'IPADDR={1}\\n'
                               'NETMASK={2}\\n').format(
                                   second_admin_if, second_admin_ip,
                                   second_admin_netmask)
        # Write ifcfg, ifup the interface, then grep confirms the address.
        cmd = ('echo -e "{0}" > /etc/sysconfig/network-scripts/ifcfg-{1};'
               'ifup {1}; ip -o -4 a s {1} | grep -w {2}').format(
                   add_second_admin_ip, second_admin_if, second_admin_ip)
        logger.debug(
            'Trying to assign {0} IP to the {1} on master node...'.format(
                second_admin_ip, second_admin_if))

        result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip,
                                          cmd=cmd)
        assert_equal(result['exit_code'], 0,
                     ('Failed to assign second admin '
                      'IP address on master node: {0}').format(result))
        logger.debug('Done: {0}'.format(result['stdout']))

        # TODO for ssh manager
        multiple_networks_hacks.configure_second_admin_dhcp(
            self.ssh_manager.admin_ip, second_admin_if)
        multiple_networks_hacks.configure_second_admin_firewall(
            self.ssh_manager.admin_ip, second_admin_network,
            second_admin_netmask, second_admin_if, self.get_admin_node_ip())

    @logwrap
    def get_masternode_uuid(self):
        """Read the master node UID from nailgun's postgres database."""
        query_text = ("select master_node_uid "
                      "from master_node_settings limit 1;")
        return self.postgres_actions.run_query(db='nailgun', query=query_text)