Ejemplo n.º 1
0
 def backup_iscsi_initiator_info(self):
     """Copy the node's iSCSI initiator config to a local backup file.

     No-op unless the 'emc_vnx' plugin is enabled for the environment.
     """
     if not plugin.is_enabled(self.env, 'emc_vnx'):
         return
     bup_file_path = get_iscsi_bup_file_path(self.node)
     file_dir = os.path.dirname(bup_file_path)
     # Ensure the local backup directory exists before the SFTP download.
     if not os.path.exists(file_dir):
         os.makedirs(file_dir)
     ssh.sftp(self.node).get(magic_consts.ISCSI_CONFIG_PATH, bup_file_path)
Ejemplo n.º 2
0
 def backup_iscsi_initiator_info(self):
     """Fetch the node's iSCSI initiator configuration into a local backup.

     Does nothing when the 'emc_vnx' plugin is not enabled.
     """
     if not plugin.is_enabled(self.env, 'emc_vnx'):
         return
     backup_path = get_iscsi_bup_file_path(self.node)
     backup_dir = os.path.dirname(backup_path)
     if not os.path.exists(backup_dir):
         os.makedirs(backup_dir)
     sftp = ssh.sftp(self.node)
     sftp.get(magic_consts.ISCSI_CONFIG_PATH, backup_path)
Ejemplo n.º 3
0
 def restore_iscsi_initiator_info(self):
     """Restore the node's iSCSI initiator config from the local backup.

     No-op unless the 'emc_vnx' plugin is enabled.  Raises if the backup
     file created by backup_iscsi_initiator_info is missing.
     """
     if not plugin.is_enabled(self.env, 'emc_vnx'):
         return
     bup_file_path = get_iscsi_bup_file_path(self.node)
     if not os.path.exists(bup_file_path):
         raise Exception("Backup iscsi configuration is not present for "
                         "compute node %s" % str(self.node.id))
     ssh.sftp(self.node).put(bup_file_path, magic_consts.ISCSI_CONFIG_PATH)
     # Restart the dependent services so the restored config takes effect.
     for service in ["open-iscsi", "multipath-tools", "nova-compute"]:
         ssh.call(['service', service, 'restart'], node=self.node)
Ejemplo n.º 4
0
 def restore_iscsi_initiator_info(self):
     """Push the backed-up iSCSI initiator config back onto the node."""
     if not plugin.is_enabled(self.env, 'emc_vnx'):
         return
     backup_path = get_iscsi_bup_file_path(self.node)
     if not os.path.exists(backup_path):
         raise Exception("Backup iscsi configuration is not present for "
                         "compute node %s" % str(self.node.id))
     sftp = ssh.sftp(self.node)
     sftp.put(backup_path, magic_consts.ISCSI_CONFIG_PATH)
     # Restart the services that consume the restored configuration.
     for svc in ("open-iscsi", "multipath-tools", "nova-compute"):
         ssh.call(['service', svc, 'restart'], node=self.node)
Ejemplo n.º 5
0
def cleanup_environment(env_id):
    """Clean up an environment: drop compute upgrade levels on all nodes,
    then run the clean_env.py helper on a controller to purge stale host
    records.
    """
    env = objects.Environment(env_id)

    nodes = env.get_all_nodes()
    for node in nodes:
        node_util.remove_compute_upgrade_levels(node)

    controller = env_util.get_one_controller(env)
    sftp = ssh.sftp(controller)
    admin_pass = env_util.get_admin_password(env, controller)
    script_filename = 'clean_env.py'

    with ssh.tempdir(controller) as tempdir:
        script_src_filename = os.path.join(magic_consts.CWD, "helpers",
                                           script_filename)
        script_dst_filename = os.path.join(tempdir, script_filename)
        sftp.put(script_src_filename, script_dst_filename)

        # NOTE(review): admin_pass is interpolated unquoted into a shell
        # command; a password containing shell metacharacters would break
        # this -- confirm upstream guarantees a safe charset.
        command = [
            'sh',
            '-c',
            '. /root/openrc; export OS_PASSWORD={0}; python {1}'.format(
                admin_pass, script_dst_filename),
        ]

        # The helper reads "<short hostname>\n<fqdn>\n" pairs on stdin.
        with ssh.popen(command, node=controller, stdin=ssh.PIPE) as proc:
            roles = ["controller", "compute"]
            for node in env_util.get_nodes(env, roles):
                data = "{0}\n{1}\n".format(node.data['fqdn'].split('.')[0],
                                           node.data['fqdn'])
                proc.stdin.write(data)
Ejemplo n.º 6
0
def disable_apis(env):
    """Switch HAProxy on every controller into maintenance mode.

    Ensures haproxy.cfg contains a "backend maintenance" section and that
    the stats socket has "level admin", appends a "use_backend maintenance"
    directive to every tcp-mode config in conf.d, then restarts p_haproxy
    via Pacemaker.
    """
    controllers = list(env_util.get_controllers(env))
    maintenance_line = "backend maintenance"
    # Raw strings: "\s" must reach the regex engine as a regex escape, not
    # be treated as a (deprecated) Python string escape.
    stats_socket_re = re.compile(
        r"stats\s+socket\s+/var/lib/haproxy/stats(?!.*level admin)")
    mode_tcp_re = re.compile(r"mode\s+tcp")
    use_backend_line = "  use_backend maintenance if TRUE"
    for node in controllers:
        sftp = ssh.sftp(node)
        sftp.chdir("/etc/haproxy")
        with ssh.update_file(sftp, "haproxy.cfg") as (old, new):
            found_maint_line = False
            for line in old:
                if maintenance_line in line:
                    found_maint_line = True
                # Enable "level admin" on the stats socket if not yet set.
                line = stats_socket_re.sub(r"\g<0> level admin", line)
                new.write(line)
            if not found_maint_line:
                new.write(maintenance_line)
        sftp.chdir("/etc/haproxy/conf.d")
        for f in sftp.listdir():
            with ssh.update_file(sftp, f) as (old, new):
                contents = old.read()
                # Leave non-tcp configs untouched.
                if not mode_tcp_re.search(contents):
                    raise ssh.DontUpdateException
                new.write(contents)
                if not contents.endswith("\n"):
                    new.write("\n")
                new.write(use_backend_line)
        ssh.call(["crm", "resource", "restart", "p_haproxy"], node=node)
Ejemplo n.º 7
0
def remove_compute_upgrade_levels(node):
    """Strip any 'compute=' upgrade-level line from nova.conf on *node*."""
    sftp = ssh.sftp(node)
    with ssh.update_file(sftp, '/etc/nova/nova.conf') as (old, new):
        new.writelines(
            line for line in old if not line.startswith("compute="))
Ejemplo n.º 8
0
def add_compute_upgrade_levels(node, version):
    """Set compute=<version> under [upgrade_levels] in nova.conf on *node*.

    Inserts the option right after the section header, skipping any
    pre-existing compute= lines inside the section so the setting is never
    duplicated.  If the section is absent it is appended at the end of the
    file (the original silently wrote nothing in that case).
    """
    sftp = ssh.sftp(node)
    with ssh.update_file(sftp, '/etc/nova/nova.conf') as (old, new):
        section_missing = True
        in_section = False
        for line in old:
            if line.startswith("[upgrade_levels]"):
                section_missing = False
                in_section = True
                new.write(line)
                new.write("compute={0}\n".format(version))
                continue
            # Any new section header terminates [upgrade_levels].
            if in_section and line.startswith("["):
                in_section = False
            if in_section and line.startswith("compute="):
                # Drop the stale value; the fresh one was written above.
                continue
            new.write(line)
        if section_missing:
            new.write("[upgrade_levels]\ncompute={0}\n".format(version))
Ejemplo n.º 9
0
def disable_apis(env):
    """Put the OpenStack APIs behind a HAProxy maintenance backend.

    On each controller: add a "backend maintenance" section and "level
    admin" on the stats socket in haproxy.cfg, append a "use_backend
    maintenance" directive to every tcp-mode config in conf.d, and restart
    p_haproxy through Pacemaker.
    """
    controllers = list(env_util.get_controllers(env))
    maintenance_line = 'backend maintenance'
    # Raw strings keep "\s" as a regex escape instead of a (deprecated)
    # Python string escape.
    stats_socket_re = re.compile(r'stats\s+socket\s+/var/lib/haproxy/stats'
                                 r'(?!.*level admin)')
    mode_tcp_re = re.compile(r'mode\s+tcp')
    use_backend_line = '  use_backend maintenance if TRUE'
    for node in controllers:
        sftp = ssh.sftp(node)
        sftp.chdir('/etc/haproxy')
        with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
            found_maint_line = False
            for line in old:
                if maintenance_line in line:
                    found_maint_line = True
                line = stats_socket_re.sub(r'\g<0> level admin', line)
                new.write(line)
            if not found_maint_line:
                new.write(maintenance_line)
        sftp.chdir('/etc/haproxy/conf.d')
        for f in sftp.listdir():
            with ssh.update_file(sftp, f) as (old, new):
                contents = old.read()
                # Leave non-tcp configs untouched.
                if not mode_tcp_re.search(contents):
                    raise ssh.DontUpdateException
                new.write(contents)
                if not contents.endswith('\n'):
                    new.write('\n')
                new.write(use_backend_line)
        ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
Ejemplo n.º 10
0
def write_content_to_tmp_file_on_node(node, content, directory, template):
    """Create a temp file on *node* via mktemp, write *content* into it,
    and return the remote path of the new file.
    """
    mktemp_cmd = ["mktemp", "-p", directory, "-t", template]
    tmp_name = ssh.call_output(mktemp_cmd, node=node).strip()
    with ssh.sftp(node).open(tmp_name, "w") as fp:
        fp.write(content)
    return tmp_name
Ejemplo n.º 11
0
def disable_apis(env):
    """Redirect API traffic on all controllers to a maintenance backend.

    Same procedure on each controller: patch haproxy.cfg (maintenance
    backend + stats socket admin level), patch every tcp-mode conf.d file
    with a "use_backend maintenance" directive, restart p_haproxy.
    """
    controllers = list(env_util.get_controllers(env))
    maintenance_line = 'backend maintenance'
    # Raw strings so "\s" is a regex escape rather than a (deprecated)
    # string escape.
    stats_socket_re = re.compile(r'stats\s+socket\s+/var/lib/haproxy/stats'
                                 r'(?!.*level admin)')
    mode_tcp_re = re.compile(r'mode\s+tcp')
    use_backend_line = '  use_backend maintenance if TRUE'
    for node in controllers:
        sftp = ssh.sftp(node)
        sftp.chdir('/etc/haproxy')
        with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
            found_maint_line = False
            for line in old:
                if maintenance_line in line:
                    found_maint_line = True
                line = stats_socket_re.sub(r'\g<0> level admin', line)
                new.write(line)
            if not found_maint_line:
                new.write(maintenance_line)
        sftp.chdir('/etc/haproxy/conf.d')
        for f in sftp.listdir():
            with ssh.update_file(sftp, f) as (old, new):
                contents = old.read()
                # Non-tcp configs are skipped unchanged.
                if not mode_tcp_re.search(contents):
                    raise ssh.DontUpdateException
                new.write(contents)
                if not contents.endswith('\n'):
                    new.write('\n')
                new.write(use_backend_line)
        ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
Ejemplo n.º 12
0
def cleanup_environment(env_id):
    """Run the clean_env.py helper on a controller to purge stale host
    records for every controller/compute node of the environment.
    """
    env = objects.Environment(env_id)

    controller = env_util.get_one_controller(env)
    sftp = ssh.sftp(controller)
    admin_pass = env_util.get_admin_password(env, controller)
    script_filename = 'clean_env.py'

    with ssh.tempdir(controller) as tempdir:
        script_src_filename = os.path.join(
            magic_consts.CWD, "helpers", script_filename)
        script_dst_filename = os.path.join(tempdir, script_filename)
        sftp.put(script_src_filename, script_dst_filename)

        # NOTE(review): admin_pass goes unquoted into a shell command line;
        # confirm it cannot contain shell metacharacters.
        command = [
            'sh', '-c', '. /root/openrc; export OS_PASSWORD={0}; python {1}'
            .format(admin_pass, script_dst_filename),
        ]

        # The helper reads "<short hostname>\n<fqdn>\n" pairs on stdin.
        with ssh.popen(command, node=controller, stdin=ssh.PIPE) as proc:
            roles = ["controller", "compute"]
            for node in env_util.get_nodes(env, roles):
                data = "{0}\n{1}\n".format(node.data['fqdn'].split('.')[0],
                                           node.data['fqdn'])
                proc.stdin.write(data)
Ejemplo n.º 13
0
def zabbix_snmptrapd_settings(astute, attrs):
    """Fill *attrs* with the snmptrapd community string read from the node.

    Reads /etc/snmp/snmptrapd.conf on the node identified by
    astute['uid'] and extracts the community name from the authCommunity
    directive, then marks the settings as enabled.
    """
    node = node_obj.Node(astute['uid'])
    with ssh.sftp(node).open('/etc/snmp/snmptrapd.conf') as f:
        data = f.read()
        template = re.compile(r"authCommunity\s[a-z-,]+\s([a-z-]+)")
        # NOTE(review): raises AttributeError if no authCommunity line
        # matches -- presumably the config always has one; verify.
        match = template.search(data)
        attrs['community']['value'] = match.group(1)
        attrs['metadata']['enabled'] = True
Ejemplo n.º 14
0
def zabbix_snmptrapd_settings(astute, attrs):
    """Populate *attrs* from the node's snmptrapd authCommunity directive."""
    node = node_obj.Node(astute['uid'])
    with ssh.sftp(node).open('/etc/snmp/snmptrapd.conf') as conf:
        contents = conf.read()
    community_re = re.compile(r"authCommunity\s[a-z-,]+\s([a-z-]+)")
    found = community_re.search(contents)
    attrs['community']['value'] = found.group(1)
    attrs['metadata']['enabled'] = True
Ejemplo n.º 15
0
def zabbix_snmptrapd_settings(astute):
    """Return zabbix snmptrapd attrs derived from the node's config file."""
    node = node_obj.Node(astute['uid'])
    with ssh.sftp(node).open('/etc/snmp/snmptrapd.conf') as conf:
        contents = conf.read()
    found = re.search(r"authCommunity\s[a-z-,]+\s([a-z-]+)", contents)
    return {'community': {'value': found.group(1)},
            'metadata': {'enabled': True}}
Ejemplo n.º 16
0
def is_live_migration_supported(node):
    """Report whether nova.conf on *node* sets a live_migration_flag
    containing VIR_MIGRATE_LIVE.
    """
    with ssh.sftp(node).open('/etc/nova/nova.conf') as config:
        return any(
            line.strip().startswith("live_migration_flag")
            and "VIR_MIGRATE_LIVE" in line
            for line in config)
Ejemplo n.º 17
0
def _activate_upstart_instead_sysvinit(node, db_path, node_db_path):
    """Swap the init-system marker in *node_db_path* from sysvinit to
    upstart.

    *db_path* is accepted for interface compatibility but unused here.
    """
    sftp = ssh.sftp(node)

    try:
        # Remove the sysvinit marker if present; a missing file is fine.
        sftp.remove(os.path.join(node_db_path, 'sysvinit'))
    except IOError:
        pass
    # Touch an empty 'upstart' marker file.
    sftp.open(os.path.join(node_db_path, 'upstart'), 'w').close()
Ejemplo n.º 18
0
def delete_fuel_resources(env):
    """Upload the resource-deletion helper to one controller and run it
    with the OpenStack credentials from /root/openrc.
    """
    node = get_one_controller(env)
    sftp = ssh.sftp(node)
    sftp.put(
        os.path.join(magic_consts.CWD, "helpers/delete_fuel_resources.py"),
        "/tmp/delete_fuel_resources.py",
    )
    ssh.call(
        ["sh", "-c", ". /root/openrc; python /tmp/delete_fuel_resources.py"],
        node=node,
    )
Ejemplo n.º 19
0
def change_repositories(node, repos):
    """Replace the apt sources/preferences on *node* with *repos*, then
    refresh the package index.
    """
    ssh.remove_all_files_from_dirs(['/etc/apt/sources.list.d',
                                    '/etc/apt/preferences.d'], node)
    sftp = ssh.sftp(node)
    for repo in repos:
        src_name, src_content = apt.create_repo_source(repo)
        ssh.write_content_to_file(sftp, src_name, src_content)
        if repo['priority']:
            pref_name, pref_content = apt.create_repo_preferences(repo)
            ssh.write_content_to_file(sftp, pref_name, pref_content)
    ssh.call(['apt-get', 'update'], node=node)
Ejemplo n.º 20
0
def delete_fuel_resources(env):
    """Run the delete_fuel_resources.py helper on a controller node."""
    controller = get_one_controller(env)
    remote_script = "/tmp/delete_fuel_resources.py"
    local_script = os.path.join(
        magic_consts.CWD, "helpers/delete_fuel_resources.py")
    ssh.sftp(controller).put(local_script, remote_script)
    ssh.call(
        ["sh", "-c", ". /root/openrc; python /tmp/delete_fuel_resources.py"],
        node=controller,
    )
Ejemplo n.º 21
0
 def evacuate_host(self):
     """Upload and run bin/host_evacuation.sh on a controller to migrate
     workloads off this node.
     """
     controller = env_util.get_one_controller(self.env)
     with ssh.tempdir(controller) as tempdir:
         local_path = os.path.join(magic_consts.CWD, 'bin',
                                   'host_evacuation.sh')
         remote_path = os.path.join(tempdir, 'host_evacuation.sh')
         sftp = ssh.sftp(controller)
         sftp.put(local_path, remote_path)
         # NOTE(review): S_IRWXO grants rwx to "others" only (mode 007);
         # S_IRWXU looks intended.  Works today presumably because root
         # bypasses permission checks -- confirm before changing.
         sftp.chmod(remote_path, stat.S_IRWXO)
         ssh.call(
             [remote_path, 'node-{0}'.format(self.node.data['id'])],
             node=controller,
         )
Ejemplo n.º 22
0
 def evacuate_host(self):
     """Run the host_evacuation.sh helper against this node via a
     controller, using a remote temporary directory for the script.
     """
     controller = env_util.get_one_controller(self.env)
     with ssh.tempdir(controller) as tempdir:
         script_name = 'host_evacuation.sh'
         src = os.path.join(magic_consts.CWD, 'bin', script_name)
         dst = os.path.join(tempdir, script_name)
         sftp = ssh.sftp(controller)
         sftp.put(src, dst)
         sftp.chmod(dst, stat.S_IRWXO)
         ssh.call(
             [dst, 'node-{0}'.format(self.node.data['id'])],
             node=controller,
         )
Ejemplo n.º 23
0
def start_upstart_services(env):
    """Start the services recorded in /root/services_list on each
    controller.

    The list is written by stop_upstart_services; if it is missing the
    IOError from sftp.open propagates to the caller (the original wrapped
    the open in a no-op ``except IOError: raise`` handler).
    """
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        sftp = ssh.sftp(node)
        with sftp.open('/root/services_list') as svc_file:
            to_start = svc_file.read().splitlines()
        for service in to_start:
            ssh.call(['start', service], node=node)
Ejemplo n.º 24
0
def start_upstart_services(env):
    """Start every service listed in /root/services_list on each
    controller; a missing list file raises IOError.
    """
    for node in env_util.get_controllers(env):
        sftp = ssh.sftp(node)
        # 'except IOError: raise' is a pure re-raise, so the open is
        # equivalent to letting the error propagate.
        with sftp.open('/root/services_list') as svc_file:
            services = svc_file.read().splitlines()
        for svc in services:
            ssh.call(['start', svc], node=node)
Ejemplo n.º 25
0
def ceph_set_new_mons(seed_env, filename, conf_filename, db_path):
    """Rebuild the Ceph monitor cluster on the seed environment
    controllers.

    Extracts ceph.conf from the backup tarball *filename*, rewrites its
    host/address entries for the seed controllers, re-seeds each
    monitor's database from *db_path*, injects a freshly generated
    monmap, then starts all monitors and imports the bootstrap-osd key.
    """
    nodes = list(env_util.get_controllers(seed_env))
    hostnames = map(short_hostname, node_util.get_hostnames(nodes))
    mgmt_ips = map(remove_mask, node_util.get_ips('management', nodes))

    with contextlib.closing(tarfile.open(filename)) as f:
        conf = f.extractfile(conf_filename).read()
        conf = replace_addresses(conf, hostnames, mgmt_ips)

    fsid = get_fsid(conf)
    monmaptool_cmd = ['monmaptool', '--fsid', fsid, '--clobber', '--create']
    for node_hostname, node_ip in itertools.izip(hostnames, mgmt_ips):
        monmaptool_cmd += ['--add', node_hostname, node_ip]

    for node, node_hostname in itertools.izip(nodes, hostnames):
        node_db_path = "/var/lib/ceph/mon/ceph-{0}".format(node_hostname)
        node_conf = replace_host(conf, node_hostname)
        try:
            # The monitor may not be running; stopping is best-effort.
            ssh.call(['stop', 'ceph-mon', "id={0}".format(node_hostname)],
                     node=node)
        except subprocess.CalledProcessError:
            pass
        ssh.call(['rm', '-rf', node_db_path], node=node)
        node_util.untar_files(filename, node)
        sftp = ssh.sftp(node)
        with sftp.open(conf_filename, 'w') as f:
            f.write(node_conf)
        ssh.call(['mv', db_path, node_db_path], node=node)

        # Switch the init-system marker from sysvinit to upstart.
        sysvinit = os.path.join(node_db_path, 'sysvinit')
        try:
            sftp.remove(sysvinit)
        except IOError:
            pass
        upstart = os.path.join(node_db_path, 'upstart')
        sftp.open(upstart, 'w').close()

        with ssh.tempdir(node) as tempdir:
            monmap_filename = os.path.join(tempdir, 'monmap')
            ssh.call(monmaptool_cmd + [monmap_filename], node=node)
            ssh.call([
                'ceph-mon', '-i', node_hostname, '--inject-monmap',
                monmap_filename
            ],
                     node=node)

    # Start all monitors only after every database has been re-seeded.
    for node, node_hostname in itertools.izip(nodes, hostnames):
        ssh.call(['start', 'ceph-mon', "id={0}".format(node_hostname)],
                 node=node)
    import_bootstrap_osd(nodes[0])
Ejemplo n.º 26
0
def get_parameters(node, filename, parameters_to_get, ensure=True):
    """Read *filename* on *node* and extract *parameters_to_get*.

    With ensure=True, raise AbsentParametersError listing every requested
    parameter that was not found in the file.
    """
    with ssh.sftp(node).open(filename) as fp:
        parameters = helpers.get_parameters(fp, parameters_to_get)
    if ensure:
        missing = set(parameters_to_get) - set(parameters)
        if missing:
            flat_parameters = [
                "/".join(param)
                for name in missing
                for param in parameters_to_get[name]
            ]
            raise AbsentParametersError(
                node.data["id"], filename, flat_parameters)
    return parameters
Ejemplo n.º 27
0
def save_port_lnx(node, bridge, port):
    """Ensure *port* appears on the bridge_ports line of the bridge's
    ifcfg file on *node*, appending a fresh line if none exists.
    """
    bridge_file = os.path.join(
        '/etc/network/interfaces.d', 'ifcfg-{0}'.format(bridge))
    sftp = ssh.sftp(node)
    with ssh.update_file(sftp, bridge_file) as (old, new):
        seen_bridge_ports = False
        for line in old:
            if line.startswith('bridge_ports'):
                seen_bridge_ports = True
                if port['name'] not in line:
                    # Prepend the port to the existing port list.
                    keyword, _, rest = line.rstrip().partition(' ')
                    line = "{0} {1} {2}\n".format(keyword, port['name'], rest)
            new.write(line)
        if not seen_bridge_ports:
            new.write('bridge_ports {0}\n'.format(port['name']))
Ejemplo n.º 28
0
 def postdeploy(self):
     """Post-deployment fixups: write the nova service tenant id into
     neutron.conf, restart neutron-server, and (for isolated nodes)
     restore the default gateway.
     """
     # From neutron_update_admin_tenant_id
     sftp = ssh.sftp(self.node)
     with ssh.update_file(sftp, '/etc/neutron/neutron.conf') as (old, new):
         for line in old:
             if line.startswith('nova_admin_tenant_id'):
                 new.write('nova_admin_tenant_id = {0}\n'.format(
                     self.service_tenant_id))
             else:
                 new.write(line)
     ssh.call(['restart', 'neutron-server'], node=self.node)
     if self.isolated:
         # From restore_default_gateway
         ssh.call(['ip', 'route', 'delete', 'default'], node=self.node)
         ssh.call(['ip', 'route', 'add', 'default', 'via', self.gateway],
                  node=self.node)
Ejemplo n.º 29
0
def applied_repos(nodes, preference_priority, seed_repos):
    """Temporarily apply seed repo sources/preferences to *nodes*.

    Generator intended for use as a context manager: on entry it writes
    the generated preference and source files to every node; on exit
    (even after an exception) it removes every file it created.
    """
    node_file_to_clear_list = []
    preference_content = generate_preference_pin(
        seed_repos, preference_priority)
    source_content = generate_source_content(seed_repos)
    try:
        for node in nodes:
            node_file_to_clear_list.append(
                (node, apply_preference_for_node(node, preference_content)))
            node_file_to_clear_list.append(
                (node, apply_source_for_node(node, source_content)))
        yield
    finally:
        # Clean up everything written above, in recorded order.
        for node, file_name_to_remove in node_file_to_clear_list:
            sftp = ssh.sftp(node)
            sftp.unlink(file_name_to_remove)
Ejemplo n.º 30
0
def ceph_set_new_mons(seed_env, filename, conf_filename, db_path):
    """Re-seed the Ceph monitors on the seed environment's controllers.

    Rewrites ceph.conf (from the backup tarball *filename*) for the seed
    controllers, replaces each monitor database with *db_path*, injects a
    regenerated monmap, then starts the monitors and imports the
    bootstrap-osd key.
    """
    nodes = list(env_util.get_controllers(seed_env))
    hostnames = map(short_hostname, node_util.get_hostnames(nodes))
    mgmt_ips = map(remove_mask, node_util.get_ips('management', nodes))

    with contextlib.closing(tarfile.open(filename)) as f:
        conf = f.extractfile(conf_filename).read()
        conf = replace_addresses(conf, hostnames, mgmt_ips)

    fsid = get_fsid(conf)
    monmaptool_cmd = ['monmaptool', '--fsid', fsid, '--clobber', '--create']
    for node_hostname, node_ip in itertools.izip(hostnames, mgmt_ips):
        monmaptool_cmd += ['--add', node_hostname, node_ip]

    for node, node_hostname in itertools.izip(nodes, hostnames):
        node_db_path = "/var/lib/ceph/mon/ceph-{0}".format(node_hostname)
        node_conf = replace_host(conf, node_hostname)
        try:
            # Best-effort stop; the monitor may not be running.
            ssh.call(['stop', 'ceph-mon', "id={0}".format(node_hostname)],
                     node=node)
        except subprocess.CalledProcessError:
            pass
        ssh.call(['rm', '-rf', node_db_path], node=node)
        node_util.untar_files(filename, node)
        sftp = ssh.sftp(node)
        with sftp.open(conf_filename, 'w') as f:
            f.write(node_conf)
        ssh.call(['mv', db_path, node_db_path], node=node)

        # Switch the init-system marker from sysvinit to upstart.
        sysvinit = os.path.join(node_db_path, 'sysvinit')
        try:
            sftp.remove(sysvinit)
        except IOError:
            pass
        upstart = os.path.join(node_db_path, 'upstart')
        sftp.open(upstart, 'w').close()

        with ssh.tempdir(node) as tempdir:
            monmap_filename = os.path.join(tempdir, 'monmap')
            ssh.call(monmaptool_cmd + [monmap_filename], node=node)
            ssh.call(['ceph-mon', '-i', node_hostname, '--inject-monmap',
                      monmap_filename], node=node)

    # Start the monitors only after all databases are in place.
    for node, node_hostname in itertools.izip(nodes, hostnames):
        ssh.call(['start', 'ceph-mon', "id={0}".format(node_hostname)],
                 node=node)
    import_bootstrap_osd(nodes[0])
Ejemplo n.º 31
0
def enable_apis(env):
    """Undo disable_apis: strip the maintenance backend from haproxy.cfg
    and the use_backend directives from conf.d, then restart p_haproxy.
    """
    maintenance_line = 'backend maintenance'
    use_backend_line = '  use_backend maintenance if TRUE'
    for node in list(env_util.get_controllers(env)):
        sftp = ssh.sftp(node)
        sftp.chdir('/etc/haproxy')
        with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
            new.writelines(
                line for line in old if maintenance_line not in line)
        sftp.chdir('/etc/haproxy/conf.d')
        for conf in sftp.listdir():
            with ssh.update_file(sftp, conf) as (old, new):
                new.writelines(
                    line for line in old if use_backend_line not in line)
        ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
Ejemplo n.º 32
0
def stop_upstart_services(env):
    """Stop OpenStack upstart services on all controllers, recording them.

    The set of stopped services is persisted to /root/services_list so
    start_upstart_services can later restart exactly the same set.  The
    temp-file + rename makes the list write atomic.
    """
    controllers = list(env_util.get_controllers(env))
    # Raw string: "\s" must be a regex escape, not a (deprecated) Python
    # string escape.
    service_re = re.compile(
        r"^((?:%s)[^\s]*).*start/running"
        % ("|".join(magic_consts.OS_SERVICES),), re.MULTILINE)
    for node in controllers:
        sftp = ssh.sftp(node)
        try:
            svc_file = sftp.open("/root/services_list")
        except IOError:
            # First run: discover the running services via initctl.
            with sftp.open("/root/services_list.tmp", "w") as svc_file:
                initctl_out = ssh.call_output(["initctl", "list"], node=node)
                to_stop = []
                for match in service_re.finditer(initctl_out):
                    service = match.group(1)
                    to_stop.append(service)
                    svc_file.write(service + "\n")
            sftp.rename("/root/services_list.tmp", "/root/services_list")
        else:
            with svc_file:
                to_stop = svc_file.read().splitlines()
        for service in to_stop:
            ssh.call(["stop", service], node=node)
Ejemplo n.º 33
0
def add_compute_upgrade_levels(node, version):
    """Set compute=<version> under [upgrade_levels] in nova.conf on *node*.

    Inserts the option right after the section header, skipping any
    pre-existing compute= lines inside the section so the setting is not
    duplicated.  If the section is absent it is appended at the end of
    the file.
    """
    sftp = ssh.sftp(node)
    with ssh.update_file(sftp, '/etc/nova/nova.conf') as (old, new):
        add_upgrade_levels = True
        in_section = False
        for line in old:
            if line.startswith("[upgrade_levels]"):
                add_upgrade_levels = False
                in_section = True
                new.write(line)
                new.write("compute={0}\n".format(version))
                continue
            # Any new section header terminates the [upgrade_levels] scope.
            if in_section and line.startswith("["):
                in_section = False
            if in_section and line.startswith("compute="):
                LOG.warning(
                    "Skipping line so not to duplicate compute "
                    "upgrade level setting: %s" % line.rstrip())
                continue
            new.write(line)
        if add_upgrade_levels:
            new.write("[upgrade_levels]\ncompute={0}\n".format(version))
Ejemplo n.º 34
0
    def postdeploy(self):
        """Post-deployment fixups for the node.

        Writes the nova service tenant id into neutron.conf, adds compute
        upgrade levels and restarts nova services when upgrading from
        6.1, restarts neutron-server, and restores the default gateway on
        isolated nodes.
        """
        # From neutron_update_admin_tenant_id
        sftp = ssh.sftp(self.node)
        with ssh.update_file(sftp, '/etc/neutron/neutron.conf') as (old, new):
            for line in old:
                if line.startswith('nova_admin_tenant_id'):
                    new.write('nova_admin_tenant_id = {0}\n'.format(
                        self.service_tenant_id))
                else:
                    new.write(line)
        orig_version = self.orig_env.data["fuel_version"]
        if orig_version == "6.1":
            openstack_release = magic_consts.VERSIONS[orig_version]
            node_util.add_compute_upgrade_levels(self.node, openstack_release)

            # Space-separated list of running nova upstart jobs.
            nova_services = ssh.call_output(
                ["bash", "-c",
                 "initctl list | "
                 "awk '/nova/ && /start/ {print $1}' | tr '\n' ' '"],
                node=self.node
            )

            for nova_service in nova_services.split():
                ssh.call(["service", nova_service, "restart"], node=self.node)

        ssh.call(['restart', 'neutron-server'], node=self.node)
        if self.isolated and self.gateway:
            # From restore_default_gateway
            LOG.info("Deleting default route at node %s",
                     self.node.id)
            try:
                ssh.call(['ip', 'route', 'delete', 'default'], node=self.node)
            except subprocess.CalledProcessError as exc:
                LOG.warn("Cannot delete default route at node %s: %s",
                         self.node.id, exc.args[0])
            LOG.info("Set default route at node %s: %s",
                     self.node.id, self.gateway)
            ssh.call(['ip', 'route', 'add', 'default', 'via', self.gateway],
                     node=self.node)
Ejemplo n.º 35
0
 def postdeploy(self):
     """Post-deployment fixups: update neutron.conf's service tenant id,
     restart neutron-server, and restore the default gateway on isolated
     nodes.
     """
     # From neutron_update_admin_tenant_id
     sftp = ssh.sftp(self.node)
     with ssh.update_file(sftp, '/etc/neutron/neutron.conf') as (old, new):
         for line in old:
             if line.startswith('nova_admin_tenant_id'):
                 new.write('nova_admin_tenant_id = {0}\n'.format(
                     self.service_tenant_id))
             else:
                 new.write(line)
     ssh.call(['restart', 'neutron-server'], node=self.node)
     if self.isolated and self.gateway:
         # From restore_default_gateway
         LOG.info("Deleting default route at node %s", self.node.id)
         try:
             ssh.call(['ip', 'route', 'delete', 'default'], node=self.node)
         except subprocess.CalledProcessError as exc:
             # Best-effort: the route may already be absent.
             LOG.warn("Cannot delete default route at node %s: %s",
                      self.node.id, exc.args[0])
         LOG.info("Set default route at node %s: %s", self.node.id,
                  self.gateway)
         ssh.call(['ip', 'route', 'add', 'default', 'via', self.gateway],
                  node=self.node)
Ejemplo n.º 36
0
def cleanup_environment(env_id):
    """Run clean_env.py on a controller to purge stale host records.

    Feeds each controller/compute node's FQDN to the helper on stdin and
    removes the uploaded script afterwards.
    """
    env = objects.Environment(env_id)

    controller = env_util.get_one_controller(env)
    sftp = ssh.sftp(controller)

    script_filename = 'clean_env.py'
    script_dst_filename = '/tmp/{0}'.format(script_filename)

    sftp.put(
        os.path.join(magic_consts.CWD, "helpers/{0}".format(script_filename)),
        script_dst_filename,
    )

    # NOTE(review): OS_PASSWORD is hard-coded to "admin" here, while other
    # variants of this function fetch the real admin password -- confirm
    # this default is acceptable.
    command = ['sh', '-c', '. /root/openrc; export OS_PASSWORD=admin; python '
               + script_dst_filename]

    with ssh.popen(command, node=controller, stdin=ssh.PIPE) as proc:
        roles = ["controller", "compute"]
        for node in env_util.get_nodes(env, roles):
            proc.stdin.write(node.data['fqdn']+"\n")

    ssh.call(['rm', '-f', script_dst_filename], node=controller)
Ejemplo n.º 37
0
def stop_upstart_services(env):
    """Stop OpenStack upstart services on all controllers, recording the
    stopped set in /root/services_list for a later restart.
    """
    controllers = list(env_util.get_controllers(env))
    # Raw string so "\s" reaches the regex engine instead of being a
    # (deprecated) Python string escape.
    service_re = re.compile(
        r"^((?:%s)[^\s]*).*start/running" %
        ("|".join(magic_consts.OS_SERVICES), ), re.MULTILINE)
    for node in controllers:
        sftp = ssh.sftp(node)
        try:
            svc_file = sftp.open('/root/services_list')
        except IOError:
            # No saved list yet: discover running services via initctl and
            # persist them atomically (temp file + rename).
            with sftp.open('/root/services_list.tmp', 'w') as svc_file:
                initctl_out = ssh.call_output(['initctl', 'list'], node=node)
                to_stop = []
                for match in service_re.finditer(initctl_out):
                    service = match.group(1)
                    to_stop.append(service)
                    svc_file.write(service + '\n')
            sftp.rename('/root/services_list.tmp', '/root/services_list')
        else:
            with svc_file:
                to_stop = svc_file.read().splitlines()
        for service in to_stop:
            ssh.call(['stop', service], node=node)
Ejemplo n.º 38
0
    def postdeploy(self):
        """Post-deployment fixups for the node.

        Updates neutron.conf's service tenant id, adds compute upgrade
        levels and restarts nova services when upgrading from 6.1,
        restarts neutron-server, and restores the default gateway on
        isolated nodes.
        """
        # From neutron_update_admin_tenant_id
        sftp = ssh.sftp(self.node)
        with ssh.update_file(sftp, '/etc/neutron/neutron.conf') as (old, new):
            for line in old:
                if line.startswith('nova_admin_tenant_id'):
                    new.write('nova_admin_tenant_id = {0}\n'.format(
                        self.service_tenant_id))
                else:
                    new.write(line)
        orig_version = self.orig_env.data["fuel_version"]
        if orig_version == "6.1":
            openstack_release = magic_consts.VERSIONS[orig_version]
            node_util.add_compute_upgrade_levels(self.node, openstack_release)

            # Space-separated list of running nova upstart jobs.
            nova_services = ssh.call_output([
                "bash", "-c", "initctl list | "
                "awk '/nova/ && /start/ {print $1}' | tr '\n' ' '"
            ],
                                            node=self.node)

            for nova_service in nova_services.split():
                ssh.call(["service", nova_service, "restart"], node=self.node)

        ssh.call(['restart', 'neutron-server'], node=self.node)
        if self.isolated and self.gateway:
            # From restore_default_gateway
            LOG.info("Deleting default route at node %s", self.node.id)
            try:
                ssh.call(['ip', 'route', 'delete', 'default'], node=self.node)
            except subprocess.CalledProcessError as exc:
                # Best-effort: the route may already be absent.
                LOG.warn("Cannot delete default route at node %s: %s",
                         self.node.id, exc.args[0])
            LOG.info("Set default route at node %s: %s", self.node.id,
                     self.gateway)
            ssh.call(['ip', 'route', 'add', 'default', 'via', self.gateway],
                     node=self.node)
Ejemplo n.º 39
0
 def postdeploy(self):
     """Post-deployment fixups: update neutron.conf's service tenant id,
     restart neutron-server, and restore the default gateway on isolated
     nodes.
     """
     # From neutron_update_admin_tenant_id
     sftp = ssh.sftp(self.node)
     with ssh.update_file(sftp, '/etc/neutron/neutron.conf') as (old, new):
         for line in old:
             if line.startswith('nova_admin_tenant_id'):
                 new.write('nova_admin_tenant_id = {0}\n'.format(
                     self.service_tenant_id))
             else:
                 new.write(line)
     ssh.call(['restart', 'neutron-server'], node=self.node)
     if self.isolated and self.gateway:
         # From restore_default_gateway
         LOG.info("Deleting default route at node %s",
                  self.node.id)
         try:
             ssh.call(['ip', 'route', 'delete', 'default'], node=self.node)
         except subprocess.CalledProcessError as exc:
             # Best-effort: the route may already be absent.
             LOG.warn("Cannot delete default route at node %s: %s",
                      self.node.id, exc.args[0])
         LOG.info("Set default route at node %s: %s",
                  self.node.id, self.gateway)
         ssh.call(['ip', 'route', 'add', 'default', 'via', self.gateway],
                  node=self.node)
Ejemplo n.º 40
0
def sync_glance_images(source_env_id, seed_env_id, seed_swift_ep):
    """Sync glance images from original ENV to seed ENV

    Images are compared by the ETag of their swift objects: mismatched
    images are deleted from the seed and treated as new, new images are
    downloaded on a source controller and transferred to the seed swift
    endpoint, and seed images with no source counterpart are deleted.

    NOTE(review): auth tokens are re-acquired immediately before groups
    of swift requests, presumably so long transfers do not outlive a
    token -- keep that ordering when modifying this function.

    Args:
        source_env_id (int): ID of original ENV.
        seed_env_id (int): ID of seed ENV.
        seed_swift_ep (str): endpoint's name where swift-proxy service is
                             listening on.

    Examples:
        sync_glance_images(2, 3, 'br-mgmt')
    """
    # set glance username
    glance_user = "******"
    # set swift container value
    container = "glance"
    # choose tenant
    tenant = "services"
    # get clusters by id
    source_env = environment_obj.Environment(source_env_id)
    seed_env = environment_obj.Environment(seed_env_id)
    # gather cics admin IPs
    source_node = next(env_util.get_controllers(source_env))
    seed_node = next(env_util.get_controllers(seed_env))
    # get cics yaml files
    source_yaml = env_util.get_astute_yaml(source_env, source_node)
    seed_yaml = env_util.get_astute_yaml(seed_env, seed_node)
    # get glance passwords
    source_glance_pass = get_glance_password(source_yaml)
    seed_glance_pass = get_glance_password(seed_yaml)
    # get seed node swift ip
    seed_swift_ip = get_endpoint_ip(seed_swift_ep, seed_yaml)
    # get service tenant id & lists of objects for source env
    source_token = get_auth_token(source_node, tenant, glance_user,
                                  source_glance_pass)
    source_swift_list = set(
        get_swift_objects(source_node, tenant, glance_user, source_glance_pass,
                          source_token, container))
    # get service tenant id & lists of objects for seed env
    seed_token = get_auth_token(seed_node, tenant, glance_user,
                                seed_glance_pass)
    seed_swift_list = set(
        get_swift_objects(seed_node, tenant, glance_user, seed_glance_pass,
                          seed_token, container))
    # get service tenant for seed env
    seed_tenant = env_util.get_service_tenant_id(seed_env)
    # check consistency of matched images
    source_token = get_auth_token(source_node, tenant, glance_user,
                                  source_glance_pass)
    seed_token = get_auth_token(seed_node, tenant, glance_user,
                                seed_glance_pass)
    for image in source_swift_list & seed_swift_list:
        source_obj_etag = get_object_property(source_node, tenant, glance_user,
                                              source_glance_pass, source_token,
                                              container, image, 'ETag')
        seed_obj_etag = get_object_property(seed_node, tenant, glance_user,
                                            seed_glance_pass, seed_token,
                                            container, image, 'ETag')
        if source_obj_etag != seed_obj_etag:
            # image should be resynced
            delete_image(seed_node, tenant, glance_user, seed_glance_pass,
                         seed_token, container, image)
            # Lazy %-args: let logging format only if the record is emitted.
            LOG.info("Swift %s image should be resynced", image)
            seed_swift_list.remove(image)
    # migrate new images
    for image in source_swift_list - seed_swift_list:
        # download image on source's node local drive
        source_token = get_auth_token(source_node, tenant, glance_user,
                                      source_glance_pass)
        download_image(source_node, tenant, glance_user, source_glance_pass,
                       source_token, container, image)
        # transfer image
        source_token = get_auth_token(source_node, tenant, glance_user,
                                      source_glance_pass)
        seed_token = get_auth_token(seed_node, tenant, glance_user,
                                    seed_glance_pass)
        transfer_image(source_node, tenant, glance_user, seed_glance_pass,
                       seed_token, container, image, seed_swift_ip,
                       seed_tenant)
        # remove transferred image
        ssh.sftp(source_node).remove(image)
    # delete outdated images
    for image in seed_swift_list - source_swift_list:
        token = get_auth_token(seed_node, tenant, glance_user,
                               seed_glance_pass)
        delete_image(seed_node, tenant, glance_user, seed_glance_pass, token,
                     container, image)
Ejemplo n.º 41
0
def sync_glance_images(source_env_id, seed_env_id, seed_swift_ep):
    """Sync glance images from original ENV to seed ENV

    Compares the glance swift containers of both environments by object
    ETag: mismatched images are removed from the seed and re-synced, new
    images are downloaded via a source controller and transferred to the
    seed swift endpoint, and images absent from the source are deleted
    from the seed.

    NOTE(review): tokens are re-fetched right before groups of swift
    calls, presumably to avoid expiry during long transfers -- preserve
    this ordering.

    Args:
        source_env_id (int): ID of original ENV.
        seed_env_id (int): ID of seed ENV.
        seed_swift_ep (str): endpoint's name where swift-proxy service is
                             listening on.

    Examples:
        sync_glance_images(2, 3, 'br-mgmt')
    """
    # set glance username
    glance_user = "******"
    # set swift container value
    container = "glance"
    # choose tenant
    tenant = "services"
    # get clusters by id
    source_env = environment_obj.Environment(source_env_id)
    seed_env = environment_obj.Environment(seed_env_id)
    # gather cics admin IPs
    source_node = next(env_util.get_controllers(source_env))
    seed_node = next(env_util.get_controllers(seed_env))
    # get cics yaml files
    source_yaml = env_util.get_astute_yaml(source_env, source_node)
    seed_yaml = env_util.get_astute_yaml(seed_env, seed_node)
    # get glance passwords
    source_glance_pass = get_glance_password(source_yaml)
    seed_glance_pass = get_glance_password(seed_yaml)
    # get seed node swift ip
    seed_swift_ip = get_endpoint_ip(seed_swift_ep, seed_yaml)
    # get service tenant id & lists of objects for source env
    source_token = get_auth_token(source_node, tenant, glance_user, source_glance_pass)
    source_swift_list = set(
        get_swift_objects(source_node, tenant, glance_user, source_glance_pass, source_token, container)
    )
    # get service tenant id & lists of objects for seed env
    seed_token = get_auth_token(seed_node, tenant, glance_user, seed_glance_pass)
    seed_swift_list = set(get_swift_objects(seed_node, tenant, glance_user, seed_glance_pass, seed_token, container))
    # get service tenant for seed env
    seed_tenant = env_util.get_service_tenant_id(seed_env)
    # check consistency of matched images
    source_token = get_auth_token(source_node, tenant, glance_user, source_glance_pass)
    seed_token = get_auth_token(seed_node, tenant, glance_user, seed_glance_pass)
    for image in source_swift_list & seed_swift_list:
        source_obj_etag = get_object_property(
            source_node, tenant, glance_user, source_glance_pass, source_token, container, image, "ETag"
        )
        seed_obj_etag = get_object_property(
            seed_node, tenant, glance_user, seed_glance_pass, seed_token, container, image, "ETag"
        )
        if source_obj_etag != seed_obj_etag:
            # image should be resynced
            delete_image(seed_node, tenant, glance_user, seed_glance_pass, seed_token, container, image)
            # Lazy %-args instead of eager interpolation (logging idiom).
            LOG.info("Swift %s image should be resynced", image)
            seed_swift_list.remove(image)
    # migrate new images
    for image in source_swift_list - seed_swift_list:
        # download image on source's node local drive
        source_token = get_auth_token(source_node, tenant, glance_user, source_glance_pass)
        download_image(source_node, tenant, glance_user, source_glance_pass, source_token, container, image)
        # transfer image
        source_token = get_auth_token(source_node, tenant, glance_user, source_glance_pass)
        seed_token = get_auth_token(seed_node, tenant, glance_user, seed_glance_pass)
        transfer_image(
            source_node, tenant, glance_user, seed_glance_pass, seed_token, container, image, seed_swift_ip, seed_tenant
        )
        # remove transferred image
        ssh.sftp(source_node).remove(image)
    # delete outdated images
    for image in seed_swift_list - source_swift_list:
        token = get_auth_token(seed_node, tenant, glance_user, seed_glance_pass)
        delete_image(seed_node, tenant, glance_user, seed_glance_pass, token, container, image)
Ejemplo n.º 42
0
def get_astute_yaml(env, node=None):
    """Fetch and parse ``/etc/astute.yaml`` from a node over SFTP.

    Defaults to one of the environment's controllers when *node* is not
    given. Returns the parsed YAML document as Python data.
    """
    if not node:
        node = get_one_controller(env)
    with ssh.sftp(node).open('/etc/astute.yaml') as f:
        data = f.read()
    # NOTE(review): yaml.load() without an explicit Loader is deprecated
    # and can instantiate arbitrary Python objects; yaml.safe_load would
    # be safer if astute.yaml uses only standard tags -- confirm before
    # changing.
    return yaml.load(data)
Ejemplo n.º 43
0
def change_fsid(conf_file_path, node, fsid):
    """Rewrite the ``fsid`` entry of a config file on *node* in place.

    Copies every line of *conf_file_path* unchanged except lines starting
    with ``fsid``, which are replaced with ``fsid = <fsid>``.
    """
    replacement = u"fsid = {0}\n".format(fsid)
    with ssh.update_file(ssh.sftp(node), conf_file_path) as (src, dst):
        for entry in src:
            dst.write(replacement if entry.startswith("fsid") else entry)
Ejemplo n.º 44
0
def get_astute_yaml(env, node=None):
    """Read ``/etc/astute.yaml`` from *node* and return its parsed
    contents.

    When *node* is not supplied, one of the environment's controllers is
    used instead.
    """
    target = node if node else get_one_controller(env)
    with ssh.sftp(target).open('/etc/astute.yaml') as stream:
        raw = stream.read()
    # NOTE(review): yaml.load without a Loader is deprecated/unsafe for
    # untrusted input; consider yaml.safe_load -- confirm tag usage first.
    return yaml.load(raw)