Example #1
def disable_apis(env):
    controllers = list(env_util.get_controllers(env))
    maintenance_line = 'backend maintenance'
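    # match the HAProxy stats socket line only if 'level admin' is not
    # already there, so re-running the substitution below stays idempotent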
    stats_socket_re = re.compile(r'stats\s+socket\s+/var/lib/haproxy/stats'
                                 '(?!.*level admin)')
    mode_tcp_re = re.compile(r'mode\s+tcp')
    use_backend_line = '  use_backend maintenance if TRUE'
    for node in controllers:
        sftp = ssh.sftp(node)
        sftp.chdir('/etc/haproxy')
        with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
            found_maint_line = False
            for line in old:
                if maintenance_line in line:
                    found_maint_line = True
                line = stats_socket_re.sub(r'\g<0> level admin', line)
                new.write(line)
            if not found_maint_line:
                new.write(maintenance_line)
        sftp.chdir('/etc/haproxy/conf.d')
        for f in sftp.listdir():
            with ssh.update_file(sftp, f) as (old, new):
                contents = old.read()
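                # only rewrite conf.d files that define TCP-mode backends;
                # DontUpdateException presumably tells ssh.update_file to
                # leave the file unchanged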
                if not mode_tcp_re.search(contents):
                    raise ssh.DontUpdateException
                new.write(contents)
                if not contents.endswith('\n'):
                    new.write('\n')
                new.write(use_backend_line)
        ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
Example #2
def disable_apis(env):
    controllers = list(env_util.get_controllers(env))
    maintenance_line = 'backend maintenance'
    stats_socket_re = re.compile(r'stats\s+socket\s+/var/lib/haproxy/stats'
                                 '(?!.*level admin)')
    mode_tcp_re = re.compile(r'mode\s+tcp')
    use_backend_line = '  use_backend maintenance if TRUE'
    for node in controllers:
        sftp = ssh.sftp(node)
        sftp.chdir('/etc/haproxy')
        with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
            found_maint_line = False
            for line in old:
                if maintenance_line in line:
                    found_maint_line = True
                line = stats_socket_re.sub(r'\g<0> level admin', line)
                new.write(line)
            if not found_maint_line:
                new.write(maintenance_line)
        sftp.chdir('/etc/haproxy/conf.d')
        for f in sftp.listdir():
            with ssh.update_file(sftp, f) as (old, new):
                contents = old.read()
                if not mode_tcp_re.search(contents):
                    raise ssh.DontUpdateException
                new.write(contents)
                if not contents.endswith('\n'):
                    new.write('\n')
                new.write(use_backend_line)
        ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
Example #3
def disconnect_networks(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        deployment_info = env.get_default_facts('deployment',
                                                nodes=[node.data['id']])
        for info in deployment_info:
            network.delete_patch_ports(node, info)
Example #4
def upgrade_control_plane(orig_id, seed_id):
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    controllers = list(env_util.get_controllers(seed_env))
    update_neutron_config(orig_env, seed_env)
    # multi-node seed env: stop the cluster for now (it is started again
    # after the network switch); single node: start its services right away
    if len(controllers) > 1:
        maintenance.stop_cluster(seed_env)
    else:
        maintenance.start_corosync_services(seed_env)
        maintenance.start_upstart_services(seed_env)
    # disable cluster services on orig env
    maintenance.stop_cluster(orig_env)
    # switch networks to seed env
    roles = ['primary-controller', 'controller']
    # disable physical connectivity for orig env
    for node, info in env_util.iter_deployment_info(orig_env, roles):
        network.delete_patch_ports(node, info)
    # enable physical connectivity for seed env
    for node, info in env_util.iter_deployment_info(seed_env, roles):
        network.delete_overlay_networks(node, info)
        network.create_patch_ports(node, info)
    # enable all services on seed env
    if len(controllers) > 1:
        maintenance.start_cluster(seed_env)
        maintenance.start_corosync_services(seed_env)
        maintenance.start_upstart_services(seed_env)
Example #5
def disconnect_networks(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        deployment_info = env.get_default_facts('deployment',
                                                nodes=[node.data['id']])
        for info in deployment_info:
            network.delete_patch_ports(node, info)
Example #6
def disable_apis(env):
    controllers = list(env_util.get_controllers(env))
    maintenance_line = "backend maintenance"
    stats_socket_re = re.compile("stats\s+socket\s+/var/lib/haproxy/stats" "(?!.*level admin)")
    mode_tcp_re = re.compile("mode\s+tcp")
    use_backend_line = "  use_backend maintenance if TRUE"
    for node in controllers:
        sftp = ssh.sftp(node)
        sftp.chdir("/etc/haproxy")
        with ssh.update_file(sftp, "haproxy.cfg") as (old, new):
            found_maint_line = False
            for line in old:
                if maintenance_line in line:
                    found_maint_line = True
                line = stats_socket_re.sub(r"\g<0> level admin", line)
                new.write(line)
            if not found_maint_line:
                new.write(maintenance_line)
        sftp.chdir("/etc/haproxy/conf.d")
        for f in sftp.listdir():
            with ssh.update_file(sftp, f) as (old, new):
                contents = old.read()
                if not mode_tcp_re.search(contents):
                    raise ssh.DontUpdateException
                new.write(contents)
                if not contents.endswith("\n"):
                    new.write("\n")
                new.write(use_backend_line)
        ssh.call(["crm", "resource", "restart", "p_haproxy"], node=node)
def upgrade_control_plane(orig_id, seed_id):
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    controllers = list(env_util.get_controllers(seed_env))
    update_neutron_config(orig_env, seed_env)
    # multi-node seed env: stop the cluster for now (it is started again
    # after the network switch); single node: start its services right away
    if len(controllers) > 1:
        maintenance.stop_cluster(seed_env)
    else:
        maintenance.start_corosync_services(seed_env)
        maintenance.start_upstart_services(seed_env)
    # disable cluster services on orig env
    maintenance.stop_cluster(orig_env)
    # switch networks to seed env
    roles = ['primary-controller', 'controller']
    # disable physical connectivity for orig env
    for node, info in env_util.iter_deployment_info(orig_env, roles):
        network.delete_patch_ports(node, info)
    # enable physical connectivity for seed env
    for node, info in env_util.iter_deployment_info(seed_env, roles):
        network.delete_overlay_networks(node, info)
        network.create_patch_ports(node, info)
    # enable all services on seed env
    if len(controllers) > 1:
        maintenance.start_cluster(seed_env)
        maintenance.start_corosync_services(seed_env)
        maintenance.start_upstart_services(seed_env)
Example #8
def upgrade_control_plane(orig_id, seed_id):
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    controllers = list(env_util.get_controllers(seed_env))
    # multi-node seed env: stop the cluster for now (it is started again
    # after the network switch); single node: start its services right away
    if len(controllers) > 1:
        maintenance.stop_cluster(seed_env)
    else:
        maintenance.start_corosync_services(seed_env)
        maintenance.start_upstart_services(seed_env)
    # disable cluster services on orig env
    maintenance.stop_cluster(orig_env)
    # switch networks to seed env
    roles = ['primary-controller', 'controller']
    # disable physical connectivity for orig env
    for node, info in env_util.iter_deployment_info(orig_env, roles):
        network.delete_patch_ports(node, info)
    # enable physical connectivity for seed env
    for node, info in env_util.iter_deployment_info(seed_env, roles):
        network.delete_overlay_networks(node, info)
        network.create_patch_ports(node, info)
    # enable all services on seed env
    if len(controllers) > 1:
        maintenance.start_cluster(seed_env)
        maintenance.start_corosync_services(seed_env)
        maintenance.start_upstart_services(seed_env)
    # NOTE(akscram): Remove replaced deployment info with
    # the isolation mode and the alternative gateway.
    # CAUTION: This method removes replaced deployment
    # information for all nodes in an environment.
    seed_env.delete_facts("deployment")
Example #9
def update_neutron_config(orig_env, seed_env):
    controllers = list(env_util.get_controllers(seed_env))
    tenant_id = env_util.cache_service_tenant_id(orig_env)

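    # rewrite nova_admin_tenant_id in neutron.conf to the service tenant id
    # cached from the original environment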
    sed_script = 's/^(nova_admin_tenant_id )=.*/\\1 = %s/' % (tenant_id, )
    for node in controllers:
        ssh.call(['sed', '-re', sed_script, '-i', '/etc/neutron/neutron.conf'],
                 node=node)
Example #10
def update_neutron_config(orig_env, seed_env):
    controllers = list(env_util.get_controllers(seed_env))
    tenant_id = env_util.cache_service_tenant_id(orig_env)

    sed_script = 's/^(nova_admin_tenant_id )=.*/\\1 = %s/' % (tenant_id,)
    for node in controllers:
        ssh.call(['sed', '-re', sed_script, '-i', '/etc/neutron/neutron.conf'],
                 node=node)
Example #11
def update_neutron_config(env):
    controllers = list(env_util.get_controllers(env))
    tenant_file = '%s/env-%s-service-tenant-id' % (magic_consts.FUEL_CACHE,
                                                   str(env.id))
    with open(tenant_file) as f:
        tenant_id = f.read()

    sed_script = 's/^(nova_admin_tenant_id )=.*/\\1 = %s/' % (tenant_id, )
    for node in controllers:
        ssh.call(['sed', '-re', sed_script, '-i', '/etc/neutron/neutron.conf'],
                 node=node)
Example #12
def update_neutron_config(env):
    controllers = list(env_util.get_controllers(env))
    tenant_file = '%s/env-%s-service-tenant-id' % (magic_consts.FUEL_CACHE,
                                                   str(env.id))
    with open(tenant_file) as f:
        tenant_id = f.read()

    sed_script = 's/^(nova_admin_tenant_id )=.*/\\1 = %s/' % (tenant_id,)
    for node in controllers:
        ssh.call(['sed', '-re', sed_script, '-i', '/etc/neutron/neutron.conf'],
                 node=node)
Example #13
def prepare(orig_id, seed_id):
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    controller = env_util.get_one_controller(seed_env)

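    # copy the keystone database from the original env into the seed env
    # through a temporary local dump, then migrate the schema on the seed side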
    with tempfile.NamedTemporaryFile() as temp:
        db.mysqldump_from_env(orig_env, ['keystone'], temp.name)
        db.mysqldump_restore_to_env(seed_env, temp.name)

    ssh.call(['keystone-manage', 'db_sync'],
             node=controller, parse_levels=True)
    for controller in env_util.get_controllers(seed_env):
        ssh.call(['service', 'memcached', 'restart'], node=controller)
Example #14
def start_corosync_services(env):
    node = next(env_util.get_controllers(env))
    status_out, _ = ssh.call(['crm', 'resource', 'list'],
                             stdout=ssh.PIPE,
                             node=node)
    for service in maintenance.parse_crm_status(status_out):
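        # retry until crm accepts the start command for this resource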
        while True:
            try:
                ssh.call(['crm', 'resource', 'start', service], node=node)
            except subprocess.CalledProcessError:
                pass
            else:
                break
Example #15
def start_upstart_services(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        sftp = ssh.sftp(node)
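        # /root/services_list is expected to have been written earlier by
        # stop_upstart_services; a missing file is treated as an error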
        try:
            svc_file = sftp.open('/root/services_list')
        except IOError:
            raise
        else:
            with svc_file:
                to_start = svc_file.read().splitlines()
        for service in to_start:
            ssh.call(['start', service], node=node)
Example #16
def start_upstart_services(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        sftp = ssh.sftp(node)
        try:
            svc_file = sftp.open('/root/services_list')
        except IOError:
            raise
        else:
            with svc_file:
                to_start = svc_file.read().splitlines()
        for service in to_start:
            ssh.call(['start', service], node=node)
Example #17
def ceph_set_new_mons(seed_env, filename, conf_filename, db_path):
    nodes = list(env_util.get_controllers(seed_env))
    hostnames = map(short_hostname, node_util.get_hostnames(nodes))
    mgmt_ips = map(remove_mask, node_util.get_ips('management', nodes))

    with contextlib.closing(tarfile.open(filename)) as f:
        conf = f.extractfile(conf_filename).read()
        conf = replace_addresses(conf, hostnames, mgmt_ips)

    fsid = get_fsid(conf)
    monmaptool_cmd = ['monmaptool', '--fsid', fsid, '--clobber', '--create']
    for node_hostname, node_ip in itertools.izip(hostnames, mgmt_ips):
        monmaptool_cmd += ['--add', node_hostname, node_ip]

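    # rebuild each controller's monitor store from the backup tarball and
    # inject the freshly generated monmap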
    for node, node_hostname in itertools.izip(nodes, hostnames):
        node_db_path = "/var/lib/ceph/mon/ceph-{0}".format(node_hostname)
        node_conf = replace_host(conf, node_hostname)
        try:
            ssh.call(['stop', 'ceph-mon', "id={0}".format(node_hostname)],
                     node=node)
        except subprocess.CalledProcessError:
            pass
        ssh.call(['rm', '-rf', node_db_path], node=node)
        node_util.untar_files(filename, node)
        sftp = ssh.sftp(node)
        with sftp.open(conf_filename, 'w') as f:
            f.write(node_conf)
        ssh.call(['mv', db_path, node_db_path], node=node)

        sysvinit = os.path.join(node_db_path, 'sysvinit')
        try:
            sftp.remove(sysvinit)
        except IOError:
            pass
        upstart = os.path.join(node_db_path, 'upstart')
        sftp.open(upstart, 'w').close()

        with ssh.tempdir(node) as tempdir:
            monmap_filename = os.path.join(tempdir, 'monmap')
            ssh.call(monmaptool_cmd + [monmap_filename], node=node)
            ssh.call(['ceph-mon', '-i', node_hostname, '--inject-monmap',
                      monmap_filename], node=node)

    for node, node_hostname in itertools.izip(nodes, hostnames):
        ssh.call(['start', 'ceph-mon', "id={0}".format(node_hostname)],
                 node=node)
    import_bootstrap_osd(nodes[0])
Example #18
def start_corosync_services(env):
    node = next(env_util.get_controllers(env))
    status_out, _ = ssh.call(['crm', 'resource', 'list'],
                             stdout=ssh.PIPE,
                             node=node)
    for service in maintenance.parse_crm_status(status_out):
        while True:
            try:
                ssh.call(['crm', 'resource', 'start', service],
                         node=node)
            except subprocess.CalledProcessError:
                pass
            else:
                break
Example #19
def connect_to_networks(env):
    deployment_info = []
    controllers = list(env_util.get_controllers(env))
    backup_path = os.path.join(magic_consts.FUEL_CACHE,
                               'deployment_{0}.orig'.format(env.id))
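    # load the previously backed-up deployment info and use it to remove the
    # overlay networks and recreate patch ports on every controller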
    for filename in os.listdir(backup_path):
        filepath = os.path.join(backup_path, filename)
        with open(filepath) as info_file:
            info = yaml.safe_load(info_file)
            deployment_info.append(info)
    for node in controllers:
        for info in deployment_info:
            if info['role'] in ('primary-controller', 'controller'):
                network.delete_overlay_networks(node, info)
                network.create_patch_ports(node, info)
Example #20
def connect_to_networks(env):
    deployment_info = []
    controllers = list(env_util.get_controllers(env))
    backup_path = os.path.join(magic_consts.FUEL_CACHE,
                               'deployment_{0}.orig'
                               .format(env.id))
    for filename in os.listdir(backup_path):
        filepath = os.path.join(backup_path, filename)
        with open(filepath) as info_file:
            info = yaml.safe_load(info_file)
            deployment_info.append(info)
    for node in controllers:
        for info in deployment_info:
            if info['role'] in ('primary-controller', 'controller'):
                network.delete_overlay_networks(node, info)
                network.create_patch_ports(node, info)
Example #21
def ceph_set_new_mons(seed_env, filename, conf_filename, db_path):
    nodes = list(env_util.get_controllers(seed_env))
    hostnames = map(short_hostname, node_util.get_hostnames(nodes))
    mgmt_ips = map(remove_mask, node_util.get_ips('management', nodes))

    with contextlib.closing(tarfile.open(filename)) as f:
        conf = f.extractfile(conf_filename).read()
        conf = replace_addresses(conf, hostnames, mgmt_ips)

    fsid = get_fsid(conf)
    monmaptool_cmd = ['monmaptool', '--fsid', fsid, '--clobber', '--create']
    for node_hostname, node_ip in itertools.izip(hostnames, mgmt_ips):
        monmaptool_cmd += ['--add', node_hostname, node_ip]

    for node, node_hostname in itertools.izip(nodes, hostnames):
        node_db_path = "/var/lib/ceph/mon/ceph-{0}".format(node_hostname)
        node_conf = replace_host(conf, node_hostname)
        try:
            ssh.call(['stop', 'ceph-mon', "id={0}".format(node_hostname)],
                     node=node)
        except subprocess.CalledProcessError:
            pass
        ssh.call(['rm', '-rf', node_db_path], node=node)
        node_util.untar_files(filename, node)
        sftp = ssh.sftp(node)
        with sftp.open(conf_filename, 'w') as f:
            f.write(node_conf)
        ssh.call(['mv', db_path, node_db_path], node=node)

        sysvinit = os.path.join(node_db_path, 'sysvinit')
        try:
            sftp.remove(sysvinit)
        except IOError:
            pass
        upstart = os.path.join(node_db_path, 'upstart')
        sftp.open(upstart, 'w').close()

        with ssh.tempdir(node) as tempdir:
            monmap_filename = os.path.join(tempdir, 'monmap')
            ssh.call(monmaptool_cmd + [monmap_filename], node=node)
            ssh.call(['ceph-mon', '-i', node_hostname, '--inject-monmap',
                      monmap_filename], node=node)

    for node, node_hostname in itertools.izip(nodes, hostnames):
        ssh.call(['start', 'ceph-mon', "id={0}".format(node_hostname)],
                 node=node)
    import_bootstrap_osd(nodes[0])
Example #22
def ceph_set_new_mons(orig_env, seed_env, filename, conf_filename, db_path):
    nodes = list(env_util.get_controllers(seed_env))

    with contextlib.closing(tarfile.open(filename)) as f:
        conf = f.extractfile(conf_filename).read()

    fsid = get_fsid(conf)

    for node in nodes:
        node_hostname = short_hostname(node.data['fqdn'])
        node_db_path = "/var/lib/ceph/mon/ceph-{0}".format(node_hostname)
        try:
            ssh.call(['stop', 'ceph-mon', "id={0}".format(node_hostname)],
                     node=node)
        except subprocess.CalledProcessError:
            pass
        with ssh.tempdir(node) as tempdir:
            # save current seed conf and monmap in tmp dir
            monmap_filename = os.path.join(tempdir, 'monmap')
            ssh.call(["ceph-mon", "-i", node_hostname,
                     "--extract-monmap", monmap_filename], node=node)
            seed_conf_path = os.path.join(tempdir, "ceph.conf")
            ssh.call(['cp', conf_filename, seed_conf_path], node=node)

            ssh.call(['rm', '-rf', node_db_path], node=node)
            node_util.untar_files(filename, node)

            # return seed ceph confs
            ssh.call(['cp', seed_conf_path, conf_filename], node=node)
            # change fsid for orig fsid value
            change_fsid(conf_filename, node, fsid)
            # change fsid value in monmap
            ssh.call(["monmaptool", "--fsid", fsid,
                      "--clobber", monmap_filename], node=node)
            ssh.call(['mv', db_path, node_db_path], node=node)
            if version.StrictVersion(orig_env.data["fuel_version"]) < \
                    version.StrictVersion(magic_consts.CEPH_UPSTART_VERSION):
                _activate_upstart_instead_sysvinit(node, db_path, node_db_path)
            # return old monmap value
            ssh.call(['ceph-mon', '-i', node_hostname,
                      '--inject-monmap', monmap_filename], node=node)
        ssh.call(['start', 'ceph-mon', "id={0}".format(node_hostname)],
                 node=node)
    import_bootstrap_osd(nodes[0])
Example #23
def enable_apis(env):
    controllers = list(env_util.get_controllers(env))
    maintenance_line = 'backend maintenance'
    use_backend_line = '  use_backend maintenance if TRUE'
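    # strip the maintenance backend and use_backend lines added by
    # disable_apis, then restart haproxy on each controller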
    for node in controllers:
        sftp = ssh.sftp(node)
        sftp.chdir('/etc/haproxy')
        with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
            for line in old:
                if maintenance_line in line:
                    continue
                new.write(line)
        sftp.chdir('/etc/haproxy/conf.d')
        for f in sftp.listdir():
            with ssh.update_file(sftp, f) as (old, new):
                for line in old:
                    if use_backend_line in line:
                        continue
                    new.write(line)
        ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
Example #24
def stop_upstart_services(env):
    controllers = list(env_util.get_controllers(env))
    service_re = re.compile("^((?:%s)[^\s]*).*start/running" % ("|".join(magic_consts.OS_SERVICES),), re.MULTILINE)
    for node in controllers:
        sftp = ssh.sftp(node)
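        # cache the list of running OpenStack services in /root/services_list
        # so that start_upstart_services can later start exactly the same set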
        try:
            svc_file = sftp.open("/root/services_list")
        except IOError:
            with sftp.open("/root/services_list.tmp", "w") as svc_file:
                initctl_out = ssh.call_output(["initctl", "list"], node=node)
                to_stop = []
                for match in service_re.finditer(initctl_out):
                    service = match.group(1)
                    to_stop.append(service)
                    svc_file.write(service + "\n")
            sftp.rename("/root/services_list.tmp", "/root/services_list")
        else:
            with svc_file:
                to_stop = svc_file.read().splitlines()
        for service in to_stop:
            ssh.call(["stop", service], node=node)
Example #25
def start_cluster(env):
    major_version = env.data['fuel_version'].split('.')[0]
    cmds = []
    if int(major_version) < 6:
        cmds = [['service', 'corosync', 'start']]
    else:
        cmds = [['pcs', 'cluster', 'start']]
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        for cmd in cmds:
            ssh.call(cmd, node=node)
    # After starting the cluster, wait until the resources listed in the
    # `_default_exclude_services` constant are up and running. These resources
    # are not touched at all by the stop/start corosync service methods.
    node = env_util.get_one_controller(env)
    status_out = ssh.call_output(['cibadmin', '--query', '--scope',
                                  'resources'], node=node)
    services_list = []
    for res in get_crm_services(status_out):
        if any(service in res for service in _default_exclude_services):
            services_list.append(res)

    wait_for_corosync_services_sync(env, services_list, 'start')
Example #26
def stop_upstart_services(env):
    controllers = list(env_util.get_controllers(env))
    service_re = re.compile(
        "^((?:%s)[^\s]*).*start/running" %
        ("|".join(magic_consts.OS_SERVICES), ), re.MULTILINE)
    for node in controllers:
        sftp = ssh.sftp(node)
        try:
            svc_file = sftp.open('/root/services_list')
        except IOError:
            with sftp.open('/root/services_list.tmp', 'w') as svc_file:
                initctl_out = ssh.call_output(['initctl', 'list'], node=node)
                to_stop = []
                for match in service_re.finditer(initctl_out):
                    service = match.group(1)
                    to_stop.append(service)
                    svc_file.write(service + '\n')
            sftp.rename('/root/services_list.tmp', '/root/services_list')
        else:
            with svc_file:
                to_stop = svc_file.read().splitlines()
        for service in to_stop:
            ssh.call(['stop', service], node=node)
Example #27
def stop_corosync_services(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        status_out, _ = ssh.call(['crm', 'status'], stdout=ssh.PIPE, node=node)
        for service in parse_crm_status(status_out):
            ssh.call(['crm', 'resource', 'stop', service], node=node)
Example #28
def sync_glance_images(source_env_id, seed_env_id, seed_swift_ep):
    """Sync glance images from original ENV to seed ENV

    Args:
        source_env_id (int): ID of original ENV.
        seed_env_id (int): ID of seed ENV.
        seed_swift_ep (str): name of the endpoint the swift-proxy service
                             listens on.

    Examples:
        sync_glance_images(2, 3, 'br-mgmt')
    """
    # set glance username
    glance_user = "******"
    # set swift container value
    container = "glance"
    # choose tenant
    tenant = "services"
    # get clusters by id
    source_env = environment_obj.Environment(source_env_id)
    seed_env = environment_obj.Environment(seed_env_id)
    # gather cics admin IPs
    source_node = next(env_util.get_controllers(source_env))
    seed_node = next(env_util.get_controllers(seed_env))
    # get cics yaml files
    source_yaml = env_util.get_astute_yaml(source_env, source_node)
    seed_yaml = env_util.get_astute_yaml(seed_env, seed_node)
    # get glance passwords
    source_glance_pass = get_glance_password(source_yaml)
    seed_glance_pass = get_glance_password(seed_yaml)
    # get seed node swift ip
    seed_swift_ip = get_endpoint_ip(seed_swift_ep, seed_yaml)
    # get service tenant id & lists of objects for source env
    source_token = get_auth_token(source_node, tenant, glance_user, source_glance_pass)
    source_swift_list = set(
        get_swift_objects(source_node, tenant, glance_user, source_glance_pass, source_token, container)
    )
    # get service tenant id & lists of objects for seed env
    seed_token = get_auth_token(seed_node, tenant, glance_user, seed_glance_pass)
    seed_swift_list = set(get_swift_objects(seed_node, tenant, glance_user, seed_glance_pass, seed_token, container))
    # get service tenant for seed env
    seed_tenant = env_util.get_service_tenant_id(seed_env)
    # check consistency of matched images
    source_token = get_auth_token(source_node, tenant, glance_user, source_glance_pass)
    seed_token = get_auth_token(seed_node, tenant, glance_user, seed_glance_pass)
    for image in source_swift_list & seed_swift_list:
        source_obj_etag = get_object_property(
            source_node, tenant, glance_user, source_glance_pass, source_token, container, image, "ETag"
        )
        seed_obj_etag = get_object_property(
            seed_node, tenant, glance_user, seed_glance_pass, seed_token, container, image, "ETag"
        )
        if source_obj_etag != seed_obj_etag:
            # image should be resynced
            delete_image(seed_node, tenant, glance_user, seed_glance_pass, seed_token, container, image)
            LOG.info("Swift %s image should be resynced" % image)
            seed_swift_list.remove(image)
    # migrate new images
    for image in source_swift_list - seed_swift_list:
        # download image on source's node local drive
        source_token = get_auth_token(source_node, tenant, glance_user, source_glance_pass)
        download_image(source_node, tenant, glance_user, source_glance_pass, source_token, container, image)
        # transfer image
        source_token = get_auth_token(source_node, tenant, glance_user, source_glance_pass)
        seed_token = get_auth_token(seed_node, tenant, glance_user, seed_glance_pass)
        transfer_image(
            source_node, tenant, glance_user, seed_glance_pass, seed_token, container, image, seed_swift_ip, seed_tenant
        )
        # remove transferred image
        ssh.sftp(source_node).remove(image)
    # delete outdated images
    for image in seed_swift_list - source_swift_list:
        token = get_auth_token(seed_node, tenant, glance_user, seed_glance_pass)
        delete_image(seed_node, tenant, glance_user, seed_glance_pass, token, container, image)
Example #29
def stop_corosync_services(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        status_out = ssh.call_output(["crm", "status"], node=node)
        for service in parse_crm_status(status_out):
            ssh.call(["crm", "resource", "stop", service], node=node)
def disconnect_networks(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        deployment_info = env_util.get_astute_yaml(env, node)
        network.delete_patch_ports(node, deployment_info)
Example #31
def stop_corosync_services(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        status_out = ssh.call_output(['crm', 'status'], node=node)
        for service in parse_crm_status(status_out):
            ssh.call(['crm', 'resource', 'stop', service], node=node)
Example #32
def sync_glance_images(source_env_id, seed_env_id, seed_swift_ep):
    """Sync glance images from original ENV to seed ENV

    Args:
        source_env_id (int): ID of original ENV.
        seed_env_id (int): ID of seed ENV.
        seed_swift_ep (str): name of the endpoint the swift-proxy service
                             listens on.

    Examples:
        sync_glance_images(2, 3, 'br-mgmt')
    """
    # set glance username
    glance_user = "******"
    # set swift container value
    container = "glance"
    # choose tenant
    tenant = "services"
    # get clusters by id
    source_env = environment_obj.Environment(source_env_id)
    seed_env = environment_obj.Environment(seed_env_id)
    # gather cics admin IPs
    source_node = next(env_util.get_controllers(source_env))
    seed_node = next(env_util.get_controllers(seed_env))
    # get cics yaml files
    source_yaml = env_util.get_astute_yaml(source_env, source_node)
    seed_yaml = env_util.get_astute_yaml(seed_env, seed_node)
    # get glance passwords
    source_glance_pass = get_glance_password(source_yaml)
    seed_glance_pass = get_glance_password(seed_yaml)
    # get seed node swift ip
    seed_swift_ip = get_endpoint_ip(seed_swift_ep, seed_yaml)
    # get service tenant id & lists of objects for source env
    source_token = get_auth_token(source_node, tenant, glance_user,
                                  source_glance_pass)
    source_swift_list = set(
        get_swift_objects(source_node, tenant, glance_user, source_glance_pass,
                          source_token, container))
    # get service tenant id & lists of objects for seed env
    seed_token = get_auth_token(seed_node, tenant, glance_user,
                                seed_glance_pass)
    seed_swift_list = set(
        get_swift_objects(seed_node, tenant, glance_user, seed_glance_pass,
                          seed_token, container))
    # get service tenant for seed env
    seed_tenant = env_util.get_service_tenant_id(seed_env)
    # check consistency of matched images
    source_token = get_auth_token(source_node, tenant, glance_user,
                                  source_glance_pass)
    seed_token = get_auth_token(seed_node, tenant, glance_user,
                                seed_glance_pass)
    for image in source_swift_list & seed_swift_list:
        source_obj_etag = get_object_property(source_node, tenant, glance_user,
                                              source_glance_pass, source_token,
                                              container, image, 'ETag')
        seed_obj_etag = get_object_property(seed_node, tenant, glance_user,
                                            seed_glance_pass, seed_token,
                                            container, image, 'ETag')
        if source_obj_etag != seed_obj_etag:
            # image should be resynced
            delete_image(seed_node, tenant, glance_user, seed_glance_pass,
                         seed_token, container, image)
            LOG.info("Swift %s image should be resynced" % image)
            seed_swift_list.remove(image)
    # migrate new images
    for image in source_swift_list - seed_swift_list:
        # download image on source's node local drive
        source_token = get_auth_token(source_node, tenant, glance_user,
                                      source_glance_pass)
        download_image(source_node, tenant, glance_user, source_glance_pass,
                       source_token, container, image)
        # transfer image
        source_token = get_auth_token(source_node, tenant, glance_user,
                                      source_glance_pass)
        seed_token = get_auth_token(seed_node, tenant, glance_user,
                                    seed_glance_pass)
        transfer_image(source_node, tenant, glance_user, seed_glance_pass,
                       seed_token, container, image, seed_swift_ip,
                       seed_tenant)
        # remove transferred image
        ssh.sftp(source_node).remove(image)
    # delete outdated images
    for image in seed_swift_list - source_swift_list:
        token = get_auth_token(seed_node, tenant, glance_user,
                               seed_glance_pass)
        delete_image(seed_node, tenant, glance_user, seed_glance_pass, token,
                     container, image)
Example #33
def stop_cluster(env):
    cmds = [['pcs', 'cluster', 'kill']]
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        for cmd in cmds:
            ssh.call(cmd, node=node)