Example #1
def write_content_to_tmp_file_on_node(node, content, directory, template):
    tmp_name = ssh.call_output(
        ["mktemp", "-p", directory, "-t", template], node=node).strip()
    sftp = ssh.sftp(node)
    with sftp.open(tmp_name, "w") as new:
        new.write(content)
    return tmp_name
Example #2
def get_service_tenant_id(env, node=None):
    env_id = env.data['id']
    fname = os.path.join(
        magic_consts.FUEL_CACHE,
        "env-{0}-service-tenant-id".format(env_id),
    )
    if os.path.exists(fname):
        with open(fname) as f:
            return f.readline()

    if node is None:
        node = get_one_controller(env)

    password = get_admin_password(env, node)
    tenant_out = ssh.call_output(
        [
            'sh',
            '-c',
            '. /root/openrc; keystone --os-password={0} tenant-get services'
            .format(password),
        ],
        node=node,
    )
    tenant_id = parse_tenant_get(tenant_out, 'id')
    dname = os.path.dirname(fname)
    if not os.path.exists(dname):
        os.makedirs(dname)
    with open(fname, 'w') as f:
        f.write(tenant_id)
    return tenant_id
Example #3
def get_auth_token(node, tenant, user, password):
    cmd = ". /root/openrc; keystone --os-tenant-name {0}"\
        " --os-username {1} --os-password {2} token-get".format(tenant,
                                                                user,
                                                                password)
    token_info = ssh.call_output(["sh", "-c", cmd], node=node)
    return env_util.parse_tenant_get(token_info, 'id')
Example #4
def restart_nova_services(node):
    nova_services = ssh.call_output(["service", "--status-all"], node=node)
    for service_line in nova_services.splitlines():
        service_line = service_line.strip()
        _, status, _, service = service_line.split()
        if status == "+" and service.startswith("nova"):
            ssh.call(["service", service, "restart"], node=node)
Example #5
def get_service_tenant_id(env, node=None):
    env_id = env.data['id']
    fname = os.path.join(
        magic_consts.FUEL_CACHE,
        "env-{0}-service-tenant-id".format(env_id),
    )
    if os.path.exists(fname):
        with open(fname) as f:
            return f.readline()

    if node is None:
        node = get_one_controller(env)

    password = get_admin_password(env, node)
    tenant_out = ssh.call_output(
        [
            'sh', '-c',
            '. /root/openrc; keystone --os-password={0} tenant-get services'
            .format(password),
        ],
        node=node,
    )
    tenant_id = parse_tenant_get(tenant_out, 'id')
    dname = os.path.dirname(fname)
    if not os.path.exists(dname):
        os.makedirs(dname)
    with open(fname, 'w') as f:
        f.write(tenant_id)
    return tenant_id
Example #6
def create_partition(disk_name, size, node):
    out = ssh.call_output(
        ['parted', '/dev/%s' % disk_name, 'unit', 'MB', 'print'], node=node)
    start = parse_last_partition_end(out) + 1
    end = start + size
    ssh.call(['parted', '/dev/%s' % disk_name, 'unit', 'MB', 'mkpart',
              'custom', 'ext4', str(start), str(end)],
             node=node)
Example #7
def get_swift_objects(node, tenant, user, password, token, container):
    cmd = ". /root/openrc; swift --os-project-name {0} --os-username {1}"\
        " --os-password {2} --os-auth-token {3} list {4}".format(tenant,
                                                                 user,
                                                                 password,
                                                                 token,
                                                                 container)
    objects_list = ssh.call_output(["sh", "-c", cmd], node=node)
    return objects_list.split('\n')[:-1]
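A quick illustration of the final slice, using fabricated swift output: splitting on the trailing newline leaves an empty string at the end, which [:-1] drops.

objects_list = "object-1\nobject-2\n"
print(objects_list.split('\n')[:-1])  # ['object-1', 'object-2']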
Example #8
def _get_backup_path(path, node):
    dir_name = os.path.dirname(path)
    prefix_name = os.path.basename(path)
    return ssh.call_output(
        [
            "tempfile",
            "-d", dir_name,
            "-p", ".{0}".format(prefix_name),
            "-s", ".bak",
        ],
        node=node)
Example #9
def get_current_versions(controller, kind):
    stdout = ssh.call_output(
        ['ceph', 'tell', '{0}.*'.format(kind), 'version', '-f', 'json'],
        node=controller)
    results = []
    for line in stdout.splitlines():
        if not line:
            continue
        if line.startswith(kind):
            line = line.split(":", 1)[1]
        results.append(json.loads(line))
    return {v['version'] for v in results}
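The parsing loop above can be exercised on its own with fabricated "ceph tell" output (the exact output format may vary by Ceph release):

import json

stdout = ('mon.node-1: {"version": "ceph version 0.94.9"}\n'
          'mon.node-2: {"version": "ceph version 0.94.9"}\n')
kind = 'mon'
results = []
for line in stdout.splitlines():
    if not line:
        continue
    if line.startswith(kind):
        # strip the "mon.node-X:" prefix, keep the JSON payload
        line = line.split(":", 1)[1]
    results.append(json.loads(line))
print({v['version'] for v in results})  # {'ceph version 0.94.9'}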
Example #10
def get_object_property(node, tenant, user, password, token, container,
                        object_id, prop):
    cmd = ". /root/openrc; swift --os-project-name {0} --os-username {1}"\
        " --os-password {2} --os-auth-token {3} stat {4} {5}"\
        .format(tenant,
                user,
                password,
                token,
                container,
                object_id)
    object_data = ssh.call_output(["sh", "-c", cmd], node=node)
    return parse_swift_out(object_data, prop)
Example #11
def get_ceph_conf_filename(node):
    cmd = [
        'bash', '-c',
        'pgrep ceph-mon | xargs -I{} cat /proc/{}/cmdline',
    ]
    cmdlines = ssh.call_output(cmd, node=node)
    if cmdlines:
        cmdline = cmdlines.split('\n')[0].split('\0')
        for i, value in enumerate(cmdline):
            if value == '-c' and i + 1 < len(cmdline):
                return cmdline[i + 1]
    return '/etc/ceph/ceph.conf'
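The cmdline parsing can be tried locally with a fabricated NUL-separated /proc/<pid>/cmdline string:

cmdline = ('/usr/bin/ceph-mon\0--cluster=ceph\0-c\0/etc/ceph/ceph.conf\0'
           '-i\0node-1\0').split('\0')
conf = '/etc/ceph/ceph.conf'  # default when no -c flag is present
for i, value in enumerate(cmdline):
    if value == '-c' and i + 1 < len(cmdline):
        conf = cmdline[i + 1]
        break
print(conf)  # /etc/ceph/ceph.conf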
Example #12
def stop_corosync_services(env):
    node = env_util.get_one_controller(env)
    status_out = ssh.call_output(['crm', 'status'], node=node)
    for service in parse_crm_status(status_out):
        while True:
            try:
                ssh.call(['crm', 'resource', 'stop', service], node=node)
            except subprocess.CalledProcessError:
                pass
            else:
                break
    time.sleep(60)
Example #13
def get_ceph_conf_filename(node):
    cmd = [
        'bash',
        '-c',
        'pgrep ceph-mon | xargs -I{} cat /proc/{}/cmdline',
    ]
    cmdlines = ssh.call_output(cmd, node=node)
    if cmdlines:
        cmdline = cmdlines.split('\n')[0].split('\0')
        for i, value in enumerate(cmdline):
            if value == '-c' and i + 1 < len(cmdline):
                return cmdline[i + 1]
    return '/etc/ceph/ceph.conf'
Example #14
def stop_corosync_services(env):
    node = env_util.get_one_controller(env)
    status_out = ssh.call_output(['crm', 'status'], node=node)
    for service in parse_crm_status(status_out):
        while True:
            try:
                ssh.call(['crm', 'resource', 'stop', service],
                         node=node)
            except subprocess.CalledProcessError:
                pass
            else:
                break
    time.sleep(60)
Example #15
def wait_for_corosync_services_sync(env, resource_list, status,
                                    timeout=1200, check_freq=20):
    status_bool = status == 'start'
    node = env_util.get_one_controller(env)
    started_at = time.time()
    while True:
        crm_out = ssh.call_output(['crm_mon', '--as-xml'], node=node)
        if is_resources_synced(resource_list, crm_out, status_bool):
            return
        if time.time() - started_at >= timeout:
            raise Exception("Timeout waiting for corosync cluster for env %s"
                            " to be synced" % env.id)
        time.sleep(check_freq)
Example #16
def get_service_tenant_id(env, node=None):
    if node is None:
        node = get_one_controller(env)

    password = get_admin_password(env, node)
    tenant_out = ssh.call_output(
        [
            'sh', '-c',
            '. /root/openrc; keystone --os-password={0} tenant-get services'
            .format(password),
        ],
        node=node,
    )
    tenant_id = parse_tenant_get(tenant_out, 'id')
    return tenant_id
Example #17
def get_service_tenant_id(env, node=None):
    if node is None:
        node = get_one_controller(env)

    password = get_admin_password(env, node)
    tenant_out = ssh.call_output(
        [
            'sh',
            '-c',
            '. /root/openrc; keystone --os-password={0} tenant-get services'
            .format(password),
        ],
        node=node,
    )
    tenant_id = parse_tenant_get(tenant_out, 'id')
    return tenant_id
Example #18
def get_openstack_project_dict(env, node=None):
    if node is None:
        node = get_one_controller(env)

    password = get_admin_password(env, node)
    tenant_out = ssh.call_output(
        [
            'sh', '-c',
            '. /root/openrc; openstack --os-password {0} project list -f json'
            .format(password),
        ],
        node=node,
    )
    data = [{k.lower(): v for k, v in d.items()}
            for d in json.loads(tenant_out)]
    return {i["name"]: i["id"] for i in data}
Example #19
def get_openstack_project_dict(env, node=None):
    if node is None:
        node = get_one_controller(env)

    password = get_admin_password(env, node)
    tenant_out = ssh.call_output(
        [
            'sh',
            '-c',
            '. /root/openrc; openstack --os-password {0} project list -f json'
            .format(password),
        ],
        node=node,
    )
    data = [{k.lower(): v for k, v in d.items()}
            for d in json.loads(tenant_out)]
    return {i["name"]: i["id"] for i in data}
Example #20
def stop_upstart_services(env):
    controllers = list(env_util.get_controllers(env))
    service_re = re.compile(
        r"^((?:%s)[^\s]*).*start/running" %
        ("|".join(magic_consts.OS_SERVICES),), re.MULTILINE)
    for node in controllers:
        sftp = ssh.sftp(node)
        try:
            svc_file = sftp.open("/root/services_list")
        except IOError:
            with sftp.open("/root/services_list.tmp", "w") as svc_file:
                initctl_out = ssh.call_output(["initctl", "list"], node=node)
                to_stop = []
                for match in service_re.finditer(initctl_out):
                    service = match.group(1)
                    to_stop.append(service)
                    svc_file.write(service + "\n")
            sftp.rename("/root/services_list.tmp", "/root/services_list")
        else:
            with svc_file:
                to_stop = svc_file.read().splitlines()
        for service in to_stop:
            ssh.call(["stop", service], node=node)
Example #21
def nova_migrate_flavor_data(env, attempts=20, attempt_delay=30):
    node = env_util.get_one_controller(env)
    for i in xrange(attempts):
        output = ssh.call_output(['nova-manage', 'db', 'migrate_flavor_data'],
                                 node=node, parse_levels=True)
        match = FLAVOR_STATUS_RE.match(output)
        if match is None:
            raise Exception(
                "The format of the migrate_flavor_data command was changed: "
                "'{0}'".format(output))
        params = match.groupdict()
        matched = int(params["matched"])
        completed = int(params["completed"])
        if matched == 0 or matched == completed:
            LOG.info("All flavors were successfully migrated.")
            return
        LOG.debug("Trying to migrate flavors data, iteration %s: %s matches, "
                  "%s completed.", i, matched, completed)
        time.sleep(attempt_delay)
    raise Exception(
        "After {0} attempts flavors data migration is still not completed."
        .format(attempts))
Example #22
def manage_corosync_services(env, status):
    node = env_util.get_one_controller(env)
    status_out = ssh.call_output(['cibadmin', '--query', '--scope',
                                  'resources'], node=node)
    services_list = []
    for res in get_crm_services(status_out):
        if any(service in res for service in _default_exclude_services):
            continue
        services_list.append(res)

    for service in services_list:
        while True:
            try:
                ssh.call(['crm', 'resource', status, service],
                         node=node)
            except subprocess.CalledProcessError:
                # Sometimes pacemaker rejects part of requests what it is
                # not able to process. Sleep was added to mitigate this risk.
                time.sleep(1)
            else:
                break
    wait_for_corosync_services_sync(env, services_list, status)
Example #23
    def postdeploy(self):
        # From neutron_update_admin_tenant_id
        sftp = ssh.sftp(self.node)
        with ssh.update_file(sftp, '/etc/neutron/neutron.conf') as (old, new):
            for line in old:
                if line.startswith('nova_admin_tenant_id'):
                    new.write('nova_admin_tenant_id = {0}\n'.format(
                        self.service_tenant_id))
                else:
                    new.write(line)
        orig_version = self.orig_env.data["fuel_version"]
        if orig_version == "6.1":
            openstack_release = magic_consts.VERSIONS[orig_version]
            node_util.add_compute_upgrade_levels(self.node, openstack_release)

            nova_services = ssh.call_output(
                ["bash", "-c",
                 "initctl list | "
                 "awk '/nova/ && /start/ {print $1}' | tr '\n' ' '"],
                node=self.node
            )

            for nova_service in nova_services.split():
                ssh.call(["service", nova_service, "restart"], node=self.node)

        ssh.call(['restart', 'neutron-server'], node=self.node)
        if self.isolated and self.gateway:
            # From restore_default_gateway
            LOG.info("Deleting default route at node %s",
                     self.node.id)
            try:
                ssh.call(['ip', 'route', 'delete', 'default'], node=self.node)
            except subprocess.CalledProcessError as exc:
                LOG.warn("Cannot delete default route at node %s: %s",
                         self.node.id, exc.args[0])
            LOG.info("Set default route at node %s: %s",
                     self.node.id, self.gateway)
            ssh.call(['ip', 'route', 'add', 'default', 'via', self.gateway],
                     node=self.node)
Example #24
def stop_upstart_services(env):
    controllers = list(env_util.get_controllers(env))
    service_re = re.compile(
        "^((?:%s)[^\s]*).*start/running" %
        ("|".join(magic_consts.OS_SERVICES), ), re.MULTILINE)
    for node in controllers:
        sftp = ssh.sftp(node)
        try:
            svc_file = sftp.open('/root/services_list')
        except IOError:
            with sftp.open('/root/services_list.tmp', 'w') as svc_file:
                initctl_out = ssh.call_output(['initctl', 'list'], node=node)
                to_stop = []
                for match in service_re.finditer(initctl_out):
                    service = match.group(1)
                    to_stop.append(service)
                    svc_file.write(service + '\n')
            sftp.rename('/root/services_list.tmp', '/root/services_list')
        else:
            with svc_file:
                to_stop = svc_file.read().splitlines()
        for service in to_stop:
            ssh.call(['stop', service], node=node)
Example #25
def start_cluster(env):
    major_version = env.data['fuel_version'].split('.')[0]
    cmds = []
    if int(major_version) < 6:
        cmds = [['service', 'corosync', 'start']]
    else:
        cmds = [['pcs', 'cluster', 'start']]
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        for cmd in cmds:
            ssh.call(cmd, node=node)
    # When we start cluster we should wait while resources from constant
    # `_default_exclude_services` become up and running. BTW, We don't touch
    # these resources in stop/start corosync resources methods at all.
    node = env_util.get_one_controller(env)
    status_out = ssh.call_output(['cibadmin', '--query', '--scope',
                                  'resources'], node=node)
    services_list = []
    for res in get_crm_services(status_out):
        if any(service in res for service in _default_exclude_services):
            services_list.append(res)

    wait_for_corosync_services_sync(env, services_list, 'start')
Example #26
    def postdeploy(self):
        # From neutron_update_admin_tenant_id
        sftp = ssh.sftp(self.node)
        with ssh.update_file(sftp, '/etc/neutron/neutron.conf') as (old, new):
            for line in old:
                if line.startswith('nova_admin_tenant_id'):
                    new.write('nova_admin_tenant_id = {0}\n'.format(
                        self.service_tenant_id))
                else:
                    new.write(line)
        orig_version = self.orig_env.data["fuel_version"]
        if orig_version == "6.1":
            openstack_release = magic_consts.VERSIONS[orig_version]
            node_util.add_compute_upgrade_levels(self.node, openstack_release)

            nova_services = ssh.call_output(
                ["bash", "-c",
                 "initctl list | "
                 "awk '/nova/ && /start/ {print $1}' | tr '\n' ' '"],
                node=self.node
            )

            for nova_service in nova_services.split():
                ssh.call(["service", nova_service, "restart"], node=self.node)

        ssh.call(['restart', 'neutron-server'], node=self.node)
        if self.isolated and self.gateway:
            # From restore_default_gateway
            LOG.info("Deleting default route at node %s", self.node.id)
            try:
                ssh.call(['ip', 'route', 'delete', 'default'], node=self.node)
            except subprocess.CalledProcessError as exc:
                LOG.warn("Cannot delete default route at node %s: %s",
                         self.node.id, exc.args[0])
            LOG.info("Set default route at node %s: %s", self.node.id,
                     self.gateway)
            ssh.call(['ip', 'route', 'add', 'default', 'via', self.gateway],
                     node=self.node)
Example #27
def check_cluster(node):
    # From check_ceph_cluster
    res = ssh.call_output(['ceph', 'health'], node=node)
    LOG.debug('Got status: %s', res)
    if not res or 'HEALTH_OK' not in res:
        raise Exception("Ceph cluster is unhealthy: " + res)
Example #29
def get_hostname_remotely(node):
    hostname = ssh.call_output(['hostname'], node=node)
    return hostname[:-1]
Example #30
def check_cluster(node):
    # From check_ceph_cluster
    res = ssh.call_output(["ceph", "health"], node=node)
    LOG.debug("Got status: %s", res)
    if not res or "HEALTH_OK" not in res:
        raise Exception("Ceph cluster is unhealthy: " + res)
Example #31
def stop_corosync_services(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        status_out = ssh.call_output(["crm", "status"], node=node)
        for service in parse_crm_status(status_out):
            ssh.call(["crm", "resource", "stop", service], node=node)
Example #32
def run_nova_cmd(cmd, node, output=True):
    run_cmd = ['sh', '-c', ' '.join(['.', '/root/openrc;'] + cmd)]
    if output:
        return ssh.call_output(run_cmd, node=node)
    return ssh.call(run_cmd, node=node)
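The command composition in run_nova_cmd can be checked without a node; for cmd=['nova', 'list'] the wrapper builds a single shell invocation that sources the credentials first:

cmd = ['nova', 'list']
run_cmd = ['sh', '-c', ' '.join(['.', '/root/openrc;'] + cmd)]
print(run_cmd)  # ['sh', '-c', '. /root/openrc; nova list']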
Example #33
def stop_corosync_services(env):
    controllers = list(env_util.get_controllers(env))
    for node in controllers:
        status_out = ssh.call_output(['crm', 'status'], node=node)
        for service in parse_crm_status(status_out):
            ssh.call(['crm', 'resource', 'stop', service], node=node)