Example #1
def add_ceph_mon(cluster_name, minions):
    conf_file = _CEPH_CLUSTER_CONF_DIR + "/" + cluster_name + "/" + cluster_name + ".conf"
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get("global", "public network"))
    check_minion_networks(minions, public_network)

    used_mon_ids = set([id.strip() for id in config.get("mon", "mon initial members").split(",")])
    unused_mon_ids = list(set(_MON_ID_LIST) - used_mon_ids)
    unused_mon_ids.sort()

    mon_id_map, monitors = _get_mon_id_map(unused_mon_ids, minions)

    mon_initial_members = list(used_mon_ids) + list(monitors)
    mon_initial_members.sort()
    config.set("mon", "mon initial members", ", ".join(mon_initial_members))

    _config_add_monitors(config, monitors)

    with open(conf_file, "wb") as f:
        config.write(f)

    pillar_data = _add_ceph_mon_pillar_data(mon_id_map, cluster_name, monitors)
    pillar = {"usm": pillar_data}

    out = run_state(local, minions, "add_ceph_mon", expr_form="list", kwarg={"pillar": pillar})
    if out:
        return out

    return sync_ceph_conf(cluster_name, minions)
Example #2
def AddMon(cluster_name, minions, ctxt=""):
    # convert list of minions to below dict
    # {MINION_ID: {'public_ip': IP_ADDRESS,
    #              'cluster_ip': IP_ADDRESS}, ...}
    d = {}
    for m in minions:
        d.update({m['Node']: {'public_ip': m['PublicIP4'],
                              'cluster_ip': m['ClusterIP4']}})
    minion_set = minions
    minions = d

    conf_file = (_CEPH_CLUSTER_CONF_DIR + '/' + cluster_name + '/' +
                 cluster_name + '.conf')
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get('global', 'public network'))
    _check_minion_networks(minions, public_network)

    used_mon_ids = set([id.strip() for id in config.get(
        'mon', 'mon initial members').split(',')])
    unused_mon_ids = list(set(_MON_ID_LIST) - used_mon_ids)
    unused_mon_ids.sort()

    mon_id_map, monitors = _get_mon_id_map(unused_mon_ids, minions)

    mon_initial_members = list(used_mon_ids) + list(monitors)
    mon_initial_members.sort()
    config.set('mon', 'mon initial members', ', '.join(mon_initial_members))

    _config_add_monitors(config, monitors)

    with open(conf_file, 'wb') as f:
        config.write(f)

    pillar_data = _add_ceph_mon_pillar_data(mon_id_map, cluster_name, monitors)
    pillar = {'skyring': pillar_data}

    local = salt.client.LocalClient()
    out = run_state(local, minions, 'add_ceph_mon', expr_form='list',
                    kwarg={'pillar': pillar})
    if out:
        log.error('%s-add_mon failed for %s. error=%s' %
                  (ctxt, minion_set, out))
        raise Exception('add_mon failed for %s. error=%s' %
                        (minion_set, out))

    out = sync_ceph_conf(cluster_name, minions)
    if out:
        log.error("%s-sync_ceph_conf failed to %s. error=%s" %
                  (ctxt, minions, out))
        raise Exception("sync_ceph_conf failed to %s. error=%s" %
                        (minions, out))

    return True
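
A hypothetical call illustrating the input shape AddMon expects (per-minion dicts carrying 'Node', 'PublicIP4' and 'ClusterIP4' keys, as the comment at the top notes); node names, addresses and the cluster name are made up:

# Hypothetical invocation; all values are illustrative.
minions = [
    {'Node': 'mon1.example.com', 'PublicIP4': '10.0.0.11', 'ClusterIP4': '192.168.1.11'},
    {'Node': 'mon2.example.com', 'PublicIP4': '10.0.0.12', 'ClusterIP4': '192.168.1.12'},
]
AddMon('ceph', minions, ctxt='demo')   # returns True, raises Exception on failure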
Example #3
 def verify_config(cls, config):
     prepend_root_dirs = []
     for config_key in ("log_file", ):
         if urllib.parse.urlparse(config.get(config_key, "")).scheme == "":
             prepend_root_dirs.append(config_key)
     if prepend_root_dirs:
         salt.config.prepend_root_dir(config, prepend_root_dirs)
     salt.utils.verify.verify_env(
         [str(pathlib.Path(config["log_file"]).parent)],
         running_username(),
         pki_dir=config.get("pki_dir") or "",
         root_dir=config["root_dir"],
     )
Example #4
 def verify_config(cls, config):
     salt.utils.verify.verify_env(
         [str(pathlib.Path(config["log_file"]).parent)],
         running_username(),
         pki_dir=config.get("pki_dir") or "",
         root_dir=config["root_dir"],
     )
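
Both verify_config variants above only touch a handful of keys in the opts dict; a minimal hypothetical input, with made-up paths:

# Hypothetical opts dict; only the keys the method actually reads are shown.
config = {
    'log_file': '/var/log/salt/minion',
    'root_dir': '/',
    'pki_dir': '/etc/salt/pki/minion',
}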
Example #5
 def list_server_logs(self, request, fqdn):
     client = salt.client.LocalClient(
         config.get('cthulhu', 'salt_config_path'))
     results = client.cmd(fqdn, "log_tail.list_logs", ["."])
     if not results:
         return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)
     return Response(sorted(results[fqdn]))
Example #6
    def get_server_log(self, request, fqdn, log_path):
        lines = request.GET.get('lines', 40)

        client = salt.client.LocalClient(
            config.get('cthulhu', 'salt_config_path'))
        results = client.cmd(fqdn, "log_tail.tail", [log_path, lines])
        if not results:
            return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)
        else:
            return Response({'lines': results[fqdn]})
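
Examples #5 and #6 follow the same pattern: build a LocalClient from the Salt master config and call a custom log_tail module on one minion. A minimal standalone sketch, assuming a default master config path and a made-up minion ID; note that log_tail is Calamari's own execution module, not part of stock Salt:

import salt.client

# The config path, minion ID and log path below are assumptions for illustration.
client = salt.client.LocalClient('/etc/salt/master')
results = client.cmd('ceph-node1.example.com', 'log_tail.tail', ['ceph/ceph.log', 40])
if results:
    print(results['ceph-node1.example.com'])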
Example #7
def _get_cluster_uuid_from_name(cluster_name):
    configfile = "/etc/ceph/%s.conf" % (cluster_name)
    if not os.path.isfile(configfile):
        raise Error("Cluster confg file does not exist:'%s'" % configfile)
    config = ConfigParser.ConfigParser()
    config.read(configfile)
    try:
        fsid = config.get("global","fsid")
    except ConfigParser.NoOptionError:
        raise Error("Cluster confg file does not sewt fsid:'%s'" % configfile)
    return fsid
Example #8
def _get_cluster_uuid_from_name(cluster_name):
    configfile = "/etc/ceph/%s.conf" % (cluster_name)
    if not os.path.isfile(configfile):
        raise Error("Cluster confg file does not exist:'%s'" % configfile)
    config = ConfigParser.ConfigParser()
    config.read(configfile)
    try:
        fsid = config.get("global", "fsid")
    except ConfigParser.NoOptionError:
        raise Error("Cluster confg file does not sewt fsid:'%s'" % configfile)
    return fsid
Example #9
def _get_cluster_name_from_uuid(cluster_uuid):
    output = None
    dir_content = os.listdir("/etc/ceph/")
    for file_name in dir_content:
        if file_name[-5:] != ".conf":
            continue
        fullpath = os.path.join("/etc/ceph/", file_name)
        config = ConfigParser()
        config.read(fullpath)
        try:
            fsid = config.get("global","fsid")
            if fsid is not None:
                output = file_name[:-5]
        except:
            continue
    return output
Example #10
def apply_fuse_config(overrides, defaults=None):
    if defaults is None:
        defaults = DEFAULT_FUSE_OPTS

    config = defaults.copy()

    if overrides:
        config.update(overrides)

    # set up the extension_modules location from the cachedir
    config['extension_modules'] = (
        config.get('extension_modules') or
        os.path.join(config['cachedir'], 'extmods')
    )

    return config
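
A small usage sketch; DEFAULT_FUSE_OPTS here is a stand-in, and the only requirement the function imposes on it is a 'cachedir' entry used to derive extension_modules:

# Stand-in defaults for illustration; the real DEFAULT_FUSE_OPTS lives in the module.
DEFAULT_FUSE_OPTS = {'cachedir': '/var/cache/salt/fuse', 'timeout': 60}

opts = apply_fuse_config({'timeout': 120}, defaults=DEFAULT_FUSE_OPTS)
# opts['timeout']            -> 120 (override wins)
# opts['extension_modules']  -> '/var/cache/salt/fuse/extmods' (derived from cachedir)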
Example #11
def _get_cluster_name_from_uuid(cluster_uuid):
    output = None
    dir_content = os.listdir("/etc/ceph/")
    for file_name in dir_content:
        if file_name[-5:] != ".conf":
            continue
        fullpath = os.path.join("/etc/ceph/", file_name)
        config = ConfigParser()
        config.read(fullpath)
        try:
            fsid = config.get("global", "fsid")
            if fsid == cluster_uuid:
                output = file_name[:-5]
        except:
            continue
    return output
Example #12
    def retrieve_grains(self, request, fqdn):
        salt_config = salt.config.client_config(
            config.get('cthulhu', 'salt_config_path'))
        pillar_util = salt.utils.master.MasterPillarUtil(
            fqdn,
            'glob',
            use_cached_grains=True,
            grains_fallback=False,
            opts=salt_config)

        try:
            # We (ab)use an internal interface to get at the cache by minion ID
            # instead of by glob, because the process of resolving the glob
            # relies on access to the root only PKI folder.
            cache_grains, cache_pillar = pillar_util._get_cached_minion_data(
                fqdn)
            return Response(cache_grains[fqdn])
        except KeyError:
            return Response(status=status.HTTP_404_NOT_FOUND)
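
The grains lookup above boils down to MasterPillarUtil plus the internal _get_cached_minion_data helper, which returns per-minion grains and pillar dicts. A standalone sketch, assuming a default master config path and a made-up minion ID:

import salt.config
import salt.utils.master

# Assumed paths and minion ID; mirrors the cached-grains lookup used above.
opts = salt.config.client_config('/etc/salt/master')
util = salt.utils.master.MasterPillarUtil('minion1.example.com', 'glob',
                                          use_cached_grains=True,
                                          grains_fallback=False,
                                          opts=opts)
grains, pillar = util._get_cached_minion_data('minion1.example.com')
print(grains.get('minion1.example.com', {}))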
Example #13
    def get_cluster_log(self, request, fsid):
        """
        Retrieve the cluster log from one of a cluster's mons (expect it to be in /var/log/ceph/ceph.log)
        """

        lines = request.GET.get('lines', 40)

        # Resolve FSID to name
        name = self.client.get_cluster(fsid)['name']

        # Resolve FSID to list of mon FQDNs
        servers = self.client.server_list_cluster(fsid)
        # Sort to get most recently contacted server first; drop any
        # for whom last_contact is None
        servers = [s for s in servers if s['last_contact']]
        servers = sorted(servers,
                         key=lambda t: dateutil_parse(t['last_contact']),
                         reverse=True)
        mon_fqdns = []
        for server in servers:
            for service in server['services']:
                service_id = ServiceId(*(service['id']))
                if (service['running'] and service_id.service_type == MON
                        and service_id.fsid == fsid):
                    mon_fqdns.append(server['fqdn'])

        client = salt.client.LocalClient(
            config.get('cthulhu', 'salt_config_path'))
        log.debug("LogTailViewSet: mons for %s are %s" % (fsid, mon_fqdns))
        # For each mon FQDN, try to go get ceph/$cluster.log, if we succeed return it, if we fail try the next one
        # NB this path is actually customizable in ceph as `mon_cluster_log_file` but we assume user hasn't done that.
        for mon_fqdn in mon_fqdns:
            results = client.cmd(mon_fqdn, "log_tail.tail",
                                 ["ceph/{name}.log".format(name=name), lines])
            if results:
                return Response({'lines': results[mon_fqdn]})
            else:
                log.info("Failed to get log from %s" % mon_fqdn)

        # If none of the mons gave us what we wanted, return a 503 service unavailable
        return Response("mon log data unavailable",
                        status=status.HTTP_503_SERVICE_UNAVAILABLE)
Example #14
    def _lookup_ifaces(self, servers):
        """
        Resolve the frontend/backend addresses (known
        by cthulhu via Ceph) to network interfaces (known by salt from its
        grains).
        """
        salt_config = salt.config.client_config(
            config.get('cthulhu', 'salt_config_path'))
        pillar_util = salt.utils.master.MasterPillarUtil(
            '',
            'glob',
            use_cached_grains=True,
            grains_fallback=False,
            opts=salt_config)

        def _lookup_one(server):
            log.debug(">> resolving grains for server {0}".format(
                server['fqdn']))
            fqdn = server['fqdn']
            cache_grains, cache_pillar = pillar_util._get_cached_minion_data(
                fqdn)
            server['frontend_iface'] = None
            server['backend_iface'] = None
            try:
                grains = cache_grains[fqdn]
                if server['frontend_addr']:
                    server['frontend_iface'] = self._addr_to_iface(
                        server['frontend_addr'], grains['ip_interfaces'])
                if server['backend_addr']:
                    server['backend_iface'] = self._addr_to_iface(
                        server['backend_addr'], grains['ip_interfaces'])
            except KeyError:
                pass
            log.debug("<< resolving grains for server {0}".format(
                server['fqdn']))

        # Issue up to this many disk I/Os to load grains at once
        CONCURRENT_GRAIN_LOADS = 16
        p = gevent.pool.Pool(CONCURRENT_GRAIN_LOADS)
        p.map(_lookup_one, servers)
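
The final three lines bound the number of concurrent grain loads with a gevent pool; the same pattern in isolation (server list and worker body are placeholders):

import gevent.pool

def _lookup_one(server):
    # placeholder worker; the real one resolves grains for a single server
    pass

servers = [{'fqdn': 'node1.example.com'}, {'fqdn': 'node2.example.com'}]
pool = gevent.pool.Pool(16)      # at most 16 lookups in flight at once
pool.map(_lookup_one, servers)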
Example #15
File: rpc.py  Project: abonas/calamari
 def _salt_key(self):
     return Key(salt.config.master_config(config.get('cthulhu', 'salt_config_path')))
Example #16
def AddOSD(cluster_name, minions, ctxt=""):
    # convert minions dict to below dict
    # {MINION_ID: {'public_ip': IP_ADDRESS,
    #              'cluster_ip': IP_ADDRESS,
    #              'devices': {DEVICE: FSTYPE, ...}}, ...}
    d = {minions['Node']: {'public_ip': minions['PublicIP4'],
                           'cluster_ip': minions['ClusterIP4'],
                           'devices': {
                               minions['Device']: minions['FSType'],
                           }}}
    minions = d

    conf_file = (_CEPH_CLUSTER_CONF_DIR + '/' + cluster_name + '/' +
                 cluster_name + '.conf')
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get('global', 'public network'))
    if config.has_option('global', 'cluster network'):
        cluster_network = IPNetwork(config.get('global', 'cluster network'))
    else:
        cluster_network = None
    public_network, cluster_network = _check_minion_networks(
        minions, public_network, cluster_network, check_cluster_network=True)

    pillar_data = {}
    for minion, v in minions.iteritems():
        pillar_data[minion] = {'cluster_name': cluster_name,
                               'cluster_id': config.get('global', 'fsid'),
                               'devices': v['devices']}
    pillar = {'skyring': pillar_data}

    local = salt.client.LocalClient()
    out = run_state(local, minions, 'prepare_ceph_osd', expr_form='list',
                    kwarg={'pillar': pillar})
    if out:
        log.error("%s-prepare_osd failed for %s. error=%s" %
                  (ctxt, minions, out))
        raise Exception("prepare_osd failed for %s. error=%s" %
                        (minions, out))

    for minion, v in minions.iteritems():
        count = 0
        found = False
        failed_devices = []
        while count < 6:
            out = local.cmd(minion, 'cmd.run_all', ['ls -l /dev/disk/by-parttypeuuid'])
            time.sleep(15)
            for key, value in v['devices'].iteritems():
                val_to_check = key.split('/')[-1]
                found = False
                for line in out[minion]["stdout"].splitlines():
                    if val_to_check in line:
                        found = True
                        if key in failed_devices:
                            failed_devices.remove(key)
                        break
                if not found:
                    if key not in failed_devices:
                        failed_devices.append(key)
                    break
            if found:
                break
            count += 1
        if len(failed_devices) != 0:
            log.error("%s-prepare_osd failed for %s" % (ctxt, failed_devices))
            raise Exception("prepare_osd failed for %s" % failed_devices)

    out = local.cmd(minions, 'cmd.run_all', ['ceph-disk activate-all'],
                    expr_form='list')

    osd_map = {}
    failed_minions = {}
    for minion, v in out.iteritems():
        osds = []

        if v.get('retcode') != 0:
            failed_minions[minion] = v
            continue

        for line in v['stdout'].splitlines():
            if line.startswith('=== '):
                osds.append(line.split('=== ')[1].strip())
                break
        osd_map[minion] = osds

    config.set('global', 'cluster network', cluster_network)
    for minion, osds in osd_map.iteritems():
        name = _get_short_hostname(minion)
        for osd in osds:
            config.add_section(osd)
            config.set(osd, 'host', name)
            config.set(osd, 'public addr', minions[minion]['public_ip'])
            config.set(osd, 'cluster addr', minions[minion]['cluster_ip'])

    with open(conf_file, 'wb') as f:
        config.write(f)

    out = sync_ceph_conf(cluster_name, minions)
    if out:
        log.error("%s-sync_cepH-conf failed for %s. error=%s" %
                  (ctxt, minions, out))
        #raise Exception("sync_ceph_conf failed for %s. error=%s" %
        #                (minions, out))

    if failed_minions:
        log.error('%s-add_osd failed. error=%s' % (ctxt, failed_minions))
        raise Exception('add_osd failed. error=%s' % failed_minions)

    return osd_map
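
A hypothetical input for AddOSD matching the comment at the top of the function ('Node', 'PublicIP4', 'ClusterIP4', 'Device', 'FSType'); every value is made up:

# Hypothetical invocation; returns {minion_id: ['osd.N', ...]} on success.
minion = {'Node': 'osd1.example.com',
          'PublicIP4': '10.0.0.21',
          'ClusterIP4': '192.168.1.21',
          'Device': '/dev/sdb',
          'FSType': 'xfs'}
osd_map = AddOSD('ceph', minion, ctxt='demo')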
Example #17
File: rpc.py  Project: abonas/calamari
 def bind(self):
     log.info("%s bind..." % self.__class__.__name__)
     self._server.bind(config.get('cthulhu', 'rpc_url'))
     self._bound = True
Example #18
from calamari_common.config import CalamariConfig
import salt.config

# A config instance for use from within the manager service
config = CalamariConfig()

# A salt config instance for places we'll need the sock_dir
salt_config = salt.config.client_config(config.get('cthulhu', 'salt_config_path'))
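
client_config() returns the parsed master opts as a plain dict; per the comment above, the value this module cares about later is sock_dir. A one-line usage sketch (the path shown is just the usual default):

# salt_config behaves like a dict of master options.
sock_dir = salt_config['sock_dir']   # e.g. '/var/run/salt/master' on a default install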
Example #19
 def bind(self):
     log.info("%s bind..." % self.__class__.__name__)
     self._server.bind(config.get('cthulhu', 'rpc_url'))
     self._bound = True
Example #20
def deploy(source, update_type, versions, **kwargs):
    '''
    Updates all installed binary packages of the source package
    to the specified version.

    source      : Name of the source package
    update_type : tool | library and others, see doc/readme.txt
    versions    : A dictionary of distros and the version to be installed,
                  e.g. jessie : 1.0-1.
                  If the distro isn't used, no update is performed
    '''

    pending_restarts_pre = set()
    pending_restarts_post = set()
    blacklisted_packages = []

    installed_distro = grains['oscodename']
    if versions.get(installed_distro, None) == None:
        log.info("Update doesn't apply to the installed distribution (" + installed_distro + ")")
        return {}

    if os.path.exists("/etc/debdeploy-minion.conf"):
        config = ConfigParser.ConfigParser()
        config.read("/etc/debdeploy-minion.conf")

        if config.has_section("blacklist-" + installed_distro):
            if config.has_option("blacklist-" + installed_distro, source):
                blacklisted_packages = [x.strip() for x in config.get("blacklist-" + installed_distro, source).split(",")]
    log.info("Packages blacklisted for upgrades: " + str(blacklisted_packages))

    # Detect all locally installed binary packages of a given source package
    # The only resource we can use for that is parsing the /var/lib/dpkg/status
    # file. The format is a bit erratic: The Source: line is only present for
    # binary packages not having the same name as the binary package
    installed_binary_packages = []
    for pkg in deb822.Packages.iter_paragraphs(file('/var/lib/dpkg/status')):

        # skip packages in deinstalled status ("rc" in dpkg). These are not relevant for
        # upgrades and cause problems when binary package names have changed (since package
        # installations are forced with a specific version which is not available for those
        # outdated binary package names)
        installation_status = pkg['Status'].split()[0]
        if installation_status == "deinstall":
            continue

        if pkg.has_key('Package') and pkg.get('Package') in blacklisted_packages:
            log.info('Package ' + pkg.get('Package') + ' has been blacklisted for installation')
            continue

        # Source packages which have had a binNMU have a Source: entry with the source
        # package version in brackets, so strip these
        # If no Source: entry is present in /var/lib/dpkg/status, then the source package
        # name is identical to the binary package name
        if pkg.has_key('Source') and re.sub(r'\(.*?\)', '', pkg['Source']).strip() == source:
            installed_binary_packages.append({pkg['Package'] : versions[installed_distro]})
        elif pkg.has_key('Package') and pkg['Package'] == source:
            installed_binary_packages.append({pkg['Package'] : versions[installed_distro]})
    log.debug("Installed binary packages for " + source + ": " + str(installed_binary_packages))

    if len(installed_binary_packages) == 0:
        log.info("No binary packages installed for source package " + source)
        return {}

    if update_type == "library":
        pending_restarts_pre = Checkrestart().get_programs_to_restart()
        log.debug("Packages needing a restart prior to the update:" + str(pending_restarts_pre))

    old = list_pkgs()

    log.warn("Refreshing apt package database")
    log.info("Refreshing apt package database")
    __salt__['pkg.refresh_db']()

    apt_call = install_pkgs(installed_binary_packages)

    new = list_pkgs()

    if update_type == "library":
        pending_restarts_post = Checkrestart().get_programs_to_restart()
        log.debug("Packages needing a restart after to the update:" + str(pending_restarts_post))

    old_keys = set(old.keys())
    new_keys = set(new.keys())

    additions = []
    removals = []
    updated = []
    restarts = []
    new_restarts = []

    if update_type == "library":
        restarts = list(pending_restarts_post)
        new_restarts = list(pending_restarts_post.difference(pending_restarts_pre))

    for i in new_keys.difference(old_keys):
        additions.append(i)
    for i in old_keys.difference(new_keys):
        removals.append(i)
    intersect = old_keys.intersection(new_keys)
    modified = {x : (old[x], new[x]) for x in intersect if old[x] != new[x]}

    log.info("Newly installed packages:" + str(additions))
    log.info("Removed packages: "  + str(removals))
    log.info("Modified packages: " + str(modified))
    log.info("Packages needing a restart: " + str(restarts))
    log.info("New packages needing a restart: " + str(new_restarts))

    r = {}
    r["additions"] = additions
    r["removals"] = removals
    r["updated"] = modified
    r["new_restart"] = new_restarts
    r["restart"] = restarts
    r["aptlog"] = str(apt_call['stdout'])
    r["apterrlog"] = str(apt_call['stderr'])
    r["aptreturn"] = apt_call['retcode']

    jobid = kwargs.get('__pub_jid')
    with open("/var/lib/debdeploy/" + jobid + ".job", "w") as jobfile:
        pickle.dump(r, jobfile)

    return r
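
A hypothetical direct call matching the docstring; the source package, versions and job ID are made up, and on a real system deploy() runs inside the Salt minion rather than being imported by hand:

# Hypothetical invocation; writes /var/lib/debdeploy/<jobid>.job as a side effect.
result = deploy('openssl', 'library',
                {'jessie': '1.0.1t-1+deb8u7'},
                __pub_jid='20240101000000000000')
print(result['updated'])      # {binary_package: (old_version, new_version), ...}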
Example #21
def add_ceph_osd(cluster_name, minions):
    """
    :: minions = {MINION_ID: {'public_ip': IP_ADDRESS,
                              'cluster_ip': IP_ADDRESS,
                              'host_name': HOSTNAME,
                              'devices': {DEVICE: FSTYPE, ...}}, ...}

    """
    conf_file = _CEPH_CLUSTER_CONF_DIR + "/" + cluster_name + "/" + cluster_name + ".conf"
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get("global", "public network"))
    if config.has_option("global", "cluster network"):
        cluster_network = IPNetwork(config.get("global", "cluster network"))
    else:
        cluster_network = None
    public_network, cluster_network = check_minion_networks(
        minions, public_network, cluster_network, check_cluster_network=True
    )

    pillar_data = {}
    for minion, v in minions.iteritems():
        pillar_data[minion] = {
            "cluster_name": cluster_name,
            "cluster_id": config.get("global", "fsid"),
            "devices": v["devices"],
        }
    pillar = {"usm": pillar_data}

    out = run_state(local, minions, "prepare_ceph_osd", expr_form="list", kwarg={"pillar": pillar})
    if out:
        return out

    out = local.cmd(minions, "cmd.run_all", ["ceph-disk activate-all"], expr_form="list")

    osd_map = {}
    failed_minions = {}
    for minion, v in out.iteritems():
        osds = []

        if v.get("retcode") != 0:
            failed_minions[minion] = v
            continue

        for line in v["stdout"].splitlines():
            if line.startswith("=== "):
                osds.append(line.split("=== ")[1].strip())
                break
        osd_map[minion] = osds

    config.set("global", "cluster network", cluster_network)
    for minion, osds in osd_map.iteritems():
        name = minions[minion].get("host_name", utils.get_short_hostname(minion))
        for osd in osds:
            config.add_section(osd)
            config.set(osd, "host", name)
            config.set(osd, "public addr", minions[minion]["public_ip"])
            config.set(osd, "cluster addr", minions[minion]["cluster_ip"])

    with open(conf_file, "wb") as f:
        config.write(f)

    sync_ceph_conf(cluster_name, minions)

    return failed_minions
Example #22
def get_queue_size():
    credentials = pika.PlainCredentials(RABBIT_USERNAME, RABBIT_PASSWORD)
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(RABBIT_HOST, RABBIT_PORT, RABBIT_VHOST,
                                  credentials))
    channel = connection.channel()
    result = channel.queue_declare(queue=HOST_ID)
    return result.method.message_count


if __name__ == "__main__":
    Watcher()
    config = ConfigParser.SafeConfigParser()
    config.read('dockerm-minion.conf')
    HOST_ID = salt.config.minion_config('/etc/salt/minion')['id']
    RABBIT_USERNAME = config.get('rabbitmq', 'RABBIT_USERNAME')
    RABBIT_PASSWORD = config.get('rabbitmq', 'RABBIT_PASSWORD')
    RABBIT_HOST = config.get('rabbitmq', 'RABBIT_HOST')
    RABBIT_PORT = config.getint('rabbitmq', 'RABBIT_PORT')
    RABBIT_VHOST = config.get('rabbitmq', 'RABBIT_VHOST')
    while 1:
        queue_size = get_queue_size()
        if queue_size > 10:
            thread_num = 5
        else:
            thread_num = 2
        if queue_size > 0:
            t = []
            for i in range(thread_num):
                t.append(threading.Thread(target=main, args=()))
                t[i].setDaemon(True)
Example #23
from calamari_common.config import CalamariConfig
import salt.config

# A config instance for use from within the manager service
config = CalamariConfig()

# A salt config instance for places we'll need the sock_dir
salt_config = salt.config.client_config(
    config.get('cthulhu', 'salt_config_path'))
Example #24
 def _salt_key(self):
     return Key(
         salt.config.master_config(config.get('cthulhu',
                                              'salt_config_path')))
Example #25
def AddMon(cluster_name, minions, ctxt=""):
    # convert list of minions to below dict
    # {MINION_ID: {'public_ip': IP_ADDRESS,
    #              'cluster_ip': IP_ADDRESS}, ...}
    d = {}
    for m in minions:
        d.update({
            m['Node']: {
                'public_ip': m['PublicIP4'],
                'cluster_ip': m['ClusterIP4']
            }
        })
    minion_set = minions
    minions = d

    conf_file = (_CEPH_CLUSTER_CONF_DIR + '/' + cluster_name + '/' +
                 cluster_name + '.conf')
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get('global', 'public network'))
    _check_minion_networks(minions, public_network)

    used_mon_ids = set([
        id.strip()
        for id in config.get('mon', 'mon initial members').split(',')
    ])
    unused_mon_ids = list(set(_MON_ID_LIST) - used_mon_ids)
    unused_mon_ids.sort()

    mon_id_map, monitors = _get_mon_id_map(unused_mon_ids, minions)

    mon_initial_members = list(used_mon_ids) + list(monitors)
    mon_initial_members.sort()
    config.set('mon', 'mon initial members', ', '.join(mon_initial_members))

    _config_add_monitors(config, monitors)

    with open(conf_file, 'wb') as f:
        config.write(f)

    pillar_data = _add_ceph_mon_pillar_data(mon_id_map, cluster_name, monitors)
    pillar = {'skyring': pillar_data}

    local = salt.client.LocalClient()
    out = run_state(local,
                    minions,
                    'add_ceph_mon',
                    expr_form='list',
                    kwarg={'pillar': pillar})
    if out:
        log.error('%s-add_mon failed for %s. error=%s' %
                  (ctxt, minion_set, out))
        raise Exception('add_mon failed for %s. error=%s' % (minion_set, out))

    out = sync_ceph_conf(cluster_name, minions)
    if out:
        log.error("%s-sync_ceph_conf failed to %s. error=%s" %
                  (ctxt, minions, out))
        raise Exception("sync_ceph_conf failed to %s. error=%s" %
                        (minions, out))

    return True
Example #26
def AddOSD(cluster_name, minions, ctxt=""):
    # convert minions dict to below dict
    # {MINION_ID: {'public_ip': IP_ADDRESS,
    #              'cluster_ip': IP_ADDRESS,
    #              'devices': {DEVICE: FSTYPE, ...}}, ...}
    d = {
        minions['Node']: {
            'public_ip': minions['PublicIP4'],
            'cluster_ip': minions['ClusterIP4'],
            'devices': {
                minions['Device']: minions['FSType'],
            }
        }
    }
    minions = d

    conf_file = (_CEPH_CLUSTER_CONF_DIR + '/' + cluster_name + '/' +
                 cluster_name + '.conf')
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get('global', 'public network'))
    if config.has_option('global', 'cluster network'):
        cluster_network = IPNetwork(config.get('global', 'cluster network'))
    else:
        cluster_network = None
    public_network, cluster_network = _check_minion_networks(
        minions, public_network, cluster_network, check_cluster_network=True)

    pillar_data = {}
    for minion, v in minions.iteritems():
        pillar_data[minion] = {
            'cluster_name': cluster_name,
            'cluster_id': config.get('global', 'fsid'),
            'devices': v['devices']
        }
    pillar = {'skyring': pillar_data}

    local = salt.client.LocalClient()
    out = run_state(local,
                    minions,
                    'prepare_ceph_osd',
                    expr_form='list',
                    kwarg={'pillar': pillar})
    if out:
        log.error("%s-prepare_osd failed for %s. error=%s" %
                  (ctxt, minions, out))
        raise Exception("prepare_osd failed for %s. error=%s" % (minions, out))

    for minion, v in minions.iteritems():
        count = 0
        found = False
        failed_devices = []
        while count < 6:
            out = local.cmd(minion, 'cmd.run_all',
                            ['ls -l /dev/disk/by-parttypeuuid'])
            time.sleep(15)
            for key, value in v['devices'].iteritems():
                val_to_check = key.split('/')[-1]
                found = False
                for line in out[minion]["stdout"].splitlines():
                    if val_to_check in line:
                        found = True
                        if key in failed_devices:
                            failed_devices.remove(key)
                        break
                if not found:
                    if key not in failed_devices:
                        failed_devices.append(key)
                    break
            if found:
                break
            count += 1
        if len(failed_devices) != 0:
            log.error("%s-prepare_osd failed for %s" % (ctxt, failed_devices))
            raise Exception("prepare_osd failed for %s" % failed_devices)

    out = local.cmd(minions,
                    'cmd.run_all', ['ceph-disk activate-all'],
                    expr_form='list')

    osd_map = {}
    failed_minions = {}
    for minion, v in out.iteritems():
        osds = []

        if v.get('retcode') != 0:
            failed_minions[minion] = v
            continue

        for line in v['stdout'].splitlines():
            if line.startswith('=== '):
                osds.append(line.split('=== ')[1].strip())
                break
        osd_map[minion] = osds

    config.set('global', 'cluster network', cluster_network)
    for minion, osds in osd_map.iteritems():
        name = _get_short_hostname(minion)
        for osd in osds:
            config.add_section(osd)
            config.set(osd, 'host', name)
            config.set(osd, 'public addr', minions[minion]['public_ip'])
            config.set(osd, 'cluster addr', minions[minion]['cluster_ip'])

    with open(conf_file, 'wb') as f:
        config.write(f)

    out = sync_ceph_conf(cluster_name, minions)
    if out:
        log.error("%s-sync_cepH-conf failed for %s. error=%s" %
                  (ctxt, minions, out))
        #raise Exception("sync_ceph_conf failed for %s. error=%s" %
        #                (minions, out))

    if failed_minions:
        log.error('%s-add_osd failed. error=%s' % (ctxt, failed_minions))
        raise Exception('add_osd failed. error=%s' % failed_minions)

    return osd_map
Example #27
    if args.config:
        # load config from file
        with open(args.config, 'r') as stream:
            config = ruamel.yaml.YAML().load(stream)
    elif select.select([sys.stdin], [], [], 0.0)[0]:
        # expect wheelhouse yaml structured metadata
        config = ruamel.yaml.YAML().load(sys.stdin)

    jobs = args.jobs if args.jobs else jobs
    Toolbox.merge_dict(config, config_override)

    # Make it happen
    if len(jobs) > 1 and len(config) > 1:
        if config.get('engine', '') == 'salt':
            # trigger the Salt wheel
            SaltWheel(config, jobs=jobs).runner()
        elif True:
            pass

    #TODO, should we go over some errors?
    #RED_ERROR = termcolor.colored('FATAL ERROR:', 'red')
    #if args.debug:
    #else:
    #    try:
    #        run(args)
    #    except (errors.UserException, errors.BuildError) as exc:
    #        print(RED_ERROR, exc.args[0], file=sys.stderr)
    #        sys.exit(exc.CODE)
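
For reference, a hypothetical config that would take the SaltWheel branch above; only the 'engine' key is actually consulted by that branch, and everything else (including the job names) is illustrative:

# Hypothetical in-memory equivalent of the YAML config read above.
config = {
    'engine': 'salt',
    'targets': ['build01.example.com'],
}
SaltWheel(config, jobs=['package_a', 'package_b']).runner()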