def add_ceph_osd(cluster_name, minions):
    """
    ::

        minions = {MINION_ID: {'public_ip': IP_ADDRESS,
                               'cluster_ip': IP_ADDRESS,
                               'host_name': HOSTNAME,
                               'devices': {DEVICE: FSTYPE, ...}}, ...}
    """
    conf_file = (_CEPH_CLUSTER_CONF_DIR + "/" + cluster_name + "/" +
                 cluster_name + ".conf")
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get("global", "public network"))
    if config.has_option("global", "cluster network"):
        cluster_network = IPNetwork(config.get("global", "cluster network"))
    else:
        cluster_network = None
    public_network, cluster_network = check_minion_networks(
        minions, public_network, cluster_network, check_cluster_network=True)

    pillar_data = {}
    for minion, v in minions.iteritems():
        pillar_data[minion] = {"cluster_name": cluster_name,
                               "cluster_id": config.get("global", "fsid"),
                               "devices": v["devices"]}
    pillar = {"usm": pillar_data}

    out = run_state(local, minions, "prepare_ceph_osd", expr_form="list",
                    kwarg={"pillar": pillar})
    if out:
        return out

    out = local.cmd(minions, "cmd.run_all", ["ceph-disk activate-all"],
                    expr_form="list")

    osd_map = {}
    failed_minions = {}
    for minion, v in out.iteritems():
        osds = []
        if v.get("retcode") != 0:
            failed_minions[minion] = v
            continue
        for line in v["stdout"].splitlines():
            if line.startswith("=== "):
                osds.append(line.split("=== ")[1].strip())
                break
        osd_map[minion] = osds

    config.set("global", "cluster network", cluster_network)
    for minion, osds in osd_map.iteritems():
        name = minions[minion].get("host_name",
                                   utils.get_short_hostname(minion))
        for osd in osds:
            config.add_section(osd)
            config.set(osd, "host", name)
            config.set(osd, "public addr", minions[minion]["public_ip"])
            config.set(osd, "cluster addr", minions[minion]["cluster_ip"])
    with open(conf_file, "wb") as f:
        config.write(f)

    sync_ceph_conf(cluster_name, minions)

    return failed_minions
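# Illustrative usage sketch (assumption, not part of the original module):
# how add_ceph_osd() might be driven for a single storage node, given the
# module-level helpers (run_state, sync_ceph_conf, utils, local) referenced
# above. The minion id, addresses, hostname and device are hypothetical.
#
#     minions = {"osd-node1.example.com": {"public_ip": "192.168.1.11",
#                                          "cluster_ip": "10.0.0.11",
#                                          "host_name": "osd-node1",
#                                          "devices": {"/dev/vdb": "xfs"}}}
#     failed = add_ceph_osd("ceph", minions)
#     # an empty dict means every minion activated its OSDs successfully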
def AddOSD(cluster_name, minions, ctxt=""):
    # convert minions dict to below dict
    # {MINION_ID: {'public_ip': IP_ADDRESS,
    #              'cluster_ip': IP_ADDRESS,
    #              'devices': {DEVICE: FSTYPE, ...}}, ...}
    d = {minions['Node']: {'public_ip': minions['PublicIP4'],
                           'cluster_ip': minions['ClusterIP4'],
                           'devices': {minions['Device']: minions['FSType']}}}
    minions = d

    conf_file = (_CEPH_CLUSTER_CONF_DIR + '/' + cluster_name + '/' +
                 cluster_name + '.conf')
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get('global', 'public network'))
    if config.has_option('global', 'cluster network'):
        cluster_network = IPNetwork(config.get('global', 'cluster network'))
    else:
        cluster_network = None
    public_network, cluster_network = _check_minion_networks(
        minions, public_network, cluster_network, check_cluster_network=True)

    pillar_data = {}
    for minion, v in minions.iteritems():
        pillar_data[minion] = {'cluster_name': cluster_name,
                               'cluster_id': config.get('global', 'fsid'),
                               'devices': v['devices']}
    pillar = {'skyring': pillar_data}

    local = salt.client.LocalClient()
    out = run_state(local, minions, 'prepare_ceph_osd', expr_form='list',
                    kwarg={'pillar': pillar})
    if out:
        log.error("%s-prepare_osd failed for %s. error=%s" %
                  (ctxt, minions, out))
        raise Exception("prepare_osd failed for %s. error=%s" %
                        (minions, out))

    # Poll each minion until the prepared partitions show up under
    # /dev/disk/by-parttypeuuid (up to 6 attempts, 15 seconds apart)
    for minion, v in minions.iteritems():
        count = 0
        found = False
        failed_devices = []
        while count < 6:
            out = local.cmd(minion, 'cmd.run_all',
                            ['ls -l /dev/disk/by-parttypeuuid'])
            time.sleep(15)
            for key, value in v['devices'].iteritems():
                val_to_check = key.split('/')[-1]
                found = False
                for line in out[minion]["stdout"].splitlines():
                    if val_to_check in line:
                        found = True
                        if key in failed_devices:
                            failed_devices.remove(key)
                        break
                if not found:
                    if key not in failed_devices:
                        failed_devices.append(key)
                    break
            if found:
                break
            count += 1
        if len(failed_devices) != 0:
            log.error("%s-prepare_osd failed for %s" % (ctxt, failed_devices))
            raise Exception("prepare_osd failed for %s" % failed_devices)

    out = local.cmd(minions, 'cmd.run_all', ['ceph-disk activate-all'],
                    expr_form='list')

    osd_map = {}
    failed_minions = {}
    for minion, v in out.iteritems():
        osds = []
        if v.get('retcode') != 0:
            failed_minions[minion] = v
            continue
        for line in v['stdout'].splitlines():
            if line.startswith('=== '):
                osds.append(line.split('=== ')[1].strip())
                break
        osd_map[minion] = osds

    config.set('global', 'cluster network', cluster_network)
    for minion, osds in osd_map.iteritems():
        name = _get_short_hostname(minion)
        for osd in osds:
            config.add_section(osd)
            config.set(osd, 'host', name)
            config.set(osd, 'public addr', minions[minion]['public_ip'])
            config.set(osd, 'cluster addr', minions[minion]['cluster_ip'])
    with open(conf_file, 'wb') as f:
        config.write(f)

    out = sync_ceph_conf(cluster_name, minions)
    if out:
        log.error("%s-sync_ceph_conf failed for %s. error=%s" %
                  (ctxt, minions, out))
        # raise Exception("sync_ceph_conf failed for %s. error=%s" %
        #                 (minions, out))

    if failed_minions:
        log.error('%s-add_osd failed. error=%s' % (ctxt, failed_minions))
        raise Exception('add_osd failed. error=%s' % failed_minions)

    return osd_map
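# Illustrative usage sketch (assumption, not part of the original module):
# AddOSD() receives the flat per-node dict produced by the calling layer and
# converts it to the nested form shown in the comment at the top of the
# function. The values below are hypothetical placeholders.
#
#     request = {'Node': 'osd-node1.example.com',
#                'PublicIP4': '192.168.1.11',
#                'ClusterIP4': '10.0.0.11',
#                'Device': '/dev/vdb',
#                'FSType': 'xfs'}
#     osd_map = AddOSD('ceph', request, ctxt='add-osd-task-1')
#     # osd_map maps each minion id to the OSD sections added to ceph.conf,
#     # e.g. {'osd-node1.example.com': ['osd.3']}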
def deploy(source, update_type, versions, **kwargs):
    '''
    Updates all installed binary packages of the source package to the
    specified version.

    source : Name of the source package
    update_type : tool | library and others, see doc/readme.txt
    versions : A dictionary of distros and the version to be installed,
        e.g. jessie : 1.0-1. If the distro isn't used, no update is performed
    '''
    pending_restarts_pre = set()
    pending_restarts_post = set()
    blacklisted_packages = []

    installed_distro = grains['oscodename']
    if versions.get(installed_distro, None) is None:
        log.info("Update doesn't apply to the installed distribution (" +
                 installed_distro + ")")
        return {}

    if os.path.exists("/etc/debdeploy-minion.conf"):
        config = ConfigParser.ConfigParser()
        config.read("/etc/debdeploy-minion.conf")
        if config.has_section("blacklist-" + installed_distro):
            if config.has_option("blacklist-" + installed_distro, source):
                blacklisted_packages = [
                    x.strip()
                    for x in config.get("blacklist-" + installed_distro,
                                        source).split(",")]

    log.info("Packages blacklisted for upgrades: " + str(blacklisted_packages))

    # Detect all locally installed binary packages of a given source package.
    # The only resource we can use for that is parsing the /var/lib/dpkg/status
    # file. The format is a bit erratic: the Source: line is only present for
    # binary packages not having the same name as the source package.
    installed_binary_packages = []
    for pkg in deb822.Packages.iter_paragraphs(file('/var/lib/dpkg/status')):
        # Skip packages in deinstalled status ("rc" in dpkg). These are not
        # relevant for upgrades and cause problems when binary package names
        # have changed (since package installations are forced with a specific
        # version which is not available for those outdated binary package
        # names).
        installation_status = pkg['Status'].split()[0]
        if installation_status == "deinstall":
            continue

        if pkg.has_key('Package') and pkg.get('Package') in blacklisted_packages:
            log.info('Package ' + pkg.get('Package') +
                     ' has been blacklisted for installation')
            continue

        # Source packages which have had a binNMU have a Source: entry with the
        # source package version in brackets, so strip these.
        # If no Source: entry is present in /var/lib/dpkg/status, then the
        # source package name is identical to the binary package name.
        if pkg.has_key('Source') and \
                re.sub(r'\(.*?\)', '', pkg['Source']).strip() == source:
            installed_binary_packages.append(
                {pkg['Package']: versions[installed_distro]})
        elif pkg.has_key('Package') and pkg['Package'] == source:
            installed_binary_packages.append(
                {pkg['Package']: versions[installed_distro]})

    log.debug("Installed binary packages for " + source + ": " +
              str(installed_binary_packages))

    if len(installed_binary_packages) == 0:
        log.info("No binary packages installed for source package " + source)
        return {}

    if update_type == "library":
        pending_restarts_pre = Checkrestart().get_programs_to_restart()
        log.debug("Packages needing a restart prior to the update: " +
                  str(pending_restarts_pre))

    old = list_pkgs()

    log.info("Refreshing apt package database")
    __salt__['pkg.refresh_db']()

    apt_call = install_pkgs(installed_binary_packages)

    new = list_pkgs()

    if update_type == "library":
        pending_restarts_post = Checkrestart().get_programs_to_restart()
        log.debug("Packages needing a restart after the update: " +
                  str(pending_restarts_post))

    old_keys = set(old.keys())
    new_keys = set(new.keys())

    additions = []
    removals = []
    restarts = []
    new_restarts = []

    if update_type == "library":
        restarts = list(pending_restarts_post)
        new_restarts = list(
            pending_restarts_post.difference(pending_restarts_pre))

    for i in new_keys.difference(old_keys):
        additions.append(i)
    for i in old_keys.difference(new_keys):
        removals.append(i)

    intersect = old_keys.intersection(new_keys)
    modified = {x: (old[x], new[x]) for x in intersect if old[x] != new[x]}

    log.info("Newly installed packages: " + str(additions))
    log.info("Removed packages: " + str(removals))
    log.info("Modified packages: " + str(modified))
    log.info("Packages needing a restart: " + str(restarts))
    log.info("New packages needing a restart: " + str(new_restarts))

    r = {}
    r["additions"] = additions
    r["removals"] = removals
    r["updated"] = modified
    r["new_restart"] = new_restarts
    r["restart"] = restarts
    r["aptlog"] = str(apt_call['stdout'])
    r["apterrlog"] = str(apt_call['stderr'])
    r["aptreturn"] = apt_call['retcode']

    jobid = kwargs.get('__pub_jid')
    with open("/var/lib/debdeploy/" + jobid + ".job", "w") as jobfile:
        pickle.dump(r, jobfile)

    return r
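# Illustrative usage sketch (assumption, not part of the original module):
# deploy() is normally invoked through Salt, assuming it is exposed as an
# execution module (module name, job id and versions below are hypothetical
# placeholders), and the per-job result can be read back from the pickled
# job file written above.
#
#     salt 'minion*' debdeploy.deploy openssl library "{'jessie': '1.0.1t-1'}"
#
#     import pickle
#     with open('/var/lib/debdeploy/20160101123456789012.job') as jobfile:
#         result = pickle.load(jobfile)
#     print(result['updated'])      # {binary_pkg: (old_version, new_version)}
#     print(result['aptreturn'])    # apt return code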