def make_global_vars(self, cluster, servers, hints):
    """Build the global_vars mapping for a cluster deployment.

    Extends the common ceph-ansible settings from the base class with
    journal layout flags derived from the user hints and with the
    template paths the playbook needs.
    """
    global_vars = super().make_global_vars(cluster, servers, hints)

    # Exactly one of these four journal layout flags ends up True;
    # reset them all first so the hint selection below is the only source.
    layout_flags = (
        "journal_collocation",
        "dmcrypt_journal_collocation",
        "dmcrypt_dedicated_journal",
        "raw_multi_journal",
    )
    for flag in layout_flags:
        global_vars[flag] = False

    if hints["dmcrypt"] and hints["collocation"]:
        chosen_layout = "dmcrypt_journal_collocation"
    elif hints["dmcrypt"]:
        chosen_layout = "dmcrypt_dedicated_journal"
    elif hints["collocation"]:
        chosen_layout = "journal_collocation"
    else:
        chosen_layout = "raw_multi_journal"
    global_vars[chosen_layout] = True

    global_vars["journal_size"] = self.config["journal"]["size"]
    global_vars["ceph_facts_template"] = str(pathutils.resource(
        "decapod_common", "facts", "ceph_facts_module.py.j2"))
    global_vars["restapi_template_local_path"] = str(pathutils.resource(
        "decapod_plugin_playbook_deploy_cluster", "ceph-rest-api.service"))

    return global_vars
def get_extra_vars(self, task):
    """Return the task's global_vars with the common playbooks path added."""
    playbook_config = self.get_playbook_configuration(task)
    extra_vars = playbook_config.configuration["global_vars"]
    common_playbooks = pathutils.resource("decapod_common", "playbooks")
    extra_vars["decapod_common_playbooks"] = str(common_playbooks)
    return extra_vars
def copy_decapod_common_playbooks(path):
    """Copy the decapod_common playbook tree into *path*/common_playbooks."""
    source = pathutils.resource("decapod_common", "playbooks")
    destination = path.joinpath("common_playbooks")
    shutil.copytree(source.as_posix(), destination.as_posix())
def make_global_vars(self, cluster, data, servers, hints):
    """Build global_vars, merging *data* overrides with hint-driven flags.

    Values from ``data.global_vars`` take precedence over the base-class
    defaults; in particular an explicit ``journal_size`` is respected.
    """
    global_vars = super().make_global_vars(cluster, servers, hints)
    global_vars.update(data.global_vars)

    # Reset all journal layout flags; exactly one is re-enabled below.
    for flag in ("journal_collocation",
                 "dmcrypt_journal_collocation",
                 "dmcrypt_dedicated_journal",
                 "raw_multi_journal"):
        global_vars[flag] = False

    global_vars["ceph_version_verify"] = bool(hints["ceph_version_verify"])
    global_vars["ceph_version_verify_packagename"] = (
        self.config["ceph_version_verify_packagename"])

    if hints["dmcrypt"]:
        chosen = ("dmcrypt_journal_collocation" if hints["collocation"]
                  else "dmcrypt_dedicated_journal")
    else:
        chosen = ("journal_collocation" if hints["collocation"]
                  else "raw_multi_journal")
    global_vars[chosen] = True

    # Keep a journal size supplied through data.global_vars, if any.
    global_vars.setdefault("journal_size", self.config["journal"]["size"])

    facts_template = pathutils.resource(
        "decapod_common", "facts", "ceph_facts_module.py.j2")
    global_vars["ceph_facts_template"] = str(facts_template)

    return global_vars
def get_extra_vars(self, task):
    """Fetch global_vars for *task*, adding the common playbooks location."""
    cfg = self.get_playbook_configuration(task).configuration["global_vars"]
    cfg["decapod_common_playbooks"] = str(
        pathutils.resource("decapod_common", "playbooks"))
    return cfg
def make_global_vars(self, cluster, data, servers, hints):
    """Build global_vars for this playbook, layering *data* overrides on top
    of the common ceph-ansible settings."""
    merged = super().make_global_vars(cluster, servers, hints)
    merged.update(data.global_vars)

    merged["ceph_version_verify"] = bool(hints["ceph_version_verify"])
    merged["ceph_version_verify_packagename"] = (
        self.config["ceph_version_verify_packagename"])

    template_path = pathutils.resource(
        "decapod_common", "facts", "ceph_facts_module.py.j2")
    merged["ceph_facts_template"] = str(template_path)

    return merged
def test_resource():
    """pathutils.resource must agree with pkg_resources and return a Path."""
    expected = pkg_resources.resource_filename("decapod_common", "file/obj")
    actual = pathutils.resource("decapod_common", "file", "obj")
    assert str(actual) == expected
    assert isinstance(pathutils.resource("decapod_common"), pathlib.Path)
"""Logger.""" MIGRATION_SHOW_TEMPLATE = """ Name: {name} Result: {result} Executed at: {time} SHA1 of script: {script_hash} -- Stdout: {stdout} -- Stderr: {stderr} """.strip() """Template to display for migration.show""" DIRECTORY = pathutils.resource("decapod_admin", "migration_scripts") """Directory where migration scripts are placed.""" @main.cli_group def migration(): """Migrations for database.""" @utils.command(migration) @click.argument( "query", type=click.Choice(["all", "applied", "not-applied"]), default="all" ) def list(query):
class CephAnsiblePlaybook(Playbook, metaclass=abc.ABCMeta):
    """Abstract base for playbook plugins driven by ceph-ansible.

    Provides the common ceph-ansible variable set (built from the bundled
    defaults YAML) plus a per-task temporary fetch directory that is
    created before execution and removed afterwards.
    """

    # Bundled defaults consumed by get_ceph_ansible_common_settings().
    CEPH_ANSIBLE_CONFIGFILE = pathutils.resource(
        "decapod_common", "configs", "ceph-ansible-defaults.yaml")

    @classmethod
    def get_ceph_ansible_common_settings(cls, cluster, servers, *,
                                         verify_ceph_version=False):
        """Translate the defaults config into ceph-ansible variable names.

        :param cluster: cluster model; its name and model_id become the
            ceph cluster name and fsid.
        :param servers: server models used to derive the public network.
        :param verify_ceph_version: forwarded as ``ceph_version_verify``.
        :return: dict of ceph-ansible global variables.
        """
        config = load_config(cls.CEPH_ANSIBLE_CONFIGFILE)
        result = {
            # e.g. "ceph_stable": True, depending on install.source.
            "ceph_{0}".format(config["install"]["source"]): True,
            "ceph_nfs_access_type": config["nfs"]["ganesha"]["access_type"],
            "ceph_nfs_ceph_access_type": config["nfs"]["ceph"]["access_type"],
            "ceph_nfs_ceph_export_id": config["nfs"]["ceph"]["export_id"],
            "ceph_nfs_ceph_protocols": config["nfs"]["ceph"]["protocols"],
            "ceph_nfs_ceph_pseudo_path": config["nfs"]["ceph"]["pseudo_path"],
            "ceph_nfs_export_id": config["nfs"]["ganesha"]["export_id"],
            "ceph_nfs_log_file": config["nfs"]["ganesha"]["log_file"],
            "ceph_nfs_protocols": config["nfs"]["ganesha"]["protocols"],
            "ceph_nfs_pseudo_path": config["nfs"]["ganesha"]["pseudo_path"],
            "ceph_nfs_rgw_access_type": config["nfs"]["rgw"]["access_type"],
            "ceph_nfs_rgw_export_id": config["nfs"]["rgw"]["export_id"],
            "ceph_nfs_rgw_protocols": config["nfs"]["rgw"]["protocols"],
            "ceph_nfs_rgw_pseudo_path": config["nfs"]["rgw"]["pseudo_path"],
            "ceph_nfs_rgw_user": config["nfs"]["rgw"]["user"],
            "ceph_restapi_port": config["restapi_port"],
            "ceph_stable_distro_source": config["install"]["distro_source"],
            "ceph_stable_release": config["install"]["release"],
            "ceph_stable_repo": config["install"]["repo"],
            "ceph_stable_repo_key": config["install"]["repo_key"],
            "ceph_stable_repo_keyserver": config["install"]["keyserver"],
            "ceph_version_verify_packagename": config["ceph_version_verify_packagename"],  # NOQA
            "ceph_version_verify": verify_ceph_version,
            "cluster": cluster.name,
            "common_single_host_mode": True,  # allow to deploy single OSD
            "copy_admin_key": bool(config.get("copy_admin_key", False)),
            "fsid": cluster.model_id,
            "journal_size": config["journal"]["size"],
            "max_open_files": config["max_open_files"],
            "nfs_file_gw": False,
            "nfs_obj_gw": False,
            "mds_max_mds": config["mds"]["max"],
            "mds_allow_multimds": config["mds"]["multi"],
            "os_tuning_params": [],
            "public_network": str(networkutils.get_public_network(servers)),
            "radosgw_civetweb_num_threads": config["radosgw"]["num_threads"],
            "radosgw_civetweb_port": config["radosgw"]["port"],
            "radosgw_dns_s3website_name": config["radosgw"]["dns_s3website_name"],  # NOQA
            "radosgw_static_website": config["radosgw"]["static_website"],
            "radosgw_usage_log": config["radosgw"]["usage"]["log"],
            "radosgw_usage_log_flush_threshold": config["radosgw"]["usage"]["log_flush_threshold"],  # NOQA
            "radosgw_usage_log_tick_interval": config["radosgw"]["usage"]["log_tick_interval"],  # NOQA
            "radosgw_usage_max_shards": config["radosgw"]["usage"]["max_shards"],  # NOQA
            "radosgw_usage_max_user_shards": config["radosgw"]["usage"]["user_shards"]  # NOQA
        }
        # UCA repositories reuse the distro source URL.
        result["ceph_stable_release_uca"] = result["ceph_stable_distro_source"]
        # FIXME(Sergey Arkhipov): For some reason, Ceph cannot converge
        # if I set another network.
        result["cluster_network"] = result["public_network"]
        # Flatten the nested "os" section into {"name": "family.param",
        # "value": ...} entries for os_tuning_params.
        for family, values in config.get("os", {}).items():
            for param, value in values.items():
                parameter = {"name": ".".join([family, param]), "value": value}
                result["os_tuning_params"].append(parameter)
        return result

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Temp dir for ceph-ansible's fetch_directory; created lazily in
        # on_pre_execute and removed in on_post_execute.
        self.fetchdir = None

    def on_pre_execute(self, task):
        """Create the per-task fetch directory before running the playbook."""
        self.fetchdir = pathutils.tempdir()
        super().on_pre_execute(task)

    def on_post_execute(self, task, exc_value, exc_type, exc_tb):
        """Remove the fetch directory after execution, success or failure.

        NOTE(review): exc_value precedes exc_type here, the reverse of the
        usual sys.exc_info() order — presumably mirrors the base class
        signature; confirm before reordering.
        """
        pathutils.remove(self.fetchdir)
        super().on_post_execute(task, exc_value, exc_type, exc_tb)

    def get_extra_vars(self, task):
        """Add fetch_directory to the extra vars from the base class."""
        config = super().get_extra_vars(task)
        config["fetch_directory"] = str(self.fetchdir)
        return config

    def make_global_vars(self, cluster, servers, hints):
        """Default global vars: the common ceph-ansible settings."""
        return self.get_ceph_ansible_common_settings(
            cluster, servers,
            verify_ceph_version=bool(hints.get("ceph_version_verify")))

    def get_dynamic_inventory(self):
        """Return the stored inventory; raises if no configuration is set."""
        if not self.playbook_config:
            raise exceptions.UnknownPlaybookConfiguration()
        return self.playbook_config.configuration["inventory"]
def get_filename(self, filename):
    """Resolve *filename* inside this plugin's package resources."""
    resource_path = pathutils.resource(self.module_name, filename)
    return resource_path
import yaml from decapod_common import pathutils try: from yaml import CSafeLoader as YAMLLoader except Exception as exc: from yaml import SafeLoader as YAMLLoader CONFIG_FILES = ( pathutils.CWD.joinpath("decapod.yaml"), pathutils.XDG_CONFIG_HOME.joinpath("decapod", "config.yaml"), pathutils.HOME.joinpath(".decapod.yaml"), pathutils.ROOT.joinpath("etc", "decapod", "config.yaml"), pathutils.resource("decapod_common", "configs", "defaults.yaml") ) """A list of config files in order to load/parse them.""" _PARSED_CONFIG = None """Internal cache to avoid reparsing of files anytime.""" class Config(dict): @property def logging_config(self): conf = self["logging"] return { "version": conf["version"],
def prepare_plugin(self):
    """Symlink ceph-ansible's roles into this plugin's resource directory."""
    roles_target = playbook_plugin.PATH_CEPH_ANSIBLE.joinpath("roles")
    link_path = pathutils.resource(
        "decapod_plugin_playbook_deploy_cluster", "roles")
    link_path.symlink_to(str(roles_target))
"""Logger.""" MIGRATION_SHOW_TEMPLATE = """ Name: {name} Result: {result} Executed at: {time} SHA1 of script: {script_hash} -- Stdout: {stdout} -- Stderr: {stderr} """.strip() """Template to display for migration.show""" DIRECTORY = pathutils.resource("decapod_admin", "migration_scripts") """Directory where migration scripts are placed.""" @main.cli_group def migration(): """Migrations for database.""" @utils.command(migration) @click.argument("query", type=click.Choice(["all", "applied", "not-applied"]), default="all") def list(query): """List migrations.
def copy_decapod_common_playbooks(path):
    """Replicate the decapod_common playbooks under *path*/common_playbooks."""
    src = pathutils.resource("decapod_common", "playbooks")
    dst = path.joinpath("common_playbooks")
    shutil.copytree(src.as_posix(), dst.as_posix())