def config_changed():
    """config-changed hook: reject changes to write-once options, push
    address updates to relations and refresh the unit state."""
    # nested_mode is fixed at deploy time; refuse any later change.
    if config.changed("nested_mode"):
        raise Exception('Nested mode cannot be changed after deployment.')
    # TODO: analyze other params and raise exception if readonly params were changed
    utils.update_nrpe_config()

    if config.changed("control-network"):
        addr_settings = {'private-address': common_utils.get_ip()}
        for rel_name in ("contrail-controller", "contrail-kubernetes-config"):
            for rel_id in relation_ids(rel_name):
                relation_set(relation_id=rel_id,
                             relation_settings=addr_settings)
        _notify_contrail_kubernetes_node()

    if (config.changed("kubernetes_api_hostname")
            or config.changed("kubernetes_api_secure_port")):
        _notify_controller()

    config["config_analytics_ssl_available"] = \
        common_utils.is_config_analytics_ssl_available()
    config.save()

    docker_utils.config_changed()
    utils.update_charm_status()
def contrail_controller_changed():
    """contrail-controller relation changed hook.

    Mirrors controller-provided values into local config, rewrites config
    files, and (on the leader) re-broadcasts orchestrator info when the
    service IPs changed.
    """
    data = relation_get()

    def _sync(cfg_key, rel_key):
        # Copy one relation value into config; an explicit None clears it,
        # an absent key leaves the cached value untouched.
        if rel_key not in data:
            return
        value = data[rel_key]
        if value is None:
            config.pop(cfg_key, None)
        else:
            config[cfg_key] = value

    for cfg_key, rel_key in (("auth_info", "auth-info"),
                             ("ssl_ca", "ssl-ca"),
                             ("api_vip", "api-vip"),
                             ("api_ip", "private-address"),
                             ("api_port", "port")):
        _sync(cfg_key, rel_key)
    config.save()

    write_configs()
    status_set("active", "Unit is ready")

    # auth_info can affect endpoints
    if update_service_ips() and is_leader():
        data = _get_orchestrator_info()
        for rid in relation_ids("contrail-controller"):
            relation_set(relation_id=rid, **data)
def tls_changed(module, rel_data):
    """Handle tls-certificates relation data for *module*.

    Extracts this unit's cert/key plus the CA from *rel_data* (all three
    become None on departure or when the data is incomplete), applies them
    via update_certificates() and records the resulting SSL state in
    config.  Returns True when certificates actually changed.
    """
    cert = key = ca = None
    if rel_data:
        # changed case: per-unit keys are namespaced by the unit name
        unit_prefix = local_unit().replace('/', '_')
        cert = rel_data.get('{0}.server.cert'.format(unit_prefix))
        key = rel_data.get('{0}.server.key'.format(unit_prefix))
        ca = rel_data.get('ca')
        if not (cert and key and ca):
            log("tls-certificates client's relation data is not fully available. Rel data: {}".format(rel_data))
            cert = key = ca = None
    # else: departed case — everything stays None

    if not update_certificates(module, cert, key, ca):
        log("Certificates were not changed.")
        return False

    log("Certificates have been changed. Rewrite configs and rerun services.")
    if cert:
        config["ssl_enabled"] = True
        config["ca_cert"] = ca
    else:
        config["ssl_enabled"] = False
        config.pop("ca_cert", None)
    config.save()
    return True
def config_changed():
    """config-changed hook: log every changed option and toggle
    development mode (which populates the test database) when the
    'development-mode' option flips."""
    config = hookenv.config()

    for key in config:
        if config.changed(key):
            log("config['{}'] changed from {} to {}".format(
                key, config.previous(key), config[key]))

    if config.changed('development-mode'):
        if config['development-mode']:
            # Development mode: Engage!
            log('Turning on development mode')
            cmd = """
reddit-run %s/r2/models/populatedb.py -c 'populate()'
""" % REDDIT_INSTALL_PATH
            log(cmd)
            # FIX: cmd is a shell command line in a single string;
            # subprocess.call(cmd) without shell=True tries to exec the
            # whole string as one binary name and raises FileNotFoundError.
            subprocess.call(cmd, shell=True)
            # cd $REDDIT_HOME/src/reddit/r2
            # reddit-run r2/models/populatedb.py -c 'populate()'
        else:
            log('Turning off development mode')
    else:
        log('Development mode not changed')

    config.save()
    start()
def neutron_api_joined(rel_id=None):
    """neutron-api relation joined hook.

    Installs the Contrail Neutron plugin packages, records the detected
    package version in config, and hands the plugin configuration
    (core-plugin, service plugins, quota driver, subordinate config and
    optional RBAC middleware) to the neutron-api principal.

    :param rel_id: relation id to set data on (None = current relation)
    """
    apt_install(NEUTRON_API_PACKAGES, fatal=True)
    try:
        cmd = ["dpkg-query", "-f", "${Version}\\n", "-W",
               "neutron-plugin-contrail"]
        version = check_output(cmd).decode("UTF-8").rstrip()
        application_version_set(version)
        # save version for future using:
        # encode a.b.c as a*10000 + b*100 + c so versions compare as ints
        version = version.split('-')[0].split('.')
        m = int(version[0])
        r = int(version[1]) if len(version) > 1 else 0
        a = int(version[2]) if len(version) > 2 else 0
        # FIX: use integer arithmetic — 1e4/1e2 are floats, which stored
        # the version as e.g. 40002.0 instead of the intended int 40002.
        config["version"] = m * 10000 + r * 100 + a
        config.save()
    except CalledProcessError as e:
        log("Couldn't detect installed application version: " + str(e))

    # create plugin config
    base = "neutron_plugin_contrail.plugins.opencontrail"
    plugin = base + ".contrail_plugin.NeutronPluginContrailCoreV2"
    service_plugins = base + ".loadbalancer.v2.plugin.LoadBalancerPluginV2"
    extensions = [
        "/usr/lib/python2.7/dist-packages/neutron_plugin_contrail/extensions",
        "/usr/lib/python2.7/dist-packages/neutron_lbaas/extensions"]
    conf = {
        "neutron-api": {
            "/etc/neutron/neutron.conf": {
                "sections": {
                    "DEFAULT": [
                        ("api_extensions_path", ":".join(extensions))]
                }
            }
        }
    }
    settings = {
        "neutron-plugin": "contrail",
        "core-plugin": plugin,
        "neutron-plugin-config":
            "/etc/neutron/plugins/opencontrail/ContrailPlugin.ini",
        "service-plugins": service_plugins,
        "quota-driver": base + ".quota.driver.QuotaDriver",
        "subordinate_configuration": json.dumps(conf),
    }
    auth_mode = config.get("auth_mode", "cloud-admin")
    if auth_mode == "rbac":
        settings["extra_middleware"] = [{
            "name": "user_token",
            "type": "filter",
            "config": {
                "paste.filter_factory":
                    base + ".neutron_middleware:token_factory"
            }
        }]
    relation_set(relation_id=rel_id, relation_settings=settings)

    # if this hook raised after contrail-controller we need
    # to overwrite default config file after installation
    write_configs()
def contrail_controller_changed():
    """contrail-controller relation changed hook.

    Caches orchestrator info, ISSU maintenance data, the
    use-internal-endpoints flag and per-agent dpdk flags from the relation
    into config, then propagates to dependent relations and refreshes
    status.
    """
    data = relation_get()
    if "orchestrator-info" in data:
        config["orchestrator_info"] = data["orchestrator-info"]
    if data.get("unit-type") == 'issu':
        # remote side is running an in-service software upgrade
        config["maintenance"] = 'issu'
        config["issu_controller_ips"] = data.get("issu_controller_ips")
        config["issu_controller_data_ips"] = data.get("issu_controller_data_ips")
        config["issu_analytics_ips"] = data.get("issu_analytics_ips")
    use_internal_endpoints = data.get("use-internal-endpoints")
    if use_internal_endpoints:
        if not isinstance(use_internal_endpoints, bool):
            # FIX: safe_load — relation data is external input; plain
            # yaml.load without a Loader permits arbitrary object
            # construction and is deprecated.
            use_internal_endpoints = yaml.safe_load(use_internal_endpoints)
            if not isinstance(use_internal_endpoints, bool):
                use_internal_endpoints = False
        config["use_internal_endpoints"] = use_internal_endpoints
    # TODO: set error if orchestrator is changed and container was started
    # with another orchestrator

    if "dpdk" in data:
        # remote unit is an agent
        address = data["private-address"]
        flags = common_utils.json_loads(config.get("agents-info"), dict())
        flags[address] = data["dpdk"]
        config["agents-info"] = json.dumps(flags)
    config.save()

    update_southbound_relations()
    update_northbound_relations()
    utils.update_ziu("controller-changed")
    utils.update_charm_status()
def config_changed():
    """config-changed hook: reject immutable options, prepare hugepages
    for kernel-mode vrouter, refresh images/status and track image-tag
    changes for ZIU."""
    utils.update_nrpe_config()

    # Charm doesn't support changing of some parameters.
    if config.changed("dpdk"):
        raise Exception("Configuration parameter dpdk couldn't be changed")
    if config.changed("l3mh-cidr"):
        raise Exception(
            "Configuration parameter l3mh-cidr couldn't be changed")
    # Charm doesn't support changing container runtime (check for empty value after upgrade).
    if (config.changed("container_runtime")
            and config.previous("container_runtime")):
        raise Exception(
            "Configuration parameter container_runtime couldn't be changed")

    if not config["dpdk"] and (config.changed("kernel-hugepages-1g")
                               or config.changed("kernel-hugepages-2m")):
        # hugepages only need kernel-mode preparation when dpdk is off
        utils.prepare_hugepages_kernel_mode()

    common_utils.container_engine().config_changed()
    utils.pull_images()
    utils.update_charm_status()

    # leave it as latest - in case of exception in previous steps
    # config.changed doesn't work sometimes...
    if config.get("saved-image-tag") != config["image-tag"]:
        utils.update_ziu("image-tag")
        config["saved-image-tag"] = config["image-tag"]
        config.save()
def config_changed():
    """config-changed hook for the controller charm.

    Validates auth-mode, propagates address changes to service and
    cluster relations, manages local rabbitmq hostname resolution,
    refreshes container/charm state, tracks image-tag for ZIU and finally
    re-publishes data on all dependent relations.
    """
    utils.update_nrpe_config()
    auth_mode = config.get("auth-mode")
    if auth_mode not in ("rbac", "cloud-admin", "no-auth"):
        raise Exception("Config is invalid. auth-mode must one of: "
                        "rbac, cloud-admin, no-auth.")

    if config.changed("control-network") or config.changed("data-network"):
        ip = common_utils.get_ip()
        data_ip = common_utils.get_ip(config_param="data-network",
                                      fallback=ip)

        rel_settings = {"private-address": ip}
        for rname in ("http-services", "https-services"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid, relation_settings=rel_settings)

        cluster_settings = {"unit-address": ip, "data-address": data_ip}
        if config.get('local-rabbitmq-hostname-resolution'):
            cluster_settings.update({
                "rabbitmq-hostname": utils.get_contrail_rabbit_hostname(),
            })
            # this will also take care of updating the hostname in case
            # control-network changes to something different although
            # such host reconfiguration is unlikely
            utils.update_rabbitmq_cluster_hostnames()
        for rid in relation_ids("controller-cluster"):
            relation_set(relation_id=rid, relation_settings=cluster_settings)

        if is_leader():
            _address_changed(local_unit(), ip, 'ip')
            _address_changed(local_unit(), data_ip, 'data_ip')

    if config.changed("local-rabbitmq-hostname-resolution"):
        if config.get("local-rabbitmq-hostname-resolution"):
            # enabling this option will trigger events on other units
            # so their hostnames will be added as -changed events fire
            # we just need to set our hostname
            utils.update_rabbitmq_cluster_hostnames()
        else:
            kvstore = kv()
            rabbitmq_hosts = kvstore.get(key='rabbitmq_hosts', default={})
            # FIX: iterate items() — iterating the dict directly yields
            # only the keys, so the (ip, hostname) unpacking raised
            # ValueError and the hosts file was never cleaned up.
            for ip, hostname in rabbitmq_hosts.items():
                utils.update_hosts_file(ip, hostname, remove_hostname=True)

    docker_utils.config_changed()
    utils.update_charm_status()

    # leave it after update_charm_status - in case of exception in previous steps
    # config.changed doesn't work sometimes...
    if config.get("saved-image-tag") != config["image-tag"]:
        utils.update_ziu("image-tag")
        config["saved-image-tag"] = config["image-tag"]
        config.save()

    update_http_relations()
    update_https_relations()

    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
def amqp_changed():
    """amqp relation changed hook.

    Caches the RabbitMQ hostname and password from the relation into
    config (via the module-level _update_config helper) and refreshes the
    charm status.
    """
    data = relation_get()
    _update_config(data, "rabbit-hostname", "hostname")
    _update_config(data, "rabbit-password", "password")
    config.save()
    utils.update_charm_status()
def contrail_controller_changed():
    """contrail-controller relation changed hook.

    Keeps the cached orchestrator info in sync with the relation and
    refreshes the charm status (importing the cluster state).
    """
    data = relation_get()
    if "orchestrator-info" not in data:
        # key absent on the relation: drop any stale cached value
        config.pop("orchestrator_info", None)
    else:
        config["orchestrator_info"] = data["orchestrator-info"]
    config.save()
    utils.update_charm_status(import_cluster=True)
def contrail_controller_changed():
    """contrail-controller relation changed hook.

    Mirrors the analytics server list, auth info and orchestrator info
    from the relation into local config, then refreshes the charm status.
    """
    data = relation_get()
    _update_config(data, "analytics_servers", "analytics-server")
    _update_config(data, "auth_info", "auth-info")
    _update_config(data, "orchestrator_info", "orchestrator-info")
    config.save()
    utils.update_charm_status()
def contrail_analytics_departed():
    """contrail-analytics relation departed hook.

    When the last analytics unit is gone, all analytics-derived config
    keys are dropped; status and proxy services are refreshed either way.
    """
    remaining = []
    for rid in relation_ids("contrail-analytics"):
        remaining.extend(related_units(rid))
    if not remaining:
        # last analytics unit left: clear everything cached from it
        for stale_key in ("auth_info", "auth_mode", "orchestrator_info",
                          "rabbitmq_hosts"):
            config.pop(stale_key, None)
        config.save()
    utils.update_charm_status()
    _notify_proxy_services()
def _tls_changed(cert, key, ca):
    """Apply new TLS material and, if anything changed, persist the SSL
    state and notify dependents.

    :param cert: server certificate (string) or None to disable SSL
    :param key: server private key or None
    :param ca: CA certificate or None
    """
    if not update_certificates(cert, key, ca):
        return

    # save certs & notify relations
    config["ssl_enabled"] = bool(cert)
    config.save()
    update_northbound_relations()
    update_charm_status(force=True)
def contrail_controller_changed():
    """contrail-controller relation changed hook.

    Logs the raw relation data, mirrors the analytics server list,
    maintenance flag and controller IP lists into local config, then
    refreshes the charm status.
    """
    data = relation_get()
    log("RelData: " + str(data))
    _update_config(data, "analytics_servers", "analytics-server")
    _update_config(data, "maintenance", "maintenance")
    _update_config(data, "controller_ips", "controller_ips")
    _update_config(data, "controller_data_ips", "controller_data_ips")
    config.save()
    utils.update_charm_status()
def upgrade_charm():
    """upgrade-charm hook: drop cached image-version keys and refresh."""
    # NOTE: old image can not be deleted if container is running.
    # TODO: so think about killing the container

    # clear cached version of image
    for cached_key in ("version_with_build", "version"):
        config.pop(cached_key, None)
    config.save()

    # NOTE: this hook can be fired when either resource changed or charm code
    # changed. so if code was changed then we may need to update config
    update_charm_status()
def contrail_cotroller_departed():
    """contrail-controller relation departed hook.

    No-op while any controller unit remains; once the last one is gone,
    drops all controller-derived config keys, rewrites configs and blocks
    the unit.  (Function name spelling kept as-is — renaming would change
    the public interface.)
    """
    units = []
    for rid in relation_ids("contrail-controller"):
        units += related_units(rid)
    if units:
        return

    for key in ("auth_info", "api_vip", "api_ip", "api_port", "auth_mode"):
        config.pop(key, None)
    config.save()
    write_configs()
    status_set("blocked", "Missing relation to contrail-controller")
def contrail_controller_changed():
    """contrail-controller relation changed hook.

    Caches ISSU maintenance data, the use-internal-endpoints flag,
    per-agent dpdk flags and kubernetes cluster info from the relation
    into config, rebuilds orchestrator info and propagates to dependent
    relations.
    """
    data = relation_get()
    if data.get("unit-type") == 'issu':
        # remote side is running an in-service software upgrade
        config["maintenance"] = 'issu'
        config["issu_controller_ips"] = data.get("issu_controller_ips")
        config["issu_controller_data_ips"] = data.get(
            "issu_controller_data_ips")
        config["issu_analytics_ips"] = data.get("issu_analytics_ips")
    use_internal_endpoints = data.get("use-internal-endpoints")
    if use_internal_endpoints:
        if not isinstance(use_internal_endpoints, bool):
            # FIX: safe_load — relation data is external input; plain
            # yaml.load without a Loader permits arbitrary object
            # construction and is deprecated.
            use_internal_endpoints = yaml.safe_load(use_internal_endpoints)
            if not isinstance(use_internal_endpoints, bool):
                use_internal_endpoints = False
        config["use_internal_endpoints"] = use_internal_endpoints
    # TODO: set error if orchestrator is changed and container was started
    # with another orchestrator

    if "dpdk" in data:
        # remote unit is an agent
        address = data["private-address"]
        flags = common_utils.json_loads(config.get("agents-info"), dict())
        flags[address] = data["dpdk"]
        config["agents-info"] = json.dumps(flags)
    if "k8s_info" in data:
        # remote unit is kubemaster
        k8s_info = common_utils.json_loads(data.get("k8s_info"), dict())
        if k8s_info:
            cluster_name = k8s_info.get("cluster_name")
            pod_subnets = k8s_info.get("pod_subnets")
            kubernetes_workers = k8s_info.get("kubernetes_workers")
            cluster_info = {
                cluster_name: {
                    "pod_subnets": pod_subnets,
                    "kubernetes_workers": kubernetes_workers
                }
            }
            agents_info = common_utils.json_loads(config.get("agents-info"),
                                                  dict())
            # merge per-cluster info into the cached k8s_info map
            if agents_info.get("k8s_info"):
                agents_info["k8s_info"].update(cluster_info)
            else:
                agents_info["k8s_info"] = cluster_info
            config["agents-info"] = json.dumps(agents_info)
    config.save()

    _rebuild_orchestrator_info()
    update_southbound_relations()
    update_northbound_relations()
    utils.update_ziu("controller-changed")
    utils.update_charm_status()
def config_changed():
    """config-changed hook: reject dpdk changes, record SSL availability
    and refresh container/charm state."""
    utils.update_nrpe_config()

    # Charm doesn't support changing of some parameters.
    if config.changed("dpdk"):
        raise Exception("Configuration parameter dpdk couldn't be changed")

    ssl_available = common_utils.is_config_analytics_ssl_available()
    config["config_analytics_ssl_available"] = ssl_available
    config.save()

    docker_utils.config_changed()
    utils.update_charm_status()
def upgrade_charm():
    """upgrade-charm hook.

    Rebuilds local config from the controller relation, rewrites config
    files, updates status and, on the leader, refreshes service IPs
    before notifying the base charms and the controller.
    """
    _rebuild_config_from_controller_relation()
    config.save()
    utils.write_configs()
    _update_status()
    if is_leader():
        utils.update_service_ips()
    # apply information to base charms
    _notify_nova()
    _notify_neutron()
    _notify_heat()
    _notify_controller()
def vrouter_plugin_changed():
    """Record a vrouter plugin's settings when it reports ready, then
    attempt the lazy install.

    Relation contract:
      * 'ready'    -- True/False
      * 'settings' -- JSON-serialized dict for contrail-vrouter-agent.conf:
                      {"DEFAULT": {"key1": "value1"}, "SECTION_2": {"key1": "value1"}}
    """
    data = relation_get()
    if data.get("ready", False):
        remote_ip = data.get("private-address")
        known_plugins = json.loads(config.get("plugin-ips", "{}"))
        known_plugins[remote_ip] = json.loads(data.get("settings", "{}"))
        config["plugin-ips"] = json.dumps(known_plugins)
        config.save()
    lazy_install()
def contrail_issu_relation_changed():
    """contrail-issu relation changed hook.

    Syncs orchestrator info into config, refreshes dependent relations
    and status, then feeds the merged backend connection details into the
    ISSU state machine.
    """
    rel_data = relation_get()
    if "orchestrator-info" not in rel_data:
        config.pop("orchestrator_info", None)
    else:
        config["orchestrator_info"] = rel_data["orchestrator-info"]
    config.save()
    update_northbound_relations()
    utils.update_charm_status()

    issu_data = {}
    for detail_key in ("rabbitmq_connection_details",
                       "cassandra_connection_details",
                       "zookeeper_connection_details"):
        issu_data.update(
            common_utils.json_loads(rel_data.get(detail_key), dict()))
    utils.update_issu_state(issu_data)
def contrail_controller_changed():
    """contrail-controller relation changed hook.

    Rebuilds local config from the relation, rewrites config files,
    updates status and pushes data to the base charms; on the leader, a
    change in service IPs triggers a controller notification.
    """
    _rebuild_config_from_controller_relation()
    config.save()
    utils.write_configs()
    _update_status()
    # apply information to base charms
    _notify_nova()
    _notify_neutron()
    _notify_heat()
    # auth_info can affect endpoints
    if is_leader() and utils.update_service_ips():
        _notify_controller()
def install():
    """install hook for the vrouter charm.

    Configures apt sources, installs either the DKMS (kernel) or DPDK
    package set, records the installed agent version, fixes up
    /etc/contrail permissions and primes the provision/vhost state before
    attempting the lazy install.
    """
    status_set("maintenance", "Installing...")
    configure_crashes()
    configure_sources(True, "install-sources", "install-keys")
    apt_upgrade(fatal=True, dist=True)

    packages = list()
    packages.extend(PACKAGES)
    if not config.get("dpdk"):
        packages.extend(PACKAGES_DKMS_INIT)
    else:
        # services must not be started before config files creation
        if not init_is_systemd():
            with open("/etc/init/supervisor-vrouter.override", "w") as conf:
                conf.write("manual\n")
        else:
            # and another way with systemd
            for srv in ("contrail-vrouter-agent", "contrail-vrouter-dpdk"):
                try:
                    os.remove("/etc/systemd/system/{}.service".format(srv))
                except OSError:
                    pass
                os.symlink("/dev/null",
                           "/etc/systemd/system/{}.service".format(srv))
        packages.extend(PACKAGES_DPDK_INIT)
        # apt-get upgrade can install new kernel so we need to re-install
        # packages with dpdk drivers
        # FIX: decode check_output()'s bytes before concatenating with a
        # str prefix — "linux-image-extra-" + bytes raises TypeError on
        # Python 3 (the other check_output below is already decoded).
        kver = check_output(["uname", "-r"]).decode("UTF-8").rstrip()
        packages.append("linux-image-extra-" + kver)
    apt_install(packages, fatal=True)

    try:
        output = check_output(["dpkg-query", "-f", "${Version}\\n", "-W",
                               "contrail-vrouter-agent"])
        version = output.decode('UTF-8').rstrip()
        application_version_set(version)
    except CalledProcessError:
        return None

    status_set("maintenance", "Configuring...")
    os.chmod("/etc/contrail", 0o755)
    os.chown("/etc/contrail", 0, 0)

    if not config.get("dpdk") and not init_is_systemd():
        # supervisord must be started after installation
        service_restart("supervisor-vrouter")

    config["vrouter-expected-provision-state"] = False
    config["vhost-ready"] = False
    config.save()
    lazy_install()
    status_set("blocked", "Missing relation to contrail-controller")
def check_run_prerequisites(name, config_name, update_config_func, services):
    """Check whether the container *name* can (re)run.

    If the container is already launched, optionally re-applies config and
    refreshes service status, returning False (nothing more to do).  If a
    stopped container exists, blocks and returns False.  Otherwise resolves
    the image from config or the attached resource and caches the detected
    version; returns True when the caller may proceed with the run.

    :param name: container name
    :param config_name: config file name passed to apply_config_in_container
    :param update_config_func: callable returning True when config changed,
        or None to skip the sync
    :param services: services mapping for update_services_status
    :return: True when prerequisites are met, else False
    """
    if is_container_launched(name):
        # already launched. just sync config if needed.
        check = True
        if update_config_func and update_config_func():
            check = apply_config_in_container(name, config_name)
        if check:
            update_services_status(name, services)
        return False

    if is_container_present(name):
        status_set(
            "blocked",
            "Container is present but is not running. Run or remove it.")
        return False

    image_name = config.get("image-name")
    image_tag = config.get("image-tag")
    if not image_name or not image_tag:
        image_name, image_tag = load_docker_image(name)
        if not image_name or not image_tag:
            status_set(
                "blocked",
                "No image is available. Resourse is not "
                "attached and there are no image-name/image-tag "
                "defined in charm configuration.")
            return False
        config["image-name"] = image_name
        config["image-tag"] = image_tag
        config.save()

    if "version" not in config:
        # current jinja2 doesn't support version_compare filter.
        # so build version variable as: a.b.c.d => a*1e4 + b*1e2 + c and then
        # compare it with integers like: 40002, 40100
        # 4.0.0 => 40000
        # 4.0.1 => 40001
        # 4.0.2 => 40002
        # 4.1.0 => 40100
        version = get_contrail_version()
        application_version_set(version)
        config["version_with_build"] = version
        version = version.split('-')[0].split('.')
        m = int(version[0])
        r = int(version[1]) if len(version) > 1 else 0
        a = int(version[2]) if len(version) > 2 else 0
        # FIX: integer arithmetic — 1e4/1e2 are floats, so the stored
        # value was e.g. 40002.0 instead of the integer 40002 that the
        # comment above says templates compare against.
        config["version"] = m * 10000 + r * 100 + a
        config.save()

    return True
def lazy_install():
    """Install the vrouter backend (DPDK or DKMS) once preconditions hold.

    No-op when vhost is already ready, or when we are configured to wait
    for an external plugin that has not yet registered its IP.
    """
    if config.get("vhost-ready"):
        return  # already installed

    if config.get("wait-for-external-plugin", False):
        registered = json.loads(config.get("plugin-ips", "{}"))
        if unit_get("private-address") not in registered:
            return  # our external plugin has not reported in yet

    if config.get("dpdk"):
        install_dpdk()
    else:
        install_dkms()

    config["vhost-ready"] = True
    config.save()
def contrail_analytics_changed():
    """contrail-analytics relation changed hook.

    Mirrors auth, orchestrator, rabbitmq, maintenance and controller IP
    values from the relation into config (via _value_changed), then
    advances ZIU, refreshes status and re-notifies proxy services.
    """
    data = relation_get()
    _value_changed(data, "auth-mode", "auth_mode")
    _value_changed(data, "auth-info", "auth_info")
    _value_changed(data, "orchestrator-info", "orchestrator_info")
    _value_changed(data, "rabbitmq_hosts", "rabbitmq_hosts")
    _value_changed(data, "maintenance", "maintenance")
    _value_changed(data, "controller_ips", "controller_ips")
    _value_changed(data, "controller_data_ips", "controller_data_ips")
    config.save()
    # TODO: handle changing of all values
    # TODO: set error if orchestrator is changing and container was started
    utils.update_ziu("analytics-changed")
    utils.update_charm_status()
    _notify_proxy_services()
def contrail_kubernetes_config_changed():
    """contrail-kubernetes-config relation changed hook.

    Caches pod subnets, nested-mode config and the nested_mode flag from
    the relation, then notifies kubernetes and refreshes status.
    """
    def _add_to_config(key):
        # copy a truthy relation value into config under the same key
        value = relation_get(key)
        if value:
            config[key] = value

    _add_to_config("pod_subnets")
    _add_to_config("nested_mode_config")
    nested_mode = relation_get("nested_mode")
    if nested_mode is not None:
        if isinstance(nested_mode, str):
            # FIX: safe_load — the flag arrives over the relation as a
            # string (external input); yaml.load without a Loader is
            # unsafe and deprecated.
            nested_mode = yaml.safe_load(nested_mode)
        config["nested_mode"] = nested_mode
    config.save()
    _notify_kubernetes()
    utils.update_charm_status()
def config_changed():
    """config-changed hook: propagate control-network address changes,
    record SSL availability and refresh container/charm state."""
    utils.update_nrpe_config()

    if config.changed("control-network"):
        addr_settings = {'private-address': common_utils.get_ip()}
        for rel_name in ("contrail-analyticsdb", "analyticsdb-cluster"):
            for rel_id in relation_ids(rel_name):
                relation_set(relation_id=rel_id,
                             relation_settings=addr_settings)

    config["config_analytics_ssl_available"] = \
        common_utils.is_config_analytics_ssl_available()
    config.save()

    docker_utils.config_changed()
    utils.update_charm_status()
def update_charm_status_for_upgrade():
    """Refresh charm status during an upgrade (ISSU or ZIU).

    Under ISSU the server lists come from the saved issu_* config keys;
    under ZIU the 'upgraded' marker is set after the status update.
    """
    ctx = get_context()
    if config.get('maintenance') == 'issu':
        for ctx_key, cfg_key in (
                ("controller_servers", "issu_controller_ips"),
                ("control_servers", "issu_controller_data_ips"),
                ("analytics_servers", "issu_analytics_ips")):
            ctx[ctx_key] = common_utils.json_loads(config.get(cfg_key),
                                                   list())
        # orchestrator_info and auth_info can be taken from old relation
    _update_charm_status(ctx)

    if config.get('maintenance') == 'ziu':
        config["upgraded"] = True
        config.save()
def kube_api_endpoint_changed():
    """kube-api-endpoint relation changed hook.

    Caches the API server hostname/port; on the leader also refreshes the
    kubernetes token.  When anything changed, notifies the controller and
    updates our own status.  (|= is used deliberately so every update call
    runs regardless of earlier results.)
    """
    data = relation_get()
    dirty = _update_config(data, "kubernetes_api_server", "hostname")
    dirty |= _update_config(data, "kubernetes_api_port", "port")
    config.save()

    if is_leader():
        dirty |= utils.update_kubernetes_token()

    if not dirty:
        return

    # notify clients
    _notify_controller()
    # and update self
    utils.update_charm_status()
def config_changed():
    """config-changed hook: update the cluster on control-network change,
    refresh container/charm state and track image-tag changes for ZIU."""
    utils.update_nrpe_config()

    if config.changed("control-network"):
        _update_cluster()
        # only the leader records its own address change cluster-wide
        if is_leader() and _address_changed(local_unit(),
                                            common_utils.get_ip()):
            _update_analyticsdb()

    docker_utils.config_changed()
    utils.update_charm_status()

    # leave it as latest - in case of exception in previous steps
    # config.changed doesn't work sometimes (when we saved config in this hook before)
    if config.get("saved-image-tag") != config["image-tag"]:
        utils.update_ziu("image-tag")
        config["saved-image-tag"] = config["image-tag"]
        config.save()