def leader_elected():
    """Seed or verify the controller IP lists kept in leader storage.

    For each address family (control and data network) the stored list is
    either initialized from the currently discovered unit addresses, or
    compared against them with discrepancies logged at ERROR level.
    Dependent relations and charm status are refreshed afterwards.
    """
    for key, addr_opt, net_opt in [("ip", "unit-address", "control-network"),
                                   ("data_ip", "data-address", "data-network")]:
        ip_list = common_utils.json_loads(
            leader_get("controller_{}_list".format(key)), list())
        ips = utils.get_controller_ips(addr_opt, net_opt)
        if not ip_list:
            # first election: persist the discovered addresses
            ip_list = ips.values()
            log("{}_LIST: {} {}S: {}".format(key.upper(), str(ip_list),
                                             key.upper(), str(ips)))
            settings = {
                "controller_{}_list".format(key): json.dumps(list(ip_list)),
                "controller_{}s".format(key): json.dumps(ips)
            }
            leader_set(settings=settings)
        else:
            # list already exists: report discrepancies, do not rewrite
            current_ip_list = ips.values()
            dead_ips = set(ip_list).difference(current_ip_list)
            new_ips = set(current_ip_list).difference(ip_list)
            if new_ips:
                # log message grammar fixed ("There are a new controllers")
                log("There are new controllers that are not in the list: "
                    + str(new_ips), level=ERROR)
            if dead_ips:
                log("There are dead controllers that are in the list: "
                    + str(dead_ips), level=ERROR)

    update_northbound_relations()
    update_southbound_relations()
    utils.update_charm_status()
def controller_ctx():
    """Get the ipaddress of all contrail control nodes"""
    mode = config.get("auth_mode")
    if mode is None:
        # NOTE: auth_mode must be transmitted by controller
        return {}
    return {
        "auth_mode": mode,
        "controller_servers": common_utils.json_loads(
            config.get("controller_ips"), list()),
        "control_servers": common_utils.json_loads(
            config.get("controller_data_ips"), list()),
    }
def get_context():
    """Assemble the template context from config, certificates and leader data."""
    ctx = {}
    ctx.update(json_loads(config.get("orchestrator_info"), dict()))

    ca = decode_cert("ssl_ca")
    ctx["ssl_ca"] = ca
    ctx["ssl_cert"] = decode_cert("ssl_cert")
    ctx["ssl_key"] = decode_cert("ssl_key")
    # SSL is considered enabled only when a non-empty CA is present
    ctx["ssl_enabled"] = (ca is not None and len(ca) > 0)

    ctx["db_user"] = leader_get("db_user")
    ctx["db_password"] = leader_get("db_password")

    ctx.update(servers_ctx())
    ctx.update(analyticsdb_ctx())
    ctx.update(json_loads(config.get("auth_info"), dict()))
    return ctx
def _address_changed(unit, ip):
    """Record a changed address for *unit* in leader storage."""
    ip_list = json_loads(leader_get("controller_ip_list"), list())
    ips = json_loads(leader_get("controller_ips"), dict())
    if ip in ip_list:
        return
    previous = ips.get(unit)
    if previous:
        # replace the stale entry in place to keep list ordering stable
        ip_list[ip_list.index(previous)] = ip
    else:
        ip_list.append(ip)
    ips[unit] = ip
    log("IP_LIST: {} IPS: {}".format(str(ip_list), str(ips)))
    leader_set(controller_ip_list=json.dumps(ip_list),
               controller_ips=json.dumps(ips))
def servers_ctx():
    """Collect controller/control/analytics server lists for templates."""
    analytics_ip_list = []
    for rid in relation_ids("contrail-analyticsdb"):
        for unit in related_units(rid):
            utype = relation_get("unit-type", unit, rid)
            addr = relation_get("private-address", unit, rid)
            # only analytics units with a known address are collected
            if utype == "analytics" and addr:
                analytics_ip_list.append(addr)
    return {
        "controller_servers": common_utils.json_loads(
            config.get("controller_ips"), list()),
        "control_servers": common_utils.json_loads(
            config.get("controller_data_ips"), list()),
        "analytics_servers": analytics_ip_list
    }
def get_context():
    """Build the render context for the config-module containers."""
    ctx = {"module": MODULE}

    # plain pass-through of charm options: (ctx key, config key, default)
    for ctx_key, cfg_key, default in (
            ("log_level", "log-level", "SYS_NOTICE"),
            ("bgp_asn", "bgp-asn", "64512"),
            ("enable_4byte_as", "enable-4byte-as", None),
            ("encap_priority", "encap-priority", None),
            ("vxlan_vn_id_mode", "vxlan-vn-id-mode", None),
            ("flow_export_rate", "flow-export-rate", None),
            ("auth_mode", "auth-mode", None),
            ("cloud_admin_role", "cloud-admin-role", None),
            ("global_read_only_role", "global-read-only-role", None),
            ("configdb_minimum_diskgb", "cassandra-minimum-diskgb", None),
            ("jvm_extra_opts", "cassandra-jvm-extra-opts", None),
            ("container_registry", "docker-registry", None),
            ("contrail_version_tag", "image-tag", None),
            ("config_api_worker_count", "config-api-worker-count", None),
            ("apply_defaults", "apply-defaults", None),
            ("huge_scale", "huge-scale", False)):
        ctx[ctx_key] = config.get(cfg_key, default)
    ctx["contrail_version"] = common_utils.get_contrail_version()

    ctx.update(common_utils.json_loads(config.get("orchestrator_info"), dict()))
    if not ctx.get("cloud_orchestrators"):
        # fall back to a single-element list built from the legacy scalar key
        single = ctx.get("cloud_orchestrator")
        ctx["cloud_orchestrators"] = [single] if single else list()

    # ssl flags intentionally come from config after the orchestrator merge
    ctx["ssl_enabled"] = config.get("ssl_enabled", False)
    ctx["certs_hash"] = (common_utils.get_certs_hash(MODULE)
                         if ctx["ssl_enabled"] else '')
    ctx["config_analytics_ssl_available"] = \
        common_utils.is_config_analytics_ssl_available()
    ctx["use_internal_endpoints"] = config.get("use_internal_endpoints", False)
    ctx["logging"] = docker_utils.render_logging()

    ctx["controller_servers"] = common_utils.json_loads(
        leader_get("controller_ip_list"), list())
    ctx["control_servers"] = common_utils.json_loads(
        leader_get("controller_data_ip_list"), list())
    ctx["analytics_servers"] = get_analytics_list()
    ctx["analyticsdb_enabled"] = analyticsdb_enabled()

    log("CTX: " + str(ctx))
    ctx.update(common_utils.json_loads(config.get("auth_info"), dict()))
    return ctx
def _address_changed(unit, ip):
    """Update the cluster_info map in leader storage.

    Returns True when the stored address for *unit* actually changed,
    False when it was already up to date.
    """
    cluster_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    if unit in cluster_info and cluster_info[unit] == ip:
        return False
    cluster_info[unit] = ip
    log("Cluster info: {}".format(str(cluster_info)))
    leader_set(settings={"cluster_info": json.dumps(cluster_info)})
    return True
def _address_changed(unit, ip, var_name):
    """Store a changed address of kind *var_name* ("ip"/"data_ip") for *unit*."""
    list_key = "controller_{}_list".format(var_name)
    map_key = "controller_{}s".format(var_name)
    ip_list = common_utils.json_loads(leader_get(list_key), list())
    ips = common_utils.json_loads(leader_get(map_key), dict())
    if ip in ip_list:
        return
    previous = ips.get(unit)
    if previous:
        # replace the stale entry in place to keep list ordering stable
        ip_list[ip_list.index(previous)] = ip
    else:
        ip_list.append(ip)
    ips[unit] = ip
    log("{}_LIST: {} {}S: {}".format(var_name.upper(), str(ip_list),
                                     var_name.upper(), str(ips)))
    leader_set(settings={list_key: json.dumps(ip_list),
                         map_key: json.dumps(ips)})
def get_rabbitmq_connection_details():
    """Return connection parameters for the ISSU rabbitmq queue."""
    address_list = common_utils.json_loads(
        leader_get("controller_ip_list"), list())
    return {
        "rabbit_q_name": "vnc-config.issu-queue",
        "rabbit_vhost": "/",
        "rabbit_port": "5673",
        "rabbit_address_list": address_list,
    }
def get_context():
    """Build the render context for the kubemanager containers.

    Combines charm options, leader data and relation-provided controller
    configuration into one dict for template rendering.
    """
    ctx = {}
    ctx["module"] = MODULE
    ctx["log_level"] = config.get("log-level", "SYS_NOTICE")
    ctx["container_registry"] = config.get("docker-registry")
    ctx["contrail_version_tag"] = config.get("image-tag")
    ctx["contrail_version"] = common_utils.get_contrail_version()
    ctx["kubemanager_servers"] = list(common_utils.json_loads(
        leader_get("cluster_info"), dict()).values())
    # get contrail configuration from relation
    ips = common_utils.json_loads(config.get("controller_ips"), list())
    data_ips = common_utils.json_loads(config.get("controller_data_ips"), list())
    ctx["controller_servers"] = ips
    ctx["control_servers"] = data_ips
    ips = common_utils.json_loads(config.get("analytics_servers"), list())
    ctx["analytics_servers"] = ips
    ctx["analyticsdb_enabled"] = config.get("analyticsdb_enabled", True)
    ctx["ssl_enabled"] = config.get("ssl_enabled", False)
    ctx["cluster_name"] = config.get("cluster_name")
    ctx["cluster_project"] = config.get("cluster_project")
    ctx["cluster_network"] = config.get("cluster_network")
    ctx["pod_subnets"] = config.get("pod_subnets")
    ctx["ip_fabric_subnets"] = config.get("ip_fabric_subnets")
    ctx["service_subnets"] = config.get("service_subnets")
    ctx["ip_fabric_forwarding"] = config.get("ip_fabric_forwarding")
    ctx["ip_fabric_snat"] = config.get("ip_fabric_snat")
    ctx["host_network_service"] = config.get("host_network_service")
    ctx["public_fip_pool"] = config.get("public_fip_pool")
    ctx.update(common_utils.json_loads(config.get("orchestrator_info"), dict()))
    if not ctx.get("cloud_orchestrators"):
        # BUG FIX: list("kubernetes") would split the string into single
        # characters; wrap the scalar into a one-element list instead,
        # matching the other charms.
        ctx["cloud_orchestrators"] = [ctx.get("cloud_orchestrator")] \
            if ctx.get("cloud_orchestrator") else list()
    # TODO: switch to use context for this
    ctx["kube_manager_token"] = leader_get("kube_manager_token")
    # prefer the explicit hostname/secure-port pair when both are configured
    if config.get("kubernetes_api_hostname") and config.get("kubernetes_api_secure_port"):
        ctx["kubernetes_api_server"] = config.get("kubernetes_api_hostname")
        ctx["kubernetes_api_secure_port"] = config.get("kubernetes_api_secure_port")
    else:
        ctx["kubernetes_api_server"] = config.get("kubernetes_api_server")
        ctx["kubernetes_api_secure_port"] = config.get("kubernetes_api_port")
    ctx["nested_mode"] = config.get("nested_mode")
    if ctx["nested_mode"]:
        # TODO: create KUBERNETES_NESTED_VROUTER_VIP link-local services in Contrail via config API
        ctx["nested_mode_config"] = common_utils.json_loads(
            config.get("nested_mode_config"), dict())
    ctx["config_analytics_ssl_available"] = \
        common_utils.is_config_analytics_ssl_available()
    ctx["logging"] = docker_utils.render_logging()
    log("CTX: {}".format(ctx))
    ctx.update(common_utils.json_loads(config.get("auth_info"), dict()))
    return ctx
def _rebuild_orchestrator_info():
    """Merge orchestrator-info from all controller relations into config.

    Returns True when the merged info differs from what was stored before,
    i.e. callers must propagate the change.
    """
    cloud_orchestrators = set()
    info = dict()
    for rid in relation_ids("contrail-controller"):
        for unit in related_units(rid):
            rel_info = relation_get('orchestrator-info', unit, rid)
            if not rel_info:
                continue
            rel_info = common_utils.json_loads(rel_info)
            rel_orchestrator = rel_info.pop("cloud_orchestrator", None)
            if not rel_orchestrator:
                continue
            cloud_orchestrators.add(rel_orchestrator)
            info.update(rel_info)
    info['cloud_orchestrators'] = list(cloud_orchestrators)
    info['cloud_orchestrator'] = _choose_main_orchestrator(cloud_orchestrators)
    # BUG FIX: the stored config key is "orchestrator_info" (underscore);
    # the previous read used "orchestrator-info" (hyphen), so current_info
    # was always empty and a change was always reported.
    current_info = common_utils.json_loads(config.get("orchestrator_info"), dict())
    config["orchestrator_info"] = json.dumps(info)
    # returns 'changed' flag. this structure doesn't have nested dicts - will work.
    return current_info != info
def _update_analyticsdb(rid=None):
    """Push the analyticsdb IP list to analyticsdb relation(s).

    An empty list is published until the cluster reaches min-cluster-size.
    """
    rids = [rid] if rid else relation_ids("contrail-analyticsdb")
    if not rids:
        return
    cluster_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    if len(cluster_info) >= config.get("min-cluster-size"):
        ip_list = json.dumps(list(cluster_info.values()))
    else:
        ip_list = '[]'
    for relation in rids:
        relation_set(relation_id=relation,
                     relation_settings={"analyticsdb_ips": ip_list})
def analyticsdb_cluster_departed():
    """Drop a departed unit from cluster_info and notify dependents."""
    if not is_leader():
        return
    unit = remote_unit()
    cluster_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    cluster_info.pop(unit, None)
    log("Unit {} departed. Cluster info: {}".format(unit, str(cluster_info)))
    leader_set(settings={"cluster_info": json.dumps(cluster_info)})
    _update_analyticsdb()
    utils.update_charm_status()
def upgrade_charm():
    """Re-sync cluster data after a charm upgrade."""
    _update_cluster()
    saved_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    if is_leader() and not saved_info:
        # older charm revisions did not persist cluster_info - seed it now
        current_info = utils.get_cluster_info("unit-address",
                                              common_utils.get_ip())
        log("Cluster info: {}".format(str(current_info)))
        leader_set(settings={"cluster_info": json.dumps(current_info)})
    _update_analyticsdb()
    utils.update_charm_status()
def update_southbound_relations(rid=None):
    """Publish controller data to southbound (agent) relations.

    IP lists are published empty until the cluster reaches min-cluster-size
    so that agents do not configure against a partially formed cluster.
    """
    rids = [rid] if rid else relation_ids("contrail-controller")
    if not rids:
        return
    # controller_ips/data_ips are already dumped json
    ip_list = leader_get("controller_ip_list")
    data_ip_list = leader_get("controller_data_ip_list")
    # reuse the value fetched above instead of a second leader_get round-trip
    if len(common_utils.json_loads(ip_list, list())) < config.get("min-cluster-size"):
        ip_list = '[]'
        data_ip_list = '[]'
    settings = {
        "maintenance": config.get("maintenance"),
        "analytics-server": json.dumps(utils.get_analytics_list()),
        "analyticsdb_enabled": utils.analyticsdb_enabled(),
        "auth-mode": config.get("auth-mode"),
        "auth-info": config.get("auth_info"),
        "orchestrator-info": config.get("orchestrator_info"),
        "agents-info": config.get("agents-info"),
        "ssl-enabled": config.get("ssl_enabled") and common_utils.is_config_analytics_ssl_available(),
        # base64 encoded ca-cert
        "ca-cert": config.get("ca_cert"),
        "controller_ips": ip_list,
        "controller_data_ips": data_ip_list,
        "issu_controller_ips": config.get("issu_controller_ips"),
        "issu_controller_data_ips": config.get("issu_controller_data_ips"),
        "issu_analytics_ips": config.get("issu_analytics_ips"),
        "rabbitmq_connection_details": json.dumps(utils.get_rabbitmq_connection_details()),
        "cassandra_connection_details": json.dumps(utils.get_cassandra_connection_details()),
        "zookeeper_connection_details": json.dumps(utils.get_zookeeper_connection_details()),
    }
    for rid in rids:
        relation_set(relation_id=rid, relation_settings=settings)
def cluster_departed():
    """Remove a departed controller unit's addresses from leader storage."""
    if not is_leader():
        return
    unit = remote_unit()
    for var_name in ["ip", "data_ip"]:
        map_key = "controller_{}s".format(var_name)
        ips = common_utils.json_loads(leader_get(map_key), dict())
        if unit not in ips:
            # NOTE(review): this returns immediately (skipping the relation
            # updates below) instead of continuing with the next address
            # kind - preserved as-is; confirm this is intentional.
            return
        old_ip = ips.pop(unit)
        list_key = "controller_{}_list".format(var_name)
        ip_list = common_utils.json_loads(leader_get(list_key), list())
        ip_list.remove(old_ip)
        log("{}_LIST: {} {}S: {}".format(var_name.upper(), str(ip_list),
                                         var_name.upper(), str(ips)))
        leader_set(settings={list_key: json.dumps(ip_list),
                             map_key: json.dumps(ips)})
    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
    utils.update_charm_status()
def get_context():
    """Build the render context from config and the sub-contexts."""
    ctx = {}
    ctx["log_level"] = config.get("log-level", "SYS_NOTICE")
    ctx["version"] = config.get("version", "4.0.0")
    ctx.update(json_loads(config.get("orchestrator_info"), dict()))
    ctx["ssl_enabled"] = config.get("ssl_enabled", False)
    # these config keys map 1:1 into the context
    for key in ("db_user", "db_password", "rabbitmq_user", "rabbitmq_password",
                "rabbitmq_vhost", "rabbitmq_hosts", "configdb_cassandra_user",
                "configdb_cassandra_password"):
        ctx[key] = config.get(key)
    for extra in (controller_ctx(), analytics_ctx(), analyticsdb_ctx()):
        ctx.update(extra)
    log("CTX: {}".format(ctx))
    ctx.update(json_loads(config.get("auth_info"), dict()))
    return ctx
def leader_elected():
    """On leader election, seed cluster_info if it was never stored."""
    current_info = utils.get_cluster_info("unit-address", common_utils.get_ip())
    saved_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    log("Cluster current info: {}".format(str(current_info)))
    log("Cluster saved info: {}".format(str(saved_info)))
    if not saved_info:
        log("Cluster info: {}".format(str(current_info)))
        leader_set(settings={"cluster_info": json.dumps(current_info)})
        _notify_controller()
    utils.update_charm_status()
def contrail_issu_relation_changed():
    """React to ISSU relation data: sync orchestrator info and ISSU state."""
    data = relation_get()
    if "orchestrator-info" in data:
        config["orchestrator_info"] = data["orchestrator-info"]
    else:
        config.pop("orchestrator_info", None)
    config.save()
    update_northbound_relations()
    utils.update_charm_status()
    # merge all connection-detail blobs into one dict for the ISSU state
    issu_data = dict()
    for name in ("rabbitmq_connection_details",
                 "cassandra_connection_details",
                 "zookeeper_connection_details"):
        issu_data.update(common_utils.json_loads(data.get(name), dict()))
    utils.update_issu_state(issu_data)
def get_context():
    """Build the render context for the vrouter agent containers."""
    ctx = {"module": MODULE}
    # plain pass-through of charm options: (ctx key, config key, default)
    for ctx_key, cfg_key, default in (
            ("ssl_enabled", "ssl_enabled", False),
            ("log_level", "log-level", "SYS_NOTICE"),
            ("container_registry", "docker-registry", None),
            ("contrail_version_tag", "image-tag", None),
            ("sriov_physical_interface", "sriov-physical-interface", None),
            ("sriov_numvfs", "sriov-numvfs", None),
            ("max_vm_flows", "max-vm-flows", None)):
        ctx[ctx_key] = config.get(cfg_key, default)
    ctx["contrail_version"] = common_utils.get_contrail_version()

    # NOTE: charm should set non-fqdn hostname to be compatible with R5.0 deployments
    use_fqdn = config.get("hostname-use-fqdn", True)
    ctx["hostname"] = socket.getfqdn() if use_fqdn else socket.gethostname()

    iface = config.get("physical-interface")
    ctx["physical_interface"] = iface
    gateway_ip = config.get("vhost-gateway")
    if gateway_ip == "auto":
        gateway_ip = _get_iface_gateway_ip(iface)
    ctx["vrouter_gateway"] = gateway_ip if gateway_ip else ''

    dpdk = config["dpdk"]
    ctx["agent_mode"] = "dpdk" if dpdk else "kernel"
    if dpdk:
        ctx["dpdk_additional_args"] = _get_dpdk_args()
        ctx["dpdk_driver"] = config.get("dpdk-driver")
        ctx["dpdk_coremask"] = config.get("dpdk-coremask")
        ctx["dpdk_hugepages"] = _get_hugepages()
    else:
        ctx["hugepages_1g"] = config.get("kernel-hugepages-1g")
        ctx["hugepages_2m"] = config.get("kernel-hugepages-2m")

    ctx.update(tsn_ctx())
    ctx.update(common_utils.json_loads(config.get("orchestrator_info"), dict()))

    ctx["controller_servers"] = common_utils.json_loads(
        config.get("controller_ips"), list())
    ctx["control_servers"] = common_utils.json_loads(
        config.get("controller_data_ips"), list())
    ctx["analytics_servers"] = common_utils.json_loads(
        config.get("analytics_servers"), list())
    ctx["config_analytics_ssl_available"] = config.get(
        "config_analytics_ssl_available", False)

    # per-host plugin settings keyed by this unit's private address
    if "plugin-ips" in config:
        plugin_ips = common_utils.json_loads(config["plugin-ips"], dict())
        my_ip = unit_get("private-address")
        if my_ip in plugin_ips:
            ctx["plugin_settings"] = plugin_ips[my_ip]

    ctx["logging"] = docker_utils.render_logging()
    log("CTX: " + str(ctx))
    ctx.update(common_utils.json_loads(config.get("auth_info"), dict()))
    return ctx
def get_context():
    """Build the render context; tolerates a legacy string ssl_enabled value."""
    ctx = {}
    ctx["module"] = MODULE
    ctx["log_level"] = config.get("log-level", "SYS_NOTICE")
    # previous versions of charm may store next value in config as string.
    ssl_enabled = config.get("ssl_enabled", False)
    if not isinstance(ssl_enabled, bool):
        # SECURITY/DEPRECATION FIX: yaml.load without an explicit Loader is
        # unsafe (can construct arbitrary objects) and deprecated; safe_load
        # parses the same scalar booleans.
        ssl_enabled = yaml.safe_load(ssl_enabled)
        if not isinstance(ssl_enabled, bool):
            ssl_enabled = False
    ctx["ssl_enabled"] = ssl_enabled
    ctx["container_registry"] = config.get("docker-registry")
    ctx["contrail_version_tag"] = config.get("image-tag")
    ctx.update(common_utils.json_loads(config.get("orchestrator_info"), dict()))
    ctx["config_analytics_ssl_available"] = config.get(
        "config_analytics_ssl_available", False)
    ctx["logging"] = docker_utils.render_logging()
    ctx["contrail_version"] = common_utils.get_contrail_version()
    ctx.update(controller_ctx())
    ctx.update(analytics_ctx())
    ctx.update(analyticsdb_ctx())
    log("CTX: {}".format(ctx))
    ctx.update(common_utils.json_loads(config.get("auth_info"), dict()))
    return ctx
def get_context():
    """Build the render context for this module's container."""
    ctx = {
        "module": MODULE,
        "log_level": config.get("log-level", "SYS_NOTICE"),
        "container_registry": config.get("docker-registry"),
        "container_tag": config.get("image-tag"),
        "command_ip": common_utils.get_ip(),
        "contrail_container_tag": config.get("image-tag"),
    }
    ctx.update(common_utils.json_loads(config.get("orchestrator_info"), dict()))
    log("CTX: {}".format(ctx))
    return ctx
def get_context():
    """Build the render context for this module's containers."""
    ctx = {"module": MODULE}
    ctx["log_level"] = config.get("log-level", "SYS_NOTICE")
    ctx["container_registry"] = config.get("docker-registry")
    ctx["contrail_version_tag"] = config.get("image-tag")
    ctx["contrail_version"] = common_utils.get_contrail_version()
    ctx["analytics_servers"] = common_utils.json_loads(
        config.get("analytics_servers"), list())
    ctx["ssl_enabled"] = config.get("ssl_enabled", False)
    ctx["certs_hash"] = (common_utils.get_certs_hash(MODULE)
                         if ctx["ssl_enabled"] else '')
    # rabbitmq and notification options map 1:1 from config
    for ctx_key, cfg_key in (("rabbitmq_user", "rabbit-user"),
                             ("rabbitmq_password", "rabbit-password"),
                             ("rabbitmq_hostname", "rabbit-hostname"),
                             ("rabbitmq_vhost", "rabbit-vhost"),
                             ("ironic_notification_level",
                              "ironic-notification-level")):
        ctx[ctx_key] = config.get(cfg_key)
    ctx.update(common_utils.json_loads(config.get("orchestrator_info"), dict()))
    if not ctx.get("cloud_orchestrators"):
        # fall back to a single-element list built from the legacy scalar key
        single = ctx.get("cloud_orchestrator")
        ctx["cloud_orchestrators"] = [single] if single else list()
    ctx["config_analytics_ssl_available"] = \
        common_utils.is_config_analytics_ssl_available()
    ctx["logging"] = docker_utils.render_logging()
    log("CTX: {}".format(ctx))
    ctx.update(common_utils.json_loads(config.get("auth_info"), dict()))
    return ctx
def contrail_controller_changed():
    """Mirror controller relation data into local config and react to it."""
    data = relation_get()
    log("RelData: " + str(data))

    # copy (or drop) pass-through keys from relation data into charm config
    for cfg_key, rel_key in (
            ("analytics_servers", "analytics-server"),
            ("analyticsdb_enabled", "analyticsdb_enabled"),
            ("auth_info", "auth-info"),
            ("orchestrator_info", "orchestrator-info"),
            ("controller_ips", "controller_ips"),
            ("controller_data_ips", "controller_data_ips"),
            ("issu_controller_ips", "issu_controller_ips"),
            ("issu_controller_data_ips", "issu_controller_data_ips"),
            ("issu_analytics_ips", "issu_analytics_ips")):
        if rel_key in data:
            config[cfg_key] = data[rel_key]
        else:
            config.pop(cfg_key, None)

    # ziu markers take precedence over the issu marker
    maintenance = None
    if "maintenance" in data:
        maintenance = "issu"
    if "ziu" in data or "ziu_done" in data:
        maintenance = "ziu"
    if maintenance:
        config["maintenance"] = maintenance
    else:
        config.pop("maintenance", None)

    # pick up pod subnets for the k8s cluster this worker belongs to
    info = common_utils.json_loads(data.get("agents-info"), dict())
    k8s_info = info.get("k8s_info")
    if k8s_info:
        ip = unit_private_ip()
        for cluster in k8s_info:
            workers = k8s_info[cluster].get("kubernetes_workers", [])
            if workers and ip in workers:
                config["pod_subnets"] = k8s_info[cluster].get("pod_subnets")
                break

    if "controller_data_ips" in data:
        settings = {"vhost-address": utils.get_vhost_ip()}
        for rid in relation_ids("agent-cluster"):
            relation_set(relation_id=rid, relation_settings=settings)

    utils.update_ziu("controller-changed")
    utils.update_charm_status()
def get_context():
    """Build the render context for this module's containers."""
    ctx = {
        "module": MODULE,
        "log_level": config.get("log-level", "SYS_NOTICE"),
        "container_registry": config.get("docker-registry"),
        "contrail_version_tag": config.get("image-tag"),
        "nested_mode": config.get("nested_mode"),
    }
    if ctx["nested_mode"]:
        ctx["nested_mode_config"] = common_utils.json_loads(
            config.get("nested_mode_config"), dict())
    ctx["logging"] = docker_utils.render_logging()
    log("CTX: {}".format(ctx))
    return ctx
def get_analytics_list():
    """Return the analytics node IPs, preferring the value stored in config."""
    stored = config.get("analytics_ips")
    if stored is not None:
        return common_utils.json_loads(stored, list())
    # NOTE: use old way of collecting ips.
    # previously we collected units by private-address
    # now we take collected list from leader through relation
    log("analytics_ips is not in config. calculating...")
    candidates = (relation_get("private-address", unit, rid)
                  for rid in relation_ids("contrail-analytics")
                  for unit in related_units(rid))
    return [ip for ip in candidates if ip]
def contrail_controller_changed():
    """Handle controller relation change: orchestrator info and agent flags."""
    data = relation_get()
    if "orchestrator-info" in data:
        config["orchestrator_info"] = data["orchestrator-info"]
    # TODO: set error if orchestrator is changed and container was started
    # with another orchestrator
    if is_leader():
        if "dpdk" in data:
            # remote unit is an agent
            address = data["private-address"]
            flags = json_loads(config.get("agents-info"), dict())
            flags[address] = data["dpdk"]
            config["agents-info"] = json.dumps(flags)
            config.save()
        update_southbound_relations()
        update_northbound_relations()
    update_charm_status()
def upgrade_charm():
    """Refresh cluster data and dependent relations after a charm upgrade."""
    _update_cluster()
    saved_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    if is_leader() and not saved_info:
        # older charm revisions did not persist cluster_info - seed it now
        current_info = utils.get_cluster_info("unit-address",
                                              common_utils.get_ip())
        log("Cluster info: {}".format(str(current_info)))
        leader_set(settings={"cluster_info": json.dumps(current_info)})
    _update_analytics()
    _update_analyticsdb()
    _notify_proxy_services()
    # to update config flags and certs params if any was changed
    _update_tls()
    utils.update_charm_status()
def get_context():
    """Build the client context: API servers, SSL flags and auth info."""
    ctx = {}
    ctx["api_servers"] = common_utils.json_loads(
        config.get("controller_ips"), list())
    # pick up SSL flags/CA from any controller unit that publishes them
    for rid in relation_ids("contrail-controller"):
        for unit in related_units(rid):
            ssl_enabled = relation_get("ssl-enabled", unit, rid)
            if ssl_enabled:
                ctx["ssl_enabled"] = ssl_enabled
            ca_cert = relation_get("ca-cert", unit, rid)
            if ca_cert:
                ctx["ca_cert_data"] = ca_cert
    log("CTX: " + str(ctx))
    # CONSISTENCY FIX: use the tolerant json_loads helper (as every other
    # get_context in these charms does) instead of bare json.loads, which
    # raises on missing/malformed auth_info.
    ctx.update(common_utils.json_loads(config.get("auth_info"), dict()))
    return ctx
def update_northbound_relations(rid=None):
    """Publish controller data to analytics/analyticsdb relations.

    IP lists are published empty until the cluster reaches min-cluster-size
    so that northbound services do not configure a partially formed cluster.
    """
    rids = [rid] if rid else \
        relation_ids("contrail-analytics") + relation_ids("contrail-analyticsdb")
    if not rids:
        return
    # controller_ips/data_ips are already dumped json
    ip_list = leader_get("controller_ip_list")
    data_ip_list = leader_get("controller_data_ip_list")
    # reuse the value fetched above instead of a second leader_get round-trip
    if len(common_utils.json_loads(ip_list, list())) < config.get("min-cluster-size"):
        ip_list = '[]'
        data_ip_list = '[]'
    settings = {
        "unit-type": "controller",
        "maintenance": config.get("maintenance"),
        "auth-mode": config.get("auth-mode"),
        "auth-info": config.get("auth_info"),
        "orchestrator-info": config.get("orchestrator_info"),
        "controller_ips": ip_list,
        "controller_data_ips": data_ip_list,
    }
    for rid in rids:
        relation_set(relation_id=rid, relation_settings=settings)