def leader_elected():
    """Seed or verify the leader-held controller address lists.

    For each address family — control ("ip", relation key "unit-address")
    and data ("data_ip", relation key "data-address") — compare the list
    stored in leader data with the addresses currently visible on the
    controller-cluster relation. When leader data is empty it is seeded
    from the current view; otherwise discrepancies are logged at ERROR.
    Finally the north/southbound relations and charm status are refreshed.
    """
    ip = common_utils.get_ip()
    data_ip = common_utils.get_ip(config_param="data-network", fallback=ip)

    # (leader-data prefix, relation key, this unit's address)
    for prefix, rel_key, addr in [("ip", "unit-address", ip),
                                  ("data_ip", "data-address", data_ip)]:
        ip_list = common_utils.json_loads(
            leader_get("controller_{}_list".format(prefix)), list())
        ips = utils.get_controller_ips(rel_key, addr)
        if not ip_list:
            # first election: store the current cluster view in leader data
            ip_list = ips.values()
            log("{}_LIST: {} {}S: {}".format(
                prefix.upper(), str(ip_list), prefix.upper(), str(ips)))
            settings = {
                "controller_{}_list".format(prefix):
                    json.dumps(list(ip_list)),
                "controller_{}s".format(prefix): json.dumps(ips),
            }
            leader_set(settings=settings)
        else:
            # report drift between the stored list and the live cluster
            current_ip_list = ips.values()
            dead_ips = set(ip_list).difference(current_ip_list)
            new_ips = set(current_ip_list).difference(ip_list)
            if new_ips:
                # message grammar fixed ("a new controllers" -> "new controllers")
                log("There are new controllers that are not in the list: "
                    + str(new_ips), level=ERROR)
            if dead_ips:
                log("There are dead controllers that are in the list: "
                    + str(dead_ips), level=ERROR)

    update_northbound_relations()
    update_southbound_relations()
    utils.update_charm_status()
def config_changed():
    """Handle the config-changed hook.

    Validates auth-mode, propagates address changes to proxy and cluster
    relations, manages rabbitmq hostname resolution, refreshes docker
    config and charm status, and finally performs image-tag bookkeeping
    and relation updates.

    Raises:
        Exception: when auth-mode is not one of rbac/cloud-admin/no-auth.
    """
    utils.update_nrpe_config()
    auth_mode = config.get("auth-mode")
    if auth_mode not in ("rbac", "cloud-admin", "no-auth"):
        raise Exception("Config is invalid. auth-mode must one of: "
                        "rbac, cloud-admin, no-auth.")

    if config.changed("control-network") or config.changed("data-network"):
        ip = common_utils.get_ip()
        data_ip = common_utils.get_ip(config_param="data-network",
                                      fallback=ip)

        rel_settings = {"private-address": ip}
        for rname in ("http-services", "https-services"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid,
                             relation_settings=rel_settings)

        cluster_settings = {"unit-address": ip, "data-address": data_ip}
        if config.get('local-rabbitmq-hostname-resolution'):
            cluster_settings.update({
                "rabbitmq-hostname":
                    utils.get_contrail_rabbit_hostname(),
            })
            # this will also take care of updating the hostname in case
            # control-network changes to something different although
            # such host reconfiguration is unlikely
            utils.update_rabbitmq_cluster_hostnames()
        for rid in relation_ids("controller-cluster"):
            relation_set(relation_id=rid,
                         relation_settings=cluster_settings)

        if is_leader():
            _address_changed(local_unit(), ip, 'ip')
            _address_changed(local_unit(), data_ip, 'data_ip')

    if config.changed("local-rabbitmq-hostname-resolution"):
        if config.get("local-rabbitmq-hostname-resolution"):
            # enabling this option will trigger events on other units
            # so their hostnames will be added as -changed events fire
            # we just need to set our hostname
            utils.update_rabbitmq_cluster_hostnames()
        else:
            kvstore = kv()
            rabbitmq_hosts = kvstore.get(key='rabbitmq_hosts', default={})
            # BUG FIX: rabbitmq_hosts is a dict; iterating it directly
            # yields only keys and the (ip, hostname) unpacking would
            # fail. Iterate items() to get the pairs.
            for host_ip, hostname in rabbitmq_hosts.items():
                utils.update_hosts_file(host_ip, hostname,
                                        remove_hostname=True)

    docker_utils.config_changed()
    utils.update_charm_status()

    # leave it after update_charm_status - in case of exception in previous steps
    # config.changed doesn't work sometimes...
    if config.get("saved-image-tag") != config["image-tag"]:
        utils.update_ziu("image-tag")
        config["saved-image-tag"] = config["image-tag"]
        config.save()

    update_http_relations()
    update_https_relations()
    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
def cluster_joined(rel_id=None):
    """Publish this unit's addresses on the peer relation.

    When local rabbitmq hostname resolution is enabled, also advertise
    the rabbit hostname and pick up any hostname the remote unit may
    have published before this unit existed.
    """
    local_ip = common_utils.get_ip()
    settings = {
        "unit-address": local_ip,
        "data-address": common_utils.get_ip(config_param="data-network",
                                            fallback=local_ip),
    }
    if config.get('local-rabbitmq-hostname-resolution'):
        settings["rabbitmq-hostname"] = \
            utils.get_contrail_rabbit_hostname()
        # a remote unit might have already set rabbitmq-hostname if
        # it came up before this unit was provisioned so the -changed
        # event will not fire for it and we have to handle it here
        data = relation_get()
        log("Joined the peer relation with {}: {}".format(
            remote_unit(), data))
        remote_ip = data.get("unit-address")
        remote_rabbit_hostname = data.get('rabbitmq-hostname')
        if remote_ip and remote_rabbit_hostname:
            utils.update_hosts_file(remote_ip, remote_rabbit_hostname)
    relation_set(relation_id=rel_id, relation_settings=settings)
    utils.update_charm_status()
def cluster_joined(rel_id=None):
    """Advertise unit and data addresses on the cluster peer relation."""
    local_ip = common_utils.get_ip()
    data_ip = common_utils.get_ip(config_param="data-network",
                                  fallback=local_ip)
    peer_settings = {"unit-address": local_ip, "data-address": data_ip}
    if config.get('local-rabbitmq-hostname-resolution'):
        peer_settings.update(
            {"rabbitmq-hostname": utils.get_contrail_rabbit_hostname()})
    relation_set(relation_id=rel_id, relation_settings=peer_settings)
    utils.update_charm_status()
def update_charm_status(update_config=True):
    """Check prerequisites and relations, render config and run the
    contrail-control container.

    Args:
        update_config: when False, skip re-rendering config during the
            prerequisite check.
    """
    update_config_func = render_config if update_config else None
    result = check_run_prerequisites(CONTAINER_NAME, CONFIG_NAME,
                                     update_config_func,
                                     SERVICES_TO_CHECK)
    if not result:
        return

    ctx = get_context()
    missing_relations = []
    if not ctx.get("db_user"):
        # NOTE: Charms don't allow to deploy cassandra in AllowAll mode
        missing_relations.append("contrail-controller-cluster")
    if not ctx.get("analytics_servers"):
        missing_relations.append("contrail-analytics")
    # ROBUSTNESS FIX: fall back to an empty list so a missing/None
    # "controller_servers" key cannot raise TypeError on the `in` test.
    if get_ip() not in (ctx.get("controller_servers") or []):
        missing_relations.append("contrail-cluster")
    if missing_relations:
        status_set('blocked',
                   'Missing relations: ' + ', '.join(missing_relations))
        return
    if not ctx.get("cloud_orchestrator"):
        status_set('blocked',
                   'Missing cloud orchestrator info in relations.')
        return
    if not ctx.get("keystone_ip"):
        status_set('blocked',
                   'Missing auth info in relation with contrail-auth.')
        return
    # TODO: what should happens if relation departed?

    render_config(ctx)
    for port in ("8082", "8080", "8143"):
        open_port(port, "TCP")

    run_container(CONTAINER_NAME, "contrail-control")
def config_changed():
    """Handle config changes: validate auth-mode, propagate address
    changes, refresh docker settings, then update status and relations.

    Raises:
        Exception: when auth-mode is not one of rbac/cloud-admin/no-auth.
    """
    auth_mode = config.get("auth-mode")
    if auth_mode not in ("rbac", "cloud-admin", "no-auth"):
        raise Exception("Config is invalid. auth-mode must one of: "
                        "rbac, cloud-admin, no-auth.")

    if config.changed("control-network"):
        ip = get_ip()
        private_settings = {"private-address": ip}
        for rname in ("contrail-controller", "contrail-analytics",
                      "contrail-analyticsdb", "http-services",
                      "https-services"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid,
                             relation_settings=private_settings)
        cluster_settings = {"unit-address": ip}
        for rid in relation_ids("controller-cluster"):
            relation_set(relation_id=rid,
                         relation_settings=cluster_settings)
        if is_leader():
            _address_changed(local_unit(), ip)

    if config.changed("docker-registry"):
        apply_docker_insecure()
    if config.changed("docker-user") or config.changed("docker-password"):
        docker_login()

    update_charm_status()
    _notify_proxy_services()

    # only the leader updates the north/southbound relations
    if not is_leader():
        return
    update_northbound_relations()
    update_southbound_relations()
def update_nrpe_config():
    """Register Nagios NRPE checks: web UI, API and contrail-status."""
    plugins_dir = '/usr/local/lib/nagios/plugins'
    nrpe_compat = nrpe.NRPE()
    component_ip = common_utils.get_ip()

    common_utils.rsync_nrpe_checks(plugins_dir)
    common_utils.add_nagios_to_sudoers()

    nrpe_compat.add_check(
        shortname='check_contrail_web_ui',
        description='Check Contrail WebUI',
        check_cmd='check_http -H {} -p 8143 -S'.format(component_ip))

    # probe the API over HTTPS only when the backend has SSL enabled
    ssl_on_backend = (config.get("ssl_enabled", False)
                      and common_utils.is_config_analytics_ssl_available())
    api_cmd_template = ('check_http -S -H {} -p 8082' if ssl_on_backend
                        else 'check_http -H {} -p 8082')
    nrpe_compat.add_check(
        shortname='check_contrail_api',
        description='Check Contrail API',
        check_cmd=api_cmd_template.format(component_ip))

    nrpe_compat.add_check(
        shortname='check_contrail_status_' + MODULE,
        description='Check contrail-status',
        check_cmd=common_utils.contrail_status_cmd(MODULE, plugins_dir))

    nrpe_compat.write()
def _update_charm_status(ctx, services_to_run=None):
    """Validate relations and cluster size, then (re)run compose services.

    Args:
        ctx: render context built from config and relations.
        services_to_run: reserved/unused; the full set is config-api,
            control, config-database, webui, redis.
    """
    missing_relations = []
    if not ctx.get("analytics_servers"):
        missing_relations.append("contrail-analytics")
    # ROBUSTNESS FIX: default to an empty list so a missing/None key
    # cannot raise TypeError on the membership test or len() below.
    controller_servers = ctx.get("controller_servers") or []
    if common_utils.get_ip() not in controller_servers:
        missing_relations.append("contrail-cluster")
    if config.get('tls_present', False) != config.get('ssl_enabled', False):
        missing_relations.append("tls-certificates")
    if missing_relations:
        status_set(
            'blocked',
            'Missing or incomplete relations: '
            + ', '.join(missing_relations))
        return
    if len(controller_servers) < config.get("min-cluster-size"):
        status_set(
            'blocked',
            'Count of cluster nodes is not enough ({} < {}).'.format(
                len(controller_servers),
                config.get("min-cluster-size")))
        return
    if not ctx.get("cloud_orchestrator"):
        status_set('blocked',
                   'Missing cloud orchestrator info in relations.')
        return
    if ("openstack" in (ctx.get("cloud_orchestrators") or [])
            and not ctx.get("keystone_ip")):
        status_set('blocked',
                   'Missing auth info in relation with contrail-auth.')
        return
    # TODO: what should happens if relation departed?

    changed_dict = _render_configs(ctx)
    changed = changed_dict["common"]

    # each compose file is (re)run when either the common config or its
    # own service-specific config changed
    docker_utils.compose_run(
        CONFIG_API_CONFIGS_PATH + "/docker-compose.yaml",
        changed or changed_dict["config-api"])
    docker_utils.compose_run(
        CONFIG_DATABASE_CONFIGS_PATH + "/docker-compose.yaml",
        changed or changed_dict["config-database"])
    docker_utils.compose_run(
        CONTROL_CONFIGS_PATH + "/docker-compose.yaml",
        changed or changed_dict["control"])
    docker_utils.compose_run(
        WEBUI_CONFIGS_PATH + "/docker-compose.yaml",
        changed or changed_dict["webui"])
    # redis is a common service that needs own synchronized env
    docker_utils.compose_run(
        REDIS_CONFIGS_PATH + "/docker-compose.yaml",
        changed or changed_dict["redis"])

    common_utils.update_services_status(MODULE, SERVICES)
    if _has_provisioning_finished():
        config['apply-defaults'] = False
def config_changed():
    """Handle config changes for the kubernetes-related charm.

    Raises:
        Exception: when nested_mode is changed after deployment.
    """
    if config.changed("nested_mode"):
        raise Exception('Nested mode cannot be changed after deployment.')
    # TODO: analyze other params and raise exception if readonly params were changed

    utils.update_nrpe_config()

    if config.changed("control-network"):
        addr_settings = {'private-address': common_utils.get_ip()}
        for rname in ("contrail-controller", "contrail-kubernetes-config"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid,
                             relation_settings=addr_settings)
        _notify_contrail_kubernetes_node()

    if (config.changed("kubernetes_api_hostname")
            or config.changed("kubernetes_api_secure_port")):
        _notify_controller()

    config["config_analytics_ssl_available"] = \
        common_utils.is_config_analytics_ssl_available()
    config.save()

    docker_utils.config_changed()
    utils.update_charm_status()
def analytics_joined():
    """Advertise this controller on a newly joined analytics relation."""
    relation_set(relation_settings={
        "private-address": get_ip(),
        'unit-type': 'controller',
    })
    # only the leader refreshes the north/southbound relation data
    if is_leader():
        update_northbound_relations(rid=relation_id())
        update_southbound_relations()
    update_charm_status()
def _https_services_http(vip):
    """Build the haproxy service definition that terminates HTTPS at the
    proxy (vip) and forwards plain HTTP to this unit's web UI backend."""
    server_name = local_unit().replace("/", "-")
    server_addr = common_utils.get_ip()
    options = [
        "mode http",
        "balance source",
        "hash-type consistent",
        "http-request set-header X-Forwarded-Proto https if { ssl_fc }",
        "http-request set-header X-Forwarded-Proto http if !{ ssl_fc }",
        "option httpchk GET /",
        "option forwardfor",
        "redirect scheme https code 301 if { hdr(host) -i "
        + str(vip) + " } !{ ssl_fc }",
        "rsprep ^Location:\\ http://(.*) Location:\\ https://\\1",
    ]
    return [{
        "service_name": "contrail-webui-https",
        "service_host": vip,
        "service_port": 8143,
        "crts": ["DEFAULT"],
        "service_options": options,
        "servers": [[server_name, server_addr, 8143,
                     "check fall 5 inter 2000 rise 2 ssl verify none"]],
    }]
def _https_services(vip):
    """Build the haproxy TCP-passthrough definition for the web UI."""
    unit_name = local_unit().replace("/", "-")
    unit_addr = get_ip()
    service = {
        "service_name": "contrail-webui-https",
        "service_host": vip,
        "service_port": 8143,
        "service_options": [
            "timeout client 86400000",
            "mode tcp",
            "balance roundrobin",
            "cookie SERVERID insert indirect nocache",
            "timeout server 30000",
            "timeout connect 4000",
        ],
        "servers": [[unit_name, unit_addr, 8143,
                     "cookie " + unit_addr
                     + " weight 1 maxconn 1024 check port 8082"]],
    }
    return [service]
def update_cluster_relations(rid=None):
    """Publish unit/data addresses (plus rabbit hostname when enabled)
    on the controller-cluster relation(s); rid targets one relation."""
    targets = [rid] if rid else relation_ids("controller-cluster")
    if not targets:
        return
    local_ip = common_utils.get_ip()
    payload = {
        "unit-address": local_ip,
        "data-address": common_utils.get_ip(config_param="data-network",
                                            fallback=local_ip),
    }
    if config.get('local-rabbitmq-hostname-resolution'):
        payload["rabbitmq-hostname"] = utils.get_contrail_rabbit_hostname()
    for target in targets:
        relation_set(relation_id=target, relation_settings=payload)
def _update_cluster(rid=None):
    """Publish this unit's address on the analyticsdb-cluster relation(s)."""
    targets = [rid] if rid else relation_ids("analyticsdb-cluster")
    if not targets:
        return
    payload = {"unit-address": common_utils.get_ip()}
    for target in targets:
        relation_set(relation_id=target, relation_settings=payload)
def _notify_cluster(rid=None):
    """Publish this unit's address on the kubernetes-master-cluster
    relation(s); rid targets one specific relation."""
    targets = [rid] if rid else relation_ids("kubernetes-master-cluster")
    if not targets:
        return
    payload = {"unit-address": common_utils.get_ip()}
    for target in targets:
        relation_set(relation_id=target, relation_settings=payload)
def _update_tls(rid=None):
    """Send this unit's TLS settings on the tls-certificates relation(s)
    and record that TLS is present in charm config."""
    targets = [rid] if rid else relation_ids("tls-certificates")
    if not targets:
        return
    config['tls_present'] = True
    tls_settings = common_utils.get_tls_settings(common_utils.get_ip())
    for target in targets:
        relation_set(relation_id=target, relation_settings=tls_settings)
def get_controller_ips():
    """Map every controller-cluster unit, including this one, to its
    unit-address."""
    controller_ips = {
        unit: relation_get("unit-address", unit, rid)
        for rid in relation_ids("controller-cluster")
        for unit in related_units(rid)
    }
    # include this unit's own address
    controller_ips[local_unit()] = get_ip()
    return controller_ips
def config_changed():
    """Propagate a control-network change to relations, then refresh
    charm status."""
    if config.changed("control-network"):
        addr_settings = {'private-address': get_ip()}
        for rname in ("contrail-analyticsdb", "analyticsdb-cluster"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid,
                             relation_settings=addr_settings)
    update_charm_status()
def get_controller_ips(address_type, config_param):
    """Collect the requested address of every cluster peer plus this unit.

    Args:
        address_type: relation key to read from each peer unit.
        config_param: config key used to resolve this unit's own address.

    Returns:
        dict mapping unit name to address.
    """
    result = {}
    for rid in relation_ids("controller-cluster"):
        for unit in related_units(rid):
            result[unit] = relation_get(address_type, unit, rid)
    # include this unit's own address
    result[local_unit()] = common_utils.get_ip(config_param=config_param)
    return result
def _http_services():
    """Build the haproxy service definition for the analytics API."""
    server_name = local_unit().replace("/", "-")
    server_addr = get_ip()
    analytics_service = {
        "service_name": "contrail-analytics-api",
        "service_host": "*",
        "service_port": 8081,
        "service_options": ["option nolinger", "balance roundrobin"],
        "servers": [[server_name, server_addr, 8081,
                     "check inter 2000 rise 2 fall 3"]],
    }
    return [analytics_service]
def analyticsdb_ctx():
    """Return the IP addresses of all analyticsdb-cluster peers plus
    this unit's own address."""
    peer_ips = [
        relation_get("private-address", unit, rid)
        for rid in relation_ids("analyticsdb-cluster")
        for unit in related_units(rid)
    ]
    # a peer may not have published its address yet; drop empty entries
    servers = [ip for ip in peer_ips if ip]
    # add this unit's own ip address
    servers.append(common_utils.get_ip())
    return {"analyticsdb_servers": servers}
def analyticsdb_ctx():
    """Return the IP addresses of all analyticsdb-cluster nodes, sorted
    numerically, including this unit's own address."""
    analyticsdb_ip_list = [
        relation_get("private-address", unit, rid)
        for rid in relation_ids("analyticsdb-cluster")
        for unit in related_units(rid)]
    # BUG FIX: a peer that has not yet published its address yields None,
    # which would crash inet_aton below — filter falsy entries (the
    # sibling analyticsdb_ctx implementation already does this).
    analyticsdb_ip_list = [ip for ip in analyticsdb_ip_list if ip]
    # add this unit's own ip address
    analyticsdb_ip_list.append(get_ip())

    def _ip_sort_key(ip):
        # sort by the 32-bit numeric value of the dotted-quad address
        return struct.unpack("!L", inet_aton(ip))[0]

    analyticsdb_ip_list = sorted(analyticsdb_ip_list, key=_ip_sort_key)
    return {"analyticsdb_servers": analyticsdb_ip_list}
def upgrade_charm():
    """Re-announce addresses after a charm upgrade and, on the leader,
    seed leader data with cluster info when it is not stored yet."""
    _update_cluster()
    saved_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    if is_leader() and not saved_info:
        current_info = utils.get_cluster_info("unit-address",
                                              common_utils.get_ip())
        log("Cluster info: {}".format(str(current_info)))
        leader_set(settings={"cluster_info": json.dumps(current_info)})
    _update_analyticsdb()
    utils.update_charm_status()
def leader_elected():
    """On leader election, seed leader data with the current cluster
    info when none is stored, then notify the controller and refresh
    charm status."""
    current_info = utils.get_cluster_info("unit-address",
                                          common_utils.get_ip())
    saved_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    log("Cluster current info: {}".format(str(current_info)))
    log("Cluster saved info: {}".format(str(saved_info)))
    if not saved_info:
        log("Cluster info: {}".format(str(current_info)))
        leader_set(settings={"cluster_info": json.dumps(current_info)})
    _notify_controller()
    utils.update_charm_status()
def config_changed():
    """Handle address and docker registry/credential config changes."""
    if config.changed("control-network"):
        addr_settings = {'private-address': get_ip()}
        for rname in ("contrail-analyticsdb", "analyticsdb-cluster"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid,
                             relation_settings=addr_settings)
    if config.changed("docker-registry"):
        apply_docker_insecure()
    if config.changed("docker-user") or config.changed("docker-password"):
        docker_login()
    update_charm_status()
def get_context():
    """Assemble the render context from charm config plus the
    orchestrator info published via relations."""
    ctx = {
        "module": MODULE,
        "log_level": config.get("log-level", "SYS_NOTICE"),
        "container_registry": config.get("docker-registry"),
        "container_tag": config.get("image-tag"),
        "command_ip": common_utils.get_ip(),
        "contrail_container_tag": config.get("image-tag"),
    }
    ctx.update(
        common_utils.json_loads(config.get("orchestrator_info"), dict()))
    log("CTX: {}".format(ctx))
    return ctx
def update_status():
    """Probe the Contrail Command UI and set unit status accordingly.

    Returns:
        True when the UI responds with the expected title, else False.
    """
    command_ip = common_utils.get_ip()
    waiting_msg = "URL is not ready {}:8079".format(command_ip)
    try:
        output = check_output(
            "curl -k https://{}:8079 | grep '<title>'".format(command_ip),
            shell=True).decode('UTF-8')
    except Exception:
        # curl/grep failed — UI is not up yet
        status_set("waiting", waiting_msg)
        return False
    if 'Contrail Command' not in output:
        status_set("waiting", waiting_msg)
        return False
    status_set("active", "Unit is ready")
    return True
def config_changed():
    """Refresh NRPE checks, propagate address changes, persist the SSL
    availability flag and update charm status."""
    utils.update_nrpe_config()
    if config.changed("control-network"):
        addr_settings = {'private-address': common_utils.get_ip()}
        for rname in ("contrail-analyticsdb", "analyticsdb-cluster"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid,
                             relation_settings=addr_settings)
    config["config_analytics_ssl_available"] = \
        common_utils.is_config_analytics_ssl_available()
    config.save()
    docker_utils.config_changed()
    utils.update_charm_status()
def config_changed():
    """Handle config changes; image-tag bookkeeping is intentionally
    performed last."""
    utils.update_nrpe_config()
    if config.changed("control-network"):
        _update_cluster()
        leader_addr_changed = is_leader() and _address_changed(
            local_unit(), common_utils.get_ip())
        if leader_addr_changed:
            _update_analyticsdb()
    docker_utils.config_changed()
    utils.update_charm_status()

    # leave it as latest - in case of exception in previous steps
    # config.changed doesn't work sometimes (when we saved config in this hook before)
    if config.get("saved-image-tag") != config["image-tag"]:
        utils.update_ziu("image-tag")
        config["saved-image-tag"] = config["image-tag"]
        config.save()
def upgrade_charm():
    """Re-announce all relation data after a charm upgrade; the leader
    also seeds cluster info into leader data when it is missing."""
    _update_cluster()
    saved_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    if is_leader() and not saved_info:
        current_info = utils.get_cluster_info("unit-address",
                                              common_utils.get_ip())
        log("Cluster info: {}".format(str(current_info)))
        leader_set(settings={"cluster_info": json.dumps(current_info)})
    _update_analytics()
    _update_analyticsdb()
    _notify_proxy_services()
    # to update config flags and certs params if any was changed
    _update_tls()
    utils.update_charm_status()