Example #1
0
def cluster_changed():
    """Handle a peer (controller-cluster) relation change.

    Records the peer's unit/data addresses, optionally maps its rabbitmq
    hostname into /etc/hosts, and refreshes dependent relations and status.
    """
    data = relation_get()
    log("Peer relation changed with {}: {}".format(remote_unit(), data))

    unit_ip = data.get("unit-address")
    unit_data_ip = data.get("data-address")
    if not (unit_ip and unit_data_ip):
        log("There is no unit-address or data-address in the relation")
        return

    if config.get('local-rabbitmq-hostname-resolution'):
        peer_rabbit_hostname = data.get('rabbitmq-hostname')
        if unit_ip and peer_rabbit_hostname:
            utils.update_hosts_file(unit_ip, peer_rabbit_hostname)

    if is_leader():
        peer = remote_unit()
        _address_changed(peer, unit_ip, 'ip')
        _address_changed(peer, unit_data_ip, 'data_ip')

    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
    utils.update_ziu("cluster-changed")
    utils.update_charm_status()
Example #2
0
def config_changed():
    """Handle a charm configuration change.

    Validates auth-mode, pushes address changes to http/https and cluster
    relations, manages local rabbitmq hostname resolution, and refreshes
    charm status and all dependent relations.

    Raises:
        Exception: if auth-mode is not one of rbac/cloud-admin/no-auth.
    """
    utils.update_nrpe_config()
    auth_mode = config.get("auth-mode")
    if auth_mode not in ("rbac", "cloud-admin", "no-auth"):
        raise Exception("Config is invalid. auth-mode must be one of: "
                        "rbac, cloud-admin, no-auth.")

    if config.changed("control-network") or config.changed("data-network"):
        ip = common_utils.get_ip()
        data_ip = common_utils.get_ip(config_param="data-network", fallback=ip)

        rel_settings = {"private-address": ip}
        for rname in ("http-services", "https-services"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid, relation_settings=rel_settings)

        cluster_settings = {"unit-address": ip, "data-address": data_ip}
        if config.get('local-rabbitmq-hostname-resolution'):
            cluster_settings.update({
                "rabbitmq-hostname":
                utils.get_contrail_rabbit_hostname(),
            })
            # this will also take care of updating the hostname in case
            # control-network changes to something different although
            # such host reconfiguration is unlikely
            utils.update_rabbitmq_cluster_hostnames()
        for rid in relation_ids("controller-cluster"):
            relation_set(relation_id=rid, relation_settings=cluster_settings)

        if is_leader():
            _address_changed(local_unit(), ip, 'ip')
            _address_changed(local_unit(), data_ip, 'data_ip')

    if config.changed("local-rabbitmq-hostname-resolution"):
        if config.get("local-rabbitmq-hostname-resolution"):
            # enabling this option will trigger events on other units
            # so their hostnames will be added as -changed events fire
            # we just need to set our hostname
            utils.update_rabbitmq_cluster_hostnames()
        else:
            kvstore = kv()
            rabbitmq_hosts = kvstore.get(key='rabbitmq_hosts', default={})
            # fix: iterate .items() — iterating the dict directly yields
            # keys only and the (ip, hostname) unpack raised ValueError
            for ip, hostname in rabbitmq_hosts.items():
                utils.update_hosts_file(ip, hostname, remove_hostname=True)

    docker_utils.config_changed()
    utils.update_charm_status()

    # leave it after update_charm_status - in case of exception in previous steps
    # config.changed doesn't work sometimes...
    if config.get("saved-image-tag") != config["image-tag"]:
        utils.update_ziu("image-tag")
        config["saved-image-tag"] = config["image-tag"]
        config.save()

    update_http_relations()
    update_https_relations()
    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
Example #3
0
def contrail_controller_changed():
    """Handle a change on the contrail-controller relation.

    Stores orchestrator info, ISSU addresses, the use-internal-endpoints
    flag, and per-agent dpdk flags from the remote unit's relation data,
    then refreshes dependent relations and charm status.
    """
    data = relation_get()
    if "orchestrator-info" in data:
        config["orchestrator_info"] = data["orchestrator-info"]
    if data.get("unit-type") == 'issu':
        config["maintenance"] = 'issu'
        config["issu_controller_ips"] = data.get("issu_controller_ips")
        config["issu_controller_data_ips"] = data.get("issu_controller_data_ips")
        config["issu_analytics_ips"] = data.get("issu_analytics_ips")
    use_internal_endpoints = data.get("use-internal-endpoints")
    if use_internal_endpoints:
        if not isinstance(use_internal_endpoints, bool):
            # fix: safe_load — relation data comes from another unit;
            # yaml.load without a Loader is unsafe and raises TypeError
            # on PyYAML >= 6
            use_internal_endpoints = yaml.safe_load(use_internal_endpoints)
            if not isinstance(use_internal_endpoints, bool):
                use_internal_endpoints = False
        config["use_internal_endpoints"] = use_internal_endpoints

    # TODO: set error if orchestrator is changed and container was started
    # with another orchestrator
    if "dpdk" in data:
        # remote unit is an agent
        address = data["private-address"]
        flags = common_utils.json_loads(config.get("agents-info"), dict())
        flags[address] = data["dpdk"]
        config["agents-info"] = json.dumps(flags)
    config.save()

    update_southbound_relations()
    update_northbound_relations()
    utils.update_ziu("controller-changed")
    utils.update_charm_status()
Example #4
0
def contrail_controller_changed():
    """Handle a change on the contrail-controller relation.

    Stores ISSU addresses, the use-internal-endpoints flag, per-agent
    dpdk flags, and kubemaster cluster info from the remote unit's
    relation data, then rebuilds orchestrator info and refreshes
    dependent relations and charm status.
    """
    data = relation_get()
    if data.get("unit-type") == 'issu':
        config["maintenance"] = 'issu'
        config["issu_controller_ips"] = data.get("issu_controller_ips")
        config["issu_controller_data_ips"] = data.get(
            "issu_controller_data_ips")
        config["issu_analytics_ips"] = data.get("issu_analytics_ips")
    use_internal_endpoints = data.get("use-internal-endpoints")
    if use_internal_endpoints:
        if not isinstance(use_internal_endpoints, bool):
            # fix: safe_load — relation data comes from another unit;
            # yaml.load without a Loader is unsafe and raises TypeError
            # on PyYAML >= 6
            use_internal_endpoints = yaml.safe_load(use_internal_endpoints)
            if not isinstance(use_internal_endpoints, bool):
                use_internal_endpoints = False
        config["use_internal_endpoints"] = use_internal_endpoints

    # TODO: set error if orchestrator is changed and container was started
    # with another orchestrator
    if "dpdk" in data:
        # remote unit is an agent
        address = data["private-address"]
        flags = common_utils.json_loads(config.get("agents-info"), dict())
        flags[address] = data["dpdk"]
        config["agents-info"] = json.dumps(flags)
    if "k8s_info" in data:
        # remote unit is kubemaster
        k8s_info = common_utils.json_loads(data.get("k8s_info"), dict())
        # fix: the merge below must only run when k8s_info parsed to a
        # non-empty dict — previously cluster_info was referenced even
        # when it had never been assigned, raising NameError
        if k8s_info:
            cluster_info = {
                k8s_info.get("cluster_name"): {
                    "pod_subnets": k8s_info.get("pod_subnets"),
                    "kubernetes_workers": k8s_info.get("kubernetes_workers"),
                }
            }
            agents_info = common_utils.json_loads(config.get("agents-info"),
                                                  dict())
            if agents_info.get("k8s_info"):
                agents_info["k8s_info"].update(cluster_info)
            else:
                agents_info["k8s_info"] = cluster_info
            config["agents-info"] = json.dumps(agents_info)
    config.save()

    _rebuild_orchestrator_info()

    update_southbound_relations()
    update_northbound_relations()
    utils.update_ziu("controller-changed")
    utils.update_charm_status()
Example #5
0
def upgrade_ziu(args):
    """Action entry point: kick off the zero-impact-upgrade (ZIU) sequence.

    Signals stage 0 to the peers, runs the "start" ZIU step, and
    refreshes the charm status.
    """
    utils.signal_ziu("ziu", 0)
    utils.update_ziu("start")
    utils.update_charm_status()
Example #6
0
def update_status():
    """Periodic update-status hook: advance ZIU and refresh charm status."""
    utils.update_ziu("update-status")
    utils.update_charm_status()
Example #7
0
def analyticsdb_changed_changed():
    """Advance the ZIU state machine on an analyticsdb relation change."""
    utils.update_ziu("analyticsdb-changed")
Example #8
0
def analytics_changed_departed():
    """Handle an analytics relation changed/departed event.

    Syncs analytics IPs from the relation data and refreshes the
    southbound relations, ZIU state, and charm status.
    """
    rel_data = relation_get()
    _value_changed(rel_data, "analytics_ips", "analytics_ips")
    update_southbound_relations()
    utils.update_ziu("analytics-changed")
    utils.update_charm_status()
Example #9
0
def analytics_changed_departed():
    """Handle an analytics relation changed/departed event.

    Refreshes the southbound relations, ZIU state, and charm status.
    """
    update_southbound_relations()
    utils.update_ziu("analytics-changed")
    utils.update_charm_status()