Example #1
def leader_elected():
    if not leader_get("db_user"):
        user = "******"
        password = uuid.uuid4().hex
        leader_set(db_user=user, db_password=password)

    if not leader_get("rabbitmq_password_int"):
        password = uuid.uuid4().hex
        leader_set(rabbitmq_password_int=password)
        update_northbound_relations()

    ip_list = json_loads(leader_get("controller_ip_list"), list())
    ips = get_controller_ips()
    if not ip_list:
        ip_list = list(ips.values())
        log("IP_LIST: {}    IPS: {}".format(str(ip_list), str(ips)))
        leader_set(controller_ip_list=json.dumps(ip_list),
                   controller_ips=json.dumps(ips))
        # TODO: pass this list to all south/north relations
    else:
        current_ip_list = ips.values()
        dead_ips = set(ip_list).difference(current_ip_list)
        new_ips = set(current_ip_list).difference(ip_list)
        if new_ips:
            log("There are a new controllers that are not in the list: " +
                str(new_ips),
                level=ERROR)
        if dead_ips:
            log("There are a dead controllers that are in the list: " +
                str(dead_ips),
                level=ERROR)

    update_charm_status()
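
Several of these hooks rely on a json_loads helper (imported directly here, as common_utils.json_loads elsewhere) that is not part of the listing. A minimal sketch of what it is assumed to do, judging from how it is called above: parse the stored JSON if present, otherwise return the supplied default.

import json

def json_loads(data, default=None):
    # Assumed behaviour: leader_get()/relation_get() return None or "" when
    # nothing has been stored yet, in which case the caller's default
    # (list(), dict(), ...) is returned instead of raising an error.
    return json.loads(data) if data else default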
Example #2
def cluster_joined(rel_id=None):
    ip = common_utils.get_ip()
    settings = {
        "unit-address": ip,
        "data-address": common_utils.get_ip(config_param="data-network", fallback=ip)
    }

    if config.get('local-rabbitmq-hostname-resolution'):
        settings.update({
            "rabbitmq-hostname": utils.get_contrail_rabbit_hostname(),
        })

        # a remote unit might have already set rabbitmq-hostname if
        # it came up before this unit was provisioned so the -changed
        # event will not fire for it and we have to handle it here
        data = relation_get()
        log("Joined the peer relation with {}: {}".format(remote_unit(), data))
        ip = data.get("unit-address")
        rabbit_hostname = data.get('rabbitmq-hostname')
        if ip and rabbit_hostname:
            utils.update_hosts_file(ip, rabbit_hostname)

    relation_set(relation_id=rel_id, relation_settings=settings)
    utils.update_charm_status()
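
utils.update_hosts_file() is not shown in these examples. A rough sketch under the assumption that it simply maintains an "ip hostname" line for the RabbitMQ peer in /etc/hosts (the real helper presumably also records the mapping in the unit's kv store, since config_changed() below reads a rabbitmq_hosts map back from it):

def update_hosts_file(ip, hostname, remove_hostname=False):
    # Hypothetical sketch: add or remove an "ip hostname" entry in /etc/hosts
    # so the local RabbitMQ node can resolve its cluster peers by name.
    with open("/etc/hosts", "r") as f:
        lines = [l for l in f if hostname not in l.split()]
    if not remove_hostname:
        lines.append("{} {}\n".format(ip, hostname))
    with open("/etc/hosts", "w") as f:
        f.writelines(lines)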
Example #3
def config_changed():
    utils.update_nrpe_config()
    auth_mode = config.get("auth-mode")
    if auth_mode not in ("rbac", "cloud-admin", "no-auth"):
        raise Exception("Config is invalid. auth-mode must one of: "
                        "rbac, cloud-admin, no-auth.")

    if config.changed("control-network") or config.changed("data-network"):
        ip = common_utils.get_ip()
        data_ip = common_utils.get_ip(config_param="data-network", fallback=ip)

        rel_settings = {"private-address": ip}
        for rname in ("http-services", "https-services"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid, relation_settings=rel_settings)

        cluster_settings = {"unit-address": ip, "data-address": data_ip}
        if config.get('local-rabbitmq-hostname-resolution'):
            cluster_settings.update({
                "rabbitmq-hostname": utils.get_contrail_rabbit_hostname(),
            })
            # this will also take care of updating the hostname in case
            # control-network changes to something different although
            # such host reconfiguration is unlikely
            utils.update_rabbitmq_cluster_hostnames()
        for rid in relation_ids("controller-cluster"):
            relation_set(relation_id=rid, relation_settings=cluster_settings)

        if is_leader():
            _address_changed(local_unit(), ip, 'ip')
            _address_changed(local_unit(), data_ip, 'data_ip')

    if config.changed("local-rabbitmq-hostname-resolution"):
        if config.get("local-rabbitmq-hostname-resolution"):
            # enabling this option will trigger events on other units
            # so their hostnames will be added as -changed events fire
            # we just need to set our hostname
            utils.update_rabbitmq_cluster_hostnames()
        else:
            kvstore = kv()
            rabbitmq_hosts = kvstore.get(key='rabbitmq_hosts', default={})
            for ip, hostname in rabbitmq_hosts.items():
                utils.update_hosts_file(ip, hostname, remove_hostname=True)

    docker_utils.config_changed()
    utils.update_charm_status()

    # keep this after update_charm_status in case an exception occurs in the
    # previous steps; config.changed() is not always reliable here
    if config.get("saved-image-tag") != config["image-tag"]:
        utils.update_ziu("image-tag")
        config["saved-image-tag"] = config["image-tag"]
        config.save()

    update_http_relations()
    update_https_relations()
    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
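
The else-branch above reads a rabbitmq_hosts map back from the charm's key/value store (charmhelpers.core.unitdata) to undo the /etc/hosts entries. The write side does not appear in these examples; a hedged sketch of what it presumably looks like whenever a peer's hostname is recorded, complementing the update_hosts_file sketch earlier:

from charmhelpers.core.unitdata import kv

def remember_rabbitmq_host(ip, hostname):
    # Hypothetical counterpart of the read in config_changed(): persist the
    # ip -> hostname mapping so it can be removed again when
    # local-rabbitmq-hostname-resolution is switched off.
    kvstore = kv()
    rabbitmq_hosts = kvstore.get(key='rabbitmq_hosts', default={})
    rabbitmq_hosts[ip] = hostname
    kvstore.set(key='rabbitmq_hosts', value=rabbitmq_hosts)
    kvstore.flush()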
Example #4
def cluster_changed():
    data = relation_get()
    log("Peer relation changed with {}: {}".format(remote_unit(), data))

    ip = data.get("unit-address")
    data_ip = data.get("data-address")
    if not ip or not data_ip:
        log("There is no unit-address or data-address in the relation")
        return

    if config.get('local-rabbitmq-hostname-resolution'):
        rabbit_hostname = data.get('rabbitmq-hostname')
        if ip and rabbit_hostname:
            utils.update_hosts_file(ip, rabbit_hostname)

    if is_leader():
        unit = remote_unit()
        _address_changed(unit, ip, 'ip')
        _address_changed(unit, data_ip, 'data_ip')

    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
    utils.update_ziu("cluster-changed")
    utils.update_charm_status()
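
_address_changed() is called from several hooks but not included in the listing. Based on how cluster_departed() (next example) unwinds its effect, it presumably keeps two pieces of leader data per address type: a unit -> address map and a flat address list. A minimal sketch, assuming that behaviour and the same imports as the examples (charmhelpers' leader_get/leader_set plus the charm's own common_utils):

from charmhelpers.core.hookenv import leader_get, leader_set

def _address_changed(unit, ip, var_name):
    # Hypothetical sketch: keep controller_{ip,data_ip}s (a unit -> address
    # map) and controller_{ip,data_ip}_list (a plain address list) in leader
    # storage in sync with what the peer units report.
    ips = common_utils.json_loads(
        leader_get("controller_{}s".format(var_name)), dict())
    ip_list = common_utils.json_loads(
        leader_get("controller_{}_list".format(var_name)), list())
    if ips.get(unit) == ip:
        return
    old_ip = ips.get(unit)
    if old_ip in ip_list:
        ip_list.remove(old_ip)
    ips[unit] = ip
    ip_list.append(ip)
    leader_set(settings={
        "controller_{}s".format(var_name): json.dumps(ips),
        "controller_{}_list".format(var_name): json.dumps(ip_list)
    })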
Example #5
def cluster_departed():
    if is_leader():
        unit = remote_unit()
        for var_name in ["ip", "data_ip"]:
            ips = common_utils.json_loads(
                leader_get("controller_{}s".format(var_name)), dict())
            if unit not in ips:
                return
            old_ip = ips.pop(unit)
            ip_list = common_utils.json_loads(
                leader_get("controller_{}_list".format(var_name)), list())
            ip_list.remove(old_ip)
            log("{}_LIST: {}    {}S: {}".format(var_name.upper(), str(ip_list),
                                                var_name.upper(), str(ips)))

            settings = {
                "controller_{}_list".format(var_name): json.dumps(ip_list),
                "controller_{}s".format(var_name): json.dumps(ips)
            }
            leader_set(settings=settings)

    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
    utils.update_charm_status()
Example #6
def config_changed():
    auth_mode = config.get("auth-mode")
    if auth_mode not in ("rbac", "cloud-admin", "no-auth"):
        raise Exception("Config is invalid. auth-mode must one of: "
                        "rbac, cloud-admin, no-auth.")

    if config.changed("control-network"):
        ip = get_ip()
        settings = {"private-address": ip}
        rnames = ("contrail-controller", "contrail-analytics",
                  "contrail-analyticsdb", "http-services", "https-services")
        for rname in rnames:
            for rid in relation_ids(rname):
                relation_set(relation_id=rid, relation_settings=settings)
        settings = {"unit-address": ip}
        for rid in relation_ids("controller-cluster"):
            relation_set(relation_id=rid, relation_settings=settings)
        if is_leader():
            _address_changed(local_unit(), ip)

    if config.changed("docker-registry"):
        apply_docker_insecure()
    if config.changed("docker-user") or config.changed("docker-password"):
        docker_login()

    update_charm_status()
    _notify_proxy_services()

    if not is_leader():
        return

    update_northbound_relations()
    update_southbound_relations()
Example #7
def leader_elected():
    ip = common_utils.get_ip()
    data_ip = common_utils.get_ip(config_param="data-network", fallback=ip)
    for var_name in [("ip", "unit-address", ip),
                     ("data_ip", "data-address", data_ip)]:
        ip_list = common_utils.json_loads(
            leader_get("controller_{}_list".format(var_name[0])), list())
        ips = utils.get_controller_ips(var_name[1], var_name[2])
        if not ip_list:
            ip_list = ips.values()
            log("{}_LIST: {}    {}S: {}".format(var_name[0].upper(),
                                                str(ip_list),
                                                var_name[0].upper(), str(ips)))
            settings = {
                "controller_{}_list".format(var_name[0]):
                json.dumps(list(ip_list)),
                "controller_{}s".format(var_name[0]): json.dumps(ips)
            }
            leader_set(settings=settings)
        else:
            current_ip_list = ips.values()
            dead_ips = set(ip_list).difference(current_ip_list)
            new_ips = set(current_ip_list).difference(ip_list)
            if new_ips:
                log("There are a new controllers that are not in the list: " +
                    str(new_ips),
                    level=ERROR)
            if dead_ips:
                log("There are a dead controllers that are in the list: " +
                    str(dead_ips),
                    level=ERROR)

    update_northbound_relations()
    update_southbound_relations()
    utils.update_charm_status()
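
utils.get_controller_ips() is also outside the listing. Judging by the arguments passed above (a relation key and this unit's own address), it most likely gathers that key from every peer on the controller-cluster relation and adds the local unit. A sketch under that assumption:

from charmhelpers.core.hookenv import (local_unit, related_units,
                                        relation_get, relation_ids)

def get_controller_ips(address_type, own_ip):
    # Hypothetical sketch: map every known controller unit to the address it
    # published on the peer relation, including this (leader) unit itself.
    ips = {local_unit(): own_ip}
    for rid in relation_ids("controller-cluster"):
        for unit in related_units(rid):
            ip = relation_get(address_type, unit, rid)
            if ip:
                ips[unit] = ip
    return ips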
Example #8
def upgrade_charm():
    # NOTE: the old image cannot be deleted while the container is running.
    # TODO: think about killing the container

    # NOTE: this hook can fire when either the resource or the charm code has
    # changed, so if the code changed we may need to update the config
    update_charm_status()
Example #9
def contrail_controller_changed():
    data = relation_get()
    if "orchestrator-info" in data:
        config["orchestrator_info"] = data["orchestrator-info"]
    if data.get("unit-type") == 'issu':
        config["maintenance"] = 'issu'
        config["issu_controller_ips"] = data.get("issu_controller_ips")
        config["issu_controller_data_ips"] = data.get("issu_controller_data_ips")
        config["issu_analytics_ips"] = data.get("issu_analytics_ips")
    use_internal_endpoints = data.get("use-internal-endpoints")
    if use_internal_endpoints:
        if not isinstance(use_internal_endpoints, bool):
            use_internal_endpoints = yaml.safe_load(use_internal_endpoints)
            if not isinstance(use_internal_endpoints, bool):
                use_internal_endpoints = False
        config["use_internal_endpoints"] = use_internal_endpoints

    # TODO: set error if orchestrator is changed and container was started
    # with another orchestrator
    if "dpdk" in data:
        # remote unit is an agent
        address = data["private-address"]
        flags = common_utils.json_loads(config.get("agents-info"), dict())
        flags[address] = data["dpdk"]
        config["agents-info"] = json.dumps(flags)
    config.save()

    update_southbound_relations()
    update_northbound_relations()
    utils.update_ziu("controller-changed")
    utils.update_charm_status()
Example #10
def analytics_joined():
    settings = {"private-address": get_ip(), 'unit-type': 'controller'}
    relation_set(relation_settings=settings)
    if is_leader():
        update_northbound_relations(rid=relation_id())
        update_southbound_relations()
    update_charm_status()
Example #11
def tls_certificates_relation_departed():
    config['tls_present'] = False
    common_utils.tls_changed(utils.MODULE, None)
    update_southbound_relations()
    update_http_relations()
    update_https_relations()
    utils.update_nrpe_config()
    utils.update_charm_status()
Example #12
def tls_certificates_relation_departed():
    if not common_utils.tls_changed(utils.MODULE, None):
        return

    update_southbound_relations()
    update_http_relations()
    update_https_relations()
    utils.update_nrpe_config()
    utils.update_charm_status()
Example #13
def upgrade_charm():
    utils.update_charm_status()
    config_changed()
    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
    update_cluster_relations()
    update_http_relations()
    update_https_relations()
Example #14
def tls_certificates_relation_changed():
    # it can be fired several times without server's cert
    if not common_utils.tls_changed(utils.MODULE, relation_get()):
        return

    update_southbound_relations()
    update_http_relations()
    update_https_relations()
    utils.update_nrpe_config()
    utils.update_charm_status()
Example #15
def contrail_controller_changed():
    data = relation_get()
    if "orchestrator-info" in data:
        config["orchestrator_info"] = data["orchestrator-info"]
    # TODO: set error if orchestrator is changed and container was started
    # with another orchestrator
    if is_leader():
        update_southbound_relations()
        update_northbound_relations()
    update_charm_status()
Example #16
def contrail_auth_departed():
    units = [unit for rid in relation_ids("contrail-auth")
             for unit in related_units(rid)]
    if units:
        return
    config.pop("auth_info", None)

    update_northbound_relations()
    update_southbound_relations()
    utils.update_charm_status()
Example #17
def contrail_auth_changed():
    auth_info = relation_get("auth-info")
    if auth_info is not None:
        config["auth_info"] = auth_info
    else:
        config.pop("auth_info", None)

    update_northbound_relations()
    update_southbound_relations()
    utils.update_charm_status()
Example #18
def upgrade_charm():
    utils.update_charm_status()
    config_changed()
    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
    update_cluster_relations()
    update_http_relations()
    update_https_relations()
    # to update config flags and certs params if any was changed
    _update_tls()
Example #19
def install():
    status_set("maintenance", "Installing...")
    config['apply-defaults'] = True
    # TODO: try to remove this call
    common_utils.fix_hostname()

    if config.get('local-rabbitmq-hostname-resolution'):
        utils.update_rabbitmq_cluster_hostnames()

    docker_utils.install()
    utils.update_charm_status()
Example #20
def _tls_changed(cert, key, ca):
    changed = update_certificates(cert, key, ca)
    if not changed:
        return

    # save certs & notify relations
    config["ssl_enabled"] = (cert is not None and len(cert) > 0)
    config.save()
    update_northbound_relations()

    update_charm_status(force=True)
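
update_certificates() returns whether anything on disk actually changed, which is what lets _tls_changed() bail out early. A simplified sketch of that contract; the directory and file names are illustrative only, not the charm's real paths:

import os

# directory is illustrative only - not necessarily the charm's real location
_TLS_DIR = "/etc/ssl/contrail"

def update_certificates(cert, key, ca):
    # Hypothetical sketch: write cert/key/ca (or remove them when None) and
    # report True only if at least one file actually changed.
    changed = False
    for name, data in (("server.pem", cert), ("server-key.pem", key),
                       ("ca-cert.pem", ca)):
        path = os.path.join(_TLS_DIR, name)
        old = None
        if os.path.exists(path):
            with open(path) as f:
                old = f.read()
        if (data or None) == old:
            continue
        if data:
            with open(path, "w") as f:
                f.write(data)
        elif old is not None:
            os.remove(path)
        changed = True
    return changed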
Example #21
def cluster_changed():
    if not is_leader():
        return
    data = relation_get()
    ip = data.get("unit-address")
    if not ip:
        log("There is no unit-address in the relation")
        return
    unit = remote_unit()
    _address_changed(unit, ip)
    update_charm_status()
Example #22
def contrail_controller_changed():
    data = relation_get()
    if data.get("unit-type") == 'issu':
        config["maintenance"] = 'issu'
        config["issu_controller_ips"] = data.get("issu_controller_ips")
        config["issu_controller_data_ips"] = data.get(
            "issu_controller_data_ips")
        config["issu_analytics_ips"] = data.get("issu_analytics_ips")
    use_internal_endpoints = data.get("use-internal-endpoints")
    if use_internal_endpoints:
        if not isinstance(use_internal_endpoints, bool):
            use_internal_endpoints = yaml.safe_load(use_internal_endpoints)
            if not isinstance(use_internal_endpoints, bool):
                use_internal_endpoints = False
        config["use_internal_endpoints"] = use_internal_endpoints

    # TODO: set error if orchestrator is changed and container was started
    # with another orchestrator
    if "dpdk" in data:
        # remote unit is an agent
        address = data["private-address"]
        flags = common_utils.json_loads(config.get("agents-info"), dict())
        flags[address] = data["dpdk"]
        config["agents-info"] = json.dumps(flags)
    if "k8s_info" in data:
        # remote unit is kubemaster
        k8s_info = common_utils.json_loads(data.get("k8s_info"), dict())
        if k8s_info:
            cluster_name = k8s_info.get("cluster_name")
            pod_subnets = k8s_info.get("pod_subnets")
            kubernetes_workers = k8s_info.get("kubernetes_workers")

            cluster_info = {
                cluster_name: {
                    "pod_subnets": pod_subnets,
                    "kubernetes_workers": kubernetes_workers
                }
            }
            agents_info = common_utils.json_loads(config.get("agents-info"),
                                                  dict())
            if agents_info.get("k8s_info"):
                agents_info["k8s_info"].update(cluster_info)
            else:
                agents_info["k8s_info"] = cluster_info
            config["agents-info"] = json.dumps(agents_info)
    config.save()

    _rebuild_orchestrator_info()

    update_southbound_relations()
    update_northbound_relations()
    utils.update_ziu("controller-changed")
    utils.update_charm_status()
Example #23
def upgrade_charm():
    # NOTE: the old image cannot be deleted while the container is running.
    # TODO: think about killing the container

    # clear cached version of image
    config.pop("version_with_build", None)
    config.pop("version", None)
    config.save()

    # NOTE: this hook can fire when either the resource or the charm code has
    # changed, so if the code changed we may need to update the config
    update_charm_status()
Example #24
def cluster_joined(rel_id=None):
    ip = common_utils.get_ip()
    settings = {
        "unit-address": ip,
        "data-address": common_utils.get_ip(config_param="data-network", fallback=ip)
    }

    if config.get('local-rabbitmq-hostname-resolution'):
        settings["rabbitmq-hostname"] = utils.get_contrail_rabbit_hostname()

    relation_set(relation_id=rel_id, relation_settings=settings)
    utils.update_charm_status()
Example #25
def install():
    status_set("maintenance", "Installing...")

    # TODO: try to remove this call
    fix_hostname()

    apt_upgrade(fatal=True, dist=True)
    add_docker_repo()
    apt_update(fatal=False)
    apt_install(PACKAGES + DOCKER_PACKAGES, fatal=True)

    update_charm_status()
Example #26
def contrail_issu_relation_changed():
    rel_data = relation_get()
    if "orchestrator-info" in rel_data:
        config["orchestrator_info"] = rel_data["orchestrator-info"]
    else:
        config.pop("orchestrator_info", None)
    config.save()
    update_northbound_relations()
    utils.update_charm_status()

    issu_data = dict()
    for name in ["rabbitmq_connection_details", "cassandra_connection_details", "zookeeper_connection_details"]:
        issu_data.update(common_utils.json_loads(rel_data.get(name), dict()))
    utils.update_issu_state(issu_data)
Example #27
def cluster_departed():
    if not is_leader():
        return
    unit = remote_unit()
    ips = json_loads(leader_get("controller_ips"), dict())
    if unit not in ips:
        return
    old_ip = ips.pop(unit)
    ip_list = json_loads(leader_get("controller_ip_list"), list())
    ip_list.remove(old_ip)

    log("IP_LIST: {}    IPS: {}".format(str(ip_list), str(ips)))
    leader_set(controller_ip_list=json.dumps(ip_list),
               controller_ips=json.dumps(ips))
    update_charm_status()
Example #28
def amqp_changed():
    # collect information about connected RabbitMQ server
    password = relation_get("password")
    clustered = relation_get('clustered')
    if clustered:
        vip = relation_get('vip')
        vip = format_ipv6_addr(vip) or vip
        rabbitmq_host = vip
    else:
        host = relation_get('private-address')
        host = format_ipv6_addr(host) or host
        rabbitmq_host = host

    ssl_port = relation_get('ssl_port')
    if ssl_port:
        log("Underlayed software is not capable to use non-default port",
            level=ERROR)
        return 1
    ssl_ca = relation_get('ssl_ca')
    if ssl_ca:
        log("Charm can't setup ssl support but ssl ca found", level=WARNING)
    if relation_get('ha_queues') is not None:
        log("Charm can't setup HA queues but flag is found", level=WARNING)

    rabbitmq_hosts = []
    ha_vip_only = relation_get('ha-vip-only') is not None
    # Used for active/active rabbitmq >= grizzly
    if ((not clustered or ha_vip_only) and len(related_units()) > 1):
        for unit in related_units():
            host = relation_get('private-address', unit=unit)
            host = format_ipv6_addr(host) or host
            rabbitmq_hosts.append(host)

    if not rabbitmq_hosts:
        rabbitmq_hosts.append(rabbitmq_host)
    rabbitmq_hosts = ','.join(sorted(rabbitmq_hosts))

    # Here we have:
    # password - password from RabbitMQ server for user passed in joined
    # rabbitmq_hosts - list of hosts with RabbitMQ servers
    config["rabbitmq_password"] = password
    config["rabbitmq_hosts"] = rabbitmq_hosts
    config.save()

    update_northbound_relations()
    update_charm_status()
Example #29
def contrail_controller_changed():
    data = relation_get()
    if "orchestrator-info" in data:
        config["orchestrator_info"] = data["orchestrator-info"]
    # TODO: set error if orchestrator is changed and container was started
    # with another orchestrator
    if is_leader():
        if "dpdk" in data:
            # remote unit is an agent
            address = data["private-address"]
            flags = json_loads(config.get("agents-info"), dict())
            flags[address] = data["dpdk"]
            config["agents-info"] = json.dumps(flags)
            config.save()
        update_southbound_relations()
        update_northbound_relations()
    update_charm_status()
Example #30
def upgrade_charm():
    utils.update_charm_status()
    config_changed()
    for rid in relation_ids("contrail-analytics"):
        if related_units(rid):
            analytics_joined(rel_id=rid)
    for rid in relation_ids("contrail-analyticsdb"):
        if related_units(rid):
            analyticsdb_joined(rel_id=rid)
    for rid in relation_ids("contrail-controller"):
        if related_units(rid):
            contrail_controller_joined(rel_id=rid)
    for rid in relation_ids("contrail-issu"):
        if related_units(rid):
            contrail_issu_relation_joined(rel_id=rid)
    for rid in relation_ids("controller-cluster"):
        if related_units(rid):
            cluster_joined(rel_id=rid)