def upgrade_charm():
    """Hook: charm code or an attached resource was upgraded.

    Re-renders configuration and refreshes workload status.
    """
    # NOTE: image can not be deleted if container is running.
    # TODO: so think about killing the container
    # NOTE: this hook can be fired when either resource changed or charm code
    # changed. so if code was changed then we may need to update config
    update_charm_status()
def leader_elected():
    """Hook: this unit became leader.

    Seeds shared DB credentials into leader storage exactly once (on the
    very first election), then propagates relation data and refreshes
    workload status.
    """
    if not leader_get("db_user"):
        # First leader ever: generate credentials that all units will share
        # via leader storage.
        leader_set(db_user="******", db_password=uuid.uuid4().hex)
    _update_relation()
    update_charm_status()
def install(): status_set('maintenance', 'Installing...') # TODO: try to remove this call common_utils.fix_hostname() docker_utils.install() utils.update_charm_status()
def config_changed():
    """Hook: charm configuration changed.

    If the control network changed, advertises the new private address on
    every related application and peer, then refreshes workload status.
    """
    if config.changed("control-network"):
        settings = {'private-address': get_ip()}
        # Re-publish our address on both the provider and the peer relation.
        for rname in ("contrail-analyticsdb", "analyticsdb-cluster"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid, relation_settings=settings)

    update_charm_status()
def analyticsdb_changed():
    """Hook: relation data changed.

    Caches interesting relation values into charm config and re-renders
    only when at least one of them actually changed.
    """
    data = relation_get()
    changed = False
    # _value_changed stores the relation value under the config key and
    # reports whether it differs from the cached one; it must run for
    # every pair, so no short-circuiting here.
    for rel_key, cfg_key in (("auth-info", "auth_info"),
                             ("orchestrator-info", "orchestrator_info"),
                             ("ssl-enabled", "ssl_enabled")):
        if _value_changed(data, rel_key, cfg_key):
            changed = True
    # TODO: handle changing of all values
    # TODO: set error if orchestrator is changing and container was started
    if changed:
        update_charm_status()
def analyticsdb_changed():
    """Hook: relation data changed.

    Caches auth/orchestrator/SSL material from the relation into charm
    config, then unconditionally refreshes workload status.
    """
    data = relation_get()
    # Cache every interesting relation value under its config key.
    for rel_key, cfg_key in (("auth-info", "auth_info"),
                             ("orchestrator-info", "orchestrator_info"),
                             ("ssl-ca", "ssl_ca"),
                             ("ssl-cert", "ssl_cert"),
                             ("ssl-key", "ssl_key")):
        _value_changed(data, rel_key, cfg_key)
    # TODO: handle changing of all values
    # TODO: set error if orchestrator is changing and container was started
    update_charm_status()
def analyticsdb_departed():
    """Hook: a related unit departed.

    When no controller-type units remain on the contrail-analyticsdb
    relation, forgets the data they provided, then refreshes status.
    """
    controllers = sum(
        1
        for rid in relation_ids("contrail-analyticsdb")
        for unit in related_units(rid)
        if relation_get("unit-type", unit, rid) == "controller")
    if controllers == 0:
        # No controllers left: drop the cached controller-provided config.
        for key in ("auth_info", "orchestrator_info"):
            config.pop(key, None)
    utils.update_charm_status()
def analyticsdb_cluster_departed():
    """Hook: a peer unit left the cluster.

    Only the leader maintains cluster_info in leader storage: it removes
    the departed unit, republishes the cluster map, and refreshes status.
    """
    if not is_leader():
        return

    unit = remote_unit()
    cluster_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    cluster_info.pop(unit, None)
    log("Unit {} departed. Cluster info: {}".format(unit, str(cluster_info)))
    leader_set(settings={"cluster_info": json.dumps(cluster_info)})

    _update_analyticsdb()
    utils.update_charm_status()
def upgrade_charm():
    """Hook: charm code or an attached resource was upgraded.

    Invalidates the cached container-image version so it is re-read from
    the new image, then re-renders configuration and status.
    """
    # NOTE: image can not be deleted if container is running.
    # TODO: so think about killing the container

    # Clear the cached image version fields.
    for key in ("version_with_build", "version"):
        config.pop(key, None)
    config.save()

    # NOTE: this hook can be fired when either resource changed or charm code
    # changed. so if code was changed then we may need to update config
    update_charm_status()
def upgrade_charm():
    """Hook: charm code or an attached resource was upgraded.

    Refreshes peer data; if the leader has never saved cluster_info it is
    rebuilt from the current relation state and published, then relation
    data and workload status are refreshed.
    """
    _update_cluster()

    saved_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    if is_leader() and not saved_info:
        # First upgrade on a leader with no persisted cluster map: build it.
        current_info = utils.get_cluster_info(
            "unit-address", common_utils.get_ip())
        log("Cluster info: {}".format(str(current_info)))
        leader_set(settings={"cluster_info": json.dumps(current_info)})

    _update_analyticsdb()
    utils.update_charm_status()
def install(): status_set('maintenance', 'Installing...') # TODO: try to remove this call fix_hostname() apt_upgrade(fatal=True, dist=True) add_docker_repo() apt_update(fatal=False) apt_install(PACKAGES + DOCKER_PACKAGES, fatal=True) update_charm_status()
def analyticsdb_departed():
    """Hook: a controller unit departed.

    When no controller units remain, drops cached controller data and, if
    the container is already running, blocks the unit because it cannot
    operate without a cloud orchestrator. Always refreshes status.
    """
    has_units = any(
        related_units(rid) for rid in relation_ids("contrail-controller"))
    if not has_units:
        # Forget everything the controllers provided.
        for key in ("auth_info", "orchestrator_info", "ssl_enabled"):
            config.pop(key, None)
        if is_container_launched(CONTAINER_NAME):
            status_set(
                "blocked",
                "Container is present but cloud orchestrator was disappeared."
                " Please kill container by yourself or restore"
                " cloud orchestrator.")
    update_charm_status()
def analyticsdb_changed():
    """Hook: relation data changed.

    This method catches the hook from both the controller and analytics
    sides, so it reads and caches all values, advances the ZIU state
    machine, and refreshes workload status.
    """
    data = relation_get()
    # Cache every interesting relation value under its config key.
    for rel_key, cfg_key in (("auth-info", "auth_info"),
                             ("orchestrator-info", "orchestrator_info"),
                             ("maintenance", "maintenance"),
                             ("controller_ips", "controller_ips"),
                             ("controller_data_ips", "controller_data_ips"),
                             ("analytics_ips", "analytics_ips")):
        _value_changed(data, rel_key, cfg_key)
    # TODO: handle changing of all values
    # TODO: set error if orchestrator is changing and container was started
    utils.update_ziu("analyticsdb-changed")
    utils.update_charm_status()
def analyticsdb_changed():
    """Hook: relation data changed.

    Caches interesting relation values into charm config and re-renders
    only when at least one of them actually changed.
    """
    data = relation_get()
    changed = False
    # _value_changed has the side effect of caching the value, so it must
    # run for every pair — no short-circuiting.
    for rel_key, cfg_key in (("auth-info", "auth_info"),
                             ("orchestrator-info", "orchestrator_info"),
                             ("maintenance", "maintenance"),
                             ("controller_ips", "controller_ips"),
                             ("controller_data_ips", "controller_data_ips")):
        if _value_changed(data, rel_key, cfg_key):
            changed = True
    # TODO: handle changing of all values
    # TODO: set error if orchestrator is changing and container was started
    if changed:
        utils.update_charm_status()
def analyticsdb_cluster_changed():
    """Hook: peer relation data changed.

    The leader tracks peer addresses; when a peer's unit-address changed,
    the cluster map is republished and the workload status refreshed.
    The ZIU state machine is advanced on every invocation.
    """
    data = relation_get()
    log("Peer relation changed with {}: {}".format(remote_unit(), data))

    ip = data.get("unit-address")
    if ip:
        if is_leader():
            unit = remote_unit()
            if _address_changed(unit, ip):
                _update_analyticsdb()
                utils.update_charm_status()
    else:
        log("There is no unit-address in the relation")

    utils.update_ziu("cluster-changed")
def config_changed():
    """Hook: charm configuration changed.

    Re-publishes the private address when the control network changed,
    re-applies docker registry settings and credentials when they changed,
    then refreshes workload status.
    """
    if config.changed("control-network"):
        settings = {'private-address': get_ip()}
        # Advertise the new address on both provider and peer relations.
        for rname in ("contrail-analyticsdb", "analyticsdb-cluster"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid, relation_settings=settings)

    if config.changed("docker-registry"):
        apply_docker_insecure()
    if any(config.changed(key) for key in ("docker-user", "docker-password")):
        docker_login()

    update_charm_status()
def config_changed():
    """Hook: charm configuration changed.

    Updates NRPE monitoring, re-publishes the private address when the
    control network changed, recomputes the analytics-SSL flag, applies
    docker settings, then refreshes workload status.
    """
    utils.update_nrpe_config()

    if config.changed("control-network"):
        settings = {'private-address': common_utils.get_ip()}
        for rname in ("contrail-analyticsdb", "analyticsdb-cluster"):
            for rid in relation_ids(rname):
                relation_set(relation_id=rid, relation_settings=settings)

    config["config_analytics_ssl_available"] = (
        common_utils.is_config_analytics_ssl_available())
    config.save()

    docker_utils.config_changed()
    utils.update_charm_status()
def config_changed():
    """Hook: charm configuration changed.

    Updates NRPE monitoring, refreshes the cluster when the control network
    changed, applies docker settings and workload status, and finally
    advances the ZIU state machine when the image tag changed. The
    image-tag check is deliberately last (see inline comments).
    """
    utils.update_nrpe_config()
    if config.changed("control-network"):
        _update_cluster()
        if is_leader() and _address_changed(local_unit(), common_utils.get_ip()):
            _update_analyticsdb()

    docker_utils.config_changed()
    utils.update_charm_status()

    # leave it as latest - in case of exception in previous steps
    # config.changed doesn't work sometimes (when we saved config in this hook
    # before), so compare against our own saved copy instead.
    if config.get("saved-image-tag") != config["image-tag"]:
        utils.update_ziu("image-tag")
        config["saved-image-tag"] = config["image-tag"]
        config.save()
def update_status():
    """Hook: periodic update-status; refresh the workload status."""
    utils.update_charm_status()
def leader_settings_changed():
    """Hook: leader storage changed; re-publish data and refresh status."""
    _update_analyticsdb()
    utils.update_charm_status()
def update_status():
    """Hook: periodic update-status.

    Refreshes workload status without regenerating the configuration.
    """
    update_charm_status(update_config=False)
def update_status():
    """Hook: periodic update-status.

    Advances the ZIU state machine, then refreshes workload status.
    """
    utils.update_ziu("update-status")
    utils.update_charm_status()
def tls_certificates_relation_departed():
    """Hook: TLS relation departed.

    Passing None clears the stored certificates; status is refreshed only
    when the TLS material actually changed.
    """
    if common_utils.tls_changed(utils.MODULE, None):
        utils.update_charm_status()
def tls_certificates_relation_changed():
    """Hook: TLS relation data changed.

    Refreshes workload status only when the TLS material actually changed.
    """
    if common_utils.tls_changed(utils.MODULE, relation_get()):
        utils.update_charm_status()
def leader_settings_changed():
    """Hook: leader storage changed; refresh the workload status."""
    update_charm_status()
def tls_certificates_relation_departed():
    """Hook: TLS relation departed.

    Marks TLS as absent, clears the stored certificates (tls_changed with
    None), and unconditionally refreshes workload status.
    """
    config['tls_present'] = False
    common_utils.tls_changed(utils.MODULE, None)
    utils.update_charm_status()
def upgrade_charm():
    """Hook: charm code or a resource was upgraded; refresh status."""
    utils.update_charm_status()
def tls_certificates_relation_changed():
    """Hook: TLS relation data changed.

    Refreshes workload status only when the TLS material actually changed.
    """
    # it can be fired several times without server's cert
    if common_utils.tls_changed(utils.MODULE, relation_get()):
        utils.update_charm_status()