def ssh_authorized_peers(peer_interface, user, group=None,
                         ensure_local_user=False):
    """
    Main setup function, should be called from both peer -changed and -joined
    hooks with the same parameters.

    peer_interface: name of the peer relation to exchange keys over.
    user: local account whose SSH keypair is published/authorized.
    group: optional group for the local account (used with ensure_local_user).
    ensure_local_user: create the local account first when True.
    """
    if ensure_local_user:
        ensure_user(user, group)
    priv_key, pub_key = get_keypair(user)
    # Behaviour depends on which hook invoked us (argv[0] is the hook name).
    hook = os.path.basename(sys.argv[0])
    if hook == '%s-relation-joined' % peer_interface:
        # Advertise our public key to peers.
        utils.relation_set(ssh_pub_key=pub_key)
    elif hook == '%s-relation-changed' % peer_interface:
        # Collect every peer's key + address, then authorize them locally.
        hosts = []
        keys = []
        for r_id in utils.relation_ids(peer_interface):
            for unit in utils.relation_list(r_id):
                settings = utils.relation_get_dict(relation_id=r_id,
                                                   remote_unit=unit)
                if 'ssh_pub_key' in settings:
                    keys.append(settings['ssh_pub_key'])
                    hosts.append(settings['private-address'])
                else:
                    utils.juju_log('INFO',
                                   'ssh_authorized_peers(): ssh_pub_key '
                                   'missing for unit %s, skipping.' % unit)
        write_authorized_keys(user, keys)
        write_known_hosts(user, hosts)
        # Publish the colon-separated list of hosts we have authorized.
        authed_hosts = ':'.join(hosts)
        utils.relation_set(ssh_authorized_hosts=authed_hosts)
def is_clustered():
    """Return True when any unit on the "ha" relation reports 'clustered'."""
    ha_rids = relation_ids("ha") or []
    for rid in ha_rids:
        members = relation_list(rid) or []
        for member in members:
            if relation_get("clustered", rid=rid, unit=member):
                return True
    return False
def ha_relation_changed():
    """Once the HA cluster is configured and this unit leads, point all
    shared-db consumers at the VIP."""
    is_configured = utils.relation_get('clustered')
    if not (is_configured and cluster.is_leader(LEADER_RES)):
        return
    utils.juju_log('INFO', 'Cluster configured, notifying other services')
    # Tell all related services to start using the VIP
    for rel_id in utils.relation_ids('shared-db'):
        utils.relation_set(rid=rel_id, db_host=utils.config_get('vip'))
def get_ceph_nodes():
    """Return the private-address of every unit on the 'ceph' relation."""
    return [utils.relation_get('private-address', unit=member, rid=rid)
            for rid in utils.relation_ids('ceph')
            for member in utils.relation_list(rid)]
def ha_relation_changed():
    """Leader-only: after clustering, publish the VIP as db_host to all
    shared-db relations."""
    if not utils.relation_get("clustered"):
        return
    if not cluster.is_leader(LEADER_RES):
        return
    utils.juju_log("INFO", "Cluster configured, notifying other services")
    # Tell all related services to start using the VIP
    for rel in utils.relation_ids("shared-db"):
        utils.relation_set(rid=rel, db_host=utils.config_get("vip"))
def get_ceph_nodes():
    """Collect the private-address of each unit related via 'ceph'."""
    addrs = []
    for rel in utils.relation_ids('ceph'):
        for member in utils.relation_list(rel):
            addrs.append(
                utils.relation_get('private-address', unit=member, rid=rel))
    return addrs
def is_clustered():
    """True when some peer on the 'ha' relation has published 'clustered'."""
    return any(
        relation_get('clustered', rid=rid, unit=member)
        for rid in (relation_ids('ha') or [])
        for member in (relation_list(rid) or []))
def configure_haproxy(service_ports):
    '''
    Configure HAProxy based on the current peers in the service
    cluster using the provided port map:
        "swift": [ 8080, 8070 ]
    HAproxy will also be reloaded/started if required
    service_ports: dict: dict of lists of [ frontend, backend ]
    '''
    # Map unit name (with '/' -> '-') to address, starting with ourselves.
    local_key = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
    peers = {local_key: unit_get('private-address')}
    for rel in relation_ids('cluster'):
        for member in relation_list(rel):
            peers[member.replace('/', '-')] = relation_get(
                attribute='private-address', rid=rel, unit=member)
    template_ctx = {
        'units': peers,
        'service_ports': service_ports,
    }
    rendered = render_template(os.path.basename(HAPROXY_CONF), template_ctx)
    with open(HAPROXY_CONF, 'w') as f:
        f.write(rendered)
    # Enable the haproxy init job, then pick up the new config.
    with open(HAPROXY_DEFAULT, 'w') as f:
        f.write('ENABLED=1')
    reload('haproxy')
def ha_relation_joined():
    # Publish HA (corosync/pacemaker) configuration to the hacluster
    # subordinate: VIP, ceph-backed rbd storage, filesystem and mysqld
    # resources, once both the config and the ceph relation are ready.
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')
    # VIP settings are mandatory; abort the hook (non-zero) if incomplete.
    if None in [vip, vip_cidr, vip_iface]:
        utils.juju_log('WARNING',
                       'Insufficient VIP information to configure cluster')
        sys.exit(1)
    # Starting configuring resources.
    init_services = {'res_mysqld': 'mysql'}
    # If the 'ha' relation has been made *before* the 'ceph' relation,
    # it doesn't make sense to make it until after the 'ceph' relation is made
    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO',
                       '*ceph* relation does not exist. '
                       'Not sending *ha* relation data yet')
        return
    else:
        utils.juju_log('INFO',
                       '*ceph* relation exists. Sending *ha* relation data')
    block_storage = 'ceph'
    # Pacemaker resource agents: rbd device, filesystem, VIP, mysqld job.
    resources = {
        'res_mysql_rbd': 'ocf:ceph:rbd',
        'res_mysql_fs': 'ocf:heartbeat:Filesystem',
        'res_mysql_vip': 'ocf:heartbeat:IPaddr2',
        'res_mysqld': 'upstart:mysql'}
    rbd_name = utils.config_get('rbd-name')
    # NOTE(review): the 'user="******"' value looks redacted — the format
    # string shows three %s placeholders but four arguments; confirm the
    # original template against upstream before relying on it.
    resource_params = {
        'res_mysql_rbd': 'params name="%s" pool="%s" user="******" '
                         'secret="%s"' %
                         (rbd_name, POOL_NAME, SERVICE_NAME,
                          ceph.keyfile_path(SERVICE_NAME)),
        'res_mysql_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                        'fstype="ext4" op start start-delay="10s"' %
                        (POOL_NAME, rbd_name, DATA_SRC_DST),
        'res_mysql_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                         (vip, vip_cidr, vip_iface),
        'res_mysqld': 'op start start-delay="5s" op monitor interval="5s"'}
    groups = {
        'grp_mysql': 'res_mysql_rbd res_mysql_fs res_mysql_vip res_mysqld'}
    # Hand the full resource description to every 'ha' relation.
    for rel_id in utils.relation_ids('ha'):
        utils.relation_set(rid=rel_id,
                           block_storage=block_storage,
                           corosync_bindiface=corosync_bindiface,
                           corosync_mcastport=corosync_mcastport,
                           resources=resources,
                           resource_params=resource_params,
                           init_services=init_services,
                           groups=groups)
def get_ca_cert():
    """Scan identity-service relations for a CA certificate; keep the first
    non-empty value found."""
    juju_log('INFO',
             "Inspecting identity-service relations for CA SSL certificate.")
    found = None
    for rel in relation_ids('identity-service'):
        for member in relation_list(rel):
            if not found:
                found = relation_get('ca_cert', rid=rel, unit=member)
    return found
def cluster_with():
    """Attempt to cluster this rabbit node with the first available peer.

    For rabbit >= 3.0.1-1, uses join_cluster and installs an HA policy
    mirroring all non-amq.* queues; older versions use the legacy 'cluster'
    command.  Exits the hook with status 1 if no peer could be joined.
    """
    vers = rabbit_version()
    if vers >= '3.0.1-1':
        cluster_cmd = 'join_cluster'
        # Each argument must be its own argv element; passing the whole
        # "set_policy HA ..." string (with embedded shell quoting) as one
        # element would hand rabbitmqctl a single unparseable argument.
        cmd = [RABBITMQ_CTL, 'set_policy', 'HA',
               '^(?!amq\.).*', '{"ha-mode": "all"}']
        subprocess.check_call(cmd)
    else:
        cluster_cmd = 'cluster'
    out = subprocess.check_output([RABBITMQ_CTL, 'cluster_status'])
    current_host = subprocess.check_output(['hostname']).strip()

    # check all peers and try to cluster with them
    available_nodes = []
    first_hostname = utils.relation_get('host')
    if first_hostname:
        # Guard against a missing 'host' key: appending None would crash
        # the re.search() below.
        available_nodes.append(first_hostname)

    for r_id in (utils.relation_ids('cluster') or []):
        for unit in (utils.relation_list(r_id) or []):
            # NOTE(review): key is 'private_address' (underscore); juju's
            # implicit setting is 'private-address' — confirm peers really
            # publish the underscore variant before changing it.
            address = utils.relation_get('private_address',
                                         rid=r_id, unit=unit)
            if address is not None:
                node = get_hostname(address, fqdn=False)
                if current_host != node:
                    available_nodes.append(node)

    # iterate over all the nodes, join to the first available
    for node in available_nodes:
        utils.juju_log('INFO',
                       'Clustering with remote rabbit host (%s).' % node)
        # Skip (and stop) if cluster_status already mentions this node.
        for line in out.split('\n'):
            if re.search(node, line):
                utils.juju_log('INFO',
                               'Host already clustered with %s.' % node)
                return
        try:
            subprocess.check_call([RABBITMQ_CTL, 'stop_app'])
            subprocess.check_call([RABBITMQ_CTL, cluster_cmd,
                                   'rabbit@%s' % node])
            subprocess.check_call([RABBITMQ_CTL, 'start_app'])
            utils.juju_log('INFO', 'Host clustered with %s.' % node)
            return
        except subprocess.CalledProcessError:
            # continue to the next node (was a bare except:, which would
            # also have swallowed KeyboardInterrupt/SystemExit)
            pass
    # error, no nodes available for clustering
    utils.juju_log('ERROR', 'No nodes available for clustering')
    sys.exit(1)
def ha_relation_joined():
    # Publish the corosync/pacemaker resource description (VIP + ceph rbd
    # backed mysqld) to the hacluster subordinate via the 'ha' relation.
    vip = utils.config_get("vip")
    vip_iface = utils.config_get("vip_iface")
    vip_cidr = utils.config_get("vip_cidr")
    corosync_bindiface = utils.config_get("ha-bindiface")
    corosync_mcastport = utils.config_get("ha-mcastport")
    # All VIP settings are required; abort the hook if any are unset.
    if None in [vip, vip_cidr, vip_iface]:
        utils.juju_log("WARNING",
                       "Insufficient VIP information to configure cluster")
        sys.exit(1)
    # Starting configuring resources.
    init_services = {"res_mysqld": "mysql"}
    # If the 'ha' relation has been made *before* the 'ceph' relation,
    # it doesn't make sense to make it until after the 'ceph' relation is made
    if not utils.is_relation_made("ceph", "auth"):
        utils.juju_log("INFO",
                       "*ceph* relation does not exist. "
                       "Not sending *ha* relation data yet")
        return
    else:
        utils.juju_log("INFO",
                       "*ceph* relation exists. Sending *ha* relation data")
    block_storage = "ceph"
    # Pacemaker resource agents: rbd device, filesystem, VIP, mysqld job.
    resources = {
        "res_mysql_rbd": "ocf:ceph:rbd",
        "res_mysql_fs": "ocf:heartbeat:Filesystem",
        "res_mysql_vip": "ocf:heartbeat:IPaddr2",
        "res_mysqld": "upstart:mysql",
    }
    rbd_name = utils.config_get("rbd-name")
    # NOTE(review): 'user="******"' appears redacted — three %s placeholders
    # vs four format arguments; confirm against the upstream charm.
    resource_params = {
        "res_mysql_rbd": 'params name="%s" pool="%s" user="******" '
                         'secret="%s"' %
                         (rbd_name, POOL_NAME, SERVICE_NAME,
                          ceph.keyfile_path(SERVICE_NAME)),
        "res_mysql_fs": 'params device="/dev/rbd/%s/%s" directory="%s" '
                        'fstype="ext4" op start start-delay="10s"' %
                        (POOL_NAME, rbd_name, DATA_SRC_DST),
        "res_mysql_vip": 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                         (vip, vip_cidr, vip_iface),
        "res_mysqld": 'op start start-delay="5s" op monitor interval="5s"',
    }
    groups = {"grp_mysql": "res_mysql_rbd res_mysql_fs res_mysql_vip res_mysqld"}
    # Send the full resource description over every 'ha' relation.
    for rel_id in utils.relation_ids("ha"):
        utils.relation_set(
            rid=rel_id,
            block_storage=block_storage,
            corosync_bindiface=corosync_bindiface,
            corosync_mcastport=corosync_mcastport,
            resources=resources,
            resource_params=resource_params,
            init_services=init_services,
            groups=groups,
        )
def get_ceph_nodes():
    """Return an address per ceph unit, preferring ceph-public-address and
    falling back to private-address."""
    hosts = []
    for rel in utils.relation_ids('ceph'):
        for member in utils.relation_list(rel):
            addr = utils.relation_get('ceph-public-address',
                                      rid=rel, unit=member)
            if not addr:
                addr = utils.relation_get('private-address',
                                          rid=rel, unit=member)
            hosts.append(addr)
    return hosts
def ha_changed():
    """After HA clustering completes, re-fire amqp_changed for every AMQP
    client so they pick up the VIP and re-authenticate."""
    if not cluster.is_clustered():
        return
    vip = utils.config_get('vip')
    utils.juju_log('INFO',
                   'ha_changed(): We are now HA clustered. '
                   'Advertising our VIP (%s) to all AMQP clients.' % vip)
    # need to re-authenticate all clients since node-name changed.
    for rel in utils.relation_ids('amqp'):
        for client in utils.relation_list(rel):
            amqp_changed(relation_id=rel, remote_unit=client)
def ha_changed():
    """Notify all AMQP clients of the VIP once the node is HA clustered."""
    if not cluster.is_clustered():
        return
    advertised_vip = utils.config_get('vip')
    msg = ('ha_changed(): We are now HA clustered. '
           'Advertising our VIP (%s) to all AMQP clients.' % advertised_vip)
    utils.juju_log('INFO', msg)
    # need to re-authenticate all clients since node-name changed.
    amqp_rids = utils.relation_ids('amqp')
    for rel_id in amqp_rids:
        for peer in utils.relation_list(rel_id):
            amqp_changed(relation_id=rel_id, remote_unit=peer)
def cluster_with():
    # Try to join this rabbit node to the first available peer.  Rabbit
    # >= 3.0.1-1 uses join_cluster plus an HA mirroring policy; older
    # versions use the legacy 'cluster' command.  Exits 1 on failure.
    vers = rabbit_version()
    if vers >= '3.0.1-1':
        cluster_cmd = 'join_cluster'
        # NOTE(review): the whole "set_policy HA ..." string (including
        # shell-style quoting) is passed as a single argv element, so
        # rabbitmqctl receives one garbled argument — this likely fails;
        # confirm and split into separate arguments.
        cmd = [RABBITMQ_CTL,
               'set_policy HA \'^(?!amq\.).*\' '
               '\'{"ha-mode": "all"}\'']
        subprocess.check_call(cmd)
    else:
        cluster_cmd = 'cluster'
    out = subprocess.check_output([RABBITMQ_CTL, 'cluster_status'])
    current_host = subprocess.check_output(['hostname']).strip()
    # check all peers and try to cluster with them
    available_nodes = []
    # NOTE(review): if 'host' is unset this appends None, and re.search(None,
    # line) below would raise TypeError — verify peers always publish 'host'.
    first_hostname = utils.relation_get('host')
    available_nodes.append(first_hostname)
    for r_id in (utils.relation_ids('cluster') or []):
        for unit in (utils.relation_list(r_id) or []):
            # NOTE(review): key is 'private_address' (underscore); juju's
            # implicit setting is 'private-address' — confirm.
            address = utils.relation_get('private_address',
                                         rid=r_id, unit=unit)
            if address is not None:
                node = get_hostname(address, fqdn=False)
                if current_host != node:
                    available_nodes.append(node)
    # iterate over all the nodes, join to the first available
    for node in available_nodes:
        utils.juju_log('INFO',
                       'Clustering with remote rabbit host (%s).' % node)
        # Already clustered with this node? Then nothing to do.
        for line in out.split('\n'):
            if re.search(node, line):
                utils.juju_log('INFO',
                               'Host already clustered with %s.' % node)
                return
        try:
            cmd = [RABBITMQ_CTL, 'stop_app']
            subprocess.check_call(cmd)
            cmd = [RABBITMQ_CTL, cluster_cmd, 'rabbit@%s' % node]
            subprocess.check_call(cmd)
            cmd = [RABBITMQ_CTL, 'start_app']
            subprocess.check_call(cmd)
            utils.juju_log('INFO', 'Host clustered with %s.' % node)
            return
        except:
            # continue to the next node
            pass
    # error, no nodes available for clustering
    utils.juju_log('ERROR', 'No nodes available for clustering')
    sys.exit(1)
def get_ceph_nodes():
    """Return an address per ceph unit (ceph-public-address preferred,
    private-address as fallback), with IPv6 literals wrapped in []."""
    hosts = []
    for rel in utils.relation_ids('ceph'):
        for member in utils.relation_list(rel):
            addr = (utils.relation_get('ceph-public-address',
                                       rid=rel, unit=member) or
                    utils.relation_get('private-address',
                                       rid=rel, unit=member))
            # We host is an ipv6 address we need to wrap it in []
            wrapped = format_ipv6_addr(addr)
            hosts.append(wrapped if wrapped else addr)
    return hosts
def config_changed():
    # Main config-changed hook: sync local user/permissions, optionally
    # upgrade OpenStack, refresh the scriptrc environment, and restart
    # keystone when its configuration changed.
    unison.ensure_user(user=SSH_USER, group='keystone')
    execute("chmod -R g+wrx /var/lib/keystone/")

    # Determine whether or not we should do an upgrade, based on the
    # the version offered in keyston-release.
    available = get_os_codename_install_source(config['openstack-origin'])
    installed = get_os_codename_package('keystone')

    if (available and
            get_os_version_codename(available) >
            get_os_version_codename(installed)):
        # TODO: fixup this call to work like utils.install()
        do_openstack_upgrade(config['openstack-origin'], ' '.join(packages))
        # Ensure keystone group permissions
        execute("chmod -R g+wrx /var/lib/keystone/")

    # Expose service name and API ports to external scripts via scriptrc.
    env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
                'OPENSTACK_PORT_ADMIN': cluster.determine_api_port(
                    config['admin-port']),
                'OPENSTACK_PORT_PUBLIC': cluster.determine_api_port(
                    config['service-port'])}
    save_script_rc(**env_vars)

    set_admin_token(config['admin-token'])

    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Cluster leader - ensuring endpoint configuration'
                       ' is up to date')
        ensure_initial_admin(config)

    update_config_block('logger_root', level=config['log-level'],
                        file='/etc/keystone/logging.conf')

    if get_os_version_package('keystone') >= '2013.1':
        # PKI introduced in Grizzly
        configure_pki_tokens(config)

    # Only restart when the on-disk config actually changed.
    if config_dirty():
        utils.restart('keystone')

    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Firing identity_changed hook'
                       ' for all related services.')
        # HTTPS may have been set - so fire all identity relations
        # again
        for r_id in utils.relation_ids('identity-service'):
            for unit in utils.relation_list(r_id):
                identity_changed(relation_id=r_id, remote_unit=unit)
def ha_relation_changed():
    """When the HA cluster reports itself configured and this unit leads,
    repoint identity-service consumers at the VIP."""
    relation_data = utils.relation_get_dict()
    if 'clustered' not in relation_data:
        return
    if not cluster.is_leader(CLUSTER_RES):
        return
    utils.juju_log('INFO',
                   'Cluster configured, notifying other services'
                   ' and updating keystone endpoint configuration')
    # Update keystone endpoint to point at VIP
    ensure_initial_admin(config)
    # Tell all related services to start using
    # the VIP and haproxy ports instead
    vip = config['vip']
    for rel_id in utils.relation_ids('identity-service'):
        utils.relation_set(rid=rel_id,
                           auth_host=vip,
                           service_host=vip)
def sync_to_peers(peer_interface, user, paths=[], verbose=False): base_cmd = [ 'unison', '-auto', '-batch=true', '-confirmbigdel=false', '-fastcheck=true', '-group=false', '-owner=false', '-prefer=newer', '-times=true' ] if not verbose: base_cmd.append('-silent') hosts = [] for r_id in (utils.relation_ids(peer_interface) or []): for unit in utils.relation_list(r_id): settings = utils.relation_get_dict(relation_id=r_id, remote_unit=unit) try: authed_hosts = settings['ssh_authorized_hosts'].split(':') except KeyError: print 'unison sync_to_peers: peer has not authorized *any* '\ 'hosts yet.' return unit_hostname = utils.unit_get('private-address') add_host = None for authed_host in authed_hosts: if unit_hostname == authed_host: add_host = settings['private-address'] if add_host: hosts.append(settings['private-address']) else: print 'unison sync_to_peers: peer (%s) has not authorized '\ '*this* host yet, skipping.' %\ settings['private-address'] for path in paths: # removing trailing slash from directory paths, unison # doesn't like these. if path.endswith('/'): path = path[:(len(path) - 1)] for host in hosts: try: cmd = base_cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)] utils.juju_log( 'INFO', 'Syncing local path %s to %s@%s:%s' % (path, user, host, path)) run_as_user(user, cmd) except: # it may fail for permissions on some files pass
def get_cert():
    """Return (cert, key) from config, or else from the first
    identity-service peer that published them."""
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)
    juju_log('INFO',
             "Inspecting identity-service relations for SSL certificate.")
    cert = key = None
    for rel in relation_ids('identity-service'):
        for member in relation_list(rel):
            if not cert:
                cert = relation_get('ssl_cert', rid=rel, unit=member)
            if not key:
                key = relation_get('ssl_key', rid=rel, unit=member)
    return (cert, key)
def sync_to_peers(peer_interface, user, paths=[], verbose=False): base_cmd = ['unison', '-auto', '-batch=true', '-confirmbigdel=false', '-fastcheck=true', '-group=false', '-owner=false', '-prefer=newer', '-times=true'] if not verbose: base_cmd.append('-silent') hosts = [] for r_id in (utils.relation_ids(peer_interface) or []): for unit in utils.relation_list(r_id): settings = utils.relation_get_dict(relation_id=r_id, remote_unit=unit) try: authed_hosts = settings['ssh_authorized_hosts'].split(':') except KeyError: print 'unison sync_to_peers: peer has not authorized *any* '\ 'hosts yet.' return unit_hostname = utils.unit_get('private-address') add_host = None for authed_host in authed_hosts: if unit_hostname == authed_host: add_host = settings['private-address'] if add_host: hosts.append(settings['private-address']) else: print 'unison sync_to_peers: peer (%s) has not authorized '\ '*this* host yet, skipping.' %\ settings['private-address'] for path in paths: # removing trailing slash from directory paths, unison # doesn't like these. if path.endswith('/'): path = path[:(len(path) - 1)] for host in hosts: try: cmd = base_cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)] utils.juju_log('INFO', 'Syncing local path %s to %s@%s:%s' % (path, user, host, path)) run_as_user(user, cmd) except: # it may fail for permissions on some files pass
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS
    .
    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    # Otherwise a peer must have published the full set of SSL material.
    required = ('https_keystone', 'ssl_cert', 'ssl_key', 'ca_cert')
    for rel in relation_ids('identity-service'):
        for member in relation_list(rel):
            if all(relation_get(attr, rid=rel, unit=member)
                   for attr in required):
                return True
    return False
def https():
    """
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS
    .
    returns: boolean
    """
    if config_get("use-https") == "yes":
        return True
    cert = config_get("ssl_cert")
    key = config_get("ssl_key")
    if cert and key:
        return True
    for rel in relation_ids("identity-service"):
        for member in relation_list(rel):
            # Every piece of SSL material must be present on the peer.
            peer_ready = (
                relation_get("https_keystone", rid=rel, unit=member) and
                relation_get("ssl_cert", rid=rel, unit=member) and
                relation_get("ssl_key", rid=rel, unit=member) and
                relation_get("ca_cert", rid=rel, unit=member))
            if peer_ready:
                return True
    return False
def db_changed():
    # shared-db hook: configure the SQL connection once the peer has
    # published credentials, run migrations on the leader, then re-fire
    # identity-service hooks so service entries exist in the new database.
    relation_data = utils.relation_get_dict()
    if ('password' not in relation_data or
            'db_host' not in relation_data):
        utils.juju_log('INFO',
                       "db_host or password not set. Peer not ready, exit 0")
        return
    update_config_block('sql',
                        connection="mysql://%s:%s@%s/%s" %
                        (config["database-user"],
                         relation_data["password"],
                         relation_data["db_host"],
                         config["database"]))
    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO', 'Cluster leader, performing db-sync')
        execute("keystone-manage db_sync", echo=True)
    # Restart only when the on-disk config actually changed.
    if config_dirty():
        utils.restart('keystone')
    time.sleep(5)
    if cluster.eligible_leader(CLUSTER_RES):
        ensure_initial_admin(config)
        # If the backend database has been switched to something new and there
        # are existing identity-service relations,, service entries need to be
        # recreated in the new database. Re-executing identity-service-changed
        # will do this.
        for rid in utils.relation_ids('identity-service'):
            for unit in utils.relation_list(rid=rid):
                utils.juju_log('INFO',
                               "Re-exec'ing identity-service-changed"
                               " for: %s - %s" % (rid, unit))
                identity_changed(relation_id=rid, remote_unit=unit)
def ha_joined():
    # 'ha' relation joined: hand the hacluster subordinate everything it
    # needs to run rabbitmq under pacemaker (VIP + ceph rbd storage), and
    # record EPMD/mcast ports in the scriptrc environment.
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    rbd_name = utils.config_get('rbd-name')
    # All of these options are mandatory; abort the hook if any are unset.
    if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
                vip_cidr, rbd_name]:
        utils.juju_log('ERROR', 'Insufficient configuration data to '
                       'configure hacluster.')
        sys.exit(1)
    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO',
                       'ha_joined: No ceph relation yet, deferring.')
        return
    # Pacemaker manages a single logical rabbit node; force the node name.
    name = '%s@localhost' % SERVICE_NAME
    if rabbit.get_node_name() != name:
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')
        rabbit.set_node_name('%s@localhost' % SERVICE_NAME)
    else:
        utils.juju_log('INFO', 'Node name already set to %s.' % name)
    relation_settings = {}
    relation_settings['corosync_bindiface'] = corosync_bindiface
    relation_settings['corosync_mcastport'] = corosync_mcastport
    # Resource agents: rbd device, filesystem, VIP and the rabbitmq job.
    relation_settings['resources'] = {
        'res_rabbitmq_rbd': 'ocf:ceph:rbd',
        'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
        'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
        'res_rabbitmq-server': 'lsb:rabbitmq-server',
    }
    # NOTE(review): 'user="******"' appears redacted — three %s placeholders
    # vs four format arguments; confirm against the upstream charm.
    relation_settings['resource_params'] = {
        'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="******" '
                            'secret="%s"' %
                            (rbd_name, POOL_NAME, SERVICE_NAME,
                             ceph.keyfile_path(SERVICE_NAME)),
        'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                           'fstype="ext4" op start start-delay="10s"' %
                           (POOL_NAME, rbd_name, RABBIT_DIR),
        'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                            (vip, vip_cidr, vip_iface),
        'res_rabbitmq-server': 'op start start-delay="5s" '
                               'op monitor interval="5s"',
    }
    relation_settings['groups'] = {
        'grp_rabbitmq': 'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
                        'res_rabbitmq-server',
    }
    for rel_id in utils.relation_ids('ha'):
        utils.relation_set(rid=rel_id, **relation_settings)
    env_vars = {
        'OPENSTACK_PORT_EPMD': 4369,
        'OPENSTACK_PORT_MCASTPORT': utils.config_get('ha-mcastport'),
    }
    openstack.save_script_rc(**env_vars)
def peer_units():
    """Return every unit participating in the 'cluster' peer relation."""
    return [member
            for rid in (relation_ids('cluster') or [])
            for member in (relation_list(rid) or [])]
def do_openstack_upgrade(install_src, packages):
    '''Upgrade packages from a given install src.

    install_src: OpenStack installation source (e.g. cloud archive pocket).
    packages: space-joinable package list to install with confnew.
    '''
    config = config_get()
    old_vers = get_os_codename_package('keystone')
    new_vers = get_os_codename_install_source(install_src)
    utils.juju_log('INFO',
                   "Beginning Keystone upgrade: %s -> %s" %
                   (old_vers, new_vers))
    # Backup previous config.
    utils.juju_log('INFO', "Backing up contents of /etc/keystone.")
    stamp = time.strftime('%Y%m%d%H%M')
    cmd = 'tar -pcf /var/lib/juju/keystone-backup-%s.tar /etc/keystone' % stamp
    execute(cmd, die=True, echo=True)
    configure_installation_source(install_src)
    execute('apt-get update', die=True, echo=True)
    # Non-interactive apt; --force-confnew replaces config files outright.
    os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
    cmd = 'apt-get --option Dpkg::Options::=--force-confnew -y '\
          'install %s' % packages
    execute(cmd, echo=True, die=True)
    # we have new, fresh config files that need updating.
    # set the admin token, which is still stored in config.
    set_admin_token(config['admin-token'])
    # set the sql connection string if a shared-db relation is found.
    ids = utils.relation_ids('shared-db')
    if ids:
        for rid in ids:
            for unit in utils.relation_list(rid):
                utils.juju_log('INFO',
                               'Configuring new keystone.conf for '
                               'database access on existing database'
                               ' relation to %s' % unit)
                relation_data = utils.relation_get_dict(relation_id=rid,
                                                        remote_unit=unit)
                update_config_block('sql',
                                    connection="mysql://%s:%s@%s/%s" %
                                    (config["database-user"],
                                     relation_data["password"],
                                     relation_data["private-address"],
                                     config["database"]))
    utils.stop('keystone')
    # Only the leader migrates the shared DB; peers wait for it to finish.
    if (cluster.eligible_leader(CLUSTER_RES)):
        utils.juju_log('INFO',
                       'Running database migrations for %s' % new_vers)
        execute('keystone-manage db_sync', echo=True, die=True)
    else:
        utils.juju_log('INFO',
                       'Not cluster leader; snoozing whilst'
                       ' leader upgrades DB')
        time.sleep(10)
    utils.start('keystone')
    time.sleep(5)
    utils.juju_log('INFO',
                   'Completed Keystone upgrade: '
                   '%s -> %s' % (old_vers, new_vers))
def peer_units():
    """List the units present on the "cluster" peer relation."""
    members = []
    for rid in relation_ids("cluster") or []:
        members.extend(relation_list(rid) or [])
    return members
def ha_relation_joined():
    # Publish VIP + ceph-rbd-backed mysqld resource configuration to the
    # hacluster subordinate once both config and the ceph relation exist.
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')
    # VIP details are mandatory; fail the hook if incomplete.
    if None in [vip, vip_cidr, vip_iface]:
        utils.juju_log('WARNING',
                       'Insufficient VIP information to configure cluster')
        sys.exit(1)
    # Starting configuring resources.
    init_services = {
        'res_mysqld': 'mysql',
    }
    # If the 'ha' relation has been made *before* the 'ceph' relation,
    # it doesn't make sense to make it until after the 'ceph' relation is made
    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log(
            'INFO',
            '*ceph* relation does not exist. '
            'Not sending *ha* relation data yet')
        return
    else:
        utils.juju_log('INFO',
                       '*ceph* relation exists. Sending *ha* relation data')
    block_storage = 'ceph'
    # Pacemaker resource agents: rbd device, filesystem, VIP, mysqld job.
    resources = {
        'res_mysql_rbd': 'ocf:ceph:rbd',
        'res_mysql_fs': 'ocf:heartbeat:Filesystem',
        'res_mysql_vip': 'ocf:heartbeat:IPaddr2',
        'res_mysqld': 'upstart:mysql',
    }
    rbd_name = utils.config_get('rbd-name')
    # NOTE(review): 'user="******"' appears redacted — three %s placeholders
    # vs four format arguments; confirm against the upstream charm.
    resource_params = {
        'res_mysql_rbd': 'params name="%s" pool="%s" user="******" '
                         'secret="%s"' % \
                         (rbd_name, POOL_NAME, SERVICE_NAME,
                          ceph.keyfile_path(SERVICE_NAME)),
        'res_mysql_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                        'fstype="ext4" op start start-delay="10s"' % \
                        (POOL_NAME, rbd_name, DATA_SRC_DST),
        'res_mysql_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                         (vip, vip_cidr, vip_iface),
        'res_mysqld': 'op start start-delay="5s" op monitor interval="5s"',
    }
    groups = {
        'grp_mysql': 'res_mysql_rbd res_mysql_fs res_mysql_vip res_mysqld',
    }
    # Hand the complete resource description to every 'ha' relation.
    for rel_id in utils.relation_ids('ha'):
        utils.relation_set(rid=rel_id,
                           block_storage=block_storage,
                           corosync_bindiface=corosync_bindiface,
                           corosync_mcastport=corosync_mcastport,
                           resources=resources,
                           resource_params=resource_params,
                           init_services=init_services,
                           groups=groups)
def ha_joined():
    # 'ha' relation joined: send the hacluster subordinate the corosync and
    # pacemaker resource description (VIP + ceph rbd backed rabbitmq), then
    # export EPMD/mcast ports via the scriptrc environment.
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    rbd_name = utils.config_get('rbd-name')
    # All of these options are mandatory; abort the hook if any are unset.
    if None in [
        corosync_bindiface, corosync_mcastport, vip, vip_iface, vip_cidr,
        rbd_name
    ]:
        utils.juju_log(
            'ERROR',
            'Insufficient configuration data to '
            'configure hacluster.')
        sys.exit(1)
    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO',
                       'ha_joined: No ceph relation yet, deferring.')
        return
    # Pacemaker manages one logical rabbit node; force the node name.
    name = '%s@localhost' % SERVICE_NAME
    if rabbit.get_node_name() != name:
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')
        rabbit.set_node_name('%s@localhost' % SERVICE_NAME)
    else:
        utils.juju_log('INFO', 'Node name already set to %s.' % name)
    relation_settings = {}
    relation_settings['corosync_bindiface'] = corosync_bindiface
    relation_settings['corosync_mcastport'] = corosync_mcastport
    # Resource agents: rbd device, filesystem, VIP and the rabbitmq job.
    relation_settings['resources'] = {
        'res_rabbitmq_rbd': 'ocf:ceph:rbd',
        'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
        'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
        'res_rabbitmq-server': 'lsb:rabbitmq-server',
    }
    # NOTE(review): 'user="******"' appears redacted — three %s placeholders
    # vs four format arguments; confirm against the upstream charm.
    relation_settings['resource_params'] = {
        'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="******" '
                            'secret="%s"' %
                            (rbd_name, POOL_NAME, SERVICE_NAME,
                             ceph.keyfile_path(SERVICE_NAME)),
        'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                           'fstype="ext4" op start start-delay="10s"' %
                           (POOL_NAME, rbd_name, RABBIT_DIR),
        'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                            (vip, vip_cidr, vip_iface),
        'res_rabbitmq-server': 'op start start-delay="5s" '
                               'op monitor interval="5s"',
    }
    relation_settings['groups'] = {
        'grp_rabbitmq': 'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
                        'res_rabbitmq-server',
    }
    for rel_id in utils.relation_ids('ha'):
        utils.relation_set(rid=rel_id, **relation_settings)
    env_vars = {
        'OPENSTACK_PORT_EPMD': 4369,
        'OPENSTACK_PORT_MCASTPORT': utils.config_get('ha-mcastport'),
    }
    openstack.save_script_rc(**env_vars)