Code Example #1
File: ceph_hooks.py Project: marosg42/charm-ceph-mon
def assess_status():
    '''Assess status of current unit'''
    application_version_set(get_upstream_version(VERSION_PACKAGE))
    if is_unit_upgrading_set():
        status_set(
            "blocked", "Ready for do-release-upgrade and reboot. "
            "Set complete when finished.")
        return

    # Check that the no-bootstrap config option is set in conjunction with
    # having the bootstrap-source relation established
    if not config('no-bootstrap') and is_relation_made('bootstrap-source'):
        status_set(
            'blocked', 'Cannot join the bootstrap-source relation when '
            'no-bootstrap is False')
        return

    moncount = int(config('monitor-count'))
    units = get_peer_units()
    # not enough peers and mon_count > 1
    if len(units.keys()) < moncount:
        status_set(
            'blocked', 'Insufficient peer units to bootstrap'
            ' cluster (require {})'.format(moncount))
        return

    # mon_count > 1, peers, but no ceph-public-address
    ready = sum(1 for unit_ready in units.values() if unit_ready)
    if ready < moncount:
        status_set('waiting', 'Peer units detected, waiting for addresses')
        return

    configured_rbd_features = config('default-rbd-features')
    if has_rbd_mirrors() and configured_rbd_features:
        if add_rbd_mirror_features(
                configured_rbd_features) != configured_rbd_features:
            # The configured RBD features bitmap does not contain the features
            # required for RBD Mirroring
            status_set(
                'blocked', 'Configuration mismatch: RBD Mirroring '
                'enabled but incorrect value set for '
                '``default-rbd-features``')
            return

    # active - bootstrapped + quorum status check
    if ceph.is_bootstrapped() and ceph.is_quorum():
        expected_osd_count = config('expected-osd-count') or 3
        if sufficient_osds(expected_osd_count):
            status_set('active', 'Unit is ready and clustered')
        else:
            status_set(
                'waiting', 'Monitor bootstrapped but waiting for number of'
                ' OSDs to reach expected-osd-count ({})'.format(
                    expected_osd_count))
    else:
        # Unit should be running and clustered, but no quorum
        # TODO: should this be blocked or waiting?
        status_set('blocked', 'Unit not clustered (no quorum)')
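
The two peer checks above are distinct: the first counts how many peers have joined, the second counts how many have actually presented an address. Below is a minimal sketch of the contract assess_status() relies on; the return shape of get_peer_units() is inferred from its usage above, not taken from the charm itself:

def get_peer_units():
    # Inferred contract: {unit_name: True if the unit has presented an
    # address, else False}; includes the local unit.
    return {'ceph-mon/0': True, 'ceph-mon/1': True, 'ceph-mon/2': False}

moncount = 3
units = get_peer_units()
assert len(units.keys()) >= moncount    # enough peers joined...
ready = sum(1 for unit_ready in units.values() if unit_ready)
assert ready < moncount                 # ...but one address is still missing
# assess_status() would then report:
# 'waiting', 'Peer units detected, waiting for addresses'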
Code Example #2
File: utils.py Project: iac-projects/charm-hacluster
def assess_status_helper():
    """Assess status of unit

    @returns status, message - status is workload status and message is any
                               corresponding messages
    """
    if config('stonith_enabled') in ['true', 'True', True]:
        return ('blocked',
                'stonith_enabled config option is no longer supported')

    if is_unit_upgrading_set():
        return ("blocked",
                "Ready for do-release-upgrade. Set complete when finished")
    if is_waiting_unit_series_upgrade_set():
        return ("blocked",
                "HA services shutdown, peers are ready for series upgrade")
    if is_unit_paused_set():
        return ("maintenance",
                "Paused. Use 'resume' action to resume normal service.")

    node_count = int(config('cluster_count'))
    status = 'active'
    message = 'Unit is ready and clustered'
    try:
        try_pcmk_wait()
    except pcmk.ServicesNotUp:
        message = 'Pacemaker is down'
        status = 'blocked'
    for relid in relation_ids('hanode'):
        if len(related_units(relid)) + 1 < node_count:
            status = 'blocked'
            message = ("Insufficient peer units for ha cluster "
                       "(require {})".format(node_count))

    # if the status was not changed earlier, we verify the maintenance status
    try:
        if status == 'active':
            prop = pcmk.get_property('maintenance-mode').strip()
    except pcmk.PropertyNotFound:
        # the property is not in the output of 'crm configure show xml', so we
        # use the default value for this property. For crmsh>=2.2.0 the default
        # value is automatically provided by show-property or get-property.
        prop = 'false'

    if (status == 'active' and prop == 'true'):
        # maintenance mode enabled in pacemaker
        status = 'maintenance'
        message = 'Pacemaker in maintenance mode'

    for resource in get_resources().keys():
        if not pcmk.is_resource_present(resource):
            return ("waiting",
                    "Resource: {} not yet configured".format(resource))
        if not pcmk.crm_res_running_on_node(resource, get_hostname()):
            return ("blocked", "Resource: {} not running".format(resource))

    return status, message
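
The try/except around pcmk.get_property('maintenance-mode') exists because older crmsh versions omit unset properties from 'crm configure show xml'. A hedged sketch of that fallback extracted into a helper, reusing the pcmk module and PropertyNotFound exception from the example above:

def get_property_or_default(name, default):
    # Return a pacemaker cluster property, falling back to a default when
    # older crmsh omits unset properties from 'crm configure show xml'.
    try:
        return pcmk.get_property(name).strip()
    except pcmk.PropertyNotFound:
        return default

# Equivalent to the maintenance-mode check above:
# if (status == 'active' and
#         get_property_or_default('maintenance-mode', 'false') == 'true'):
#     status, message = 'maintenance', 'Pacemaker in maintenance mode'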
Code Example #3
def assess_status():
    """Assess status of current unit"""
    # check to see if the unit is paused.
    application_version_set(get_upstream_version(VERSION_PACKAGE))
    if is_unit_upgrading_set():
        status_set("blocked",
                   "Ready for do-release-upgrade and reboot. "
                   "Set complete when finished.")
        return
    if is_unit_paused_set():
        status_set('maintenance',
                   "Paused. Use 'resume' action to resume normal service.")
        return
    # Check for mon relation
    if len(relation_ids('mon')) < 1:
        status_set('blocked', 'Missing relation: monitor')
        return

    # Check for monitors with presented addresses
    # Check for bootstrap key presentation
    monitors = get_mon_hosts()
    if len(monitors) < 1 or not get_conf('osd_bootstrap_key'):
        status_set('waiting', 'Incomplete relation: monitor')
        return

    # Check for vault
    if use_vaultlocker():
        if not relation_ids('secrets-storage'):
            status_set('blocked', 'Missing relation: vault')
            return
        if not vaultlocker.vault_relation_complete():
            status_set('waiting', 'Incomplete relation: vault')
            return

    # Check for OSD device creation parity i.e. at least some devices
    # must have been presented and used for this charm to be operational
    (prev_status, prev_message) = status_get()
    running_osds = ceph.get_running_osds()
    if not prev_message.startswith('Non-pristine'):
        if not running_osds:
            status_set('blocked',
                       'No block devices detected using current configuration')
        else:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))
    else:
        pristine = True
        osd_journals = get_journal_devices()
        for dev in list(set(ceph.unmounted_disks()) - set(osd_journals)):
            if (not ceph.is_active_bluestore_device(dev) and
                    not ceph.is_pristine_disk(dev)):
                pristine = False
                break
        if pristine:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))
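
In the "Non-pristine" branch, the unit only returns to active once every unmounted disk that is not a journal device is either an active bluestore device or still pristine. The loop reads naturally as all() over a set difference; a behavior-equivalent sketch, assuming the same ceph helpers and get_journal_devices() as in the example:

def all_devices_accounted_for():
    # Journal devices are excluded from the scan; anything left must be an
    # active bluestore device or a never-written (pristine) disk.
    candidates = set(ceph.unmounted_disks()) - set(get_journal_devices())
    return all(ceph.is_active_bluestore_device(dev) or
               ceph.is_pristine_disk(dev)
               for dev in candidates)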
Code Example #4
File: lxd_utils.py Project: openstack/charm-lxd
def assess_status():
    '''Determine status of current unit'''
    if is_unit_upgrading_set():
        status_set('blocked',
                   'Ready for do-release-upgrade and reboot. '
                   'Set complete when finished.')
    elif lxd_running():
        status_set('active', 'Unit is ready')
    else:
        status_set('blocked', 'LXD is not running')
    application_version_set(get_upstream_version(VERSION_PACKAGE))
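
Charms in this style usually recompute workload status at the end of every hook, which is why assess_status() takes no arguments and reads all state fresh. A minimal sketch of that wiring, assuming charmhelpers' Hooks dispatcher; the hook body is a placeholder:

import sys

from charmhelpers.core.hookenv import Hooks

hooks = Hooks()

@hooks.hook('config-changed')
def config_changed():
    pass  # placeholder: a real charm reconfigures the workload here

if __name__ == '__main__':
    try:
        hooks.execute(sys.argv)
    finally:
        assess_status()  # always leave an up-to-date workload status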
Code Example #5
def assess_status(self):
    """Determine the current application status for the charm"""
    hookenv.application_version_set(self.application_version)
    if not self.configuration_complete():
        hookenv.status_set('blocked', 'LDAP configuration incomplete')
    elif os_utils.is_unit_upgrading_set():
        hookenv.status_set(
            'blocked', 'Ready for do-release-upgrade and reboot. '
            'Set complete when finished.')
    else:
        hookenv.status_set('active', 'Unit is ready')
Code Example #6
def assess_status():
    '''Determine status of current unit'''
    if is_unit_upgrading_set():
        status_set(
            'blocked', 'Ready for do-release-upgrade and reboot. '
            'Set complete when finished.')
    elif lxd_running():
        status_set('active', 'Unit is ready')
    else:
        status_set('blocked', 'LXD is not running')
    application_version_set(get_upstream_version(VERSION_PACKAGE))
Code Example #7
def assess_status():
    '''Assess status of current unit'''
    if is_unit_upgrading_set():
        status_set("blocked",
                   "Ready for do-release-upgrade and reboot. "
                   "Set complete when finished.")
        return

    if ready():
        status_set('active', 'Ready to proxy settings')
    else:
        status_set('blocked', 'Ensure FSID and admin-key are set')
Code Example #8
File: ceph_hooks.py Project: aasbin/charm-ceph-osd
def assess_status():
    """Assess status of current unit"""
    # check to see if the unit is paused.
    application_version_set(get_upstream_version(VERSION_PACKAGE))
    if is_unit_upgrading_set():
        status_set(
            "blocked", "Ready for do-release-upgrade and reboot. "
            "Set complete when finished.")
        return
    if is_unit_paused_set():
        status_set('maintenance',
                   "Paused. Use 'resume' action to resume normal service.")
        return
    # Check for mon relation
    if len(relation_ids('mon')) < 1:
        status_set('blocked', 'Missing relation: monitor')
        return

    # Check for monitors with presented addresses
    # Check for bootstrap key presentation
    monitors = get_mon_hosts()
    if len(monitors) < 1 or not get_conf('osd_bootstrap_key'):
        status_set('waiting', 'Incomplete relation: monitor')
        return

    # Check for vault
    if use_vaultlocker():
        if not relation_ids('secrets-storage'):
            status_set('blocked', 'Missing relation: vault')
            return
        if not vaultlocker.vault_relation_complete():
            status_set('waiting', 'Incomplete relation: vault')
            return

    # Check for OSD device creation parity i.e. at least some devices
    # must have been presented and used for this charm to be operational
    (prev_status, prev_message) = status_get()
    running_osds = ceph.get_running_osds()
    if not prev_message.startswith('Non-pristine'):
        if not running_osds:
            status_set(
                'blocked',
                'No block devices detected using current configuration')
        else:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))
Code Example #9
def charm_check_func():
    """Custom function to assess the status of the current unit

    @returns (status, message) - tuple of strings if an issue
    """
    if is_unit_upgrading_set():
        # Avoid looping through attempting to determine cluster_in_sync
        return ("blocked", "Unit upgrading.")

    @retry_on_exception(num_retries=10,
                        base_delay=2,
                        exc_type=DesyncedException)
    def _cluster_in_sync():
        '''Helper func to wait for a while for resync to occur

        @raise DesyncedException: raised if local unit is not in sync
                                 with its peers
        '''
        if not cluster_in_sync():
            raise DesyncedException()

    min_size = config('min-cluster-size')
    # Ensure that number of peers > cluster size configuration
    if not is_sufficient_peers():
        return ('blocked', 'Insufficient peers to bootstrap cluster')

    if min_size and int(min_size) > 1:
        # Once running, ensure that cluster is in sync
        # and has the required peers
        if not is_bootstrapped():
            return ('waiting', 'Unit waiting for cluster bootstrap')
        elif cluster_ready():
            try:
                _cluster_in_sync()
                return ('active', 'Unit is ready and clustered')
            except DesyncedException:
                return ('blocked', 'Unit is not in sync')
        else:
            return ('waiting', 'Unit waiting on hacluster relation')
    else:
        if seeded():
            return ('active', 'Unit is ready')
        else:
            return ('waiting', 'Unit waiting to bootstrap')
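
retry_on_exception retries _cluster_in_sync() while peers resync instead of failing on the first DesyncedException. A minimal stand-in with the call signature used above; the real charmhelpers decorator may differ in its backoff details:

import time
from functools import wraps

def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    # Retry the wrapped callable when exc_type is raised, sleeping
    # base_delay seconds between attempts; re-raise after the last try.
    def decorator(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            for attempt in range(num_retries):
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if attempt == num_retries - 1:
                        raise
                    time.sleep(base_delay)
        return wrapped
    return decorator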
Code Example #10
File: utils.py Project: gnuoy/charm-hacluster
def assess_status_helper():
    """Assess status of unit

    @returns status, message - status is workload status and message is any
                               corresponding messages
    """

    if is_unit_upgrading_set():
        return ("blocked",
                "Ready for do-release-upgrde. Set complete when finished")
    if is_unit_paused_set():
        return ("maintenance",
                "Paused. Use 'resume' action to resume normal service.")

    node_count = int(config('cluster_count'))
    status = 'active'
    message = 'Unit is ready and clustered'
    try:
        try_pcmk_wait()
    except pcmk.ServicesNotUp:
        message = 'Pacemaker is down'
        status = 'blocked'
    for relid in relation_ids('hanode'):
        if len(related_units(relid)) + 1 < node_count:
            status = 'blocked'
            message = ("Insufficient peer units for ha cluster "
                       "(require {})".format(node_count))

    # if the status was not changed earlier, we verify the maintenance status
    try:
        if status == 'active':
            prop = pcmk.get_property('maintenance-mode').strip()
    except pcmk.PropertyNotFound:
        # the property is not in the output of 'crm configure show xml', so we
        # use the default value for this property. For crmsh>=2.2.0 the default
        # value is automatically provided by show-property or get-property.
        prop = 'false'

    if (status == 'active' and prop == 'true'):
        # maintenance mode enabled in pacemaker
        status = 'maintenance'
        message = 'Pacemaker in maintenance mode'

    return status, message
Code Example #11
def assess_status():
    '''Assess status of current unit'''
    application_version_set(get_upstream_version(VERSION_PACKAGE))
    if is_unit_upgrading_set():
        status_set(
            "blocked", "Ready for do-release-upgrade and reboot. "
            "Set complete when finished.")
        return

    # Check that the no-bootstrap config option is set in conjunction with
    # having the bootstrap-source relation established
    if not config('no-bootstrap') and is_relation_made('bootstrap-source'):
        status_set(
            'blocked', 'Cannot join the bootstrap-source relation when '
            'no-bootstrap is False')
        return

    moncount = int(config('monitor-count'))
    units = get_peer_units()
    # not enough peers and mon_count > 1
    if len(units.keys()) < moncount:
        status_set(
            'blocked', 'Insufficient peer units to bootstrap'
            ' cluster (require {})'.format(moncount))
        return

    # mon_count > 1, peers, but no ceph-public-address
    ready = sum(1 for unit_ready in units.values() if unit_ready)
    if ready < moncount:
        status_set('waiting', 'Peer units detected, waiting for addresses')
        return

    # active - bootstrapped + quorum status check
    if ceph.is_bootstrapped() and ceph.is_quorum():
        status_set('active', 'Unit is ready and clustered')
    else:
        # Unit should be running and clustered, but no quorum
        # TODO: should this be blocked or waiting?
        status_set('blocked', 'Unit not clustered (no quorum)')
Code Example #12
File: utils.py Project: iac-projects/charm-hacluster
def pause_unit():
    """Pause services on this unit and update the units status

    @returns None
    """
    node_name = get_hostname()
    messages = []
    enter_standby_mode(node_name)
    if not is_in_standby_mode(node_name):
        messages.append("Node not in standby mode")

    # some resources may take some time to be migrated out from the node. So 3
    # retries are made with a 5 seconds wait between each one.
    i = 0
    ready = False
    has_resources = False
    while i < PCMKR_MAX_RETRIES and not ready:
        if node_has_resources(node_name):
            has_resources = True
            i += 1
            time.sleep(PCMKR_SLEEP_SECS)
        else:
            ready = True
            has_resources = False

    if has_resources:
        messages.append("Resources still running on unit")
    status, message = assess_status_helper()
    # New status message will indicate the resource is not running
    if status != 'active' and 'not running' not in message:
        messages.append(message)
    if messages and not is_unit_upgrading_set():
        raise Exception("Couldn't pause: {}".format("; ".join(messages)))
    else:
        set_unit_paused()
        status_set("maintenance",
                   "Paused. Use 'resume' action to resume normal service.")
Code Example #13
def upgrade():

    if is_leader():
        if is_unit_paused_set() or is_unit_upgrading_set():
        log('Unit is paused, skipping upgrade', level=INFO)
            return

        # Leader sets on upgrade
        leader_set(**{'leader-ip': get_relation_ip('cluster')})
        configure_sstuser(sst_password())
        if not leader_get('root-password') and leader_get('mysql.passwd'):
            leader_set(**{'root-password': leader_get('mysql.passwd')})

        # On upgrade-charm we assume the cluster was complete at some point
        kvstore = kv()
        initial_clustered = kvstore.get(INITIAL_CLUSTERED_KEY, False)
        if not initial_clustered:
            kvstore.set(key=INITIAL_CLUSTERED_KEY, value=True)
            kvstore.flush()

        # broadcast the bootstrap-uuid
        wsrep_ready = get_wsrep_value('wsrep_ready') or ""
        if wsrep_ready.lower() in ['on', 'ready']:
            cluster_state_uuid = get_wsrep_value('wsrep_cluster_state_uuid')
            if cluster_state_uuid:
                mark_seeded()
                notify_bootstrapped(cluster_uuid=cluster_state_uuid)
    else:
        # Ensure all the peers have the bootstrap-uuid attribute set.
        # As this is all happening during the upgrade-charm hook, it is
        # reasonable to expect the cluster is running.

        # Wait until the leader has set the bootstrap-uuid.
        try:
            update_bootstrap_uuid()
        except LeaderNoBootstrapUUIDError:
            status_set('waiting', "Waiting for bootstrap-uuid set by leader")
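
The INITIAL_CLUSTERED_KEY handling above is a one-way latch in the charm's local kv store: once the cluster has been seen complete, the flag never reverts, which is what clustered_once() reads in the config_changed examples later on this page. A sketch of the pattern using charmhelpers' unitdata; the key name here is assumed, not taken from the charm:

from charmhelpers.core.unitdata import kv

INITIAL_CLUSTERED_KEY = 'initial-cluster-complete'  # assumed key name

def mark_clustered_once():
    # One-way latch: record that the cluster was complete at least once.
    store = kv()
    if not store.get(INITIAL_CLUSTERED_KEY, False):
        store.set(key=INITIAL_CLUSTERED_KEY, value=True)
        store.flush()

def clustered_once():
    return kv().get(INITIAL_CLUSTERED_KEY, False)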
Code Example #14
File: utils.py Project: gnuoy/charm-hacluster
def pause_unit():
    """Pause services on this unit and update the units status

    @returns None
    """
    node_name = get_hostname()
    messages = []
    enter_standby_mode(node_name)
    if not is_in_standby_mode(node_name):
        messages.append("Node not in standby mode")

    # some resources may take some time to be migrated out from the node. So 3
    # retries are made with a 5 seconds wait between each one.
    i = 0
    ready = False
    has_resources = False
    while i < PCMKR_MAX_RETRIES and not ready:
        if node_has_resources(node_name):
            has_resources = True
            i += 1
            time.sleep(PCMKR_SLEEP_SECS)
        else:
            ready = True
            has_resources = False

    if has_resources:
        messages.append("Resources still running on unit")
    status, message = assess_status_helper()
    if status != 'active':
        messages.append(message)
    if messages and not is_unit_upgrading_set():
        raise Exception("Couldn't pause: {}".format("; ".join(messages)))
    else:
        set_unit_paused()
        status_set("maintenance",
                   "Paused. Use 'resume' action to resume normal service.")
Code Example #15
def assess_status():
    """Assess status of current unit"""
    # check to see if the unit is paused.
    application_version_set(get_upstream_version(VERSION_PACKAGE))
    if is_unit_upgrading_set():
        status_set(
            "blocked", "Ready for do-release-upgrade and reboot. "
            "Set complete when finished.")
        return
    if is_unit_paused_set():
        status_set('maintenance',
                   "Paused. Use 'resume' action to resume normal service.")
        return
    # Check for mon relation
    if len(relation_ids('mon')) < 1:
        status_set('blocked', 'Missing relation: monitor')
        return

    # Check for monitors with presented addresses
    # Check for bootstrap key presentation
    monitors = get_mon_hosts()
    if len(monitors) < 1 or not get_conf('osd_bootstrap_key'):
        status_set('waiting', 'Incomplete relation: monitor')
        return

    # Check for vault
    if use_vaultlocker():
        if not relation_ids('secrets-storage'):
            status_set('blocked', 'Missing relation: vault')
            return
        try:
            if not vaultlocker.vault_relation_complete():
                status_set('waiting', 'Incomplete relation: vault')
                return
        except Exception as e:
            status_set('blocked', "Warning: couldn't verify vault relation")
            log("Exception when verifying vault relation - maybe it was "
                "offline?:\n{}".format(str(e)))
            log("Traceback: {}".format(traceback.format_exc()))

    # Check for OSD device creation parity i.e. at least some devices
    # must have been presented and used for this charm to be operational
    (prev_status, prev_message) = status_get()
    running_osds = ceph.get_running_osds()
    if not prev_message.startswith('Non-pristine'):
        if not running_osds:
            status_set(
                'blocked',
                'No block devices detected using current configuration')
        else:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))
    else:
        pristine = True
        osd_journals = get_journal_devices()
        for dev in list(set(ceph.unmounted_disks()) - set(osd_journals)):
            if (not ceph.is_active_bluestore_device(dev)
                    and not ceph.is_pristine_disk(dev)):
                pristine = False
                break
        if pristine:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))

    try:
        get_bdev_enable_discard()
    except ValueError as ex:
        status_set('blocked', str(ex))

    try:
        bluestore_compression = ch_context.CephBlueStoreCompressionContext()
        bluestore_compression.validate()
    except ValueError as e:
        status_set('blocked', 'Invalid configuration: {}'.format(str(e)))
Code Example #16
def config_changed():

    # if we are paused or upgrading, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set() or is_unit_upgrading_set():
        log("Unit is paused or upgrading. Skipping config_changed", "WARN")
        return

    # It is critical that the installation is attempted first before any
    # rendering of the configuration files occurs.
    # install_percona_xtradb_cluster has the code to decide if this is the
    # leader or if the leader is bootstrapped and therefore ready for install.
    install_percona_xtradb_cluster()

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    hosts = get_cluster_hosts()
    leader_bootstrapped = is_leader_bootstrapped()
    leader_ip = leader_get('leader-ip')

    # Cluster upgrade adds some complication
    cluster_series_upgrading = leader_get("cluster_series_upgrading")
    if cluster_series_upgrading:
        leader = (leader_get('cluster_series_upgrade_leader') ==
                  get_relation_ip('cluster'))
        leader_ip = leader_get('cluster_series_upgrade_leader')
    else:
        leader = is_leader()
        leader_ip = leader_get('leader-ip')

    # (re)install pcmkr agent
    install_mysql_ocf()

    if leader:
        # If the cluster has not been fully bootstrapped once yet, use an empty
        # hosts list to avoid restarting the leader node's mysqld during
        # cluster buildup.
        # After the cluster has bootstrapped at least one time, it is much
        # less likely to have restart collisions. It is then safe to use the
        # full hosts list and have the leader node's mysqld restart.
        # Empty hosts if cluster_series_upgrading
        if not clustered_once() or cluster_series_upgrading:
            hosts = []
        log(
            "Leader unit - bootstrap required={}".format(
                not leader_bootstrapped), DEBUG)
        render_config_restart_on_changed(hosts,
                                         bootstrap=not leader_bootstrapped)
    elif (leader_bootstrapped and is_sufficient_peers()
          and not cluster_series_upgrading):
        # Skip if cluster_series_upgrading
        # Speed up cluster process by bootstrapping when the leader has
        # bootstrapped if we have expected number of peers
        # However, in a cold boot scenario do not add the "old" leader
        # when it matches this host.
        if (leader_ip not in hosts and leader_ip != get_cluster_host_ip()):
            # Fix Bug #1738896
            hosts = [leader_ip] + hosts
        log("Leader is bootstrapped - configuring mysql on this node", DEBUG)
        # Rendering the mysqld.cnf and restarting is bootstrapping for a
        # non-leader node.
        render_config_restart_on_changed(hosts)
        # Assert we are bootstrapped. This will throw an
        # InconsistentUUIDError exception if UUIDs do not match.
        update_bootstrap_uuid()
    else:
        # Until the bootstrap-uuid attribute is set by the leader,
        # cluster_ready() will evaluate to False. So it is necessary to
        # feed this information to the user.
        status_set('waiting', "Waiting for bootstrap-uuid set by leader")
        log('Non-leader waiting on leader bootstrap, skipping render', DEBUG)
        return

    # Notify any changes to the access network
    update_client_db_relations()

    for rid in relation_ids('ha'):
        # make sure all the HA resources are (re)created
        ha_relation_joined(relation_id=rid)

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()

    open_port(DEFAULT_MYSQL_PORT)

    # the password needs to be updated only if the node was already
    # bootstrapped
    if is_bootstrapped():
        if is_leader():
            update_root_password()
        set_ready_on_peers()

    # NOTE(tkurek): re-set 'master' relation data
    if relation_ids('master'):
        master_joined()
Code Example #17
def render_config(hosts=None):
    if hosts is None:
        hosts = []

    config_file = resolve_cnf_file()
    if not os.path.exists(os.path.dirname(config_file)):
        os.makedirs(os.path.dirname(config_file))

    context = {
        'cluster_name': 'juju_cluster',
        'private_address': get_cluster_host_ip(),
        'cluster_hosts': ",".join(hosts),
        'sst_method': config('sst-method'),
        'sst_password': sst_password(),
        'innodb_file_per_table': config('innodb-file-per-table'),
        'table_open_cache': config('table-open-cache'),
        'binlogs_path': config('binlogs-path'),
        'enable_binlogs': config('enable-binlogs'),
        'binlogs_max_size': config('binlogs-max-size'),
        'binlogs_expire_days': config('binlogs-expire-days'),
        'performance_schema': config('performance-schema'),
        'is_leader': is_leader(),
        'server_id': get_server_id(),
        'series_upgrade': is_unit_upgrading_set(),
    }

    if config('prefer-ipv6'):
        # NOTE(hopem): this is a kludge to get percona working with ipv6.
        # See lp 1380747 for more info. This is intended as a stop gap until
        # percona package is fixed to support ipv6.
        context['bind_address'] = '::'
        context['ipv6'] = True
    else:
        context['ipv6'] = False

    wsrep_provider_options = get_wsrep_provider_options()
    if wsrep_provider_options:
        context['wsrep_provider_options'] = wsrep_provider_options

    if config('wsrep-slave-threads') is not None:
        context['wsrep_slave_threads'] = config('wsrep-slave-threads')

    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
        # myisam_recover is not valid for PXC 5.7 (introduced in Bionic) so we
        # only set it for PXC 5.6.
        context['myisam_recover'] = 'BACKUP'
        context['wsrep_provider'] = '/usr/lib/libgalera_smm.so'
        if 'wsrep_slave_threads' not in context:
            context['wsrep_slave_threads'] = 1
    elif CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic':
        context['wsrep_provider'] = '/usr/lib/galera3/libgalera_smm.so'
        context['default_storage_engine'] = 'InnoDB'
        context['wsrep_log_conflicts'] = True
        context['innodb_autoinc_lock_mode'] = '2'
        context['pxc_strict_mode'] = config('pxc-strict-mode')
        if 'wsrep_slave_threads' not in context:
            context['wsrep_slave_threads'] = 48

    if config('databases-to-replicate'):
        context['databases_to_replicate'] = get_databases_to_replicate()

    context['server-id'] = get_server_id()

    context.update(PerconaClusterHelper().parse_config())
    render(os.path.basename(config_file), config_file, context, perms=0o444)
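
render() (from charmhelpers) fills a Jinja2 template with the context assembled above. The fragment below is invented for illustration only, to show how a few of those context keys would be consumed; the charm ships its own mysqld.cnf template:

from jinja2 import Template

MYCNF_FRAGMENT = """\
[mysqld]
bind-address = {{ bind_address | default('0.0.0.0') }}
server-id = {{ server_id }}
wsrep_provider = {{ wsrep_provider }}
wsrep_cluster_name = {{ cluster_name }}
wsrep_cluster_address = gcomm://{{ cluster_hosts }}
wsrep_slave_threads = {{ wsrep_slave_threads }}
"""

print(Template(MYCNF_FRAGMENT).render(
    server_id=5,
    wsrep_provider='/usr/lib/galera3/libgalera_smm.so',
    cluster_name='juju_cluster',
    cluster_hosts='10.0.0.5,10.0.0.6,10.0.0.7',
    wsrep_slave_threads=48))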
Code Example #18
def config_changed():

    # if we are paused or upgrading, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set() or is_unit_upgrading_set():
        log("Unit is paused or upgrading. Skipping config_changed", "WARN")
        return

    # It is critical that the installation is attempted first before any
    # rendering of the configuration files occurs.
    # install_percona_xtradb_cluster has the code to decide if this is the
    # leader or if the leader is bootstrapped and therefore ready for install.
    install_percona_xtradb_cluster()

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    hosts = get_cluster_hosts()
    leader_bootstrapped = is_leader_bootstrapped()
    leader_ip = leader_get('leader-ip')

    # Cluster upgrade adds some complication
    cluster_series_upgrading = leader_get("cluster_series_upgrading")
    if cluster_series_upgrading:
        leader = (leader_get('cluster_series_upgrade_leader') ==
                  get_relation_ip('cluster'))
        leader_ip = leader_get('cluster_series_upgrade_leader')
    else:
        leader = is_leader()
        leader_ip = leader_get('leader-ip')

    if leader:
        # If the cluster has not been fully bootstrapped once yet, use an empty
        # hosts list to avoid restarting the leader node's mysqld during
        # cluster buildup.
        # After the cluster has bootstrapped at least one time, it is much
        # less likely to have restart collisions. It is then safe to use the
        # full hosts list and have the leader node's mysqld restart.
        # Empty hosts if cluster_series_upgrading
        if not clustered_once() or cluster_series_upgrading:
            hosts = []
        log("Leader unit - bootstrap required=%s" % (not leader_bootstrapped),
            DEBUG)
        render_config_restart_on_changed(hosts,
                                         bootstrap=not leader_bootstrapped)
    elif (leader_bootstrapped and
          is_sufficient_peers() and not
          cluster_series_upgrading):
        # Skip if cluster_series_upgrading
        # Speed up cluster process by bootstrapping when the leader has
        # bootstrapped if we have expected number of peers
        if leader_ip not in hosts:
            # Fix Bug #1738896
            hosts = [leader_ip] + hosts
        log("Leader is bootstrapped - configuring mysql on this node",
            DEBUG)
        # Rendering the mysqld.cnf and restarting is bootstrapping for a
        # non-leader node.
        render_config_restart_on_changed(hosts)
        # Assert we are bootstrapped. This will throw an
        # InconsistentUUIDError exception if UUIDs do not match.
        update_bootstrap_uuid()
    else:
        # Until the bootstrap-uuid attribute is set by the leader,
        # cluster_ready() will evaluate to False. So it is necessary to
        # feed this information to the user.
        status_set('waiting', "Waiting for bootstrap-uuid set by leader")
        log('Non-leader waiting on leader bootstrap, skipping render',
            DEBUG)
        return

    # Notify any changes to the access network
    update_client_db_relations()

    # (re)install pcmkr agent
    install_mysql_ocf()

    for rid in relation_ids('ha'):
        # make sure all the HA resources are (re)created
        ha_relation_joined(relation_id=rid)

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()

    open_port(DEFAULT_MYSQL_PORT)

    # the password needs to be updated only if the node was already
    # bootstrapped
    if is_bootstrapped():
        update_root_password()
        set_ready_on_peers()

    # NOTE(tkurek): re-set 'master' relation data
    if relation_ids('master'):
        master_joined()
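
update_bootstrap_uuid() is where a non-leader asserts that its local Galera state matches what the leader published. A hedged sketch of that consistency check, composed from names that appear in the examples above (leader_get, get_wsrep_value, LeaderNoBootstrapUUIDError, InconsistentUUIDError); the charm's actual relation plumbing and key names may differ:

def update_bootstrap_uuid_sketch():
    # Assumed leader-settings key; bail out if the leader has not
    # published a bootstrap UUID yet.
    leader_uuid = leader_get('bootstrap-uuid')
    if not leader_uuid:
        raise LeaderNoBootstrapUUIDError()
    # Compare against the local Galera cluster state UUID.
    local_uuid = get_wsrep_value('wsrep_cluster_state_uuid')
    if local_uuid and local_uuid != leader_uuid:
        raise InconsistentUUIDError(
            'local {} != leader {}'.format(local_uuid, leader_uuid))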