Code Example #1
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        sys.exit(0)

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth)

    if cluster.eligible_leader('res_rabbitmq_vip'):
        rbd_img = utils.config_get('rbd-name')
        rbd_size = utils.config_get('rbd-size')
        sizemb = int(rbd_size.split('G')[0]) * 1024
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                 rbd_img=rbd_img, sizemb=sizemb,
                                 fstype='ext4', mount_point=RABBIT_DIR,
                                 blk_device=blk_device,
                                 system_services=['rabbitmq-server'])
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')

    # If the 'ha' relation has been made before the 'ceph' relation,
    # it is important to make sure the ha-relation data is sent.
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO', '*ha* relation exists. Triggering ha_joined()')
        ha_joined()
    else:
        utils.juju_log('INFO', '*ha* relation does not exist.')
    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
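
A quick check of the 'rbd-size' parsing above: a config value such as
'5G' (a hypothetical setting) is converted to megabytes like so:

>>> int('5G'.split('G')[0]) * 1024
5120
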
Code Example #2
def install_hook():
    execd_preinstall()
    utils.configure_source()
    utils.install(*packages)
    update_config_block(
        'DEFAULT',
        public_port=cluster.determine_api_port(config["service-port"]))
    update_config_block(
        'DEFAULT',
        admin_port=cluster.determine_api_port(config["admin-port"]))
    set_admin_token(config['admin-token'])

    # Set all backends to use sql+sqlite, in case they do not already
    # do so by default.
    update_config_block('sql',
                        connection='sqlite:////var/lib/keystone/keystone.db')
    update_config_block('identity',
                        driver='keystone.identity.backends.sql.Identity')
    update_config_block('catalog',
                        driver='keystone.catalog.backends.sql.Catalog')
    update_config_block('token',
                        driver='keystone.token.backends.sql.Token')
    update_config_block('ec2',
                        driver='keystone.contrib.ec2.backends.sql.Ec2')

    utils.stop('keystone')
    execute("keystone-manage db_sync")
    utils.start('keystone')

    # Ensure the user and permissions exist for peer relations that
    # may be syncing data via SSH_USER.
    unison.ensure_user(user=SSH_USER, group='keystone')
    execute("chmod -R g+wrx /var/lib/keystone/")

    time.sleep(5)
    ensure_initial_admin(config)
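
The update_config_block() calls above should leave keystone.conf with
sections along these lines (a sketch only; the two port values depend on
cluster.determine_api_port(), so the well-known Keystone defaults 5000
and 35357 below are merely illustrative):

[DEFAULT]
public_port = 5000
admin_port = 35357
admin_token = <admin-token>

[sql]
connection = sqlite:////var/lib/keystone/keystone.db

[identity]
driver = keystone.identity.backends.sql.Identity

[catalog]
driver = keystone.catalog.backends.sql.Catalog

[token]
driver = keystone.token.backends.sql.Token

[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
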
Code Example #3
File: ceph_utils.py  Project: peterklipfel/firesuit
def ensure_ceph_storage(service,
                        pool,
                        rbd_img,
                        sizemb,
                        mount_point,
                        blk_device,
                        fstype,
                        system_services=[],
                        rbd_pool_replicas=2):
    """
    To be called from the current cluster leader.
    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being remounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
        create_pool(service, pool, replicas=rbd_pool_replicas)

    if not rbd_exists(service, pool, rbd_img):
        utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry,
    #      otherwise this hook will blow away existing data if it's executed
    #      after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if utils.running(svc):
                utils.juju_log(
                    'INFO', 'Stopping service %s prior to migrating '
                    'data' % svc)
                utils.stop(svc)

        place_data_on_ceph(service, blk_device, mount_point, fstype)

        for svc in system_services:
            utils.start(svc)
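
A minimal usage sketch for ensure_ceph_storage() (every value below is
hypothetical; in the charms quoted on this page they come from
config_get() and module-level constants):

ensure_ceph_storage(service='mysql',
                    pool='mysql-pool',
                    rbd_img='mysql-data',
                    sizemb=10 * 1024,  # a 10G image
                    mount_point='/var/lib/mysql',
                    blk_device='/dev/rbd/mysql-pool/mysql-data',
                    fstype='ext4',
                    system_services=['mysql'],
                    rbd_pool_replicas=2)
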
Code Example #4
File: ceph_utils.py  Project: BillTheBest/hyper-c
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[],
                        rbd_pool_replicas=2):
    """
    To be called from the current cluster leader.
    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being remounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
        create_pool(service, pool, replicas=rbd_pool_replicas)

    if not rbd_exists(service, pool, rbd_img):
        utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry,
    #      otherwise this hook will blow away existing data if it's executed
    #      after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if utils.running(svc):
                utils.juju_log('INFO',
                               'Stopping service %s prior to migrating '
                               'data' % svc)
                utils.stop(svc)

        place_data_on_ceph(service, blk_device, mount_point, fstype)

        for svc in system_services:
            utils.start(svc)
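
One way to address the XXX above (a sketch only; ensure_fstab_entry is a
hypothetical helper, not part of either project): record the mount in
/etc/fstab after the first successful mount, so that a re-run after a
reboot finds the filesystem mounted instead of reformatting the device.

def ensure_fstab_entry(blk_device, mount_point, fstype):
    """Append an /etc/fstab entry for blk_device unless one exists."""
    entry = '%s %s %s defaults,noatime 0 0\n' % (blk_device, mount_point,
                                                 fstype)
    with open('/etc/fstab', 'r+') as fstab:
        # any() stops at the first matching line; if nothing matches, the
        # file pointer is left at EOF and the write() below appends.
        if not any(line.split()[:1] == [blk_device] for line in fstab):
            fstab.write(entry)
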
Code Example #5
File: ha_relations.py  Project: petevg/mariadb-charm
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    use_syslog = utils.relation_get('use_syslog')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        return

    ceph.configure(service=SERVICE_NAME,
                   key=key,
                   auth=auth,
                   use_syslog=use_syslog)

    if cluster.eligible_leader(LEADER_RES):
        sizemb = int(utils.config_get('block-size')) * 1024
        rbd_img = utils.config_get('rbd-name')
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
        ceph.ensure_ceph_storage(service=SERVICE_NAME,
                                 pool=POOL_NAME,
                                 rbd_img=rbd_img,
                                 sizemb=sizemb,
                                 fstype='ext4',
                                 mount_point=DATA_SRC_DST,
                                 blk_device=blk_device,
                                 system_services=['mysql'],
                                 rbd_pool_replicas=rbd_pool_rep_count)
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        # Stopping MySQL
        if utils.running('mysql'):
            utils.juju_log('INFO', 'Stopping MySQL...')
            utils.stop('mysql')

    # If the 'ha' relation has been made before the 'ceph' relation,
    # it is important to make sure the ha-relation data is sent.
    if utils.is_relation_made('ha'):
        utils.juju_log(
            'INFO', '*ha* relation exists. Making sure the ha'
            ' relation data is sent.')
        ha_relation_joined()
        return

    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
Code Example #6
def ceph_changed():
    utils.juju_log("INFO", "Start Ceph Relation Changed")
    auth = utils.relation_get("auth")
    key = utils.relation_get("key")
    use_syslog = utils.relation_get("use_syslog")
    if None in [auth, key]:
        utils.juju_log("INFO", "Missing key or auth in relation")
        return

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth, use_syslog=use_syslog)

    if cluster.eligible_leader(LEADER_RES):
        sizemb = int(utils.config_get("block-size")) * 1024
        rbd_img = utils.config_get("rbd-name")
        blk_device = "/dev/rbd/%s/%s" % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get("ceph-osd-replication-count")
        ceph.ensure_ceph_storage(
            service=SERVICE_NAME,
            pool=POOL_NAME,
            rbd_img=rbd_img,
            sizemb=sizemb,
            fstype="ext4",
            mount_point=DATA_SRC_DST,
            blk_device=blk_device,
            system_services=["mysql"],
            rbd_pool_replicas=rbd_pool_rep_count,
        )
    else:
        utils.juju_log("INFO", "This is not the peer leader. Not configuring RBD.")
        # Stopping MySQL
        if utils.running("mysql"):
            utils.juju_log("INFO", "Stopping MySQL...")
            utils.stop("mysql")

    # If the 'ha' relation has been made before the 'ceph' relation,
    # it is important to make sure the ha-relation data is sent.
    if utils.is_relation_made("ha"):
        utils.juju_log("INFO", "*ha* relation exists. Making sure the ha" " relation data is sent.")
        ha_relation_joined()
        return

    utils.juju_log("INFO", "Finish Ceph Relation Changed")
Code Example #7
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    use_syslog = utils.relation_get('use_syslog')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        return

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
                   use_syslog=use_syslog)

    if cluster.eligible_leader(LEADER_RES):
        sizemb = int(utils.config_get('block-size')) * 1024
        rbd_img = utils.config_get('rbd-name')
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
        ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                 rbd_img=rbd_img, sizemb=sizemb,
                                 fstype='ext4', mount_point=DATA_SRC_DST,
                                 blk_device=blk_device,
                                 system_services=['mysql'],
                                 rbd_pool_replicas=rbd_pool_rep_count)
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        # Stopping MySQL
        if utils.running('mysql'):
            utils.juju_log('INFO', 'Stopping MySQL...')
            utils.stop('mysql')

    # If the 'ha' relation has been made before the 'ceph' relation,
    # it is important to make sure the ha-relation data is sent.
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       '*ha* relation exists. Making sure the ha'
                       ' relation data is sent.')
        ha_relation_joined()
        return

    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
Code Example #8
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        sys.exit(0)

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth)

    if cluster.eligible_leader('res_rabbitmq_vip'):
        rbd_img = utils.config_get('rbd-name')
        rbd_size = utils.config_get('rbd-size')
        sizemb = int(rbd_size.split('G')[0]) * 1024
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
        ceph.ensure_ceph_storage(service=SERVICE_NAME,
                                 pool=POOL_NAME,
                                 rbd_img=rbd_img,
                                 sizemb=sizemb,
                                 fstype='ext4',
                                 mount_point=RABBIT_DIR,
                                 blk_device=blk_device,
                                 system_services=['rabbitmq-server'],
                                 rbd_pool_replicas=rbd_pool_rep_count)
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')

    # If the 'ha' relation has been made before the 'ceph' relation,
    # it is important to make sure the ha-relation data is sent.
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO', '*ha* relation exists. Triggering ha_joined()')
        ha_joined()
    else:
        utils.juju_log('INFO', '*ha* relation does not exist.')
    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
Code Example #9
def ha_joined():
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    rbd_name = utils.config_get('rbd-name')

    if None in [
            corosync_bindiface, corosync_mcastport, vip, vip_iface, vip_cidr,
            rbd_name
    ]:
        utils.juju_log(
            'ERROR', 'Insufficient configuration data to '
            'configure hacluster.')
        sys.exit(1)

    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO', 'ha_joined: No ceph relation yet, deferring.')
        return

    name = '%s@localhost' % SERVICE_NAME
    if rabbit.get_node_name() != name:
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')
        rabbit.set_node_name('%s@localhost' % SERVICE_NAME)
    else:
        utils.juju_log('INFO', 'Node name already set to %s.' % name)

    relation_settings = {}
    relation_settings['corosync_bindiface'] = corosync_bindiface
    relation_settings['corosync_mcastport'] = corosync_mcastport

    relation_settings['resources'] = {
        'res_rabbitmq_rbd': 'ocf:ceph:rbd',
        'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
        'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
        'res_rabbitmq-server': 'lsb:rabbitmq-server',
    }

    relation_settings['resource_params'] = {
        'res_rabbitmq_rbd':
        'params name="%s" pool="%s" user="%s" '
        'secret="%s"' %
        (rbd_name, POOL_NAME, SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
        'res_rabbitmq_fs':
        'params device="/dev/rbd/%s/%s" directory="%s" '
        'fstype="ext4" op start start-delay="10s"' %
        (POOL_NAME, rbd_name, RABBIT_DIR),
        'res_rabbitmq_vip':
        'params ip="%s" cidr_netmask="%s" nic="%s"' %
        (vip, vip_cidr, vip_iface),
        'res_rabbitmq-server':
        'op start start-delay="5s" '
        'op monitor interval="5s"',
    }

    relation_settings['groups'] = {
        'grp_rabbitmq':
        'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
        'res_rabbitmq-server',
    }

    for rel_id in utils.relation_ids('ha'):
        utils.relation_set(rid=rel_id, **relation_settings)

    env_vars = {
        'OPENSTACK_PORT_EPMD': 4369,
        'OPENSTACK_PORT_MCASTPORT': utils.config_get('ha-mcastport'),
    }
    openstack.save_script_rc(**env_vars)
Code Example #10
def ha_joined():
    corosync_bindiface = utils.config_get('ha-bindiface')
    corosync_mcastport = utils.config_get('ha-mcastport')
    vip = utils.config_get('vip')
    vip_iface = utils.config_get('vip_iface')
    vip_cidr = utils.config_get('vip_cidr')
    rbd_name = utils.config_get('rbd-name')

    if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
                vip_cidr, rbd_name]:
        utils.juju_log('ERROR', 'Insufficient configuration data to '
                       'configure hacluster.')
        sys.exit(1)

    if not utils.is_relation_made('ceph', 'auth'):
        utils.juju_log('INFO',
                       'ha_joined: No ceph relation yet, deferring.')
        return

    name = '%s@localhost' % SERVICE_NAME
    if rabbit.get_node_name() != name:
        utils.juju_log('INFO', 'Stopping rabbitmq-server.')
        utils.stop('rabbitmq-server')
        rabbit.set_node_name('%s@localhost' % SERVICE_NAME)
    else:
        utils.juju_log('INFO', 'Node name already set to %s.' % name)

    relation_settings = {}
    relation_settings['corosync_bindiface'] = corosync_bindiface
    relation_settings['corosync_mcastport'] = corosync_mcastport

    relation_settings['resources'] = {
        'res_rabbitmq_rbd': 'ocf:ceph:rbd',
        'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
        'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
        'res_rabbitmq-server': 'lsb:rabbitmq-server',
    }

    relation_settings['resource_params'] = {
        'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
                            'secret="%s"' %
                            (rbd_name, POOL_NAME,
                             SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
        'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                           'fstype="ext4" op start start-delay="10s"' %
                           (POOL_NAME, rbd_name, RABBIT_DIR),
        'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                            (vip, vip_cidr, vip_iface),
        'res_rabbitmq-server': 'op start start-delay="5s" '
                               'op monitor interval="5s"',
    }

    relation_settings['groups'] = {
        'grp_rabbitmq': 'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
                        'res_rabbitmq-server',
    }

    for rel_id in utils.relation_ids('ha'):
        utils.relation_set(rid=rel_id, **relation_settings)

    env_vars = {
        'OPENSTACK_PORT_EPMD': 4369,
        'OPENSTACK_PORT_MCASTPORT': utils.config_get('ha-mcastport'),
    }
    openstack.save_script_rc(**env_vars)
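
The resources, resource_params, and groups maps above describe Pacemaker
resources. In crm shell syntax, the hacluster charm on the other side of
the relation would end up defining something along these lines (a sketch
with placeholder values, not verbatim charm output):

primitive res_rabbitmq_vip ocf:heartbeat:IPaddr2 \
    params ip="<vip>" cidr_netmask="<vip_cidr>" nic="<vip_iface>"
primitive res_rabbitmq-server lsb:rabbitmq-server \
    op start start-delay="5s" op monitor interval="5s"
group grp_rabbitmq res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip \
    res_rabbitmq-server
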
Code Example #11
def do_openstack_upgrade(install_src, packages):
    '''Upgrade packages from a given install src.'''

    config = config_get()
    old_vers = get_os_codename_package('keystone')
    new_vers = get_os_codename_install_source(install_src)

    utils.juju_log('INFO', "Beginning Keystone upgrade: %s -> %s" %
                   (old_vers, new_vers))

    # Backup previous config.
    utils.juju_log('INFO', "Backing up contents of /etc/keystone.")
    stamp = time.strftime('%Y%m%d%H%M')
    cmd = 'tar -pcf /var/lib/juju/keystone-backup-%s.tar /etc/keystone' % stamp
    execute(cmd, die=True, echo=True)

    configure_installation_source(install_src)
    execute('apt-get update', die=True, echo=True)
    os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
    cmd = 'apt-get --option Dpkg::Options::=--force-confnew -y '\
          'install %s' % packages
    execute(cmd, echo=True, die=True)

    # we have new, fresh config files that need updating.
    # set the admin token, which is still stored in config.
    set_admin_token(config['admin-token'])

    # set the sql connection string if a shared-db relation is found.
    ids = utils.relation_ids('shared-db')

    if ids:
        for rid in ids:
            for unit in utils.relation_list(rid):
                utils.juju_log('INFO',
                               'Configuring new keystone.conf for '
                               'database access on existing database'
                               ' relation to %s' % unit)
                relation_data = utils.relation_get_dict(relation_id=rid,
                                                        remote_unit=unit)

                update_config_block(
                    'sql',
                    connection="mysql://%s:%s@%s/%s" % (
                        config["database-user"],
                        relation_data["password"],
                        relation_data["private-address"],
                        config["database"]))

    utils.stop('keystone')
    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Running database migrations for %s' % new_vers)
        execute('keystone-manage db_sync', echo=True, die=True)
    else:
        utils.juju_log('INFO',
                       'Not cluster leader; snoozing whilst'
                       ' leader upgrades DB')
        time.sleep(10)
    utils.start('keystone')
    time.sleep(5)
    utils.juju_log('INFO',
                   'Completed Keystone upgrade: '
                   '%s -> %s' % (old_vers, new_vers))
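
For reference, the external commands this hook drives, in order (a
sketch; the timestamp and package list are placeholders filled in at run
time, and db_sync runs only on the cluster leader):

tar -pcf /var/lib/juju/keystone-backup-<YYYYmmddHHMM>.tar /etc/keystone
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get \
    --option Dpkg::Options::=--force-confnew -y install <packages>
keystone-manage db_sync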