def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=None,
                        rbd_pool_replicas=2):
    """
    To be called from the current cluster leader.

    Ensures the given pool and RBD image exist, that the image is mapped to
    a block device, and that the device is formatted and mounted at the
    given mount_point.

    If the device is being formatted for the first time, data existing at
    mount_point is migrated to the RBD device before being remounted. All
    services listed in system_services are stopped prior to the data
    migration and restarted when it completes.
    """
    # Avoid the mutable-default-argument pitfall.
    system_services = system_services or []

    # Ensure pool, RBD image and RBD mappings are in place.
    if not pool_exists(service, pool):
        utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
        create_pool(service, pool, replicas=rbd_pool_replicas)

    if not rbd_exists(service, pool, rbd_img):
        utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        utils.juju_log('INFO', 'ceph: Mapping RBD image as a block device.')
        map_block_storage(service, pool, rbd_img)

    # Make the file system.
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs.
    # XXX: This is really sketchy! Need to at least add an fstab entry,
    # otherwise this hook will blow away existing data if it is executed
    # after a reboot (see the sketch following this function).
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if utils.running(svc):
                utils.juju_log('INFO',
                               'Stopping service %s prior to migrating '
                               'data.' % svc)
                utils.stop(svc)

        place_data_on_ceph(service, blk_device, mount_point, fstype)

        for svc in system_services:
            utils.start(svc)
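# A minimal sketch of the fstab handling suggested by the XXX note above, so
# that the mount persists across reboots and filesystem_mounted() does not
# come back False (triggering a destructive mkfs) after a restart.
# ensure_fstab_entry() is a hypothetical helper, not part of this charm; it
# assumes direct /etc/fstab edits are acceptable. A real deployment would
# also need the RBD image remapped at boot (e.g. via rbdmap), which is
# outside this sketch.
def ensure_fstab_entry(blk_device, mount_point, fstype,
                       options='defaults,noatime'):
    """Append an /etc/fstab entry for blk_device if one is not present."""
    entry = '%s %s %s %s 0 0\n' % (blk_device, mount_point, fstype, options)
    with open('/etc/fstab') as fstab:
        for line in fstab:
            fields = line.split()
            if (len(fields) >= 2 and
                    fields[0] == blk_device and fields[1] == mount_point):
                return  # An entry already exists; nothing to do.
    with open('/etc/fstab', 'a') as fstab:
        fstab.write(entry)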
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')

    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    use_syslog = utils.relation_get('use_syslog')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        return

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
                   use_syslog=use_syslog)

    if cluster.eligible_leader(LEADER_RES):
        # 'block-size' is expressed in GB; the RBD image size is in MB.
        sizemb = int(utils.config_get('block-size')) * 1024
        rbd_img = utils.config_get('rbd-name')
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        # Cast defensively: config values may come back as strings.
        rbd_pool_rep_count = int(
            utils.config_get('ceph-osd-replication-count'))
        ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                 rbd_img=rbd_img, sizemb=sizemb,
                                 fstype='ext4', mount_point=DATA_SRC_DST,
                                 blk_device=blk_device,
                                 system_services=['mysql'],
                                 rbd_pool_replicas=rbd_pool_rep_count)
    else:
        utils.juju_log('INFO',
                       'This is not the peer leader. Not configuring RBD.')
        # Only the leader should run MySQL against the shared storage.
        if utils.running('mysql'):
            utils.juju_log('INFO', 'Stopping MySQL...')
            utils.stop('mysql')

    # If the 'ha' relation was made before the 'ceph' relation, make sure
    # the ha relation data is (re)sent now that storage is configured.
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       '*ha* relation exists. Making sure the ha'
                       ' relation data is sent.')
        ha_relation_joined()
        return

    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
def ceph_changed(): utils.juju_log("INFO", "Start Ceph Relation Changed") auth = utils.relation_get("auth") key = utils.relation_get("key") use_syslog = utils.relation_get("use_syslog") if None in [auth, key]: utils.juju_log("INFO", "Missing key or auth in relation") return ceph.configure(service=SERVICE_NAME, key=key, auth=auth, use_syslog=use_syslog) if cluster.eligible_leader(LEADER_RES): sizemb = int(utils.config_get("block-size")) * 1024 rbd_img = utils.config_get("rbd-name") blk_device = "/dev/rbd/%s/%s" % (POOL_NAME, rbd_img) rbd_pool_rep_count = utils.config_get("ceph-osd-replication-count") ceph.ensure_ceph_storage( service=SERVICE_NAME, pool=POOL_NAME, rbd_img=rbd_img, sizemb=sizemb, fstype="ext4", mount_point=DATA_SRC_DST, blk_device=blk_device, system_services=["mysql"], rbd_pool_replicas=rbd_pool_rep_count, ) else: utils.juju_log("INFO", "This is not the peer leader. Not configuring RBD.") # Stopping MySQL if utils.running("mysql"): utils.juju_log("INFO", "Stopping MySQL...") utils.stop("mysql") # If 'ha' relation has been made before the 'ceph' relation # it is important to make sure the ha-relation data is being # sent. if utils.is_relation_made("ha"): utils.juju_log("INFO", "*ha* relation exists. Making sure the ha" " relation data is sent.") ha_relation_joined() return utils.juju_log("INFO", "Finish Ceph Relation Changed")
def ceph_changed(): utils.juju_log('INFO', 'Start Ceph Relation Changed') auth = utils.relation_get('auth') key = utils.relation_get('key') use_syslog = utils.relation_get('use_syslog') if None in [auth, key]: utils.juju_log('INFO', 'Missing key or auth in relation') return ceph.configure(service=SERVICE_NAME, key=key, auth=auth, use_syslog=use_syslog) if cluster.eligible_leader(LEADER_RES): sizemb = int(utils.config_get('block-size')) * 1024 rbd_img = utils.config_get('rbd-name') blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img) rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count') ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME, rbd_img=rbd_img, sizemb=sizemb, fstype='ext4', mount_point=DATA_SRC_DST, blk_device=blk_device, system_services=['mysql'], rbd_pool_replicas=rbd_pool_rep_count) else: utils.juju_log('INFO', 'This is not the peer leader. Not configuring RBD.') # Stopping MySQL if utils.running('mysql'): utils.juju_log('INFO', 'Stopping MySQL...') utils.stop('mysql') # If 'ha' relation has been made before the 'ceph' relation # it is important to make sure the ha-relation data is being # sent. if utils.is_relation_made('ha'): utils.juju_log('INFO', '*ha* relation exists. Making sure the ha' ' relation data is sent.') ha_relation_joined() return utils.juju_log('INFO', 'Finish Ceph Relation Changed')
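# A minimal sketch of how ceph_changed() might be dispatched at the bottom of
# the hooks file. utils.do_hooks() is assumed to be the charm-helpers-era
# dispatcher that selects a handler from the invoking hook name; the hook
# name shown is the conventional one for a 'ceph' relation, not confirmed by
# this section.
hooks = {
    'ceph-relation-changed': ceph_changed,
}

if __name__ == '__main__':
    utils.do_hooks(hooks)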