def process_requests_v1(reqs):
    """Process v1 requests.

    Takes a list of requests (dicts) and processes each one. If an error is
    found, processing stops and the client is notified in the response.

    Returns a response dict containing the exit code (non-zero if any
    operation failed along with an explanation).
    """
    log("Processing %s ceph broker requests" % (len(reqs)), level=INFO)
    for req in reqs:
        op = req.get('op')
        log("Processing op='%s'" % (op), level=DEBUG)
        # Use admin client since we do not have other client key locations
        # setup to use them for these operations.
        svc = 'admin'
        if op == "create-pool":
            params = {'pool': req.get('name'),
                      'replicas': req.get('replicas')}
            if not all(params.values()):
                msg = ("Missing parameter(s): %s" %
                       (' '.join([k for k in params.keys()
                                  if not params[k]])))
                log(msg, level=ERROR)
                return {'exit-code': 1, 'stderr': msg}

            # Mandatory params
            pool = params['pool']
            replicas = params['replicas']

            # Optional params
            pg_num = req.get('pg_num')
            if pg_num:
                # Cap pg_num to max allowed just in case.
                osds = get_osds(svc)
                if osds:
                    pg_num = min(pg_num, (len(osds) * 100 // replicas))

                # Ensure string
                pg_num = str(pg_num)

            if not pool_exists(service=svc, name=pool):
                log("Creating pool '%s' (replicas=%s)" % (pool, replicas),
                    level=INFO)
                create_pool(service=svc, name=pool, replicas=replicas,
                            pg_num=pg_num)
            else:
                log("Pool '%s' already exists - skipping create" % (pool),
                    level=DEBUG)
        else:
            msg = "Unknown operation '%s'" % (op)
            log(msg, level=ERROR)
            return {'exit-code': 1, 'stderr': msg}

    return {'exit-code': 0}
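A minimal usage sketch for the function above. The request and response shapes follow the docstring and the code itself; the concrete pool name, replica count and pg_num values are illustrative only, and in a real deployment the request list would arrive JSON-encoded over a Juju relation rather than being built inline.

# Illustrative only: values are made up, the request/response keys come from
# process_requests_v1() above.
reqs = [{'op': 'create-pool',   # the only op handled by the v1 broker
         'name': 'cinder',      # mandatory pool name
         'replicas': 3,         # mandatory replica count
         'pg_num': 512}]        # optional; capped to len(osds) * 100 // replicas

resp = process_requests_v1(reqs)
# Success:            {'exit-code': 0}
# Missing 'replicas': {'exit-code': 1, 'stderr': 'Missing parameter(s): replicas'}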
    def test_create_pool_argonaut(self, _exists, _get_osds):
        '''It creates rados pool correctly with 3 replicas'''
        _exists.return_value = False
        _get_osds.return_value = None
        ceph_utils.create_pool(service='cinder', name='foo')
        self.check_call.assert_has_calls([
            call(['ceph', '--id', 'cinder', 'osd', 'pool',
                  'create', 'foo', '200']),
            call(['ceph', '--id', 'cinder', 'osd', 'pool', 'set',
                  'foo', 'size', '3'])
        ])
def ceph_changed():
    log('Start Ceph Relation Changed')
    auth = relation_get('auth')
    key = relation_get('key')
    use_syslog = str(config('use-syslog')).lower()
    if None in [auth, key]:
        log('Missing key or auth in relation')
        sys.exit(0)

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
                   use_syslog=use_syslog)

    if is_elected_leader('res_rabbitmq_vip'):
        rbd_img = config('rbd-name')
        rbd_size = config('rbd-size')
        sizemb = int(rbd_size.split('G')[0]) * 1024
        blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
        ceph.create_pool(service=SERVICE_NAME, name=POOL_NAME,
                         replicas=int(config('ceph-osd-replication-count')))
        ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                                 rbd_img=rbd_img, sizemb=sizemb,
                                 fstype='ext4', mount_point=RABBIT_DIR,
                                 blk_device=blk_device,
                                 system_services=['rabbitmq-server'])
        subprocess.check_call(['chown', '-R',
                               '%s:%s' % (RABBIT_USER, RABBIT_GROUP),
                               RABBIT_DIR])
    else:
        log('This is not the peer leader. Not configuring RBD.')
        log('Stopping rabbitmq-server.')
        service_stop('rabbitmq-server')

    # If 'ha' relation has been made before the 'ceph' relation
    # it is important to make sure the ha-relation data is being
    # sent.
    if is_relation_made('ha'):
        log('*ha* relation exists. Triggering ha_joined()')
        ha_joined()
    else:
        log('*ha* relation does not exist.')

    log('Finish Ceph Relation Changed')
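A short worked example of the size and device-path derivation performed on the leader branch above. The config keys and POOL_NAME are taken from ceph_changed(); the concrete values are assumptions, with 'rbd-size' assumed to be a string such as '5G'.

# Illustrative values only.
POOL_NAME = 'rabbitmq'
rbd_img = 'rabbitmq-rbd'    # config('rbd-name'), hypothetical value
rbd_size = '5G'             # config('rbd-size'), hypothetical value

sizemb = int(rbd_size.split('G')[0]) * 1024       # 5 * 1024 = 5120 MB
blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
# -> '/dev/rbd/rabbitmq/rabbitmq-rbd'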
    def test_create_pool_already_exists(self):
        self._patch('pool_exists')
        self.pool_exists.return_value = True
        ceph_utils.create_pool(service='cinder', name='foo')
        self.log.assert_called()
        self.check_call.assert_not_called()