def pool_update(self, pool_name, var, val):
    """
    Simulate `ceph osd pool set`: change one attribute of the named pool.

    :param pool_name: Name of an existing pool (raises IndexError if absent)
    :param var: Attribute to set; 'pg_num' triggers PG creation, 'pgp_num'
                is stored under the OSD map's 'pg_placement_num' key
    :param val: New value for the attribute
    :raises RuntimeError: if PGs of this pool are still creating, or the
                          pg_num growth exceeds mon_osd_max_split_count
    """
    log.info("pool_update %s %s %s" % (pool_name, var, val))
    matches = [p for p in self._objects['osd_map']['pools'] if p['pool_name'] == pool_name]
    pool = matches[0]

    if var in ['pg_num', 'pgp_num']:
        # Placement-group counts may not change while any PG of this pool
        # is still in a 'creating' state.
        prefix = "{0}.".format(pool['pool'])
        own_pgs = [p for p in self._objects['pg_brief'] if p['pgid'].startswith(prefix)]
        seen_states = {s for p in own_pgs for s in p['state'].split("+")}
        if 'creating' in seen_states:
            raise RuntimeError("Cannot modify pg_num while PGs are creating")

    if var == 'pg_num':
        log.debug("pool_update creating pgs %s->%s" % (
            pool['pg_num'], val
        ))
        # Growing a pool, creating PGs
        growth = val - pool['pg_num']
        split_base = min(pool['pg_num'], len(self._objects['osd_map']['osds']))
        if growth > split_base * int(self._objects['config']['mon_osd_max_split_count']):
            raise RuntimeError("Exceeded mon_osd_max_split_count")
        self._create_pgs(pool['pool'], range(pool['pg_num'], val))

    if var == 'pgp_num':
        # On the way in it's called pgp_num, on the way out it's called pg_placement_num
        var = 'pg_placement_num'

    # Only bump the OSD map epoch when something actually changed.
    if pool[var] != val:
        pool[var] = val
        self._objects['osd_map']['epoch'] += 1
def pool_create(self, pool_name, pg_num):
    """
    Simulate `ceph osd pool create`: append a new pool to the OSD map
    and create its placement groups.

    :param pool_name: Name for the new pool; if a pool with this name
                      already exists the call logs an error and returns
    :param pg_num: Number of placement groups for the new pool
    """
    log.info("pool_create: %s/%s" % (pool_name, pg_num))
    if pool_name in [p['pool_name'] for p in self._objects['osd_map']['pools']]:
        log.error("Pool %s already exists" % pool_name)
        return

    # Allocate the next free pool id.  Seed max() with -1 so that the
    # first pool created into an empty pool list gets id 0 instead of
    # raising ValueError (max() of an empty sequence).
    new_id = max([p['pool'] for p in self._objects['osd_map']['pools']] or [-1]) + 1
    log.info("pool_create assigned %s=%s" % (pool_name, new_id))
    self._objects['osd_map']['pools'].append(
        _pool_template(pool_name, new_id, pg_num)
    )
    self._objects['osd_map']['epoch'] += 1
    self._create_pgs(new_id, range(0, pg_num))
def create(self, fqdns, mon_count=3, osds_per_host=4, osd_overlap=False, osd_size=2 * TERABYTES):
    """
    Generate initial state for a cluster

    :param fqdns: FQDNs of the hosts making up the fake cluster; the
                  first ``mon_count`` of them become mons
    :param mon_count: Number of mon services to create
    :param osds_per_host: Number of OSD services per OSD host
    :param osd_overlap: Controls whether OSDs are placed on the mon hosts
                        (see NOTE below)
    :param osd_size: Capacity reported for each OSD, in bytes
    """
    log.info("Creating ceph_cluster")
    self.fsid = uuid.uuid4().__str__()
    self.name = 'ceph_fake'

    mon_hosts = fqdns[0:mon_count]
    # NOTE(review): with osd_overlap=True the OSDs go only on the non-mon
    # hosts, while the default (False) puts OSDs on *all* hosts including
    # the mons -- i.e. the flag name reads inverted relative to what the
    # branches do.  Confirm intended semantics with callers before changing.
    if osd_overlap:
        osd_hosts = fqdns[mon_count:]
    else:
        osd_hosts = fqdns

    # Register an 'osd' service entry per (host, slot); osd ids are
    # assigned sequentially across hosts.
    osd_id = 0
    for fqdn in osd_hosts:
        for i in range(0, osds_per_host):
            self._service_locations["osd"][osd_id] = fqdn
            self._host_services[fqdn].append({
                'type': 'osd',
                'id': osd_id,
                'fsid': self.fsid
            })
            osd_id += 1

    # Register one 'mon' service per mon host; mon ids are hostnames.
    for fqdn in mon_hosts:
        mon_id = get_hostname(fqdn)
        self._service_locations["mon"][mon_id] = fqdn
        self._host_services[fqdn].append({
            "type": "mon",
            "id": mon_id,
            'fsid': self.fsid
        })

    # Mon health check output
    # =======================
    self._objects['health'] = {
        'detail': [],
        'health': {
            'health_services': [],
        },
        'overall_status': "HEALTH_OK",
        'summary': [],
        'timechecks': {}
    }

    # Cluster config settings
    # =======================
    # NOTE(review): assigned by reference -- any later mutation of
    # self._objects['config'] would also mutate the module-level
    # DEFAULT_CONFIG.  Confirm it is never mutated in place.
    self._objects['config'] = DEFAULT_CONFIG

    # OSD map
    # =======
    osd_count = len(osd_hosts) * osds_per_host
    # NOTE(review): DEFAULT_CRUSH is likewise shared by reference.
    self._objects['osd_map'] = {
        'fsid': self.fsid,
        'flags': '',
        'max_osd': osd_count,
        'epoch': 1,
        'osds': [],
        'pools': [],
        'crush': DEFAULT_CRUSH
    }
    # One OSD map entry per OSD, all initially up+in with zeroed history.
    for i in range(0, osd_count):
        # TODO populate public_addr and cluster_addr from imagined
        # interface addresses of servers
        osd_id = i
        self._objects['osd_map']['osds'].append({
            'osd': osd_id,
            'uuid': uuid.uuid4().__str__(),
            'up': 1,
            'in': 1,
            'last_clean_begin': 0,
            'last_clean_end': 0,
            'up_from': 0,
            'up_thru': 0,
            'down_at': 0,
            'lost_at': 0,
            'public_addr': "",
            'cluster_addr': "",
            'heartbeat_back_addr': "",
            'heartbeat_front_addr': "",
            "state": ["exists", "up"]
        })
        self._osd_stats[osd_id] = {
            'total_bytes': osd_size
        }

    # The three default pools (ids 0..2), 64 PGs each.
    for i, pool in enumerate(['data', 'metadata', 'rbd']):
        # TODO these should actually have a different crush ruleset etc each
        self._objects['osd_map']['pools'].append(_pool_template(pool, i, 64))

    # Build the `ceph osd tree`-style hierarchy: one root node (-1),
    # one node per host (negative ids from -2 down), and a leaf per OSD.
    tree = {
        "nodes": [
            {
                "id": -1,
                "name": "default",
                "type": "root",
                "type_id": 6,
                "children": []
            }
        ]
    }
    host_tree_id = -2
    for fqdn, services in self._host_services.items():
        # Entries for OSDs on this host
        for s in services:
            if s['type'] != 'osd':
                continue
            tree['nodes'].append({
                "id": s['id'],
                "name": "osd.%s" % s['id'],
                "exists": 1,
                "type": "osd",
                "type_id": 0,
                "status": "up",
                "reweight": 1.0,
                "crush_weight": 1.0,
                "depth": 2
            })

        # Entry for the host itself
        tree['nodes'].append({
            "id": host_tree_id,
            "name": get_hostname(fqdn),
            "type": "host",
            "type_id": 1,
            "children": [
                s['id'] for s in services if s['type'] == 'osd'
            ]
        })
        tree['nodes'][0]['children'].append(host_tree_id)
        host_tree_id -= 1

    self._objects['osd_map']['tree'] = tree

    # Mon status
    # ==========
    self._objects['mon_map'] = {
        'epoch': 0,
        'fsid': self.fsid,
        'modified': datetime.datetime.now().isoformat(),
        'created': datetime.datetime.now().isoformat(),
        'mons': [
        ],
        'quorum': []
    }
    for i, mon_fqdn in enumerate(mon_hosts):
        # TODO: populate addr
        self._objects['mon_map']['mons'].append({
            'rank': i,
            'name': get_hostname(mon_fqdn),
            'addr': ""
        })
        # Every mon starts inside the quorum.
        self._objects['mon_map']['quorum'].append(i)

    self._objects['mon_status'] = {
        "election_epoch": 77,
        # IRL the rank here is an arbitrary one from within quorum
        "rank": 0,
        "state": "leader",
        "monmap": self._objects['mon_map'],
        "quorum": [m['rank'] for m in self._objects['mon_map']['mons']]
    }

    # Empty MDS map: no filesystem daemons in the fake cluster.
    self._objects['mds_map'] = {
        "max_mds": 1,
        "in": [],
        "up": {},
        "info": {}
    }

    # PG map
    # ======
    self._objects['pg_brief'] = []
    # Don't maintain a full PG map but do maintain a version counter.
    self._objects['pg_map'] = {"version": 1}
    for pool in self._objects['osd_map']['pools']:
        n_replicas = pool['size']
        for pg_num in range(pool['pg_num']):
            # PG ids are "<pool id>.<pg number>"; acting == up, picked
            # deterministically from the pg id so state is reproducible.
            pg_id = "%s.%s" % (pool['pool'], pg_num)
            osds = pseudorandom_subset(range(0, osd_count), n_replicas, pg_id)
            self._objects['pg_brief'].append({
                'pgid': pg_id,
                'state': 'active+clean',
                'up': osds,
                'acting': osds
            })
            self._pg_stats[pg_id] = {
                'num_objects': 0,
                'num_bytes': 0,
                'num_bytes_wr': 0,
                'num_bytes_rd': 0
            }