def mon_add(args):
    """Add a single new monitor to an existing cluster.

    Reads the local ``{cluster}.mon.keyring`` (created by ``new``), pushes
    the admin credentials and config to the new host, then deploys and
    starts the mon daemon there, reporting its status afterwards.

    :raises exc.NeedHostError: when no mon host was given on the CLI
    :raises RuntimeError: when the local mon keyring file is missing
    :raises exc.GenericError: when deployment on the remote host fails
    """
    cfg = conf.ceph.load(args)
    if not args.mon:
        raise exc.NeedHostError()
    mon_host = args.mon[0]
    try:
        # BUG FIX: `file()` is a Python-2-only builtin (removed in Python 3);
        # use open() so this code runs on both.
        with open('{cluster}.mon.keyring'.format(cluster=args.cluster), 'rb') as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError(
            'mon keyring not found; run \'new\' to create a new cluster')

    LOG.info('ensuring configuration of new mon host: %s', mon_host)
    # re-use the `admin` subcommand to push conf + admin keyring to the host
    args.client = [mon_host]
    admin.admin(args)

    LOG.debug(
        'Adding mon to cluster %s, host %s',
        args.cluster,
        mon_host,
    )
    mon_section = 'mon.%s' % mon_host
    cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr')

    # precedence for the mon address: --address flag, then ceph.conf,
    # then DNS resolution of the host name
    if args.address:
        LOG.debug('using mon address via --address %s' % args.address)
        mon_ip = args.address
    elif cfg_mon_addr:
        LOG.debug('using mon address via configuration: %s' % cfg_mon_addr)
        mon_ip = cfg_mon_addr
    else:
        mon_ip = net.get_nonlocal_ip(mon_host)
        LOG.debug('using mon address by resolving host: %s' % mon_ip)

    try:
        LOG.debug('detecting platform for host %s ...', mon_host)
        distro = hosts.get(mon_host, username=args.username)
        LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(mon_host)

        # ensure remote hostname is good to go
        hostname_is_compatible(distro.conn, rlogger, mon_host)
        rlogger.debug('adding mon to %s', mon_host)
        args.address = mon_ip
        distro.mon.add(distro, args, monitor_keyring)

        # tell me the status of the deployed mon
        time.sleep(2)  # give some room to start
        catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
        mon_status(distro.conn, rlogger, mon_host, args)
        distro.conn.exit()
    except RuntimeError as e:
        LOG.error(e)
        raise exc.GenericError('Failed to add monitor to host: %s' % mon_host)
def mds_create(args):
    """Deploy MDS daemons on every ``(host, name)`` pair in ``args.mds``.

    The cluster conf and the bootstrap-mds keyring are pushed once per
    host; an mds daemon is then created for each entry. Per-host failures
    are logged and tallied, and summarized in a final GenericError.
    """
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying mds, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
    )
    if not args.mds:
        raise exc.NeedHostError()

    bootstrap_key = get_bootstrap_mds_key(cluster=args.cluster)
    seen_hosts = set()
    failures = 0

    for host, daemon_name in args.mds:
        try:
            distro = hosts.get(host, username=args.username)
            remote_log = distro.conn.logger
            LOG.info(
                'Distro info: %s %s %s',
                distro.name,
                distro.release,
                distro.codename
            )
            LOG.debug('remote host will use %s', distro.init)

            # bootstrap (conf + keyring push) happens only once per host
            if host not in seen_hosts:
                seen_hosts.add(host)
                LOG.debug('deploying mds bootstrap to %s', host)
                conf_buffer = StringIO()
                cfg.write(conf_buffer)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_buffer.getvalue(),
                    args.overwrite_conf,
                )
                keyring_path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
                    cluster=args.cluster,
                )
                if not distro.conn.remote_module.path_exists(keyring_path):
                    remote_log.warning('mds keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(keyring_path, bootstrap_key)

            create_mds(distro.conn, daemon_name, args.cluster, distro.init)
            distro.conn.exit()
        except RuntimeError as err:
            LOG.error(err)
            failures += 1

    if failures:
        raise exc.GenericError('Failed to create %d MDSs' % failures)
def mon_create(args):
    """Deploy monitors to all hosts in ``args.mon``.

    When no hosts are passed on the CLI, falls back to the
    ``mon_initial_members`` value from ceph.conf. The monitor keyring is
    read from (or created at) ``{cluster}.mon.keyring`` unless explicit
    keyrings were supplied via ``args.keyrings``.

    :raises exc.NeedHostError: when no mon hosts can be determined
    :raises exc.GenericError: when one or more monitors failed to deploy
    """
    cfg = conf.ceph.load(args)
    if not args.mon:
        mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
        # BUG FIX: safe_get returns None when the key is missing — splitting
        # None raises TypeError, and splitting '' yields [''] which is truthy
        # and would silently bypass the NeedHostError below. Only split a
        # non-empty value and drop empty fragments.
        if mon_initial_members:
            args.mon = [m for m in re.split(r'[,\s]+', mon_initial_members) if m]
    if not args.mon:
        raise exc.NeedHostError()

    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster)
        try:
            monitor_keyring = files.read_file(keyring_path)
        except IOError:
            LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
            new_mon_keyring(args)
            monitor_keyring = files.read_file(keyring_path)

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
    )

    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', name)
            distro = hosts.get(host, username=args.username)
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            rlogger = logging.getLogger(name)

            # ensure remote hostname is good to go
            hostname_is_compatible(distro.conn, rlogger, name)
            rlogger.debug('deploying mon to %s', name)
            distro.mon.create(distro, args, monitor_keyring)

            # tell me the status of the deployed mon
            time.sleep(2)  # give some room to start
            mon_status(distro.conn, rlogger, name, args)
            catch_mon_errors(distro.conn, rlogger, name, cfg, args)
            distro.conn.exit()
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
def get_mon_initial_members(args, error_on_empty=False, _cfg=None):
    """Return ``mon_initial_members`` from the Ceph config as a list.

    The raw comma/whitespace separated string is split into host names.
    When the option is unset or empty, the falsy raw value is returned
    as-is — unless ``error_on_empty`` is set, in which case a
    NeedHostError is raised instead.

    :param args: parsed CLI arguments, used to locate the config file
    :param error_on_empty: raise when the option is missing/empty
    :param _cfg: pre-loaded config object (bypasses conf.ceph.load)
    """
    cfg = _cfg if _cfg else conf.ceph.load(args)
    raw_members = cfg.safe_get('global', 'mon_initial_members')
    if raw_members:
        return re.split(r'[,\s]+', raw_members)
    if error_on_empty:
        raise exc.NeedHostError(
            'could not find `mon initial members` defined in ceph.conf')
    return raw_members
def osd_list(args, cfg):
    """Print metadata for every OSD on the hosts given in ``args.disk``.

    Fetches the OSD tree once from the first configured monitor, then for
    each host lists the OSD directories, piggybacks on ``ceph-disk list``
    for mount points, and prints per-OSD metadata.

    :raises exc.NeedHostError: when no monitors are defined in ceph.conf
    """
    # FIXME: this portion should probably be abstracted. We do the same in
    # mon.py
    cfg = conf.ceph.load(args)
    mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
    # BUG FIX: safe_get returns None when the key is missing — re.split(None)
    # raises TypeError, and splitting '' yields [''] which is truthy and
    # would bypass the NeedHostError below. Guard and drop empty fragments.
    monitors = [m for m in re.split(r'[,\s]+', mon_initial_members or '') if m]
    if not monitors:
        raise exc.NeedHostError(
            'could not find `mon initial members` defined in ceph.conf')

    # get the osd tree from a monitor host
    mon_host = monitors[0]
    distro = hosts.get(mon_host, username=args.username)
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ['active', 'magic', 'whoami', 'journal_uuid']

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        # consistency: use the already-bound remote_module local
        osds = remote_module.listdir(constants.osd_path)

        output, err, exit_code = remoto.process.check(distro.conn, [
            'ceph-disk',
            'list',
        ])

        for _osd in osds:
            osd_path = os.path.join(constants.osd_path, _osd)
            journal_path = os.path.join(osd_path, 'journal')
            _id = int(_osd.split('-')[-1])  # split on dash, get the id
            osd_name = 'osd.%s' % _id
            metadata = {}
            json_blob = {}

            # piggy back from ceph-disk and get the mount point
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata['device'] = device

            # read interesting metadata from files
            for f in interesting_files:
                osd_f_path = os.path.join(osd_path, f)
                if remote_module.path_exists(osd_f_path):
                    metadata[f] = remote_module.readline(osd_f_path)

            # do we have a journal path?
            if remote_module.path_exists(journal_path):
                metadata['journal path'] = remote_module.get_realpath(
                    journal_path)

            # is this OSD in osd tree?
            for blob in tree['nodes']:
                if blob.get('id') == _id:  # matches our OSD
                    json_blob = blob

            print_osd(
                distro.conn.logger,
                hostname,
                osd_path,
                json_blob,
                metadata,
            )

        distro.conn.exit()
def mds_create(args):
    """Deploy MDS daemons on every ``(host, name)`` pair in ``args.mds``.

    Pushes the cluster conf and bootstrap-mds keyring once per host, then
    creates an mds daemon per entry. Per-host failures are logged and
    tallied; a special note is emitted when the failure happened on a
    RHEL host, where MDS deployment may not be available.

    :raises exc.NeedHostError: when ``args.mds`` is empty
    :raises exc.GenericError: when one or more MDS creations failed
    """
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying mds, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
    )
    if not args.mds:
        raise exc.NeedHostError()

    key = get_bootstrap_mds_key(cluster=args.cluster)
    bootstrapped = set()
    errors = 0
    failed_on_rhel = False

    for hostname, name in args.mds:
        # BUG FIX: if hosts.get() itself raises, `distro` was unbound and the
        # except clause below crashed with UnboundLocalError, masking the
        # real error. Pre-bind it so the handler can test it safely.
        distro = None
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
            LOG.debug('remote host will use %s', distro.init)

            # bootstrap (conf + keyring push) happens only once per host
            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying mds bootstrap to %s', hostname)
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )

                path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
                    cluster=args.cluster,
                )
                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning(
                        'mds keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)

            create_mds(distro, name, args.cluster, distro.init)
            distro.conn.exit()
        except RuntimeError as e:
            if distro is not None and distro.normalized_name == 'redhat':
                LOG.error('this feature may not yet available for %s %s' % (distro.name, distro.release))
                failed_on_rhel = True
            LOG.error(e)
            errors += 1

    if errors:
        if failed_on_rhel:
            # because users only read the last few lines :(
            LOG.error(
                'RHEL RHCS systems do not have the ability to deploy MDS yet')
        raise exc.GenericError('Failed to create %d MDSs' % errors)