def mon_destroy(args):
    """Remove the monitor daemon from every host listed in ``args.mon``.

    Connects to each mon host (verifying ceph is installed), resolves the
    remote short hostname, and tears down that host's monitor.

    :raises exc.GenericError: if removal failed on one or more hosts.
    """
    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            LOG.debug('Removing mon from %s', name)

            distro = hosts.get(
                host,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            # the remote short hostname, not the name given on the CLI,
            # identifies the mon to destroy
            hostname = distro.conn.remote_module.shortname()

            # remove the mon
            destroy_mon(
                distro.conn,
                args.cluster,
                hostname,
            )
            distro.conn.exit()
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to destroy %d monitors' % errors)
def config_push(args):
    """Push the local cluster config file to every host in ``args.client``,
    then start any OSDs recorded in ``/root/osds`` by the verify process.

    :raises exc.GenericError: if pushing the config failed on any host.
    """
    conf_data = conf.ceph.load_raw(args)

    errors = 0
    for hostname in args.client:
        LOG.debug('Pushing config to %s', hostname)
        try:
            distro = hosts.get(hostname, username=args.username)

            distro.conn.remote_module.write_conf(
                args.cluster,
                conf_data,
                args.overwrite_conf,
            )

            distro.conn.exit()
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to config %d hosts' % errors)

    # start osd that sync in verify process.
    # BUG FIX: the start command and unlink previously ran unconditionally,
    # so when /root/osds was absent `osds` was still a list and
    # `osds.strip('\n')` raised AttributeError (and os.unlink failed too).
    # The whole sequence is now guarded by the existence check.
    if os.path.exists('/root/osds'):
        with open('/root/osds', 'r') as f:
            osds = f.readline()
        cmd = '/etc/init.d/ceph -a -c /etc/ceph/ceph.conf start %s' % osds.strip('\n')
        LOG.debug("excute: %s", cmd)
        (ret, msg) = commands.getstatusoutput(cmd)
        os.unlink('/root/osds')
def config_pull(args):
    """Fetch ``/etc/ceph/<cluster>.conf`` from the first host in
    ``args.client`` that provides it and save it locally as
    ``<cluster>.conf``.

    Refuses to overwrite a differing local copy unless
    ``--overwrite-conf`` was given.

    :raises exc.GenericError: when no host could provide the file.
    """
    topath = '{cluster}.conf'.format(cluster=args.cluster)
    frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster)

    errors = 0
    for hostname in args.client:
        try:
            LOG.debug('Checking %s for %s', hostname, frompath)
            distro = hosts.get(hostname, username=args.username)
            conf_file_contents = distro.conn.remote_module.get_file(frompath)

            if conf_file_contents is not None:
                LOG.debug('Got %s from %s', frompath, hostname)
                if os.path.exists(topath):
                    # open() replaces the Python-2-only file() builtin
                    with open(topath, 'rb') as f:
                        existing = f.read()
                    if existing != conf_file_contents and not args.overwrite_conf:
                        LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath)
                        # BUG FIX: this was a bare `raise` with no active
                        # exception, which itself blew up with
                        # "No active exception to re-raise"
                        raise RuntimeError(
                            'local config file %s exists with different '
                            'content; use --overwrite-conf to overwrite' % topath
                        )
                with open(topath, 'w') as f:
                    f.write(conf_file_contents)
                # close the remote connection before the early return
                # (previously leaked on the success path)
                distro.conn.exit()
                return

            distro.conn.exit()
            LOG.debug('Empty or missing %s on %s', frompath, hostname)
        except Exception:
            # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed
            LOG.error('Unable to pull %s from %s', frompath, hostname)
        finally:
            # counts every attempted host; the success path returns above
            errors += 1

    raise exc.GenericError('Failed to fetch config from %d hosts' % errors)
def mon_add(args):
    """Add a single monitor (``args.mon[0]``) to an existing cluster.

    Reads the local mon keyring, pushes admin credentials to the new
    host, resolves the mon address (``--address`` flag, then the config
    file, then DNS), deploys the mon, and reports its status.

    :raises RuntimeError: when the local mon keyring is missing.
    :raises exc.GenericError: when deployment to the host fails.
    """
    cfg = conf.ceph.load(args)

    # args.mon is a list with only one entry
    mon_host = args.mon[0]

    try:
        # open() replaces the Python-2-only file() builtin
        with open('{cluster}.mon.keyring'.format(cluster=args.cluster), 'rb') as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError(
            'mon keyring not found; run \'new\' to create a new cluster')

    LOG.info('ensuring configuration of new mon host: %s', mon_host)
    args.client = args.mon
    admin.admin(args)

    LOG.debug(
        'Adding mon to cluster %s, host %s',
        args.cluster,
        mon_host,
    )

    mon_section = 'mon.%s' % mon_host
    cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr')

    # address precedence: explicit --address, then the config file,
    # then resolving the host name
    if args.address:
        LOG.debug('using mon address via --address %s' % args.address)
        mon_ip = args.address
    elif cfg_mon_addr:
        LOG.debug('using mon address via configuration: %s' % cfg_mon_addr)
        mon_ip = cfg_mon_addr
    else:
        mon_ip = net.get_nonlocal_ip(mon_host)
        LOG.debug('using mon address by resolving host: %s' % mon_ip)

    try:
        LOG.debug('detecting platform for host %s ...', mon_host)
        distro = hosts.get(
            mon_host,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(mon_host)

        # ensure remote hostname is good to go
        hostname_is_compatible(distro.conn, rlogger, mon_host)
        rlogger.debug('adding mon to %s', mon_host)
        args.address = mon_ip
        distro.mon.add(distro, args, monitor_keyring)

        # tell me the status of the deployed mon
        time.sleep(2)  # give some room to start
        catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
        mon_status(distro.conn, rlogger, mon_host, args)
        distro.conn.exit()

    except RuntimeError as e:
        LOG.error(e)
        raise exc.GenericError('Failed to add monitor to host: %s' % mon_host)
def prepare(args, cfg, activate_prepared_disk):
    """Prepare an OSD for every (hostname, disk, journal) tuple in
    ``args.disk``.

    The first time a host is seen, the cluster config and bootstrap-osd
    key are pushed to it; then each disk is prepared via prepare_disk().

    :param activate_prepared_disk: whether to also activate the OSD
        after preparing the disk.
    :raises exc.GenericError: if one or more OSDs failed to prepare.
    """
    LOG.debug(
        'Preparing cluster %s disks %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.disk),
    )
    key = get_bootstrap_osd_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, disk, journal in args.disk:
        try:
            if disk is None:
                raise exc.NeedDiskError(hostname)
            distro = hosts.get(hostname, username=args.username)
            LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('Deploying osd to %s', hostname)
                # serialize the parsed config and push it remotely
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(args.cluster, conf_data.getvalue(), args.overwrite_conf)
                create_osd(distro.conn, args.cluster, key)

            LOG.debug('Preparing host %s disk %s journal %s activate %s', hostname, disk, journal, activate_prepared_disk)
            prepare_disk(
                distro.conn,
                cluster=args.cluster,
                disk=disk,
                journal=journal,
                activate_prepared_disk=activate_prepared_disk,
                zap=args.zap_disk,
                fs_type=args.fs_type,
                dmcrypt=args.dmcrypt,
                dmcrypt_dir=args.dmcrypt_key_dir,
            )

            # give the OSD a few seconds to start
            time.sleep(5)
            catch_osd_errors(distro.conn, distro.conn.logger, args)
            LOG.debug('Host %s is now ready for osd use.', hostname)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d OSDs' % errors)
def mds_create(args):
    """Deploy a metadata server on each (hostname, name) pair in
    ``args.mds``, with a dedicated error message for RHEL systems where
    MDS deployment is unsupported.

    :raises exc.GenericError: if one or more MDS daemons failed to deploy.
    """
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying mds, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
    )

    key = get_bootstrap_mds_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    failed_on_rhel = False

    for hostname, name in args.mds:
        # BUG FIX: reset per iteration so the except handler no longer hits
        # a NameError (or a stale value from a previous host) when
        # hosts.get() itself raises before `distro` is bound
        distro = None
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
            LOG.debug('remote host will use %s', distro.init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying mds bootstrap to %s', hostname)
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )

                path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
                    cluster=args.cluster,
                )
                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning(
                        'mds keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)

            create_mds(distro, name, args.cluster, distro.init)
            distro.conn.exit()
        except RuntimeError as e:
            if distro is not None and distro.normalized_name == 'redhat':
                LOG.error('this feature may not yet available for %s %s' % (distro.name, distro.release))
                failed_on_rhel = True
            LOG.error(e)
            errors += 1

    if errors:
        if failed_on_rhel:
            # because users only read the last few lines :(
            LOG.error(
                'RHEL RHCS systems do not have the ability to deploy MDS yet')
        raise exc.GenericError('Failed to create %d MDSs' % errors)
def mds_create(args):
    """Deploy a metadata server on each (hostname, name) pair in
    ``args.mds``.

    The first time a host is seen, the cluster config and bootstrap-mds
    keyring are pushed to it before the MDS daemon is created.

    :raises exc.NeedHostError: when ``args.mds`` is empty.
    :raises exc.GenericError: if one or more MDS daemons failed to deploy.
    """
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying mds, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
    )

    if not args.mds:
        raise exc.NeedHostError()
    key = get_bootstrap_mds_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, name in args.mds:
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info(
                'Distro info: %s %s %s',
                distro.name,
                distro.release,
                distro.codename
            )
            LOG.debug('remote host will use %s', distro.init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying mds bootstrap to %s', hostname)
                # serialize the parsed config and push it remotely
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )

                path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
                    cluster=args.cluster,
                )
                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning('mds keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)

            create_mds(distro.conn, name, args.cluster, distro.init)
            distro.conn.exit()
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d MDSs' % errors)
def mon_create(args):
    """Deploy a monitor to every host in ``args.mon``, falling back to
    ``mon_initial_members`` from the config file when no hosts were given.

    Reads (or creates) the local mon keyring, then per host: detects the
    platform, validates the hostname, deploys the mon, and checks its
    status after a short grace period.

    :raises exc.GenericError: if deployment failed on one or more hosts.
    """
    cfg = conf.ceph.load(args)
    if not args.mon:
        args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster)
        try:
            monitor_keyring = files.read_file(keyring_path)
        except IOError:
            # a missing keyring is not fatal: create one and retry
            LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
            new_mon_keyring(args)
            monitor_keyring = files.read_file(keyring_path)

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
    )

    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', name)
            distro = hosts.get(
                host,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            rlogger = logging.getLogger(name)

            # ensure remote hostname is good to go
            hostname_is_compatible(distro.conn, rlogger, name)
            rlogger.debug('deploying mon to %s', name)
            distro.mon.create(distro, args, monitor_keyring)

            # tell me the status of the deployed mon
            time.sleep(2)  # give some room to start
            mon_status(distro.conn, rlogger, name, args)
            catch_mon_errors(distro.conn, rlogger, name, cfg, args)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
def mon_create(args):
    """Deploy a monitor to every host in ``args.mon``, falling back to
    ``mon_initial_members`` from the config file when no hosts were given.

    :raises exc.NeedHostError: when no mon hosts could be determined.
    :raises RuntimeError: when the local mon keyring is missing.
    :raises exc.GenericError: if deployment failed on one or more hosts.
    """
    cfg = conf.ceph.load(args)
    if not args.mon:
        mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
        # BUG FIX: safe_get may return None when the option is absent;
        # re.split(None) raised TypeError instead of reaching NeedHostError
        if mon_initial_members:
            args.mon = re.split(r'[,\s]+', mon_initial_members)
    if not args.mon:
        raise exc.NeedHostError()

    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        try:
            # open() replaces the Python-2-only file() builtin
            with open('{cluster}.mon.keyring'.format(cluster=args.cluster), 'rb') as f:
                monitor_keyring = f.read()
        except IOError:
            raise RuntimeError('mon keyring not found; run \'new\' to create a new cluster')

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
    )

    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', name)
            distro = hosts.get(host, username=args.username)
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            rlogger = logging.getLogger(name)

            # ensure remote hostname is good to go
            hostname_is_compatible(distro.conn, rlogger, name)
            rlogger.debug('deploying mon to %s', name)
            distro.mon.create(distro, args, monitor_keyring)

            # tell me the status of the deployed mon
            time.sleep(2)  # give some room to start
            mon_status(distro.conn, rlogger, name, args)
            catch_mon_errors(distro.conn, rlogger, name, cfg, args)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
def rgw_create(args):
    """Deploy a RADOS Gateway on each (hostname, name) pair in
    ``args.rgw``.

    The first time a host is seen, the cluster config and bootstrap-rgw
    keyring are pushed to it before the RGW daemon is created.

    :raises exc.GenericError: if one or more RGWs failed to deploy.
    """
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying rgw, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.rgw),
    )

    key = get_bootstrap_rgw_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, name in args.rgw:
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
            LOG.debug('remote host will use %s', distro.init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying rgw bootstrap to %s', hostname)
                # serialize the parsed config and push it remotely
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )

                path = '/var/lib/ceph/bootstrap-rgw/{cluster}.keyring'.format(
                    cluster=args.cluster,
                )
                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning(
                        'rgw keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)

            create_rgw(distro, name, args.cluster, distro.init)
            distro.conn.exit()
            LOG.info(
                ('The Ceph Object Gateway (RGW) is now running on host %s and '
                 'default port %s'),
                hostname,
                '7480'
            )
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d RGWs' % errors)
def admin(args):
    """Push the cluster config and the ``client.admin`` keyring to every
    host in ``args.client``.

    :raises RuntimeError: when the local admin keyring is missing.
    :raises exc.GenericError: if pushing failed on one or more hosts.
    """
    cfg = conf.ceph.load(args)
    conf_data = StringIO()
    cfg.write(conf_data)

    try:
        # open() replaces the Python-2-only file() builtin
        with open('%s.client.admin.keyring' % args.cluster, 'rb') as f:
            keyring = f.read()
    except IOError:
        # narrowed from a bare `except:` so unrelated failures are not
        # masked as a missing keyring
        raise RuntimeError('%s.client.admin.keyring not found' % args.cluster)

    errors = 0
    for hostname in args.client:
        LOG.debug('Pushing admin keys and conf to %s', hostname)
        try:
            distro = hosts.get(hostname, username=args.username)
            hostname = distro.conn.remote_module.shortname()

            distro.conn.remote_module.write_conf(
                args.cluster,
                conf_data.getvalue(),
                args.overwrite_conf,
            )

            distro.conn.remote_module.write_file(
                '/etc/ceph/%s.client.admin.keyring' % args.cluster,
                keyring
            )

            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to configure %d admin hosts' % errors)
def config_push(args):
    """Write the local cluster configuration out to every host listed in
    ``args.client``.

    :raises exc.GenericError: if the push failed on one or more hosts.
    """
    raw_conf = conf.ceph.load_raw(args)

    failures = 0
    for host in args.client:
        LOG.debug('Pushing config to %s', host)
        try:
            remote = hosts.get(host, username=args.username)
            remote.conn.remote_module.write_conf(
                args.cluster,
                raw_conf,
                args.overwrite_conf,
            )
            remote.conn.exit()
        except RuntimeError as err:
            LOG.error(err)
            failures += 1

    if failures:
        raise exc.GenericError('Failed to config %d hosts' % failures)
def admin(args):
    """Push the raw cluster config and the ``client.admin`` keyring
    (mode 0600) to every host in ``args.client``.

    :raises RuntimeError: when the local admin keyring is missing.
    :raises exc.GenericError: if pushing failed on one or more hosts.
    """
    conf_data = conf.ceph.load_raw(args)
    try:
        with open('%s.client.admin.keyring' % args.cluster, 'rb') as f:
            keyring = f.read()
    except IOError:
        # narrowed from a bare `except:` so unrelated failures are not
        # masked as a missing keyring
        raise RuntimeError('%s.client.admin.keyring not found' % args.cluster)

    errors = 0
    for hostname in args.client:
        LOG.debug('Pushing admin keys and conf to %s', hostname)
        try:
            distro = hosts.get(hostname, username=args.username)

            # write /etc/ceph/ceph.conf on the remote mon
            distro.conn.remote_module.write_conf(
                args.cluster,
                conf_data,
                args.overwrite_conf,
            )
            # write /etc/ceph/ceph.client.admin.keyring on the remote mon
            distro.conn.remote_module.write_file(
                '/etc/ceph/%s.client.admin.keyring' % args.cluster,
                keyring,
                0o600,
            )

            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to configure %d admin hosts' % errors)
def prepare(args, cfg, activate_prepared_disk):
    """Prepare an OSD for every (hostname, disk, journal) tuple in
    ``args.disk``, warning first about hosts that would exceed the safe
    OSD count for ``kernel.pid_max``.

    :param activate_prepared_disk: whether to also activate the OSD
        after preparing the disk.
    :raises exc.GenericError: if one or more OSDs failed to prepare.
    """
    LOG.debug(
        'Preparing cluster %s disks %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.disk),
    )

    # warn (but do not abort) when a host carries more OSDs than its
    # pid_max setting can comfortably support
    hosts_in_danger = exceeds_max_osds(args)
    if hosts_in_danger:
        LOG.warning(
            'if ``kernel.pid_max`` is not increased to a high enough value')
        LOG.warning('the following hosts will encounter issues:')
        for host, count in hosts_in_danger.items():
            LOG.warning('Host: %8s, OSDs: %s' % (host, count))

    key = get_bootstrap_osd_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, disk, journal in args.disk:
        try:
            if disk is None:
                raise exc.NeedDiskError(hostname)
            distro = hosts.get(hostname, username=args.username, callbacks=[packages.ceph_is_installed])
            LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('Deploying osd to %s', hostname)
                conf_data = conf.ceph.load_raw(args)
                distro.conn.remote_module.write_conf(args.cluster, conf_data, args.overwrite_conf)
                create_osd_keyring(distro.conn, args.cluster, key)

            LOG.debug('Preparing host %s disk %s journal %s activate %s', hostname, disk, journal, activate_prepared_disk)
            # bluestore flag selects the object-store backend; None lets
            # prepare_disk fall back to its own default
            storetype = None
            if args.bluestore:
                storetype = 'bluestore'
            prepare_disk(
                distro.conn,
                cluster=args.cluster,
                disk=disk,
                journal=journal,
                activate_prepared_disk=activate_prepared_disk,
                init=distro.init,
                zap=args.zap_disk,
                fs_type=args.fs_type,
                dmcrypt=args.dmcrypt,
                dmcrypt_dir=args.dmcrypt_key_dir,
                storetype=storetype,
            )

            # give the OSD a few seconds to start
            time.sleep(5)
            catch_osd_errors(distro.conn, distro.conn.logger, args)
            LOG.debug('Host %s is now ready for osd use.', hostname)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d OSDs' % errors)
def prepare(args, cfg, activate_prepared_disk):
    """Prepare OSD disks on every host described in ``args.disk``.

    Two modes:

    * filestore + f2fs: each ``args.disk`` entry is a full per-host
      layout tuple; whole-host setup is delegated to
      ``distro.mon.create_all()``, then ceph.conf is synced to every
      host and the function returns.
    * otherwise: each entry is a (hostname, disk, journal) tuple; the
      bootstrap-osd keyring is pushed per host and each disk is run
      through prepare_disk().

    :param activate_prepared_disk: whether to also activate the OSD
        after preparing the disk.
    :raises exc.GenericError: on sync or prepare failures.
    """
    LOG.debug(
        'Preparing cluster %s disks %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.disk),
    )
    if args.filestore:
        if args.fs_type == 'f2fs':
            LOG.debug('start run prepare_all_disk')
            admin_keyring_path = '/Ceph/Meta/Keyring/client.admin.keyring'
            admin_key = get_admin_key()
            errors = 0
            # each entry: hostname, pIP, cIP, disk, journal, strategy, mode,
            # nvme_def_used, nvme_tt_nums, ssd_def_used, ssd_tt_used,
            # hdd_def_used, hdd_tt_nums, cache_nums, cache_dis_type
            for hostname, pIP, cIP, disk, journal, strategy, mode, nvme_def_used, nvme_tt_nums, ssd_def_used, ssd_tt_used, hdd_def_used, hdd_tt_nums, cache_nums, cache_dis_type in args.disk:
                try:
                    distro = hosts.get(
                        hostname,
                        username=args.username,
                        callbacks=[packages.ceph_is_installed]
                    )
                    LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
                    rlogger = logging.getLogger(hostname)

                    # get client.admin.keyring from manager node
                    if not distro.conn.remote_module.path_exists(admin_keyring_path):
                        LOG.debug('client.admin.keyring not exist yet, creating one')
                        distro.conn.remote_module.write_keyring(admin_keyring_path, admin_key)

                    # ensure remote hostname is good to go
                    #hostname_is_compatible(distro.conn, rlogger, hostname)
                    localIP = net.get_nonlocal_ip(hostname)
                    LOG.debug('get host ip : %s', localIP)
                    LOG.debug('Create:add mon to ceph.conf')
                    distro.mon.create_all(distro, args, hostname, pIP, cIP, disk, journal, strategy, mode, nvme_def_used, nvme_tt_nums, ssd_def_used, ssd_tt_used, hdd_def_used, hdd_tt_nums, cache_nums, cache_dis_type)
                    distro.conn.exit()
                except RuntimeError as e:
                    LOG.error(e)
                    errors += 1

            # start sync all osd ceph.conf
            conf_data = conf.ceph.load_raw(args)
            errnos = 0
            for hostname, pIP, cIP, disk, journal, strategy, mode, nvme_def_used, nvme_tt_nums, ssd_def_used, ssd_tt_used, hdd_def_used, hdd_tt_nums, cache_nums, cache_dis_type in args.disk:
                LOG.debug('sync configfile for host %s ...', hostname)
                try:
                    distro = hosts.get(hostname, username=args.username)
                    distro.conn.remote_module.write_conf(
                        args.cluster,
                        conf_data,
                        args.overwrite_conf,
                    )
                    distro.conn.exit()
                except RuntimeError as e:
                    LOG.error(e)
                    errnos += 1
            if errnos:
                # BUG FIX: previously interpolated `errors` (the create_all
                # failure counter) instead of the sync counter `errnos`
                raise exc.GenericError('Failed to sync configfile %d monitors' % errnos)
            return

    hosts_in_danger = exceeds_max_osds(args)
    if hosts_in_danger:
        LOG.warning('if ``kernel.pid_max`` is not increased to a high enough value')
        LOG.warning('the following hosts will encounter issues:')
        for host, count in hosts_in_danger.items():
            LOG.warning('Host: %8s, OSDs: %s' % (host, count))

    key = get_bootstrap_osd_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, disk, journal in args.disk:
        localIP = net.get_nonlocal_ip(hostname)
        LOG.debug('hostname:%s ; ip:%s' % (hostname, localIP))
        try:
            if disk is None:
                raise exc.NeedDiskError(hostname)
            distro = hosts.get(
                hostname,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            LOG.info(
                'Distro info: %s %s %s',
                distro.name,
                distro.release,
                distro.codename
            )

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('Deploying osd to %s', hostname)
                conf_data = conf.ceph.load_raw(args)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data,
                    args.overwrite_conf
                )
                create_osd_keyring(distro.conn, args.cluster, key)

            LOG.debug('Preparing host %s disk %s journal %s activate %s', hostname, disk, journal, activate_prepared_disk)
            storetype = None
            if args.bluestore:
                storetype = 'bluestore'
            if args.filestore:
                storetype = 'filestore'

            prepare_disk(
                distro.conn,
                cluster=args.cluster,
                disk=disk,
                journal=journal,
                activate_prepared_disk=activate_prepared_disk,
                init=distro.init,
                zap=args.zap_disk,
                fs_type=args.fs_type,
                dmcrypt=args.dmcrypt,
                dmcrypt_dir=args.dmcrypt_key_dir,
                storetype=storetype,
                block_wal=args.block_wal,
                block_db=args.block_db
            )

            # give the OSD a few seconds to start
            time.sleep(5)
            catch_osd_errors(distro.conn, distro.conn.logger, args)
            LOG.debug('Host %s is now ready for osd use.', hostname)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d OSDs' % errors)
def mon_create(args):
    """Deploy a monitor to every host in ``args.mon``, falling back to
    ``mon_initial_members`` from the config file when no hosts were given.

    :raises exc.GenericError: if deployment failed on one or more hosts.
    """
    # load the cluster configuration file
    cfg = conf.ceph.load(args)
    if not args.mon:
        # no mons given on the command line; use get_mon_initial_members()
        # to read mon_initial_members from the config file instead
        args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster)
        try:
            # read the ceph.mon.keyring file contents
            monitor_keyring = files.read_file(keyring_path)
        except IOError:
            LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
            new_mon_keyring(args)
            monitor_keyring = files.read_file(keyring_path)

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
    )

    errors = 0
    # loop over the monitors
    for (name, host) in mon_hosts(args.mon):
        try:
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', name)
            # detect the remote OS and verify the ceph package is installed;
            # to support additional operating systems, start by modifying
            # the hosts module
            distro = hosts.get(
                host,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            rlogger = logging.getLogger(name)

            # ensure remote hostname is good to go
            hostname_is_compatible(distro.conn, rlogger, name)
            rlogger.debug('deploying mon to %s', name)
            # create the mon by delegating to the matching OS directory
            # under hosts/, e.g. the mon module in hosts/centos for CentOS
            distro.mon.create(distro, args, monitor_keyring)

            # tell me the status of the deployed mon
            time.sleep(2)  # give some room to start
            # check the status of the mon
            mon_status(distro.conn, rlogger, name, args)
            # check whether the mon is present in the monmap and whether
            # public_addr / public_network are configured, logging warnings
            catch_mon_errors(distro.conn, rlogger, name, cfg, args)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
def mgr_create(args):
    """Deploy a ceph-mgr on each (hostname, name) pair in ``args.mgr``.

    First derives one ``mgr.<id>`` section per ``mon.<id>`` section in the
    controller's config (via the module-level ``conf_hd`` parser) and
    rewrites ``ceph_conf_file``, then pushes the config to each host and
    delegates mgr creation to ``distro.mon.create_mgr``.

    NOTE(review): ``conf_hd`` and ``ceph_conf_file`` are not defined in
    this function — presumably module-level globals; verify before reuse.

    :raises exc.GenericError: if one or more MGRs failed to deploy.
    """
    LOG.debug(
        'Deploying mgr, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mgr),
    )
    # add mgr conf to controler's ceph.conf
    sections = conf_hd.sections()
    mgrname_hostname = {}
    for sec in sections:
        if not sec.startswith("mon."):
            continue
        else:
            hostname = conf_hd.get(sec, 'host')
            if not hostname:
                continue
            # mirror each mon.<id> section as a mgr.<id> section
            mgr_sec_name = 'mgr.' + sec.split('.')[1]
            mgrname_hostname[hostname] = mgr_sec_name
            conf_hd.add_section(mgr_sec_name)
            conf_hd.set(mgr_sec_name, 'host', hostname)
    # persist the updated sections back to the controller's config file
    conf_write = open(ceph_conf_file, 'w')
    conf_hd.write(conf_write)
    conf_write.close()
    conf_data = conf.ceph.load_raw(args)
    # key = get_bootstrap_mgr_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    LOG.info("---mgr_create====>args:%s------", args)
    for hostname, name in args.mgr:
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
            LOG.debug('remote host will use %s', distro.init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying mgr bootstrap to %s', hostname)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data,
                    args.overwrite_conf,
                )
                # find the mgr section derived above for this host;
                # NOTE(review): stays None (path becomes '.../None') when
                # the host has no mon section — confirm intended
                mgr_name = None
                LOG.info("-----mgrname_and_hostname:%s-----" % mgrname_hostname)
                for hname in mgrname_hostname:
                    if hostname == hname:
                        mgr_name = mgrname_hostname[hname]
                LOG.info("======================mgr_name:%s=======" % mgr_name)
                path = '/Ceph/Data/Mgr/{mgr_sec_name}'.format(
                    mgr_sec_name=mgr_name,
                )
                LOG.info("++++++++path:%s++++++++++++++" % path)
                # if not distro.conn.remote_module.path_exists(path):
                #     rlogger.warning('mgr keyring does not exist yet, creating one')
                #     distro.conn.remote_module.write_keyring(path, key)
                distro.mon.create_mgr(distro, mgr_name, path, hostname)
                # distro.conn.remote_module.make_mgr_key(mgr_name, path)
                LOG.info("==============after remote path=============")
                # create_mgr(distro, name, args.cluster, distro.init)
            distro.conn.exit()
        except Exception as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d MGRs' % errors)
def create(args, cfg, create=False):
    """Create a single OSD on ``args.host`` using ``args.data`` as the
    data device.

    Pushes the cluster config and bootstrap-osd keyring to the host on
    first contact, then runs create_osd() with the requested backend
    (bluestore by default, filestore when ``--filestore`` was given).

    :raises RuntimeError: when no host was specified.
    :raises exc.GenericError: if the OSD failed to create.
    """
    if not args.host:
        raise RuntimeError(
            'Required host was not specified as a positional argument')
    LOG.debug('Creating OSD on cluster %s with data device %s', args.cluster, args.data)

    key = get_bootstrap_osd_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    hostname = args.host
    try:
        if args.data is None:
            raise exc.NeedDiskError(hostname)
        distro = hosts.get(hostname, username=args.username, callbacks=[packages.ceph_is_installed])
        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)

        if hostname not in bootstrapped:
            bootstrapped.add(hostname)
            LOG.debug('Deploying osd to %s', hostname)
            conf_data = conf.ceph.load_raw(args)
            distro.conn.remote_module.write_conf(args.cluster, conf_data, args.overwrite_conf)
            create_osd_keyring(distro.conn, args.cluster, key)

        # default to bluestore unless explicitly told not to
        storetype = 'bluestore'
        if args.filestore:
            storetype = 'filestore'

        # perform the osd creation
        create_osd(
            distro.conn,
            cluster=args.cluster,
            data=args.data,
            journal=args.journal,
            zap=args.zap_disk,
            fs_type=args.fs_type,
            dmcrypt=args.dmcrypt,
            dmcrypt_dir=args.dmcrypt_key_dir,
            storetype=storetype,
            block_wal=args.block_wal,
            block_db=args.block_db,
            debug=args.debug,
        )

        # give the OSD a few seconds to start
        time.sleep(5)
        catch_osd_errors(distro.conn, distro.conn.logger, args)
        LOG.debug('Host %s is now ready for osd use.', hostname)
        distro.conn.exit()

    except RuntimeError as e:
        LOG.error(e)
        errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d OSDs' % errors)
def mon_create(args):
    """Deploy numbered monitors to every host in ``args.mon``.

    Each mon gets a ``mon.<index>`` section added to the local ceph.conf
    via add_mon_conf(); on failure the section is rolled back with
    del_conf(). After deployment the admin keyring is copied to /root
    and the resulting config is synced to all mon hosts.

    :raises exc.GenericError: on deployment or sync failures.
    """
    cfg = conf.ceph.load(args)
    if not args.mon:
        args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        keyring_path = '/root/{cluster}.mon.keyring'.format(cluster=args.cluster)
        try:
            monitor_keyring = files.read_file(keyring_path)
        except IOError:
            LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
            new_mon_keyring(args)
            monitor_keyring = files.read_file(keyring_path)

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
    )

    errors = 0
    mon_no = None
    for (name, host) in mon_hosts(args.mon):
        try:
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', name)
            distro = hosts.get(
                host,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            rlogger = logging.getLogger(name)

            # ensure remote hostname is good to go
            hostname_is_compatible(distro.conn, rlogger, name)
            LOG.debug('write mon config to /etc/ceph.conf')
            mon_ip = net.get_nonlocal_ip(name)
            LOG.debug('get host ip : %s', mon_ip)
            LOG.debug('Create:add mon to ceph.conf')
            # returns the new section name, e.g. 'mon.0'
            mon_no = add_mon_conf(name, mon_ip)
            rlogger.debug('deploying mon to %s ,mon_index : %s', name, mon_no.split('.')[1])
            distro.mon.create(distro, args, monitor_keyring, mon_no.split('.')[1])

            # tell me the status of the deployed mon
            time.sleep(5)  # give some room to start
            mon_status(distro.conn, rlogger, mon_no.split('.')[1], args)
            catch_mon_errors(distro.conn, rlogger, name, mon_no.split('.')[1], cfg, args)
            distro.conn.exit()

        except RuntimeError as e:
            # roll back the section added for this mon (mon_no may still be
            # None/previous when the failure happened before add_mon_conf)
            del_conf(mon_no)
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)

    # cp clientkey for create osd
    cmd = "cp /Ceph/Meta/Keyring/client.admin.keyring /root/"
    (ret, out) = commands.getstatusoutput(cmd)

    # start sync all monitors ceph.conf
    conf_data = conf.ceph.load_raw(args)
    errnos = 0
    for (name, host) in mon_hosts(args.mon):
        LOG.debug('sync configfile for host %s ...', name)
        try:
            distro = hosts.get(host, username=args.username)
            distro.conn.remote_module.write_conf(
                args.cluster,
                conf_data,
                args.overwrite_conf,
            )
            distro.conn.exit()
        except RuntimeError as e:
            LOG.error(e)
            errnos += 1
    if errnos:
        # BUG FIX: previously interpolated `errors` (always 0 here, since a
        # non-zero value raises above) instead of the sync counter `errnos`
        raise exc.GenericError('Failed to sync configfile %d monitors' % errnos)