def new(args):
    """Create a new cluster config (customized variant).

    Resolves each monitor host given in ``args.mon``, prepares the remote
    ``/Ceph`` directory layout, builds a ``ceph.conf`` with site-specific
    client/mon/osd overrides, and atomically writes it to
    ``{cluster}.conf`` in the current directory.

    :raises RuntimeError: when ``--ceph-conf`` was passed, since that flag
        means an existing conf should be re-used, not generated.
    :raises exc.ClusterNameError: when a sysvinit host is combined with a
        non-default cluster name (unsupported by sysvinit scripts).
    :raises exc.ClusterExistsError: when the destination conf file already
        exists (detected via EEXIST from ``os.rename``).
    """
    if args.ceph_conf:
        raise RuntimeError(
            'will not create a Ceph conf file if attempting to re-use with `--ceph-conf` flag'
        )
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = conf.ceph.CephConf()
    cfg.add_section('global')
    cfg.add_section('client')
    cfg.add_section('mon')
    cfg.add_section('osd')

    # Honor an explicitly supplied fsid, otherwise mint a fresh one.
    fsid = args.fsid or uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    # if networks were passed in, lets set them in the
    # global section
    # NOTE(review): deliberately disabled in this variant — the networks are
    # still used below for IP validation/selection, just not written to conf.
    # if args.public_network:
    #     cfg.set('global', 'public network', str(args.public_network))
    # if args.cluster_network:
    #     cfg.set('global', 'cluster network', str(args.cluster_network))

    mon_initial_members = []
    mon_host = []

    for (name, host) in mon_hosts(args.mon):
        # Try to ensure we can ssh in properly before anything else
        if args.ssh_copykey:
            ssh_copy_keys(host, args.username)

        # Now get the non-local IPs from the remote node
        distro = hosts.get(host, username=args.username)
        remote_ips = net.ip_addresses(distro.conn)

        # Prepare the custom /Ceph layout on the remote host.
        # 167:167 is passed as uid/gid — presumably the ceph user/group ids;
        # TODO(review): confirm against the remote_module implementation.
        ceph_dir = '/Ceph'
        ceph_mon_dir = '/Ceph/Data/Mon'
        ceph_meta_dir = '/Ceph/Meta/Keyring'
        if not distro.conn.remote_module.path_exists(ceph_dir):
            distro.conn.remote_module.create_ceph_path(ceph_dir, 167, 167)
            distro.conn.remote_module.create_mon_path(ceph_mon_dir, 167, 167)
            distro.conn.remote_module.create_meta_path(ceph_meta_dir, 167, 167)

        # custom cluster names on sysvinit hosts won't work
        if distro.init == 'sysvinit' and args.cluster != 'ceph':
            LOG.error(
                'custom cluster names are not supported on sysvinit hosts')
            raise exc.ClusterNameError(
                'host %s does not support custom cluster names' % host)
        distro.conn.exit()

        # Validate subnets if we received any
        if args.public_network or args.cluster_network:
            validate_host_ip(remote_ips,
                             [args.public_network, args.cluster_network])

        # Pick the IP that matches the public cluster (if we were told to do
        # so) otherwise pick the first, non-local IP
        LOG.debug('Resolving host %s', host)
        if args.public_network:
            ip = get_public_network_ip(remote_ips, args.public_network)
        else:
            ip = net.get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        # inet_pton succeeds only for IPv6 literals; failure (socket.error)
        # means the address is IPv4 and is recorded unbracketed.
        try:
            socket.inet_pton(socket.AF_INET6, ip)
            mon_host.append("[" + ip + "]")
            LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
            cfg.set('global', 'ms bind ipv6', 'true')
        except socket.error:
            mon_host.append(ip)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    # NOTE(review): mon membership lines are intentionally not written in
    # this variant; the lists above are still computed for logging.
    #cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    #cfg.set('global', 'mon host', ','.join(mon_host))
    cfg.set('global', 'keyring', '/Ceph/Meta/Keyring/$name.keyring')

    # override undesirable defaults, needed until bobtail
    # http://tracker.ceph.com/issues/6788
    cfg.set('global', 'auth cluster required', 'cephx')
    cfg.set('global', 'auth service required', 'cephx')
    cfg.set('global', 'auth client required', 'cephx')

    # Site-specific client tuning.
    cfg.set('client', 'rbd cache', 'false')
    cfg.set('client', 'client cache size', '16384000')
    cfg.set('client', 'client_oc_size', '838860800')
    cfg.set('client', 'admin socket', '/var/run/ceph/rbd-client-$pid.asok')
    cfg.set('client', 'log file', '/var/log/ceph/ceph.client.log')

    # Site-specific monitor tuning; mon data lives under the custom /Ceph tree.
    cfg.set('mon', 'mon clock drift allowed', '0.5')
    cfg.set('mon', 'mon debug dump transactions', 'false')
    cfg.set('mon', 'mon osd max split count', '10000')
    cfg.set('mon', 'mon data', '/Ceph/Data/Mon/mon.$id')

    # Site-specific OSD tuning (recovery throttling, scrub window, etc.).
    cfg.set('osd', 'osd crush update on start', 'false')
    cfg.set('osd', 'osd journal size', '10240')
    cfg.set('osd', 'osd new ceph', 'false')
    cfg.set('osd', 'osd max backfills', '1')
    cfg.set('osd', 'osd recovery max active', '1')
    cfg.set('osd', 'osd deep scrub interval', '209018880000')
    cfg.set('osd', 'osd scrub begin hour', '0')
    cfg.set('osd', 'osd scrub end hour', '8')
    cfg.set('osd', 'osd deep scrub primary write', 'false')
    cfg.set('osd', 'osd deep scrub replica write', 'false')
    cfg.set('osd', 'osd max object name len', '256')

    path = '{name}.conf'.format(name=args.cluster)

    new_mon_keyring(args)

    LOG.debug('Writing initial config to %s...', path)
    # Write to a temp file first, then rename, so readers never see a
    # half-written conf.
    tmp = '%s.tmp' % path
    with open(tmp, 'w') as f:
        cfg.write(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        # NOTE(review): on POSIX os.rename silently overwrites an existing
        # target; the EEXIST branch fires on platforms/filesystems where the
        # rename refuses — confirm intended behavior for existing confs.
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(path)
        else:
            raise
def new(args):
    """Create a new cluster configuration (stock variant).

    NOTE(review): this is the SECOND definition of ``new`` in this module;
    at import time it shadows the customized variant defined above. One of
    the two should be removed or renamed — confirm which is intended.

    Resolves each monitor host given in ``args.mon``, records the monitor
    membership and addresses in the ``[global]`` section, and atomically
    writes the result to ``{cluster}.conf`` in the current directory.

    :raises RuntimeError: when ``--ceph-conf`` was passed, since that flag
        means an existing conf should be re-used, not generated.
    :raises exc.ClusterNameError: when a sysvinit host is combined with a
        non-default cluster name (unsupported by sysvinit scripts).
    :raises exc.ClusterExistsError: when the destination conf file already
        exists (detected via EEXIST from ``os.rename``).
    """
    if args.ceph_conf:
        raise RuntimeError(
            'will not create a Ceph conf file if attempting to re-use with `--ceph-conf` flag'
        )
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = conf.ceph.CephConf()
    cfg.add_section('global')

    # Honor an explicitly supplied fsid, otherwise mint a fresh one.
    fsid = args.fsid or uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    # if networks were passed in, lets set them in the
    # global section
    if args.public_network:
        cfg.set('global', 'public network', str(args.public_network))
    if args.cluster_network:
        cfg.set('global', 'cluster network', str(args.cluster_network))

    mon_initial_members = []
    mon_host = []

    for (name, host) in mon_hosts(args.mon):
        # Try to ensure we can ssh in properly before anything else
        if args.ssh_copykey:
            ssh_copy_keys(host, args.username)

        # Now get the non-local IPs from the remote node
        distro = hosts.get(host, username=args.username)
        remote_ips = net.ip_addresses(distro.conn)

        # custom cluster names on sysvinit hosts won't work
        if distro.init == 'sysvinit' and args.cluster != 'ceph':
            LOG.error(
                'custom cluster names are not supported on sysvinit hosts')
            raise exc.ClusterNameError(
                'host %s does not support custom cluster names' % host)
        distro.conn.exit()

        # Validate subnets if we received any
        if args.public_network or args.cluster_network:
            validate_host_ip(remote_ips,
                             [args.public_network, args.cluster_network])

        # Pick the IP that matches the public cluster (if we were told to do
        # so) otherwise pick the first, non-local IP
        LOG.debug('Resolving host %s', host)
        if args.public_network:
            ip = get_public_network_ip(remote_ips, args.public_network)
        else:
            ip = net.get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        # inet_pton succeeds only for IPv6 literals; failure (socket.error)
        # means the address is IPv4 and is recorded unbracketed.
        try:
            socket.inet_pton(socket.AF_INET6, ip)
            mon_host.append("[" + ip + "]")
            LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
            cfg.set('global', 'ms bind ipv6', 'true')
        except socket.error:
            mon_host.append(ip)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    cfg.set('global', 'mon host', ','.join(mon_host))

    # override undesirable defaults, needed until bobtail
    # http://tracker.ceph.com/issues/6788
    cfg.set('global', 'auth cluster required', 'cephx')
    cfg.set('global', 'auth service required', 'cephx')
    cfg.set('global', 'auth client required', 'cephx')

    path = '{name}.conf'.format(name=args.cluster)

    new_mon_keyring(args)

    LOG.debug('Writing initial config to %s...', path)
    # Write to a temp file first, then rename, so readers never see a
    # half-written conf.
    tmp = '%s.tmp' % path
    with open(tmp, 'w') as f:
        cfg.write(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        # NOTE(review): on POSIX os.rename silently overwrites an existing
        # target; the EEXIST branch fires on platforms/filesystems where the
        # rename refuses — confirm intended behavior for existing confs.
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(path)
        else:
            raise