def run_rhsm_reg(host, username, password, beta):
    """
    Register the given host to Red Hat Repositories via subscription-manager
    and enable the RHOS channel (plus the beta channel when requested).
    """
    runner = utils.ScriptRunner(host)

    # register host; escape double quotes in the password so the shell
    # command stays well-formed
    escaped_pw = password.replace('"', '\\"')
    runner.append('subscription-manager register --username="%s" '
                  '--password="%s" --autosubscribe || true'
                  % (username, escaped_pw))

    # subscribe to the required channel unless already consumed
    pool_query = ("$(subscription-manager list --available | "
                  "grep -e 'Red Hat OpenStack' -m 1 -A 2 | grep 'Pool Id' | "
                  "awk '{print $3}')")
    runner.append('subscription-manager list --consumed | grep -i openstack || '
                  'subscription-manager subscribe --pool %s' % pool_query)

    runner.append("yum clean all")
    runner.append("rpm -q yum-utils || yum install -y yum-utils")
    runner.append("yum-config-manager --enable rhel-server-ost-6-folsom-rpms")
    if beta:
        runner.append("yum-config-manager --enable rhel-6-server-beta-rpms")
    runner.append("yum clean metadata")
    # mask the password so it never appears in logs
    runner.execute(maskList=[password])
def applyPuppetManifest(): print currently_running = [] lastmarker = None for manifest, marker in manifestfiles.getFiles(): # if the marker has changed then we don't want to proceed until # all of the previous puppet runs have finished if lastmarker != None and lastmarker != marker: waitforpuppet(currently_running) lastmarker = marker for hostname in gethostlist(controller.CONF): if "/%s_"%hostname not in manifest: continue print "Applying "+ manifest server = utils.ScriptRunner(hostname) logfile = "%s.log"%manifest currently_running.append((hostname, logfile)) command = "( flock %s/ps.lock puppet apply --modulepath %s/modules %s > %s_ 2>&1 < /dev/null ; mv %s_ %s ) > /dev/null 2>&1 < /dev/null &"%(basedefs.VAR_DIR, basedefs.VAR_DIR, manifest, logfile, logfile, logfile) server.append(command) server.execute() # wait for outstanding puppet runs befor exiting waitforpuppet(currently_running)
def createstoragemanifest():
    """
    Build the swift storage manifests: one base manifest per storage host
    plus one device declaration per storage device.
    """
    # this needs to happen once per storage host
    for host in set(d['host'] for d in devices):
        controller.CONF["CONFIG_SWIFT_STORAGE_CURRENT"] = host
        appendManifestFile("%s_swift.pp" % host,
                           getManifestTemplate("swift_storage.pp"))

    # this needs to happen once per storage device
    for entry in devices:
        host = entry['host']
        devicename = entry['device_name']
        dev = entry['device']

        checker = utils.ScriptRunner(host)
        validate.r_validateDevice(checker, dev)
        checker.execute()

        manifestfile = "%s_swift.pp" % host
        if dev:
            # physical device: declare it with the configured filesystem type
            fstype = controller.CONF["CONFIG_SWIFT_STORAGE_FSTYPE"]
            manifestdata = "\n" + 'swift::storage::%s{"%s":\n device => "/dev/%s",\n}' % (fstype, devicename, dev)
        else:
            # no physical device: fall back to a loopback-backed store
            controller.CONF["SWIFT_STORAGE_DEVICES"] = "'%s'" % devicename
            manifestdata = "\n" + getManifestTemplate("swift_loopback.pp")
        appendManifestFile(manifestfile, manifestdata)
def check_ifcfg(host, device):
    """
    Raise ScriptRuntimeError when the given host lacks the given device.
    """
    runner = utils.ScriptRunner(host)
    check = ("ip addr show dev %s || "
             "( echo Device %s does not exist && exit 1 )" % (device, device))
    runner.append(check)
    runner.execute()
def checkcindervg(): server = utils.ScriptRunner(controller.CONF['CONFIG_CINDER_HOST']) server.append('vgdisplay cinder-volumes') try: server.execute() except: print "The cinder server should contain a cinder-volumes volume group" raise
def copyPuppetModules():
    """
    Ship the puppet modules and manifests to every configured host by piping
    a tar stream through ssh.
    """
    server = utils.ScriptRunner()

    tar_opts = ""
    if platform.linux_distribution()[0] == "Fedora":
        # presumably Fedora's puppet already ships create_resources —
        # don't overwrite it (TODO confirm)
        tar_opts += "--exclude create_resources "

    # template: tar <opts> <what> | ssh root@<host> tar -C <dir> -x
    pipe_tmpl = ("tar %s --dereference -czf - %s | "
                 "ssh -o StrictHostKeyChecking=no "
                 "-o UserKnownHostsFile=/dev/null "
                 "root@%s tar -C %s -xzf -")
    for hostname in gethostlist(controller.CONF):
        server.append("cd %s/puppet" % basedefs.DIR_PROJECT_DIR)
        server.append(pipe_tmpl % (tar_opts, "modules", hostname,
                                   basedefs.VAR_DIR))
        server.append("cd %s" % basedefs.PUPPET_MANIFEST_DIR)
        server.append(pipe_tmpl % (tar_opts, "../manifests", hostname,
                                   basedefs.VAR_DIR))
    server.execute()
def createmanifest():
    """
    Generate the osclient manifest and record a user message telling where
    the keystonerc_admin file will be created.
    """
    client_host = controller.CONF['CONFIG_OSCLIENT_HOST'].strip()
    appendManifestFile("%s_osclient.pp" % client_host,
                       getManifestTemplate("openstack_client.pp"))

    # resolve root's home directory on the client host
    probe = utils.ScriptRunner(client_host)
    probe.append('echo $HOME')
    rc, root_home = probe.execute()

    msg = ("To use the command line tools you need to source the file "
           "%s/keystonerc_admin created on %s")
    controller.MESSAGES.append(msg % (root_home.strip(), client_host))
def waitforpuppet(currently_running): while currently_running: for hostname, log in currently_running: server = utils.ScriptRunner(hostname) server.append("test -e %s"%log) server.append("cat %s"%log) print "Testing if puppet apply is finished : %s"%os.path.split(log)[1], try: # Errors are expected here if the puppet run isn't finished so we suppress their logging server.execute(logerrors=False) currently_running.remove((hostname,log)) print "OK" except Exception, e: # the test raises an exception if the file doesn't exist yet time.sleep(3) print
def installKeys():
    """
    Append the configured ssh public key to root's authorized_keys on every
    configured host, creating ~/.ssh with restrictive permissions first.
    """
    with open(controller.CONF["CONFIG_SSH_KEY"]) as fp:
        sshkeydata = fp.read().strip()
    for hostname in gethostlist(controller.CONF):
        # strip an optional "/suffix" part of the host entry
        hostname = hostname.split('/')[0]
        runner = utils.ScriptRunner(hostname)
        # TODO replace all that with ssh-copy-id
        runner.append("mkdir -p ~/.ssh")
        runner.append("chmod 500 ~/.ssh")
        add_key = ("grep '%s' ~/.ssh/authorized_keys > /dev/null 2>&1 "
                   "|| echo %s >> ~/.ssh/authorized_keys"
                   % (sshkeydata, sshkeydata))
        runner.append(add_key)
        runner.append("chmod 400 ~/.ssh/authorized_keys")
        runner.append("restorecon -r ~/.ssh")
        runner.execute()
def bring_up_ifcfg(host, device):
    """
    Bring the given network device up when it is currently down.

    Raises ScriptRuntimeError when the interface cannot be brought up.
    """
    runner = utils.ScriptRunner(host)
    runner.append('ip link show up | grep "%s"' % device)
    try:
        runner.execute()
        return  # device already listed among the interfaces that are up
    except ScriptRuntimeError:
        pass

    # not up yet: try to raise it
    runner.clear()
    runner.append('ip link set dev %s up' % device)
    try:
        runner.execute()
    except ScriptRuntimeError:
        msg = ('Failed to bring up network interface %s on host %s.'
               ' Interface should be up so Openstack can work'
               ' properly.' % (device, host))
        raise ScriptRuntimeError(msg)
def serverprep():
    """
    Prepare each configured host for installation: optionally install EPEL,
    create the puppet manifest directory, add user-supplied yum repositories
    and register to Red Hat repositories when credentials are configured.
    """
    for hostname in gethostlist(controller.CONF):
        # strip an optional "/suffix" part of the host entry
        hostname = hostname.split('/')[0]
        server = utils.ScriptRunner(hostname)

        # install epel if on rhel and epel is configured
        if controller.CONF["CONFIG_USE_EPEL"] == 'y':
            server.append(
                "grep 'Red Hat Enterprise Linux' /etc/redhat-release && "
                "( rpm -q epel-release || "
                "rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-7.noarch.rpm ) "
                "|| echo -n ''")

        server.append("mkdir -p %s" % basedefs.PUPPET_MANIFEST_DIR)

        # add extra yum repositories if configured
        repo_csv = controller.CONF["CONFIG_REPO"].strip()
        if repo_csv:
            for index, url in enumerate(repo_csv.split(',')):
                reponame = 'packstack_%d' % index
                server.append(
                    'echo "[%s]\nname=%s\nbaseurl=%s\nenabled=1\ngpgcheck=0"'
                    ' > /etc/yum.repos.d/%s.repo'
                    % (reponame, reponame, url, reponame))

        # subscribe to Red Hat Repositories if configured
        rh_user = controller.CONF["CONFIG_RH_USERNAME"].strip()
        if rh_user:
            rh_pass = controller.CONF["CONFIG_RH_PASSWORD"].strip()
            server.append(
                "subscription-manager register --username=%s --password=%s --autosubscribe || true"
                % (rh_user, rh_pass))
            server.append(
                "subscription-manager list --consumed | grep -i openstack || "
                "subscription-manager subscribe --pool $(subscription-manager list --available | grep -e 'Red Hat OpenStack' -m 1 -A 2 | grep 'Pool Id' | awk '{print $3}')")
            server.append("yum clean all")
            server.append(
                "yum-config-manager --enable rhel-server-ost-6-folsom-rpms")
        server.execute()
def check_device(host, device):
    """
    Validate that *device* on *host* is usable for swift storage.

    The remote script exits 0 when the device exists and is either not
    mounted at all or mounted under /srv/node; otherwise it exits 1 and
    ScriptRunner raises ScriptRuntimeError.
    """
    checks = [
        # the device MUST exist
        'ls -l /dev/%s',
        # if it is not mounted then we can use it
        'grep "/dev/%s " /proc/self/mounts || exit 0',
        # if it is mounted then the mount point has to be in /srv/node
        'grep "/dev/%s /srv/node" /proc/self/mounts && exit 0',
    ]
    runner = utils.ScriptRunner(host)
    for check in checks:
        runner.append(check % device)
    # if we got here without exiting then we can't use this device
    runner.append('exit 1')
    runner.execute()
    return False
def serverprep():
    """
    Prepare every configured host for installation.

    Per host this (optionally) registers to RHSM and/or an RHN Satellite,
    installs EPEL, raises the RHOS repo priority, creates the per-run
    packstack temp directory (recorded in controller.temp_map) and adds any
    user-supplied yum repositories. Order matters: registration must happen
    before any yum operation on the host.
    """
    config = controller.CONF
    rh_username = config["CONFIG_RH_USER"].strip()
    rh_password = config["CONFIG_RH_PW"].strip()

    # hosts already registered to Satellite, so each host registers only once
    sat_registered = set()
    sat_url = config["CONFIG_SATELLITE_URL"].strip()
    if sat_url:
        # build the keyword arguments for run_rhn_reg from the Satellite config
        sat_flags = map(lambda i: i.strip(),
                        config["CONFIG_SATELLITE_FLAGS"].split(','))
        sat_proxy_user = config.get("CONFIG_SATELLITE_PROXY_USER", '')
        sat_proxy_pass = config.get("CONFIG_SATELLITE_PROXY_PW", '')
        sat_args = {
            'username': config["CONFIG_SATELLITE_USER"].strip(),
            'password': config["CONFIG_SATELLITE_PW"].strip(),
            'cacert': config["CONFIG_SATELLITE_CACERT"].strip(),
            'activation_key': config["CONFIG_SATELLITE_AKEY"].strip(),
            'profile_name': config["CONFIG_SATELLITE_PROFILE"].strip(),
            'proxy_host': config["CONFIG_SATELLITE_PROXY"].strip(),
            'proxy_user': sat_proxy_user.strip(),
            'proxy_pass': sat_proxy_pass.strip(),
            'flags': sat_flags
        }

    for hostname in gethostlist(config):
        # strip an optional "/suffix" part of the host entry
        if '/' in hostname:
            hostname = hostname.split('/')[0]

        # Subscribe to Red Hat Repositories if configured
        if rh_username:
            run_rhsm_reg(hostname, rh_username, rh_password,
                         config["CONFIG_RH_BETA_REPO"] == 'y')

        # Subscribe to RHN Satellite if configured
        if sat_url and hostname not in sat_registered:
            run_rhn_reg(hostname, sat_url, **sat_args)
            sat_registered.add(hostname)

        server = utils.ScriptRunner(hostname)

        # install epel if on rhel (or popular derivative thereof) and epel
        # is configured; a throwaway repo file is used so the host's own
        # yum configuration is not touched
        if config["CONFIG_USE_EPEL"] == 'y':
            server.append("REPOFILE=$(mktemp)")
            server.append("cat /etc/yum.conf > $REPOFILE")
            server.append("echo -e '[packstack-epel]\nname=packstack-epel\n"
                          "enabled=1\n"
                          "mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch'"
                          ">> $REPOFILE")
            server.append("grep -e 'Red Hat Enterprise Linux' -e 'CentOS' -e 'Scientific Linux' /etc/redhat-release && "
                          "( rpm -q epel-release || yum install -y --nogpg -c $REPOFILE epel-release ) || echo -n ''")
            server.append("rm -rf $REPOFILE")

        # set highest priority of RHOS repository if EPEL is installed and
        # the repo rhel-server-ost-6-folsom-rpms exists in redhat.repo
        # If RHOS has been installed we can disable EPEL when installing
        # openstack-utils
        yum_opts = ""
        if config["CONFIG_RH_USER"].strip():
            yum_opts += "--disablerepo='epel*'"
        server.append("rpm -q epel-release && "
                      "yum install -y %s openstack-utils yum-plugin-priorities || true"
                      % yum_opts)
        subs_cmd = ('rpm -q epel-release && '
                    'grep %(repo)s %(repo_file)s && '
                    'openstack-config --set %(repo_file)s %(repo)s priority %(priority)s || true')
        server.append(subs_cmd % {"repo_file": "/etc/yum.repos.d/redhat.repo",
                                  "repo": "rhel-server-ost-6-folsom-rpms",
                                  "priority": 1})

        # Create the packstack tmp directory
        if hostname not in controller.temp_map:
            # TO-DO: Move this to packstack.installer.setup_controller
            server.append("mkdir -p %s" % basedefs.PACKSTACK_VAR_DIR)
            # Separately create the tmp directory for this packstack run,
            # this will fail if the directory already exists
            host_dir = os.path.join(basedefs.PACKSTACK_VAR_DIR,
                                    uuid.uuid4().hex)
            server.append("mkdir --mode 0700 %s" % host_dir)
            server.append("mkdir %s/resources" % host_dir)
            server.append("mkdir --mode 0700 %s"
                          % os.path.join(host_dir, 'modules'))
            controller.temp_map[hostname] = host_dir

        # Add yum repositories if configured
        CONFIG_REPO = config["CONFIG_REPO"].strip()
        if CONFIG_REPO:
            for i, url in enumerate(CONFIG_REPO.split(',')):
                reponame = 'packstack_%d' % i
                server.append('echo "[%s]\nname=%s\nbaseurl=%s\nenabled=1\npriority=1\ngpgcheck=0"'
                              ' > /etc/yum.repos.d/%s.repo'
                              % (reponame, reponame, url, reponame))

        server.append("yum clean metadata")
        server.execute()
def run_rhn_reg(host, server_url, username=None, password=None,
                cacert=None, activation_key=None, profile_name=None,
                proxy_host=None, proxy_user=None, proxy_pass=None,
                flags=None):
    """
    Registers given host to given RHN Satellite server. To successfully
    register either activation_key or username/password is required.

    Raises InstallError when neither an activation key nor username
    credentials are provided. Passwords are masked in the execution log.
    """
    logging.debug('Setting RHN Satellite server: %s.' % locals())

    mask = []
    cmd = ['/usr/sbin/rhnreg_ks']
    server = utils.ScriptRunner(host)

    # rhnreg_ks expects the XMLRPC endpoint, append it when missing
    # (fix: replaces the fragile `cond and a or b` expression)
    if not server_url.rstrip('/').endswith('/XMLRPC'):
        server_url = '%s/XMLRPC' % server_url
    cmd.extend(['--serverUrl', server_url])

    if activation_key:
        cmd.extend(['--activationkey', activation_key])
    elif username:
        cmd.extend(['--username', username])
        if password:
            cmd.extend(['--password', password])
            mask.append(password)
    else:
        raise InstallError('Either RHN Satellite activation key or '
                           'username/password must be provided.')

    if cacert:
        # use and if required download given certificate
        location = "/etc/sysconfig/rhn/%s" % os.path.basename(cacert)
        if not os.path.isfile(location):
            logging.debug('Downloading cacert from %s.' % server_url)
            wget_cmd = ('ls %(location)s &> /dev/null && echo -n "" || '
                        'wget -nd --no-check-certificate --timeout=30 '
                        '--tries=3 -O "%(location)s" "%(cacert)s"' % locals())
            server.append(wget_cmd)
        cmd.extend(['--sslCACert', location])

    if profile_name:
        cmd.extend(['--profilename', profile_name])
    if proxy_host:
        cmd.extend(['--proxy', proxy_host])
        if proxy_user:
            cmd.extend(['--proxyUser', proxy_user])
            if proxy_pass:
                cmd.extend(['--proxyPassword', proxy_pass])
                mask.append(proxy_pass)

    # fix: copy the list so the caller's `flags` argument is not mutated —
    # the original appended 'force' to the passed-in list, so a list reused
    # across several calls (one per host) accumulated duplicate --force flags
    flags = list(flags or [])
    flags.append('force')
    for i in flags:
        cmd.append('--%s' % i)

    server.append(' '.join(cmd))
    server.append('yum clean metadata')
    server.execute(maskList=mask)
def installpuppet():
    """Ensure the puppet package is installed on every configured host."""
    for hostname in gethostlist(controller.CONF):
        runner = utils.ScriptRunner(hostname)
        runner.append("rpm -q puppet || yum install -y puppet")
        runner.execute()
def install_cinder_deps():
    """Ensure the lvm2 package is installed on the cinder host."""
    runner = utils.ScriptRunner(controller.CONF['CONFIG_CINDER_HOST'])
    # the resulting command is identical to the original's templated form
    runner.append("rpm -q lvm2 || yum install -y lvm2")
    runner.execute()
def check_cinder_vg():
    """
    Ensure the 'cinder-volumes' volume group exists on the cinder host.

    When CONFIG_CINDER_VOLUMES_CREATE is not 'y', a missing group raises
    exceptions.MissingRequirements. Otherwise a loopback-file backed volume
    group of CONFIG_CINDER_VOLUMES_SIZE is created, persisted via rc.local,
    and verified; the loop device is released again if creation fails.
    """
    cinders_volume = 'cinder-volumes'

    # Do we have a cinder-volumes vg?
    have_cinders_volume = False
    server = utils.ScriptRunner(controller.CONF['CONFIG_CINDER_HOST'])
    server.append('vgdisplay %s' % cinders_volume)
    try:
        server.execute()
        have_cinders_volume = True
    except ScriptRuntimeError:
        # vgdisplay returned non-zero -> the volume group is absent
        pass

    if controller.CONF["CONFIG_CINDER_VOLUMES_CREATE"] != "y":
        if not have_cinders_volume:
            raise exceptions.MissingRequirements(
                "The cinder server should"
                " contain a cinder-volumes volume group")
    else:
        if have_cinders_volume:
            controller.MESSAGES.append(
                output_messages.INFO_CINDER_VOLUMES_EXISTS)
            return

        server = utils.ScriptRunner(controller.CONF['CONFIG_CINDER_HOST'])
        logging.info("A new cinder volumes group will be created")
        err = "Cinder's volume group '%s' could not be created" % \
            cinders_volume

        cinders_volume_path = '/var/lib/cinder'
        server.append('mkdir -p %s' % cinders_volume_path)
        logging.debug("Volume's path: %s" % cinders_volume_path)

        cinders_volume_path = os.path.join(cinders_volume_path,
                                           cinders_volume)
        # create a sparse file (count=0, seek=<size>) to back the loop device
        server.append('dd if=/dev/zero of=%s bs=1 count=0 seek=%s' %
                      (cinders_volume_path,
                       controller.CONF['CONFIG_CINDER_VOLUMES_SIZE']))
        # attach the file to the first free loop device and remember it
        server.append('LOFI=$(losetup --show -f %s)' % cinders_volume_path)
        server.append('pvcreate $LOFI')
        server.append('vgcreate %s $LOFI' % cinders_volume)

        # Add the loop device on boot
        server.append('grep %s /etc/rc.d/rc.local || '
                      'echo losetup $LOFI %s >> /etc/rc.d/rc.local' %
                      (cinders_volume, cinders_volume_path))
        # ensure rc.local has a shebang and is executable
        server.append('grep "#!" /etc/rc.d/rc.local || '
                      'sed -i \'1i#!/bin/sh\' /etc/rc.d/rc.local')
        server.append('chmod +x /etc/rc.d/rc.local')

        # Let's make sure it exists
        server.append('vgdisplay %s' % cinders_volume)

        try:
            server.execute()
        except ScriptRuntimeError:
            # Release loop device if cinder's volume creation fails.
            try:
                logging.debug("Release loop device, volume creation failed")
                server = utils.ScriptRunner(
                    controller.CONF['CONFIG_CINDER_HOST'])
                server.append('losetup -d $(losetup -j %s | cut -d : -f 1)' %
                              cinders_volume_path)
                server.execute()
            except:
                # best-effort cleanup; the MissingRequirements below is the
                # error that matters to the caller
                pass
            raise exceptions.MissingRequirements(err)
def runCleanup():
    """Remove all generated *.pp manifests from the local manifest directory."""
    cleaner = utils.ScriptRunner()
    cleaner.append("rm -rf %s/*pp" % basedefs.PUPPET_MANIFEST_DIR)
    cleaner.execute()