def handle(_name, cfg, _cloud, _log, _args):
    # If there isn't a salt key in the configuration don't do anything
    if 'salt_minion' not in cfg:
        return
    salt_cfg = cfg['salt_minion']
    # Start by installing the salt package ...
    cc.install_packages(("salt", ))
    config_dir = '/etc/salt'
    if not os.path.isdir(config_dir):
        os.makedirs(config_dir)
    # ... and then update the salt configuration
    if 'conf' in salt_cfg:
        # Add all sections from the conf object to /etc/salt/minion
        minion_config = os.path.join(config_dir, 'minion')
        with open(minion_config, 'w') as minion_fh:
            yaml.dump(salt_cfg['conf'], minion_fh,
                      default_flow_style=False)
    # ... copy the key pair if specified
    if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
        pki_dir = '/etc/salt/pki'
        cumask = os.umask(077)
        if not os.path.isdir(pki_dir):
            os.makedirs(pki_dir)
        pub_name = os.path.join(pki_dir, 'minion.pub')
        pem_name = os.path.join(pki_dir, 'minion.pem')
        with open(pub_name, 'w') as f:
            f.write(salt_cfg['public_key'])
        with open(pem_name, 'w') as f:
            f.write(salt_cfg['private_key'])
        os.umask(cumask)

    # Start salt-minion
    subprocess.check_call(['service', 'salt-minion', 'start'])
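
# A minimal sketch of the cloud-config dict this handler consumes. Key names
# are taken from the lookups above; the master address and key bodies are
# hypothetical placeholders.
example_salt_cfg = {
    'salt_minion': {
        'conf': {'master': 'salt.example.com'},  # dumped as YAML to /etc/salt/minion
        'public_key': '-----BEGIN PUBLIC KEY-----\n<minion public key>',
        'private_key': '-----BEGIN RSA PRIVATE KEY-----\n<minion private key>',
    },
}
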
# mkdirs and ruby_version_default are module-level helpers in the original
# source; the default ruby version below is an assumed value:
ruby_version_default = '1.8'


def handle(_name, cfg, cloud, log, _args):
    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        return
    chef_cfg = cfg['chef']

    # ensure the chef directories we use exist
    mkdirs(['/etc/chef', '/var/log/chef', '/var/lib/chef',
            '/var/cache/chef', '/var/backups/chef', '/var/run/chef'])

    # set the validation key based on the presence of either 'validation_key'
    # or 'validation_cert'. In the case where both exist, 'validation_key'
    # takes precedence
    for key in ('validation_key', 'validation_cert'):
        if key in chef_cfg and chef_cfg[key]:
            with open('/etc/chef/validation.pem', 'w') as validation_key_fh:
                validation_key_fh.write(chef_cfg[key])
            break

    # create the chef config from template
    util.render_to_file('chef_client.rb', '/etc/chef/client.rb',
        {'server_url': chef_cfg['server_url'],
         'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
                                          cloud.datasource.get_instance_id()),
         'environment': util.get_cfg_option_str(chef_cfg, 'environment',
                                                '_default'),
         'validation_name': chef_cfg['validation_name']})

    # set the firstboot json
    with open('/etc/chef/firstboot.json', 'w') as firstboot_json_fh:
        initial_json = {}
        if 'run_list' in chef_cfg:
            initial_json['run_list'] = chef_cfg['run_list']
        if 'initial_attributes' in chef_cfg:
            initial_attributes = chef_cfg['initial_attributes']
            for k in initial_attributes.keys():
                initial_json[k] = initial_attributes[k]
        firstboot_json_fh.write(json.dumps(initial_json))

    # If chef is not installed, we install chef based on 'install_type'
    if not os.path.isfile('/usr/bin/chef-client'):
        install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                               'packages')
        if install_type == "gems":
            # this will install and run the chef-client from gems
            chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
            ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                                   ruby_version_default)
            install_chef_from_gems(ruby_version, chef_version)
            # and finally, run chef-client
            log.debug('running chef-client')
            subprocess.check_call(['/usr/bin/chef-client', '-d', '-i', '1800',
                                   '-s', '20'])
        else:
            # this will install and run the chef-client from packages
            cc.install_packages(('chef',))
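
# A sketch of the 'chef' cloud-config read above. Key names come from the
# lookups in the handler; the server URL, run list, and attribute values are
# hypothetical.
example_chef_cfg = {
    'chef': {
        'server_url': 'https://chef.example.com:4000',
        'validation_name': 'example-validator',
        'validation_key': '-----BEGIN RSA PRIVATE KEY-----\n<validator key>',
        'run_list': ['recipe[apache2]', 'role[webserver]'],
        'initial_attributes': {'apache': {'prefork': {'maxclients': 100}}},
        'install_type': 'packages',  # 'gems' selects the gem install path
    },
}
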
# FakeSecHead and the two cert paths below are module-level definitions in
# the original source; they are reconstructed here from how the handler uses
# them (the '[nullsection]' header injected by FakeSecHead is stripped again
# when the file is written back out):
class FakeSecHead(object):
    # Wrap a file object and prepend a fake section header so that
    # ConfigParser can parse mcollective's section-less server.cfg
    def __init__(self, fp):
        self.fp = fp
        self.sechead = '[nullsection]\n'

    def readline(self):
        if self.sechead:
            try:
                return self.sechead
            finally:
                self.sechead = None
        return self.fp.readline()


pubcert_file = '/etc/mcollective/ssl/server-public.pem'
pricert_file = '/etc/mcollective/ssl/server-private.pem'


def handle(_name, cfg, _cloud, _log, _args):
    # If there isn't a mcollective key in the configuration don't do anything
    if 'mcollective' not in cfg:
        return
    mcollective_cfg = cfg['mcollective']
    # Start by installing the mcollective package ...
    cc.install_packages(("mcollective", ))

    # ... and then update the mcollective configuration
    if 'conf' in mcollective_cfg:
        # Create object for reading server.cfg values
        mcollective_config = ConfigParser.ConfigParser()
        # Read server.cfg values from original file in order to be able to mix
        # the rest up
        mcollective_config.readfp(
            FakeSecHead(open('/etc/mcollective/'
                             'server.cfg')))
        for cfg_name, cfg in mcollective_cfg['conf'].iteritems():
            if cfg_name == 'public-cert':
                util.write_file(pubcert_file, cfg, mode=0644)
                mcollective_config.set(cfg_name, 'plugin.ssl_server_public',
                                       pubcert_file)
                mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
            elif cfg_name == 'private-cert':
                util.write_file(pricert_file, cfg, mode=0600)
                mcollective_config.set(cfg_name, 'plugin.ssl_server_private',
                                       pricert_file)
                mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
            else:
                # Iterate through the config items; we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for o, v in cfg.iteritems():
                    mcollective_config.set(cfg_name, o, v)
        # Now that the config is complete, rename the
        # previous server.cfg and create our new one
        os.rename('/etc/mcollective/server.cfg',
                  '/etc/mcollective/server.cfg.old')
        outputfile = StringIO.StringIO()
        mcollective_config.write(outputfile)
        # Now that we have the whole file, write it to disk minus the fake
        # section header.  ConfigParser was used because it generally works:
        # below we strip the initial 'nullsection' header and change
        # 'key = value' to 'key: value'.  The global search and replace of
        # '=' with ':' could be problematic, though, and most likely needs
        # fixing.
        util.write_file('/etc/mcollective/server.cfg',
                        outputfile.getvalue().replace('[nullsection]\n',
                                                      '').replace(' =', ':'),
                        mode=0644)

    # Start mcollective
    subprocess.check_call(['service', 'mcollective', 'start'])
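
# A sketch of the 'mcollective' cloud-config consumed above. Besides the
# special 'public-cert'/'private-cert' keys, each entry under 'conf' is
# treated as a config section holding option/value pairs; options placed
# under 'nullsection' end up un-sectioned once the fake header is stripped.
# The option shown is hypothetical.
example_mcollective_cfg = {
    'mcollective': {
        'conf': {
            'public-cert': '-----BEGIN CERTIFICATE-----\n<server cert>',
            'private-cert': '-----BEGIN RSA PRIVATE KEY-----\n<server key>',
            'nullsection': {'loglevel': 'debug'},
        },
    },
}
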
def install_chef_from_gems(ruby_version, chef_version=None):
    cc.install_packages(get_ruby_packages(ruby_version))
    if not os.path.exists('/usr/bin/gem'):
        os.symlink('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
    if not os.path.exists('/usr/bin/ruby'):
        os.symlink('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
    if chef_version:
        # pass the version as its own argv element so gem parses it cleanly
        subprocess.check_call(['/usr/bin/gem', 'install', 'chef',
                               '-v', chef_version, '--no-ri',
                               '--no-rdoc', '--bindir', '/usr/bin', '-q'])
    else:
        subprocess.check_call(['/usr/bin/gem', 'install', 'chef',
                               '--no-ri', '--no-rdoc', '--bindir',
                               '/usr/bin', '-q'])
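
# Example invocations (a sketch; ruby_version_default above supplies the
# usual default, and the pinned chef version here is hypothetical):
# install_chef_from_gems('1.8')             # latest chef gem for ruby 1.8
# install_chef_from_gems('1.8', '0.10.8')   # pin a specific chef version
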
def handle(_name, cfg, cloud, log, _args):
    # If there isn't a puppet key in the configuration don't do anything
    if 'puppet' not in cfg:
        return
    puppet_cfg = cfg['puppet']
    # Start by installing the puppet package ...
    cc.install_packages(("puppet", ))

    # ... and then update the puppet configuration
    if 'conf' in puppet_cfg:
        # Add all sections from the conf object to puppet.conf
        puppet_conf_fh = open('/etc/puppet/puppet.conf', 'r')
        # Create object for reading puppet.conf values
        puppet_config = ConfigParser.ConfigParser()
        # Read puppet.conf values from original file in order to be able to
        # mix the rest up
        puppet_config.readfp(
            StringIO.StringIO(''.join(i.lstrip()
                                      for i in puppet_conf_fh.readlines())))
        # Close original file, no longer needed
        puppet_conf_fh.close()
        for cfg_name, cfg in puppet_cfg['conf'].iteritems():
            # ca_cert configuration is a special case
            # Dump the puppetmaster ca certificate in the correct place
            if cfg_name == 'ca_cert':
                # Puppet ssl sub-directory isn't created yet
                # Create it with the proper permissions and ownership
                os.makedirs('/var/lib/puppet/ssl')
                os.chmod('/var/lib/puppet/ssl', 0771)
                os.chown('/var/lib/puppet/ssl',
                         pwd.getpwnam('puppet').pw_uid, 0)
                os.makedirs('/var/lib/puppet/ssl/certs/')
                os.chown('/var/lib/puppet/ssl/certs/',
                         pwd.getpwnam('puppet').pw_uid, 0)
                ca_fh = open('/var/lib/puppet/ssl/certs/ca.pem', 'w')
                ca_fh.write(cfg)
                ca_fh.close()
                os.chown('/var/lib/puppet/ssl/certs/ca.pem',
                         pwd.getpwnam('puppet').pw_uid, 0)
                util.restorecon_if_possible('/var/lib/puppet', recursive=True)
            else:
                #puppet_conf_fh.write("\n[%s]\n" % (cfg_name))
                # If puppet.conf already has this section we don't want to
                # write it again
                if not puppet_config.has_section(cfg_name):
                    puppet_config.add_section(cfg_name)
                # Iterate through the config items; we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for o, v in cfg.iteritems():
                    if o == 'certname':
                        # Expand %f as the fqdn
                        v = v.replace("%f", socket.getfqdn())
                        # Expand %i as the instance id
                        v = v.replace("%i", cloud.datasource.get_instance_id())
                        # certname needs to be downcase
                        v = v.lower()
                    puppet_config.set(cfg_name, o, v)
                    #puppet_conf_fh.write("%s=%s\n" % (o, v))
        # Now that the config is complete, rename the
        # previous puppet.conf and create our new one
        os.rename('/etc/puppet/puppet.conf', '/etc/puppet/puppet.conf.old')
        with open('/etc/puppet/puppet.conf', 'wb') as configfile:
            puppet_config.write(configfile)
        util.restorecon_if_possible('/etc/puppet/puppet.conf')
    # Set puppet to automatically start
    if os.path.exists('/etc/default/puppet'):
        subprocess.check_call([
            'sed', '-i', '-e', 's/^START=.*/START=yes/', '/etc/default/puppet'
        ])
    elif os.path.exists('/bin/systemctl'):
        subprocess.check_call(['/bin/systemctl', 'enable', 'puppet.service'])
    elif os.path.exists('/sbin/chkconfig'):
        subprocess.check_call(['/sbin/chkconfig', 'puppet', 'on'])
    else:
        log.warn("Do not know how to enable puppet service on this system")
    # Start puppetd
    subprocess.check_call(['service', 'puppet', 'start'])
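
# A sketch of the 'puppet' cloud-config read above. Per the certname
# handling, '%f' expands to the fqdn and '%i' to the instance id; the master
# hostname and section name are hypothetical.
example_puppet_cfg = {
    'puppet': {
        'conf': {
            'ca_cert': '-----BEGIN CERTIFICATE-----\n<puppetmaster CA cert>',
            'puppetd': {
                'server': 'puppetmaster.example.com',
                'certname': '%i.%f',
            },
        },
    },
}
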
def handle(_name, cfg, cloud, log, _args):
	print "Searching for cvmfs reference..."
	# If there isn't a cvmfs reference in the configuration don't do anything
	if 'cvmfs' not in cfg:
		print "cvmfs configuration was not found"
		return

	print "Ready to setup cvmfs."
	cvmfs_cfg = cfg['cvmfs']
	if 'cvmfs' in cfg:
		print "Configuring cvmfs...(this may take a while)"
		# Let's retrieve the current cvmfs release
		ReleaseAux = subprocess.Popen(["rpm", "-q", "--queryformat", "%{version}", "sl-release"], stdout=subprocess.PIPE)
		Release, ReleaseErr = ReleaseAux.communicate()
		# print Release		# If you want to check the release number
		
		ReleaseMajor = Release[0]

		arch = platform.machine()	# Platform info
		
		# cvmfs package url
		cvmfs_rpm_url = 'http://cvmrepo.web.cern.ch/cvmrepo/yum/cvmfs/EL/'+Release+'/'+arch+'/cvmfs-release-2-2.el'+ReleaseMajor+'.noarch.rpm'
		# Downloading cvmfs .rpm file to /home path
		urllib.urlretrieve(cvmfs_rpm_url, '/home/cvmfs.rpm')
		if subprocess.check_call(["rpm", "-i", "/home/cvmfs.rpm"]):	# If it returns 0 then it is fine
			print ".rpm installation failed"
			return
		else:
			print ".rpm installation successful."

		# Install cvmfs packages
		cc.install_packages(("cvmfs-keys","cvmfs","cvmfs-init-scripts",))   # TODO: create a failure check here. If it fails, do a yum -y clean all
		# Base setup
		subprocess.call(["cvmfs_config", "setup"])
		# Start autofs and make it starting automatically after reboot
		subprocess.call(['service','autofs','start'])
		subprocess.call(['chkconfig','autofs','on'])
		subprocess.call(['cvmfs_config','chksetup'])

		LocalFile = '/etc/cvmfs/default.local'
		DomainFile = '/etc/cvmfs/domain.d/cern.ch.local'
		quota_aux_var = 1   # Aux variable to track whether to write the default quota-limit value
		# To configure cvmfs...

		if 'local' in cvmfs_cfg:
			local_args = cvmfs_cfg['local']
			flocal = open(LocalFile, 'w')
			for prop_name, value in local_args.iteritems():
				if prop_name == 'repositories':
					flocal.write('CVMFS_REPOSITORIES='+value+'\n')
				if prop_name == 'cache-base':
					flocal.write('CVMFS_CACHE_BASE='+value+'\n')
				if prop_name == 'default-domain':
					flocal.write('CVMFS_DEFAULT_DOMAIN='+value+'\n')
				if prop_name == 'http-proxy':
					flocal.write('CVMFS_HTTP_PROXY='+value+'\n')
				if prop_name == 'quota-limit':
					flocal.write('CVMFS_QUOTA_LIMIT='+str(value)+'\n')
					quota_aux_var = 0
				if prop_name == 'cms-local-site':
					flocal.write('\nCMS_LOCAL_SITE='+str(value)+'\n')

			# Write some default configurations
			if quota_aux_var:
				flocal.write('CVMFS_QUOTA_LIMIT=8000\nCVMFS_TIMEOUT=5\nCVMFS_TIMEOUT_DIRECT=10\nCVMFS_NFILES=65535')
			else:
				flocal.write('CVMFS_TIMEOUT=5\nCVMFS_TIMEOUT_DIRECT=10\nCVMFS_NFILES=65535')

			# Close the file
			flocal.close()
		
		if 'domain' in cvmfs_cfg:
			domain_args = cvmfs_cfg['domain']
			if 'server' in domain_args:
				fdomain = open(DomainFile, 'w')
				fdomain.write('CVMFS_SERVER_URL='+domain_args['server']+'\n')
				fdomain.close()
		
		print "START cvmfs"
		# Start cvmfs
		subprocess.check_call(['service', 'cvmfs', 'start'])
		subprocess.check_call(['service', 'cvmfs', 'probe'])    # To mount the repositories
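
# A sketch of the 'cvmfs' cloud-config the handler above consumes. The
# 'local' keys map to CVMFS_* variables in /etc/cvmfs/default.local and
# 'domain'/'server' to CVMFS_SERVER_URL; all values are hypothetical.
example_cvmfs_cfg = {
    'cvmfs': {
        'local': {
            'repositories': 'atlas.cern.ch,cms.cern.ch',
            'cache-base': '/var/cache/cvmfs2',
            'http-proxy': 'http://squid.example.com:3128',
            'quota-limit': 8000,
        },
        'domain': {
            'server': 'http://cvmfs-stratum-one.example.com/opt',
        },
    },
}
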
def main():
    # expect to be called with
    #   name [ freq [ args ] ]
    #   to run the cloud-config job 'name' with the given args
    # or with
    #   all [ modename ]
    #   to read cloud config jobs from config (builtin -> system)
    #   and run all in order

    util.close_stdin()

    modename = "config"

    if len(sys.argv) < 2:
        Usage(sys.stderr)
        sys.exit(1)
    if sys.argv[1] == "all":
        name = "all"
        if len(sys.argv) > 2:
            modename = sys.argv[2]
    else:
        freq = None
        run_args = []
        name = sys.argv[1]
        if len(sys.argv) > 2:
            freq = sys.argv[2]
            if freq == "None":
                freq = None
        if len(sys.argv) > 3:
            run_args = sys.argv[3:]

    cfg_path = cloudinit.get_ipath_cur("cloud_config")
    cfg_env_name = cloudinit.cfg_env_name
    if cfg_env_name in os.environ:
        cfg_path = os.environ[cfg_env_name]

    cloud = cloudinit.CloudInit(ds_deps=[])  # ds_deps=[], get only cached
    try:
        cloud.get_data_source()
    except cloudinit.DataSourceNotFoundException as e:
        # no datasource was found; there's nothing to do
        sys.exit(0)

    cc = CC.CloudConfig(cfg_path, cloud)

    try:
        (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, modename)
        CC.redirect_output(outfmt, errfmt)
    except Exception as e:
        err("Failed to get and set output config: %s\n" % e)

    cloudinit.logging_set_from_cfg(cc.cfg)
    log = logging.getLogger()
    log.info("cloud-init-cfg %s" % sys.argv[1:])

    module_list = []
    if name == "all":
        modlist_cfg_name = "cloud_%s_modules" % modename
        module_list = CC.read_cc_modules(cc.cfg, modlist_cfg_name)
        if not len(module_list):
            err("no modules to run in cloud_config [%s]" % modename, log)
            sys.exit(0)
    else:
        module_list.append([name, freq] + run_args)

    failures = CC.run_cc_modules(cc, module_list, log)
    if len(failures):
        err("errors running cloud_config [%s]: %s" % (modename, failures), log)
    sys.exit(len(failures))
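
# Invocation sketch for this entry point, per the argument handling above
# (the installed script name is assumed):
#   cloud-init-cfg all [modename]        run every module listed in
#                                        cloud_<modename>_modules
#   cloud-init-cfg <name> [freq [args]]  run a single module with an optional
#                                        frequency and extra arguments
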
def handle(_name, cfg, cloud, log, _args):
    update = util.get_cfg_option_bool(cfg, 'apt_update', False)
    upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)

    release = get_release()

    mirror = find_apt_mirror(cloud, cfg)

    log.debug("selected mirror at: %s" % mirror)

    if not util.get_cfg_option_bool(cfg, \
        'apt_preserve_sources_list', False):
        generate_sources_list(release, mirror)
        old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', \
            "archive.ubuntu.com/ubuntu")
        rename_apt_lists(old_mir, mirror)

    # set up proxy
    proxy = cfg.get("apt_proxy", None)
    proxy_filename = "/etc/apt/apt.conf.d/95cloud-init-proxy"
    if proxy:
        try:
            contents = "Acquire::HTTP::Proxy \"%s\";\n"
            with open(proxy_filename, "w") as fp:
                fp.write(contents % proxy)
        except Exception as e:
            log.warn("Failed to write proxy to %s" % proxy_filename)
    elif os.path.isfile(proxy_filename):
        os.unlink(proxy_filename)

    # process 'apt_sources'
    if 'apt_sources' in cfg:
        errors = add_sources(cfg['apt_sources'],
                             {'MIRROR': mirror, 'RELEASE': release})
        for e in errors:
            log.warn("Source Error: %s\n" % ':'.join(e))

    dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
    if dconf_sel:
        log.debug("setting debconf selections per cloud config")
        try:
            util.subp(('debconf-set-selections', '-'), dconf_sel)
        except:
            log.error("Failed to run debconf-set-selections")
            log.debug(traceback.format_exc())

    pkglist = util.get_cfg_option_list_or_str(cfg, 'packages', [])

    errors = []
    if update or len(pkglist) or upgrade:
        try:
            cc.update_package_sources()
        except subprocess.CalledProcessError as e:
            log.warn("apt-get update failed")
            log.debug(traceback.format_exc())
            errors.append(e)

    if upgrade:
        try:
            cc.apt_get("upgrade")
        except subprocess.CalledProcessError as e:
            log.warn("apt upgrade failed")
            log.debug(traceback.format_exc())
            errors.append(e)

    if len(pkglist):
        try:
            cc.install_packages(pkglist)
        except subprocess.CalledProcessError as e:
            log.warn("Failed to install packages: %s " % pkglist)
            log.debug(traceback.format_exc())
            errors.append(e)

    if len(errors):
        raise errors[0]

    return True
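
# A sketch of the apt-related cloud-config keys read above; the proxy,
# source, selections, and package values are hypothetical.
example_apt_cfg = {
    'apt_update': True,
    'apt_upgrade': True,
    'apt_proxy': 'http://proxy.example.com:3128',
    'apt_sources': [{'source': 'deb $MIRROR $RELEASE multiverse'}],
    'debconf_selections': 'debconf debconf/frontend select Noninteractive',
    'packages': ['build-essential'],
}
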
def handle(_name, cfg, cloud, log, _args):
    if 'condor' in cfg:
        condor_cc_cfg = cfg['condor']
        if 'master' in condor_cc_cfg and 'workernode' in condor_cc_cfg:
            print 'You cannot set condor master and condor workernode on the same machine.\n'
            print 'Exiting condor configuration...'
            return

        print "Checking for previous condor versions."

        if subprocess.call(['service', 'condor', 'stop']):
            # A non-zero return means the command failed, i.e. condor isn't installed
            print "No previous condor version was found! Moving on"
            OldVersion = False
        else:
            OldVersion = True
            print "Condor is already installed. Applying your configuration parameters and deleting the old ones."
            OldConfigFile_aux = subprocess.Popen(['find', '/', '-name', 'condor_config.local'],
                                                 stdout=subprocess.PIPE)
            OldConfigFile, olderr = OldConfigFile_aux.communicate()

            OldConfigFile = re.sub('\n', '', OldConfigFile)

            # Comment the lines above and uncomment the following ones if you want
            # a clean condor installation even when condor is already installed
            # subprocess.check_call(['pkill', '-f', 'condor'])
            # subprocess.check_call(['rpm', '-e', '$(rpm -qa | grep condor)'])
            # subprocess.check_call(['rm', '/etc/yum.repos.d/condor-stable-rhel5.repo'])

        # Condor configuration file
        ConfigFile = '/root/condor_config.local'

        # Default CONDOR_HOST
        Host = subprocess.Popen(["hostname", "-f"], stdout=subprocess.PIPE)
        Hostname, ReleaseErr = Host.communicate()
        Hostname = re.sub('\n', '', Hostname)

        if not OldVersion:
            CondorRepo = "http://www.cs.wisc.edu/condor/yum/repo.d/condor-stable-rhel6.repo"
            urllib.urlretrieve(CondorRepo, '/etc/yum.repos.d/condor.repo')

            # Defining the most suitable condor version for the machine
            arch = str(platform.machine())
            if arch == 'x86_64':
                arch = '.' + str(arch)
            else:
                arch = '.i'
            version0 = subprocess.Popen(['yum', 'info', 'condor%s' % arch], stdout=subprocess.PIPE)
            version1 = subprocess.Popen(['grep', 'Version   '], stdin=version0.stdout, stdout=subprocess.PIPE)
            version0.stdout.close()
            yum_version, verror = version1.communicate()
            yum_version = re.sub('\n', '', yum_version)
            yum_version = re.sub(' ', '', yum_version)

            yum_condor_version = yum_version.split(':')

            DownloadManually = False
            # If no version could be parsed, condor is not available in the yum
            # repository or some error has occurred
            if len(yum_condor_version) < 2:
                # In that case define and manually download the condor we want to install
                CondorVersion = "condor-7.8.7"  # Stable version
                DownloadManually = True
            else:
                CondorVersion = 'condor-' + str(yum_condor_version[1])

            # Sourcing from /etc/profile.d/condor.sh
            path_aux = subprocess.Popen(['echo ${PATH}'], stdout=subprocess.PIPE, shell=True)
            path_aux2 = subprocess.Popen(['tr', '\n', ':'], stdin=path_aux.stdout, stdout=subprocess.PIPE)
            path_aux.stdout.close()
            Path, perr = path_aux2.communicate()

            f3 = open('/etc/profile.d/condor.sh', 'a')  # Created if the file doesn't exist
            f3.write("export PATH=" + str(Path) + "/opt/" + CondorVersion + "/usr/bin:/opt/" + str(CondorVersion) + "/usr/sbin:/sbin\nexport CONDOR_CONFIG=/opt/" + str(CondorVersion) + "/etc/condor/condor_config\n")
            f3.close()

            os.environ['PATH'] = os.environ['PATH'] + ":/opt/" + CondorVersion + "/usr/bin:/opt/" + str(CondorVersion) + "/usr/sbin:/sbin"
            os.environ['CONDOR_CONFIG'] = "/opt/" + str(CondorVersion) + "/etc/condor/condor_config"
            # This sourcing is done here, instead of at the end, to avoid the situation
            # where the user logs in to the machine before the configuration is finished.

            print "Installing Condor dependencies..."
            cc.install_packages(("yum-downloadonly", "libvirt", "perl-XML-Simple", "openssl098e", "compat-expat1", "compat-openldap", "perl-DateManip", "perl-Time-HiRes", "policycoreutils-python",))

            if arch == '.i':
                # To avoid confusion between i386 and i686 (both 32 bits), just
                # 'yum install condor' when the machine is 32 bits
                arch = ''

            if not DownloadManually:
                subprocess.call(["yum -y install condor%s --downloadonly --downloaddir=/tmp" % arch], shell=True)

                r1 = subprocess.Popen(["ls -1 /tmp/condor-*.rpm"], stdout=subprocess.PIPE, shell=True)
                r2 = subprocess.Popen(["head", "-1"], stdin=r1.stdout, stdout=subprocess.PIPE)
                r1.stdout.close()
                CondorRPM, rerror = r2.communicate()
                CondorRPM = re.sub('\n', '', CondorRPM)
            else:
                # These arch redefinitions are needed because on the yum repo the
                # 32-bit condor is named condor.i386 while on the official website
                # it is condor.i686
                arch = str(platform.machine())
                # If condor is not available in the yum repository, download the
                # .rpm directly from the source.
                try:
                    urllib.urlretrieve('http://research.cs.wisc.edu/htcondor/yum/stable/rhel6/condor-7.8.7-86173.rhel6.3.' + arch + '.rpm', '/root/condor.rpm')  # Version 7.8.7
                except:
                    # If it failed, the arch is probably not compatible with the
                    # available condor versions
                    urllib.urlretrieve('http://research.cs.wisc.edu/htcondor/yum/stable/rhel6/condor-7.8.7-86173.rhel6.3.i686.rpm', '/root/condor.rpm')
                CondorRPM = '/root/condor.rpm'

            print "Condor installation:"
            subprocess.check_call(["rpm -ivh %s --relocate /usr=/opt/%s/usr --relocate /var=/opt/%s/var --relocate /etc=/opt/%s/etc" % (CondorRPM, CondorVersion, CondorVersion, CondorVersion)], shell=True)  # Relocating...
            # subprocess.check_call(["rpm -ivh %s" % CondorRPM], shell=True)  # Uncomment this line and comment the one above if you do not want to relocate the condor installation
	
        # Write new configuration file
        f = open(ConfigFile, 'w')

        # Default variables
        DaemonList = 'MASTER, STARTD'
        Highport = 24500
        Lowport = 20000
        CollectorHostPORT = 20001
        Start = 'True'
        Suspend = 'False'
        Preempt = 'False'
        Kill = 'False'
        QueueSuperUsers = 'root, condor'        
        AllowWrite = '*'
        StarterAllowRunasOwner = 'False'
        AllowDaemon = '*'
        HostAllowRead = '*'
        HostAllowWrite = '*'
        SecDaemonAuthentication = 'OPTIONAL'

        # PARAMETERS LIST
        if 'workernode' in condor_cc_cfg:
            condor_cfg = condor_cc_cfg['workernode']
            if 'condor-host' in condor_cfg:
                Hostname = condor_cfg['condor-host']
            f.write("CONDOR_HOST = "+str(Hostname)+'\n')

            f.write("COLLECTOR_NAME = Personal Condor at "+Hostname+'\n')

            CondorAdmin = Hostname
            UIDDomain = Hostname        

            if 'collector-host-port' in condor_cfg:
                CollectorHostPORT = condor_cfg['collector-host-port']
            f.write("COLLECTOR_HOST = "+str(Hostname)+':'+str(CollectorHostPORT)+'\n')

            if 'daemon-list' in condor_cfg:
                DaemonList = condor_cfg['daemon-list']
            f.write("DAEMON_LIST = "+DaemonList+'\n')

            if 'release-dir' in condor_cfg:
                f.write("RELEASE_DIR = "+condor_cfg['release-dir']+'\n')
        
            if 'local-dir' in condor_cfg:
                f.write("LOCAL_DIR = "+condor_cfg['local-dir']+'\n')
    
            if 'condor-admin' in condor_cfg:
                CondorAdmin = condor_cfg['condor-admin']
            f.write("CONDOR_ADMIN = "+str(CondorAdmin)+'\n')

            if 'queue-super-users' in condor_cfg:
                QueueSuperUsers = condor_cfg['queue-super-users']
            f.write("QUEUE_SUPER_USERS = "+str(QueueSuperUsers)+'\n')

            if 'highport' in condor_cfg:
                Highport = condor_cfg['highport']
            f.write("HIGHPORT = "+str(Highport)+'\n')

            if 'lowport' in condor_cfg:
                Lowport = condor_cfg['lowport']
            f.write("LOWPORT = "+str(Lowport)+'\n')

            if 'uid-domain' in condor_cfg:
                UIDDomain = condor_cfg['uid-domain']
            f.write("UID_DOMAIN = "+str(UIDDomain)+'\n')

            if 'allow-write' in condor_cfg:
                AllowWrite = condor_cfg['allow-write']    
            f.write("ALLOW_WRITE = "+str(AllowWrite)+'\n')

            if 'dedicated-execute-account-regexp' in condor_cfg:
                f.write("DEDICATED_EXECUTE_ACCOUNT_REGEXP = "+str(condor_cfg['dedicated-execute-account-regexp'])+'\n')

            if 'allow-daemon' in condor_cfg:
                AllowDaemon = condor_cfg['allow-daemon']
            f.write("ALLOW_DAEMON = "+str(AllowDaemon)+'\n')

            if 'starter-allow-runas-owner' in condor_cfg:
                StarterAllowRunasOwner = condor_cfg['starter-allow-runas-owner']    
            f.write("STARTER_ALLOW_RUNAS_OWNER = "+str(StarterAllowRunasOwner)+'\n')

            if 'java' in condor_cfg:
                f.write("JAVA = "+str(condor_cfg['java'])+'\n')

            if 'user-job-wrapper' in condor_cfg:
                f.write("USER_JOB_WRAPPER = "+str(condor_cfg['user-job-wrapper'])+'\n')

            if 'gsite' in condor_cfg:
                f.write("GSITE = "+str(condor_cfg['gsite'])+'\n')

            if 'startd-attrs' in condor_cfg:
                f.write("STARTD_ATTRS = "+str(condor_cfg['startd-attrs'])+'\n')

            if 'enable-ssh-to-job' in condor_cfg:
                f.write("ENABLE_SSH_TO_JOB = "+str(condor_cfg['enable-ssh-to-job'])+'\n')

            if 'certificate-mapfile' in condor_cfg:
                f.write("CERTIFICATE_MAPFILE = "+str(condor_cfg['certificate-mapfile'])+'\n')

            if 'ccb-address' in condor_cfg:
                f.write("CCB_ADDRESS = "+str(condor_cfg['ccb-address'])+'\n')
    
            if 'execute' in condor_cfg:
                f.write("EXECUTE = "+str(condor_cfg['execute'])+'\n')        

            if 'starter-debug' in condor_cfg:
                f.write("STARTER_DEBUG = "+str(condor_cfg['starter-debug'])+'\n')

            if 'startd-debug' in condor_cfg:
                f.write("STARTD_DEBUG = "+str(condor_cfg['startd-debug'])+'\n')

            if 'sec-default-authentication' in condor_cfg:
                f.write("SEC_DEFAULT_AUTHENTICATION = "+str(condor_cfg['sec-default-authentication'])+'\n')

            if 'sec-default-authentication-methods' in condor_cfg:
                f.write("SEC_DEFAULT_AUTHENTICATION_METHODS = "+str(condor_cfg['sec-default-authentication-methods'])+'\n')

            if 'sec-daemon-authentication' in condor_cfg:
                SecDaemonAuthentication = condor_cfg['sec-daemon-authentication']
            f.write("SEC_DAEMON_AUTHENTICATION = "+str(SecDaemonAuthentication)+'\n')

            if 'sec-password-file' in condor_cfg:
                f.write("SEC_PASSWORD_FILE = "+str(condor_cfg['sec-password-file'])+'\n')

            if 'update-collector-with-tcp' in condor_cfg:
                f.write("UPDATE_COLLECTOR_WITH_TCP = "+str(condor_cfg['update-collector-with-tcp'])+'\n')

            if 'max-job-retirement-time' in condor_cfg:
                f.write("MAXJOBRETIREMENTTIME = "+str(condor_cfg['max-job-retirement-time'])+'\n')

            if 'startd-cron-joblist' in condor_cfg:
                f.write("STARTD_CRON_JOBLIST = "+str(condor_cfg['startd-cron-joblist'])+'\n')

            if 'startd-cron-atlval-mode' in condor_cfg:
                f.write("STARTD_CRON_ATLVAL_MODE = "+str(condor_cfg['startd-cron-atlval-mode'])+'\n')

            if 'startd-cron-atlval-executable' in condor_cfg:
                f.write("STARTD_CRON_ATLVAL_EXECUTABLE = "+str(condor_cfg['startd-cron-atlval-executable'])+'\n')

            if 'startd-cron-atlval-period' in condor_cfg:
                f.write("STARTD_CRON_ATLVAL_PERIOD = "+str(condor_cfg['startd-cron-atlval-period'])+'\n')

            if 'startd-cron-atlval-job-load' in condor_cfg:
                f.write("STARTD_CRON_ATLVAL_JOB_LOAD = "+str(condor_cfg['startd-cron-atlval-job-load'])+'\n')

            if 'hostallow-write' in condor_cfg:
                HostAllowWrite = condor_cfg['hostallow-write']    
            f.write("HOSTALLOW_WRITE = "+str(HostAllowWrite)+'\n')
    
            if 'hostallow-read' in condor_cfg:
                HostAllowRead = condor_cfg['hostallow-read']    
            f.write("HOSTALLOW_READ = "+str(HostAllowRead)+'\n')

            if 'start' in condor_cfg:
                Start = condor_cfg['start']
            f.write("START = "+str(Start)+'\n')

            if 'suspend' in condor_cfg:
                Suspend = condor_cfg['suspend']
            f.write("SUSPEND = "+str(Suspend)+'\n')

            if 'preempt' in condor_cfg:
                Preempt = condor_cfg['preempt']
            f.write("PREEMPT = "+str(Preempt)+'\n')
        
            if 'kill' in condor_cfg:
                Kill = condor_cfg['kill']
            f.write("KILL = "+str(Kill)+'\n')


            # End of parameters
            ##############################################################################

            cid1 = subprocess.Popen(["cat", "/etc/passwd"], stdout=subprocess.PIPE)
            cid2 = subprocess.Popen(["grep", "condor:"],stdin=cid1.stdout, stdout=subprocess.PIPE)
            cid3 = subprocess.Popen(["awk", "-F:",'{print $3"."$4}'], stdin=cid2.stdout, stdout=subprocess.PIPE)
            cid1.stdout.close()
            cid2.stdout.close()
        
            CondorIDs, Err = cid3.communicate()
    
            f.write("CONDOR_IDS = "+str(CondorIDs)+'\n')

            # Dynamically writing SLOT users
            CPUs_aux = subprocess.Popen(['cat /proc/cpuinfo | grep processor | wc -l'], stdout=subprocess.PIPE, shell=True)
            CPUs, cpuerr = CPUs_aux.communicate()
            CPUs = re.sub('\n','', CPUs)  

            for count in range(1,int(CPUs)+1):
                f.write("SLOT"+str(count)+"_USER = user"+str(count)+'\n')
                os.system("useradd -m -s /sbin/nologin  user"+str(count)+" > /dev/null 2>&1\n")

        Start = 'False'
        DaemonList = 'COLLECTOR, MASTER, NEGOTIATOR, SCHEDD'
        if 'master' in condor_cc_cfg:
            condor_cfg = condor_cc_cfg['master']

            f.write("CONDOR_HOST = "+str(Hostname)+'\n')

            f.write("COLLECTOR_NAME = Personal Condor at "+Hostname+'\n')

            if 'collector-host-port' in condor_cfg:
                CollectorHostPORT = condor_cfg['collector-host-port']
            f.write("COLLECTOR_HOST = "+str(Hostname)+':'+str(CollectorHostPORT)+'\n')
            
            if 'highport' in condor_cfg:
                Highport = condor_cfg['highport']
            f.write("HIGHPORT = "+str(Highport)+'\n')

            if 'lowport' in condor_cfg:
                Lowport = condor_cfg['lowport']
            f.write("LOWPORT = "+str(Lowport)+'\n')

            if 'start' in condor_cfg:
                Start = condor_cfg['start']
            f.write("START = "+str(Start)+'\n')

            if 'suspend' in condor_cfg:
                Suspend = condor_cfg['suspend']
            f.write("SUSPEND = "+str(Suspend)+'\n')

            if 'preempt' in condor_cfg:
                Preempt = condor_cfg['preempt']
            f.write("PREEMPT = "+str(Preempt)+'\n')

            if 'kill' in condor_cfg:
                Kill = condor_cfg['kill']
            f.write("KILL = "+str(Kill)+'\n')

            if 'hostallow-write' in condor_cfg:
                HostAllowWrite = condor_cfg['hostallow-write']
            f.write("HOSTALLOW_WRITE = "+str(HostAllowWrite)+'\n')

            if 'hostallow-read' in condor_cfg:
                HostAllowRead = condor_cfg['hostallow-read']
            f.write("HOSTALLOW_READ = "+str(HostAllowRead)+'\n')

            if 'daemon-list' in condor_cfg:
                DaemonList = condor_cfg['daemon-list']
            f.write("DAEMON_LIST = "+DaemonList+'\n')

            cid1 = subprocess.Popen(["cat", "/etc/passwd"], stdout=subprocess.PIPE)
            cid2 = subprocess.Popen(["grep", "condor:"],stdin=cid1.stdout, stdout=subprocess.PIPE)
            cid3 = subprocess.Popen(["awk", "-F:",'{print $3"."$4}'], stdin=cid2.stdout, stdout=subprocess.PIPE)
            cid1.stdout.close()
            cid2.stdout.close()

            CondorIDs, Err = cid3.communicate()

            f.write("CONDOR_IDS = "+str(CondorIDs)+'\n')

            f.write("SEC_DAEMON_AUTHENTICATION = OPTIONAL\n")
            f.write("SEC_DEFAULT_AUTHENTICATION = OPTIONAL\n")

        f.close()
        subprocess.check_call(['/etc/init.d/iptables', 'stop'])  # The iptables should be configured instead of being stopped

        if not OldVersion:
            # Moving our config file to the right directory (erase the old config)
            subprocess.check_call(['rm', '-f', '/opt/%s/etc/condor/condor_config.local' % CondorVersion])
            subprocess.check_call(['cp', '/root/condor_config.local', '/opt/%s/etc/condor/' % CondorVersion])
            subprocess.call(['rm', '-f', '/root/condor_config.local'])

            subprocess.call(['chown', '-R', 'condor:condor', '/opt/%s' % CondorVersion])
            subprocess.call(['chmod', '-R', 'go+rwx', '/opt/%s/var/log' % CondorVersion])

            # Specifying additional default directories in /etc/ld.so.conf
            f2 = open('/etc/ld.so.conf', 'a')
            f2.write('/opt/' + CondorVersion + '/usr/lib64\n/opt/' + CondorVersion + '/usr/lib64/condor\n')
            f2.close()

            # Executing ldconfig
            subprocess.check_call(['/sbin/ldconfig'])
        else:
            # Moving our config file to the right directory (overwrite the old config)
            subprocess.check_call(['rm', '-f', '%s' % OldConfigFile])
            subprocess.check_call(['cp', '/root/condor_config.local', '%s' % OldConfigFile])
            subprocess.call(['rm', '-f', '/root/condor_config.local'])

        # Starting condor
        subprocess.check_call(['service', 'condor', 'start'])
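
# A sketch of the 'condor' cloud-config shape handled above. Exactly one of
# 'workernode' or 'master' may be present (both together are rejected at the
# top of the handler); option names come from the checks above, and the
# values are hypothetical.
example_condor_cfg = {
    'condor': {
        'workernode': {
            'condor-host': 'condor-master.example.com',
            'collector-host-port': 20001,
            'daemon-list': 'MASTER, STARTD',
            'lowport': 20000,
            'highport': 24500,
        },
    },
}
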
def main():
    util.close_stdin()

    cmds = ("start", "start-local")
    deps = {
        "start": (ds.DEP_FILESYSTEM, ds.DEP_NETWORK),
        "start-local": (ds.DEP_FILESYSTEM, )
    }

    cmd = ""
    if len(sys.argv) > 1:
        cmd = sys.argv[1]

    cfg_path = None
    if len(sys.argv) > 2:
        # this is really for debugging only
        # but you can invoke on development system with ./config/cloud.cfg
        cfg_path = sys.argv[2]

    if cmd not in cmds:
        sys.stderr.write("bad command %s. use one of %s\n" % (cmd, cmds))
        sys.exit(1)

    now = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
    try:
        uptimef = open("/proc/uptime")
        uptime = uptimef.read().split(" ")[0]
        uptimef.close()
    except IOError as e:
        warn("unable to open /proc/uptime\n")
        uptime = "na"

    cmdline_msg = None
    cmdline_exc = None
    if cmd == "start":
        target = "%s.d/%s" % (cloudinit.system_config,
                              "91_kernel_cmdline_url.cfg")
        if os.path.exists(target):
            cmdline_msg = "cmdline: %s existed" % target
        else:
            cmdline = util.get_cmdline()
            try:
                (key, url,
                 content) = cloudinit.get_cmdline_url(cmdline=cmdline)
                if key and content:
                    util.write_file(target, content, mode=0600)
                    cmdline_msg = ("cmdline: wrote %s from %s, %s" %
                                   (target, key, url))
                elif key:
                    cmdline_msg = ("cmdline: %s, %s had no cloud-config" %
                                   (key, url))
            except Exception:
                cmdline_exc = ("cmdline: '%s' raised exception\n%s" %
                               (cmdline, traceback.format_exc()))
                warn(cmdline_exc)

    try:
        cfg = cloudinit.get_base_cfg(cfg_path)
    except Exception as e:
        warn("Failed to get base config. falling back to builtin: %s\n" % e)
        try:
            cfg = cloudinit.get_builtin_cfg()
        except Exception as e:
            warn("Unable to load builtin config\n")
            raise

    try:
        (outfmt, errfmt) = CC.get_output_cfg(cfg, "init")
        CC.redirect_output(outfmt, errfmt)
    except Exception as e:
        warn("Failed to get and set output config: %s\n" % e)

    cloudinit.logging_set_from_cfg(cfg)
    log = logging.getLogger()

    if cmdline_exc:
        log.debug(cmdline_exc)
    elif cmdline_msg:
        log.debug(cmdline_msg)

    try:
        cloudinit.initfs()
    except Exception as e:
        warn("failed to initfs, likely bad things to come: %s\n" % str(e))

    nonet_path = "%s/%s" % (cloudinit.get_cpath("data"), "no-net")

    if cmd == "start":
        print netinfo.debug_info()

        stop_files = (cloudinit.get_ipath_cur("obj_pkl"), nonet_path)
        # if starting as the network start, there are cases
        # where everything is already done for us, and it makes
        # most sense to exit early and silently
        for f in stop_files:
            try:
                fp = open(f, "r")
                fp.close()
            except:
                continue

            log.debug("no need for cloud-init start to run (%s)\n", f)
            sys.exit(0)
    elif cmd == "start-local":
        # cache is not instance specific, so it has to be purged
        # but we want 'start' to benefit from a cache if
        # a previous start-local populated one
        manclean = util.get_cfg_option_bool(cfg, 'manual_cache_clean', False)
        if manclean:
            log.debug("not purging cache, manual_cache_clean = True")
        cloudinit.purge_cache(not manclean)

        try:
            os.unlink(nonet_path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    msg = "cloud-init %s running: %s. up %s seconds" % (cmd, now, uptime)
    sys.stderr.write(msg + "\n")
    sys.stderr.flush()

    log.info(msg)

    cloud = cloudinit.CloudInit(ds_deps=deps[cmd])

    try:
        cloud.get_data_source()
    except cloudinit.DataSourceNotFoundException as e:
        sys.stderr.write("no instance data found in %s\n" % cmd)
        sys.exit(0)

    # set this as the current instance
    cloud.set_cur_instance()

    # store the metadata
    cloud.update_cache()

    msg = "found data source: %s" % cloud.datasource
    sys.stderr.write(msg + "\n")
    log.debug(msg)

    # parse the user data (ec2-run-userdata.py)
    try:
        ran = cloud.sem_and_run("consume_userdata", cloudinit.per_instance,
                                cloud.consume_userdata,
                                [cloudinit.per_instance], False)
        if not ran:
            cloud.consume_userdata(cloudinit.per_always)
    except:
        warn("consuming user data failed!\n")
        raise

    cfg_path = cloudinit.get_ipath_cur("cloud_config")
    cc = CC.CloudConfig(cfg_path, cloud)

    # if the output config changed, update output and err
    try:
        outfmt_orig = outfmt
        errfmt_orig = errfmt
        (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, "init")
        if outfmt_orig != outfmt or errfmt_orig != errfmt:
            warn("stdout, stderr changing to (%s,%s)" % (outfmt, errfmt))
            CC.redirect_output(outfmt, errfmt)
    except Exception as e:
        warn("Failed to get and set output config: %s\n" % e)

    # send the cloud-config ready event
    cc_path = cloudinit.get_ipath_cur('cloud_config')
    cc_ready = cc.cfg.get("cc_ready_cmd", [
        'initctl', 'emit', 'cloud-config',
        '%s=%s' % (cloudinit.cfg_env_name, cc_path)
    ])
    if cc_ready:
        if isinstance(cc_ready, str):
            cc_ready = ['sh', '-c', cc_ready]
        subprocess.Popen(cc_ready).communicate()

    module_list = CC.read_cc_modules(cc.cfg, "cloud_init_modules")

    failures = []
    if len(module_list):
        failures = CC.run_cc_modules(cc, module_list, log)
    else:
        msg = "no cloud_init_modules to run"
        sys.stderr.write(msg + "\n")
        log.debug(msg)
        sys.exit(0)

    sys.exit(len(failures))
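
# Invocation sketch for this entry point, per the command table above (the
# installed script name is assumed):
#   cloud-init start                      needs filesystem + network datasource deps
#   cloud-init start-local                filesystem-only; purges the cache unless
#                                         manual_cache_clean is set
#   cloud-init start ./config/cloud.cfg   optional cfg path, for debugging only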