def configure(self):
    """
    [tcpbuffers]
    # increase TCP max buffer size setable using setsockopt()
    # 16 MB with a few parallel streams is recommended for most 10G paths
    # 32 MB might be needed for some very long end-to-end 10G or 40G paths
    net.core.rmem_max = 16777216
    net.core.wmem_max = 16777216
    # increase Linux autotuning TCP buffer limits
    # min, default, and max number of bytes to use
    # (only change the 3rd value, and make it 16 MB or more)
    net.ipv4.tcp_rmem = 4096 87380 16777216
    net.ipv4.tcp_wmem = 4096 65536 16777216
    # recommended to increase this for 10G NICS
    net.core.netdev_max_backlog = 30000
    # these should be the default, but just to be sure
    net.ipv4.tcp_timestamps = 1
    net.ipv4.tcp_sack = 1
    """
    # Apply each TCP tunable present in the [tcpbuffers] user-data section
    # via sysctl -w; keys absent from the section are left untouched.
    cfg = self.ud.getSection('tcpbuffers')
    for var in ['net.core.rmem_max',
                'net.core.wmem_max',
                'net.ipv4.tcp_rmem',
                'net.ipv4.tcp_wmem',
                'net.core.netdev_max_backlog',
                'net.ipv4.tcp_timestamps',
                'net.ipv4.tcp_sack']:
        if var in cfg:
            # BUG FIX: the original indexed cfg with the literal string
            # 'var' (cfg['var']) instead of the loop variable, which would
            # raise KeyError / write a bogus value for every tunable.
            util.call(['/sbin/sysctl', '-w', "%s=%s" % (var, cfg[var])])
def configure(self):
    """Download and unpack the EC2 Xen kernel module bundle matching the
    running kernel version and architecture, then refresh module
    dependencies with depmod. Unknown kernel/arch combinations are a no-op.
    """
    baseurl = 'http://s3.amazonaws.com/ec2-downloads/'
    # (kernel version, machine arch) -> tarball name. Renamed from 'map'
    # to stop shadowing the builtin.
    modules = {
        ('2.6.16', 'i686'): 'modules-2.6.16-ec2.tgz',
        ('2.6.16.33', 'x86_64'): 'ec2-modules-2.6.16.33-xenU-x86_64.tgz',
        ('2.6.18', 'i686'): 'ec2-modules-2.6.18-xenU-i686.tgz',
        ('2.6.18', 'x86_64'): 'ec2-modules-2.6.18-xenU-x86_64.tgz',
    }
    uname = os.uname()
    version = uname[2].split('-')[0]
    arch = uname[4]
    key = (version, arch)
    if key not in modules:
        # No module bundle published for this kernel/arch.
        return
    url = baseurl + modules[key]
    # SECURITY FIX: tempfile.mktemp() is race-prone (the name can be taken
    # between creation and use); mkstemp() creates the file atomically.
    fd, tmpname = tempfile.mkstemp()
    os.close(fd)
    try:
        tarball = util.urlgrab(url, filename=tmpname)
        # tarfile.open(..., 'r:gz') is the documented public spelling of
        # the legacy TarFile.gzopen classmethod.
        tf = tarfile.open(tarball, 'r:gz')
        try:
            for member in tf.getmembers():
                tf.extract(member, path='/')
        finally:
            # FIX: the original leaked the open tarfile handle.
            tf.close()
    finally:
        # FIX: the original left the downloaded archive on disk.
        try:
            os.unlink(tmpname)
        except OSError:
            pass
    util.call(['depmod', '-a'])
def configure(self):
    """Set the machine hostname.

    Uses the value from the [hostname] user-data section when present;
    otherwise falls back to the EC2-reported local hostname. If neither
    is available the hostname is left unchanged.
    """
    section = self.ud.getSection('hostname')
    if 'hostname' in section:
        name = section['hostname']
    else:
        try:
            name = self.id.getLocalHostname()
        except errors.EC2DataRetrievalError:
            # No configured name and no metadata: nothing to do.
            return
    util.call(['hostname', name])
def configure(self):
    """
    [amildap]
    base = <dn>
    url = <url>
    """
    # Push each configured LDAP option into /etc/ldap.conf via the
    # amiconfig helper, making sure the file exists first.
    section = self.ud.getSection('amildap')
    for option in ('base', 'url'):
        if option not in section:
            continue
        util.call(['/bin/touch', '/etc/ldap.conf'])
        util.call(['/usr/sbin/amiconfig-helper', '-f', '/etc/ldap.conf',
                   '%s=%s' % (option, section[option])])
def configure(self):
    """
    [rbuilderstorage]
    # optional list of ':' seperated dirs
    relocate-paths = /srv:/var/rmake
    """
    try:
        blkdevmap = self.id.getBlockDeviceMapping()
    # NOTE(review): the sibling storage plugin catches
    # errors.EC2DataRetrievalError; confirm this bare name is really
    # imported into this module, otherwise the except itself will
    # NameError when triggered.
    except EC2DataRetrievalError:
        return
    # NOTE(review): docstring advertises [rbuilderstorage] but the code
    # reads the [storage] section -- confirm which is intended.
    cfg = self.ud.getSection("storage")
    ephemeralDevs = []
    for key, dev in blkdevmap.iteritems():
        if "ephemeral" in key:
            mntpnt = "/ephemeral/%s" % key[9:]
            # ephemeral device names are not correct for our kernel
            if not os.path.exists("/dev/%s" % dev):
                dev = dev.replace("sd", "xvd")
            ephemeralDevs.append(("/dev/%s" % dev, mntpnt))
    # BUG FIX: the original indexed ephemeralDevs[0] and ephemeralDevs[1]
    # unconditionally and raised IndexError on instance types exposing
    # fewer than two ephemeral devices.
    if len(ephemeralDevs) < 2:
        return
    relocatePaths = ["/srv", "/var/rmake"]
    if "relocate-paths" in cfg:
        relocatePaths = cfg["relocate-paths"].split(":")
    # First ephemeral device becomes the vg00 LVM scratch volume group.
    scratchDev = ephemeralDevs[0][0]
    os.system("pvcreate %s" % scratchDev)
    os.system("vgcreate vg00 %s" % scratchDev)
    # Second device is mounted for mass storage; the configured paths are
    # moved onto it and replaced by symlinks.
    (dev, mntpnt) = ephemeralDevs[1]
    util.mkdirChain(mntpnt)
    util.call(["mount", dev, mntpnt])
    for relocPath in relocatePaths:
        if os.path.exists(relocPath) and not os.path.islink(relocPath):
            util.movetree(relocPath, "%s/%s" % (mntpnt, relocPath))
            os.symlink("%s/%s" % (mntpnt, relocPath), relocPath)
def configure(self):
    """
    [hepix]
    # contextualization tar ball name
    context = <string>
    requiressl = <string>
    """
    # Run the site contextualization script with the configured tarball
    # name and SSL requirement, falling back to sane defaults.
    section = self.ud.getSection('hepix')
    tarname = section['context'] if 'context' in section else 'default.tgz'
    requiressl = section['requiressl'] if 'requiressl' in section else 'no'
    util.call(['/usr/sbin/site_context', tarname, requiressl])
def configure(self):
    """
    [nss]
    passwd = files
    group = files
    shadow = files
    hosts = files
    bootparams = nisplus [NOTFOUND=return] files
    ethers = files
    netmasks = files
    networks = files
    protocols = files
    rpc = files
    services = files
    netgroup = nisplus
    publickey = nisplus
    automount = files nisplus
    aliases = files nisplus
    """
    # Update each configured NSS database entry in /etc/nsswitch.conf.
    section = self.ud.getSection('nss')
    databases = ('passwd', 'group', 'shadow', 'hosts', 'bootparams',
                 'ethers', 'netmasks', 'networks', 'protocols', 'rpc',
                 'services', 'netgroup', 'publickey', 'automount',
                 'aliases')
    for db in databases:
        if db not in section:
            continue
        util.call(['/usr/sbin/amiconfig-helper',
                   '-f', '/etc/nsswitch.conf',
                   '%s:=%s' % (db, section[db])])
def configure(self):
    """
    [yp]
    ypserver = hostname
    """
    # Configure /etc/yp.conf, preferring (in order): an explicit
    # ypserver, a domain with a named server, then domain broadcast.
    section = self.ud.getSection('yp')
    helper = ['/usr/sbin/amiconfig-helper', '-f', '/etc/yp.conf']
    if 'ypserver' in section:
        util.call(helper + ['%s=%s' % ('ypserver', section['ypserver'])])
        return
    if 'server' in section and 'domain' in section:
        util.call(helper + ['%s="%s server %s"'
                            % ('domain', section['domain'], section['server'])])
        return
    if 'domain' in section:
        util.call(helper + ['%s="%s broadcast"' % ('domain', section['domain'])])
        return
def configure(self):
    """Fetch and install the EC2 Xen kernel module tarball for the
    running kernel, then run depmod. No-op for unknown kernel/arch pairs.
    """
    baseurl = 'http://s3.amazonaws.com/ec2-downloads/'
    # Renamed from 'map' to avoid shadowing the builtin.
    modules = {('2.6.16', 'i686'): 'modules-2.6.16-ec2.tgz',
               ('2.6.16.33', 'x86_64'): 'ec2-modules-2.6.16.33-xenU-x86_64.tgz',
               ('2.6.18', 'i686'): 'ec2-modules-2.6.18-xenU-i686.tgz',
               ('2.6.18', 'x86_64'): 'ec2-modules-2.6.18-xenU-x86_64.tgz'}
    uname = os.uname()
    version = uname[2].split('-')[0]
    arch = uname[4]
    key = (version, arch)
    if key not in modules:
        return
    url = baseurl + modules[key]
    # SECURITY FIX: replace the race-prone tempfile.mktemp() with
    # mkstemp(), which atomically creates the file.
    fd, tmpname = tempfile.mkstemp()
    os.close(fd)
    try:
        tarball = util.urlgrab(url, filename=tmpname)
        tf = tarfile.open(tarball, 'r:gz')
        try:
            for member in tf.getmembers():
                tf.extract(member, path='/')
        finally:
            # FIX: close the tarfile handle (was leaked).
            tf.close()
    finally:
        # FIX: delete the downloaded archive (was left on disk).
        try:
            os.unlink(tmpname)
        except OSError:
            pass
    util.call(['depmod', '-a'])
def configure(self): """ [condor] # master host name condor_master = <FQDN> # shared secret key condor_secret = <string> #----------------------# # host name hostname = <FQDN> # collector name collector_name = CernVM # condor user condor_user = condor # condor group condor_group = condor # condor directory condor_dir = ~condor/condor # condor admin condor_admin = root@master highport = 9700 lowport = 9600 uid_domain = <hostname> filesystem_domain = <hostname> # allow_write = *.$uid_domain # localconfig = <filename> # slots = 1 # slot_user = condor # cannonical_user = condor extra_vars = use_ips = """ cfg = self.ud.getSection('condor') if 'hostname' in cfg: hostname = cfg['hostname'] util.call(['hostname', hostname]) # Array of lines of the condor_config.local file (will be rewritten) output = [] # Dictionary of entries to go in the condor_config file (will be updated) condor_config_entries = { 'NO_DNS': None, 'DEFAULT_DOMAIN_NAME': None, 'NETWORK_INTERFACE': None } output.append('# Generated using the CernVM amiconfig Condor plugin') # # We are now getting the assigned hostname (i.e., what this host thinks # its name is), the real hostname (the FQDN) and the IP address (the # one from which outbound connections are generated). # # This heuristics is needed to work around mismatches in the assigned # and real hostnames. # # Configured hostname assigned_hostname = socket.gethostname() output.append("# Assigned hostname: %s" % assigned_hostname) # IP address used for outbound connections. 
Using a dummy UDP # IPv4 socket to a known IP (not opening any actual connection) s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect( ('8.8.8.8', 53) ) real_ip = s.getsockname()[0] s.close() # Hostname obtained through reverse lookup from the IP #real_hostname = socket.gethostbyaddr(real_ip)[0] real_hostname = socket.getfqdn() output.append("# Real hostname: %s" % real_hostname) # Option to always use IP addresses use_ips = ('use_ips' in cfg) and (cfg['use_ips'] == 'true') if use_ips: output.append("# Always using IP addresses per user's choice") condor_config_entries['NO_DNS'] = 'True' condor_config_entries['DEFAULT_DOMAIN_NAME'] = 'virtual-analysis-facility' condor_config_entries['NETWORK_INTERFACE'] = real_ip condor_master = "" if 'condor_master' in cfg: # We are on a worker condor_master = cfg['condor_master'] output.append('DAEMON_LIST = MASTER, STARTD') else: # We are on the Condor Master # If there's a mismatch between "real" and "assigned" hostname, use # the IP address if use_ips or (assigned_hostname != real_hostname): condor_master = real_ip else: condor_master = assigned_hostname output.append('DAEMON_LIST = COLLECTOR, MASTER, NEGOTIATOR, SCHEDD') condor_domain = real_hostname.partition('.')[2] if condor_domain == '': condor_domain = '*' output.append("CONDOR_HOST = %s" % (condor_master)) if 'condor_admin' in cfg: output.append("CONDOR_ADMIN = %s" % (cfg['condor_admin'])) else: output.append("CONDOR_ADMIN = root@%s" % (condor_master)) if 'uid_domain' in cfg: if cfg['uid_domain'] == '*': output.append("") output.append("# Preserve UID of submitting user") output.append("UID_DOMAIN = *") output.append("TRUST_UID_DOMAIN = True") output.append("SOFT_UID_DOMAIN = True") output.append("") else: output.append("UID_DOMAIN = %s" % (cfg['uid_domain'])) else: output.append("UID_DOMAIN = %s" % condor_domain) condor_user = '******' condor_group = 'condor' if 'condor_user' in cfg: condor_user = cfg['condor_user'] if 'condor_group' in cfg: condor_group = 
cfg['condor_group'] os.system("/usr/sbin/groupadd %s 2>/dev/null" % (condor_group)) os.system("/usr/sbin/useradd -m -g %s %s > /dev/null 2>&1" % (condor_group, condor_user)) os.system("/bin/chown -R %s:%s /var/lib/condor /var/log/condor /var/run/condor /var/lock/condor" % (condor_user, condor_group)) condor_user_id = pwd.getpwnam(condor_user)[2] condor_group_id = grp.getgrnam(condor_group)[2] output.append("CONDOR_IDS = %s.%s" % (condor_user_id, condor_group_id)) output.append("QUEUE_SUPER_USERS = root, %s" % (condor_user)) condor_dir = pwd.getpwnam(condor_user)[5] if 'condor_dir' in cfg: condor_dir = cfg['condor_dir'] os.system('mkdir -p ' + condor_dir + '/run/condor' + ' ' \ + condor_dir + '/log/condor' + ' ' \ + condor_dir + '/lock/condor' + ' ' \ + condor_dir + '/lib/condor/spool' + ' ' \ + condor_dir + '/lib/condor/execute') os.system("chown -R %s:%s %s" % (condor_user, condor_group, condor_dir)) os.system("chmod 755 %s" % (condor_dir)) output.append("LOCAL_DIR = %s" % (condor_dir)) condor_highport = '9700' condor_lowport = '9600' if 'highport' in cfg: condor_highport = cfg['highport'] if 'lowport' in cfg: condor_lowport = cfg['lowport'] output.append("HIGHPORT = %s" % (condor_highport)) output.append("LOWPORT = %s" % (condor_lowport)) if 'collector_name' in cfg: output.append("COLLECTOR_NAME = %s" % (cfg['collector_name'])) if 'allow_write' in cfg: output.append("ALLOW_WRITE = %s" % (cfg['allow_write'])) #if 'localconfig' in cfg: # output.append("CONFIG_CONDOR_LOCALCONFIG=%s" % (cfg['localconfig'])) #if 'slots' in cfg: # output.append("CONFIG_CONDOR_SLOTS=%s" % (cfg['slots'])) #if 'slot_user' in cfg: # output.append("CONFIG_CONDOR_SLOT_USER=%s" % (cfg['slot_user'])) #if 'cannonical_user' in cfg: # output.append("CONFIG_CONDOR_MAP=%s" % (cfg['cannonical_user'])) if 'extra_vars' in cfg: output = output + cfg['extra_vars'].split(','); # Mangle the main condor_config configuration file conf_file_name = '/etc/condor/condor_config' conf_file_bak = conf_file_name + 
'.0' try: os.rename( conf_file_name, conf_file_bak ) except OSError as e: print "Cannot rename %s to %s: %s" % (conf_file_name, conf_file_bak, e) return try: fo = open(conf_file_name, 'a') fi = open(conf_file_bak, 'r') for line in fi: omit = False for key in condor_config_entries: # Check for the equiv. of ^KEY[ \t=] new_line = line.lstrip() len_key = len(key) next_char = new_line[len_key:len_key+1] if new_line.startswith(key) and ( next_char == ' ' or next_char == '\t' or next_char == '=' ): omit = True if omit == False: fo.write( line.rstrip() + '\n' ) for key,val in condor_config_entries.iteritems(): if val is None: break fo.write( "%s = %s\n" % (key, val) ) fi.close() fo.close() except IOError as e: print "Error while modifying main configuration file: %s" % e return try: os.remove(conf_file_bak) except OSError as e: print "Cannot remove %s" % conf_file_bak # non-fatal # Write the condor_config.local configuration file if len(output): f = open('/etc/condor/condor_config.local', 'w') f.write('\n'.join(output)) f.close() # Condor secret can be written only after creating the config file if 'condor_secret' in cfg: os.system("/usr/sbin/condor_store_cred add -c -p %s > /dev/null" % (cfg['condor_secret'])) # We can start Condor now os.system("/sbin/chkconfig condor on") os.system("/sbin/service condor restart")
def configure(self):
    """
    [storage]
    # disable the spacedaemon
    daemon = False
    # size in GB
    pre-allocated-space = 20
    # list of ':' seperated dirs
    relocate-paths = /srv/rmake-builddir:/srv/mysql
    """
    try:
        blkdevmap = self.id.getBlockDeviceMapping()
    except errors.EC2DataRetrievalError:
        return
    cfg = self.ud.getSection('storage')

    # Always mount swap
    if 'swap' in blkdevmap:
        swap = blkdevmap['swap']
        util.call(['swapon', swap])

    ephemeralDevs = []
    for key, dev in blkdevmap.iteritems():
        if 'ephemeral' in key:
            mntpnt = '/ephemeral/%s' % key[9:]
            ephemeralDevs.append(('/dev/%s' % dev, mntpnt))

    relocatePaths = []
    if 'relocate-paths' in cfg:
        relocatePaths = cfg['relocate-paths'].split(':')

    ephemeralDevsCount = len(ephemeralDevs)
    relocatePathsCount = len(relocatePaths)
    if ephemeralDevsCount < 1:
        return

    # Spread the relocated paths evenly across the ephemeral devices.
    pathsPerDev = relocatePathsCount
    if ephemeralDevsCount > 1 and relocatePathsCount > 1:
        # BUG FIX: math.ceil returns a float and the result is later fed
        # to range(); cast to int.
        pathsPerDev = int(math.ceil(relocatePathsCount / float(ephemeralDevsCount)))

    # The ephemeral space is a sparse file on an independent spindle. To
    # increase performance you want to create a file under the ephemeral
    # mount point to pre allocate the sparse file.
    size = 0
    if 'pre-allocated-space' in cfg:
        # size is in GB
        size = int(cfg['pre-allocated-space'])

    # Get daemon configuration.
    # BUG FIX: the original used bool(cfg['daemon']), but config values
    # are strings and bool('False') is True, so the documented
    # "daemon = False" setting could never disable the space daemon.
    daemon = True
    if 'daemon' in cfg:
        daemon = str(cfg['daemon']).strip().lower() not in ('false', '0', 'no', 'off')

    paths = []
    for i, (dev, mntpnt) in enumerate(ephemeralDevs):
        util.mkdirChain(mntpnt)
        util.call(['mount', dev, mntpnt])
        if daemon:
            paths.append(mntpnt)
        else:
            # Pre-allocate the sparse backing file up front (MB units).
            fh = util.createUnlinkedTmpFile(mntpnt)
            util.growFile(fh, size * 1024)
            fh.close()
        # Relocate this device's share of the paths onto it, leaving a
        # symlink behind (already-relocated paths are symlinks, skipped).
        for j in range((i + 1) * pathsPerDev):
            if relocatePathsCount > j and os.path.exists(relocatePaths[j]) \
                    and not os.path.islink(relocatePaths[j]):
                util.movetree(relocatePaths[j],
                              '%s/%s' % (mntpnt, relocatePaths[j]))
                os.symlink('%s/%s' % (mntpnt, relocatePaths[j]),
                           relocatePaths[j])

    if daemon and len(paths) > 0:
        # Run the space daemon (source, not bytecode) over the mounts.
        exe = spacedaemon.__file__
        if exe.endswith('.pyc'):
            exe = exe[:-1]
        cmd = [exe, str(size * 1024)]
        cmd.extend(paths)
        util.call(cmd)
def configure(self): """ [cernvm] # entitlement key entitlement_key = 289a919c-9a97-44a9-a07d-473850bd5730 # contextualization key contextualization_key = de4248a0-3fc9-463b-a66f-88f7bc935b11 # path to contextualization command contextualization_command = /path/to/script.sh # url to retrieve initial CernVM configuration # config_url = <url> # list of ',' seperated organisations/experiments organisations = alice,atlas # install group profile group_profile = group-<org>[-desktop] # list of ',' seperated repositories repositories = alice,atlas,grid # extra repositories, comma-separated; each field has: # name|server|<base64_encoded_pubkey> extra_repositories = name|server|<base64_encoded_pubkey>,name2|server2|<base64_encoded_pubkey2> # CernVM user name:group:password users = testalice:alice:12345test,testatlas:atlas:12345atlas # CernVM user shell </bin/bash|/bin/tcsh> shell = /bin/bash # Automatically login CernVM user to GUI auto_login = on # CVMFS HTTP proxy http://<host>:<port>;DIRECT proxy = DIRECT # list of ',' seperated services to start services = <list> # extra environment variables to define environment = CMS_SITECONFIG=CERN,CMS_ROOT=/opt/cms # CernVM edition Basic|Desktop edition = Basic # CernVM screen Resolution screenRes = 1024x768 # Start XDM on boot on|off startXDM = off # Keyboard keyboard = us # GRID UI version gridUiVersion = default """ cfg = self.ud.getSection('cernvm') group_profile = '' if 'group_profile' in cfg: group_profile = cfg['group_profile'] call(['/etc/cernvm/config', '-g', '%s' % (group_profile)]) entitlement_key = '' if 'entitlement_key' in cfg: entitlement_key = cfg['entitlement_key'] self.writeConfigToFile( "/etc/cvmfs/site.conf", 'CVMFS_ENTITLEMENT_KEY',entitlement_key,"=") contextualization_key = '' if 'contextualization_key' in cfg: contextualization_key = cfg['contextualization_key'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_CONTEXTUALIZATION_KEY',contextualization_key,"=") contextualization_cmd = '' if 
'contextualization_command' in cfg: contextualization_cmd = cfg['contextualization_command'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_CONTEXTUALIZATION_COMMAND', contextualization_cmd,"=") organisations = '' if 'organisations' in cfg: organisations = cfg['organisations'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_ORGANISATION',organisations,"=") repositories = '' if 'repositories' in cfg: repositories = cfg['repositories'] self.writeConfigToFile( "/etc/cvmfs/site.conf", 'CVMFS_REPOSITORIES',repositories,"=") extra_repositories = cfg.get('extra_repositories', None) if extra_repositories is not None: for entry in extra_repositories.split(','): parsed_entry = entry.split('|') if len(parsed_entry) == 3: r_name, r_serv, r_key_b64 = parsed_entry try: r_key = base64.b64decode(r_key_b64) except Exception: # malformed b64 continue # Write configuration f = None try: try: f = open('/etc/cvmfs/config.d/%s.conf'%r_name, 'w') f.write( 'CVMFS_SERVER_URL=http://%s/cvmfs/%s\n' % (r_serv, r_name) ) f.write( 'CVMFS_HTTP_PROXY=DIRECT\n' ) except IOError, e: print "Cannot write configuration for CVMFS repo %s" % r_name pass finally: if f is not None: f.close() # Write key f = None try: try: f = open('/etc/cvmfs/keys/%s.pub'%r_name, 'w') f.write(r_key) f.write('\n') except IOError, e: print "Cannot write pubkey for CVMFS repo %s" % r_name pass finally: if f is not None: f.close() screenRes = '' if 'screenres' in cfg: screenRes = cfg['screenres'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_SCREEN_RES',screenRes,"=") startXDM = '' if 'startxdm' in cfg: startXDM = cfg['startxdm'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_START_XDM',startXDM,"=") edition = '' if 'edition' in cfg: edition = cfg['edition'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_EDITION',edition,"=") keyboard = '' if 'keyboard' in cfg: keyboard = cfg['keyboard'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_KEYBOARD',keyboard,"=") 
gridUiVersion = '' if 'griduiversion' in cfg: gridUiVersion = cfg['griduiversion'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_GRID_UI_VERSION',gridUiVersion,"=") #config_url = '' #if 'config_url' in cfg: # config_url = cfg['config_url'] # self.writeConfigToFile( # "/etc/cernvm/site.conf", # 'CERNVM_CONFIG_URL',config_url,"=") proxy = '' if 'proxy' in cfg: proxy = cfg['proxy'] self.writeConfigToFile( "/etc/cvmfs/site.conf", 'CVMFS_HTTP_PROXY',proxy,"=") services = '' if 'services' in cfg: services = cfg['services'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_SERVICES',services,"=") shell = '/bin/bash' if 'shell' in cfg: shell = cfg['shell'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_USER_SHELL',shell,"=") autoLogin = '******' if 'auto_login' in cfg: autoLogin = cfg['auto_login'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_AUTOLOGIN',autoLogin,"=") if 'desktop_icons' in cfg: desktopIcons = cfg['desktop_icons'] self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_DESKTOP_ICONS', desktopIcons, "=") util.call(['/etc/cernvm/config','-y']) environment = '' vars = '' if 'environment' in cfg: environment = cfg['environment'] for entry in environment.split(','): (var,val) = entry.split('=') self.writeConfigToFile( "/etc/cernvm/environment.conf",var,val,"=") vars += '+' + var self.writeConfigToFile( "/etc/cernvm/site.conf",'CERNVM_ENVIRONMENT_VARS',vars,'=') users = '' first = 1 eosUser = None x509User = None if 'users' in cfg: users = cfg['users'] for entry in users.split(','): (username,group,password) = entry.split(':') if not len(password): password = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for x in range(8)) if first: self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_USER',username,"=") self.writeConfigToFile( "/etc/cernvm/site.conf", 'CERNVM_USER_GROUP',group,"=") first = 0 x509User = username eosUser = username call(['/etc/cernvm/config', '-u', '%s' % 
(username), '%s' % (shell), '%s' % (password), '%s' % (group)]) certUserField = 'x509-user' if certUserField in cfg: x509User = cfg[certUserField] if x509User is None: # Fallback to root x509User = '******' certFileField = 'x509-cert-file' if certFileField in cfg and x509User is not None: pw = pwd.getpwnam(x509User) x509CertFile = '/tmp/x509up_u' + str(pw.pw_uid) eosx509CertFile = x509CertFile shutil.copy2(cfg[certFileField], x509CertFile) os.chmod(x509CertFile,stat.S_IREAD|stat.S_IWRITE) os.chown(x509CertFile,pw.pw_uid,pw.pw_gid) certField = 'x509-cert' if certField in cfg and x509User is not None: x509Cert = cfg[certField] try: x509Cert = base64.decodestring(x509Cert) except: # Malformed base64 data. We ignore it. return pw = pwd.getpwnam(x509User) x509CertFile = '/tmp/x509up_u' + str(pw.pw_uid) eosx509CertFile = x509CertFile file(x509CertFile, "w").write(x509Cert) os.chmod(x509CertFile,stat.S_IREAD|stat.S_IWRITE) os.chown(x509CertFile,pw.pw_uid,pw.pw_gid) eosUserField = 'eos-user' if eosUserField in cfg: eosUser = cfg[eosUserField] eosCertField = 'eos-x509-cert' if eosCertField in cfg: eosx509Cert = cfg[eosCertField] try: eosx509Cert = base64.decodestring(eosx509Cert) except: # Malformed base64 data. We ignore it. 
return pw = pwd.getpwnam(eosUser) eosx509CertFile = '/tmp/x509up_u' + str(pw.pw_uid) + '.eos' file(eosx509CertFile, "w").write(eosx509Cert) os.chmod(eosx509CertFile,stat.S_IREAD|stat.S_IWRITE) os.chown(x509CertFile,pw.pw_uid,pw.pw_gid) field = 'eos-readaheadsize' eosReadAheadSize = 4000000 if field in cfg: eosReadAheadSize = cfg[field] field = 'eos-readcachesize' eosReadCacheSize = 16000000 if field in cfg: eosReadCacheSize = cfg[field] srvField = 'eos-server' if srvField in cfg and eosUser is not None: server = cfg[srvField] util.mkdirChain('/eos') util.call(['/bin/chown',eosUser,'/eos']) util.call(['/sbin/modprobe','fuse']) cmd='/usr/bin/env X509_CERT_DIR=/cvmfs/grid.cern.ch/etc/grid-security/certificates X509_USER_PROXY=%s EOS_READAHEADSIZE=%s EOS_READCACHESIZE=%s /usr/bin/eosfsd /eos -oallow_other,kernel_cache,attr_timeout=30,entry_timeout=30,max_readahead=131072,max_write=4194304,fsname=eos root://%s//eos/' % (eosx509CertFile,eosReadAheadSize,eosReadCacheSize,server) util.call(cmd.split()) if edition == 'Desktop': util.call(['/etc/cernvm/config','-x']) util.call(['/sbin/telinit','5']) util.call(['/sbin/service cernvm start'])