def _enable_xacml(self):
    """
    Enable authorization services using xacml protocol
    """
    self.log("Updating " + GSI_AUTHZ_LOCATION, level=logging.INFO)

    gsi_contents = "globus_mapping liblcas_lcmaps_gt4_mapping.so lcmaps_callout\n"

    if not utilities.atomic_write(GSI_AUTHZ_LOCATION, gsi_contents):
        self.log("Error while writing to " + GSI_AUTHZ_LOCATION,
                 level=logging.ERROR)
        raise exceptions.ConfigureError("Error while writing to " +
                                        GSI_AUTHZ_LOCATION)

    self.log("Updating " + GUMS_CLIENT_LOCATION, level=logging.INFO)

    location_re = re.compile("^gums.location=.*$", re.MULTILINE)
    authz_re = re.compile("^gums.authz=.*$", re.MULTILINE)
    if not validation.valid_file(GUMS_CLIENT_LOCATION):
        gums_properties = "gums.location=https://%s:8443" % (self.options['gums_host'].value)
        gums_properties += "/gums/services/GUMSAdmin\n"
        gums_properties += "gums.authz=https://%s:8443" % (self.options['gums_host'].value)
        gums_properties += "/gums/services/GUMSXACMLAuthorizationServicePort"
    else:
        gums_properties = open(GUMS_CLIENT_LOCATION).read()
        replacement = "gums.location=https://%s:8443" % (self.options['gums_host'].value)
        replacement += "/gums/services/GUMSAdmin"
        gums_properties = location_re.sub(replacement, gums_properties)
        replacement = "gums.authz=https://%s:8443" % (self.options['gums_host'].value)
        replacement += "/gums/services/GUMSXACMLAuthorizationServicePort"
        gums_properties = authz_re.sub(replacement, gums_properties)
    utilities.atomic_write(GUMS_CLIENT_LOCATION, gums_properties)
def _update_lcmaps_file(self, gums=True):
    """
    Update lcmaps file and give appropriate messages if lcmaps.db.rpmnew exists
    """
    warning_message = """It appears that you've updated the lcmaps RPM and the configuration has changed.
If you have ever edited /etc/lcmaps.db by hand (most people don't), then you should:
  1. Edit /etc/lcmaps.db.rpmnew to make your changes again
  2. mv /etc/lcmaps.db.rpmnew /etc/lcmaps.db
If you haven't edited /etc/lcmaps.db by hand, then you can just use the new configuration:
  1. mv /etc/lcmaps.db.rpmnew /etc/lcmaps.db"""

    files_to_update = [LCMAPS_DB_LOCATION]
    rpmnew_file = LCMAPS_DB_LOCATION + ".rpmnew"
    if validation.valid_file(rpmnew_file):
        self.log(warning_message, level=logging.WARNING)
        files_to_update.append(rpmnew_file)

    for lcmaps_db_file in files_to_update:
        self.log("Updating " + lcmaps_db_file, level=logging.INFO)
        lcmaps_db = open(lcmaps_db_file).read()
        lcmaps_db = self._update_lcmaps_text(lcmaps_db, gums, self.options['gums_host'].value)
        utilities.atomic_write(lcmaps_db_file, lcmaps_db)
def _configure_condor_cron_ids(self):
    """Ensure UID/GID of cndrcron user is valid and is in the condor-cron configs

    :raise ConfigureError: if modifying condor-cron configs failed
    """
    # check the uid/gid in the condor_ids file
    condor_id_fname = "/etc/condor-cron/config.d/condor_ids"
    ids = open(condor_id_fname, "r", encoding="latin-1").read()
    id_regex = re.compile(r'^\s*CONDOR_IDS\s+=\s+(\d+)\.(\d+).*', re.MULTILINE)
    condor_ent = pwd.getpwnam('cndrcron')
    match = id_regex.search(ids)
    if (match is not None and
            (int(match.group(1)) != condor_ent.pw_uid or
             int(match.group(2)) != condor_ent.pw_gid)):
        self.log("Condor-cron uid/gid not correct, correcting", level=logging.ERROR)
        (ids, count) = id_regex.subn(
            "CONDOR_IDS = %s.%s" % (condor_ent.pw_uid, condor_ent.pw_gid),
            ids,
            1)
        if count == 0:
            self.log("Can't correct condor-cron uid/gid, please double check",
                     level=logging.ERROR)
        if not utilities.atomic_write(condor_id_fname, ids, encoding="latin-1"):
            raise exceptions.ConfigureError
    elif match is None:
        ids += "CONDOR_IDS = %d.%d\n" % (condor_ent.pw_uid, condor_ent.pw_gid)
        if not utilities.atomic_write(condor_id_fname, ids, encoding="latin-1"):
            raise exceptions.ConfigureError
def _write_lcmaps_file(self):
    self.log("Writing " + LCMAPS_DB_LOCATION, level=logging.INFO)

    if self.authorization_method == 'xacml':
        lcmaps_template_fn = 'lcmaps.db.gums'
    elif self.authorization_method == 'gridmap' or self.authorization_method == 'local-gridmap':
        lcmaps_template_fn = 'lcmaps.db.gridmap'
    elif self.authorization_method == 'vomsmap':
        if self.all_fqans:
            lcmaps_template_fn = 'lcmaps.db.vomsmap.allfqans'
        else:
            lcmaps_template_fn = 'lcmaps.db.vomsmap'
    else:
        assert False

    lcmaps_template_path = os.path.join(LCMAPS_DB_TEMPLATES_LOCATION, lcmaps_template_fn)

    if not validation.valid_file(lcmaps_template_path):
        msg = "lcmaps.db template file not found at %s; ensure lcmaps-db-templates >= 1.6.6-1.8" \
              " is installed or set edit_lcmaps_db=False" % lcmaps_template_path
        self.log(msg, level=logging.ERROR)
        raise exceptions.ConfigureError(msg)

    old_lcmaps_contents = utilities.read_file(LCMAPS_DB_LOCATION, default='')
    if old_lcmaps_contents and 'THIS FILE WAS WRITTEN BY OSG-CONFIGURE' not in old_lcmaps_contents:
        backup_path = LCMAPS_DB_LOCATION + '.pre-configure'
        self.log("Backing up %s to %s" % (LCMAPS_DB_LOCATION, backup_path), level=logging.WARNING)
        try:
            shutil.copy2(LCMAPS_DB_LOCATION, backup_path)
        except EnvironmentError as err:
            msg = "Unable to back up old lcmaps.db: " + str(err)
            self.log(msg, level=logging.ERROR)
            raise exceptions.ConfigureError(msg)

    lcmaps_contents = utilities.read_file(lcmaps_template_path)
    lcmaps_contents = (
        "# THIS FILE WAS WRITTEN BY OSG-CONFIGURE AND WILL BE OVERWRITTEN ON FUTURE RUNS\n"
        "# Set edit_lcmaps_db = False in the [%s] section of your OSG configuration to\n"
        "# keep your changes.\n" % self.config_section
        + lcmaps_contents.replace('@GUMSHOST@', str(self.options['gums_host'].value)))

    if not utilities.atomic_write(LCMAPS_DB_LOCATION, lcmaps_contents):
        msg = "Error while writing to " + LCMAPS_DB_LOCATION
        self.log(msg, level=logging.ERROR)
        raise exceptions.ConfigureError(msg)
def write_blah_disable_wn_proxy_renewal_to_blah_config(self):
    if os.path.exists(self.BLAH_CONFIG):
        contents = utilities.read_file(self.BLAH_CONFIG)
        contents = utilities.add_or_replace_setting(contents, "blah_disable_wn_proxy_renewal", "yes",
                                                    quote_value=True)
        utilities.atomic_write(self.BLAH_CONFIG, contents)
def set_pbs_pro_in_blah_config(self):
    if os.path.exists(self.BLAH_CONFIG):
        contents = utilities.read_file(self.BLAH_CONFIG)
        new_value = "yes" if self.opt_val('pbs_flavor') == "pro" else "no"
        contents = utilities.add_or_replace_setting(contents, "pbs_pro", new_value, quote_value=False)
        utilities.atomic_write(self.BLAH_CONFIG, contents)
def write_lsf_confpath_to_blah_config(self):
    if os.path.exists(self.BLAH_CONFIG):
        contents = utilities.read_file(self.BLAH_CONFIG)
        contents = utilities.add_or_replace_setting(contents, 'lsf_confpath', self.options['lsf_conf'].value,
                                                    quote_value=True)
        utilities.atomic_write(self.BLAH_CONFIG, contents)
def write_blah_disable_wn_proxy_renewal_to_blah_config(self):
    if os.path.exists(self.BLAH_CONFIG):
        contents = utilities.read_file(self.BLAH_CONFIG)
        for option, value in [("blah_disable_wn_proxy_renewal", "yes"),
                              ("blah_delegate_renewed_proxies", "no"),
                              ("blah_disable_limited_proxy", "yes")]:
            contents = utilities.add_or_replace_setting(contents, option, value, quote_value=True)
        utilities.atomic_write(self.BLAH_CONFIG, contents)
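# Illustrative only: the blah.config and condor-ce snippets above all lean on
# utilities.add_or_replace_setting to replace an existing "key=value" line in a
# config buffer or append one if it is missing, with quote_value controlling
# whether the value is written quoted (blah.config style) or bare (condor-style
# macros).  This is a minimal sketch of that assumed behavior, not the real
# osg-configure helper; names ending in _sketch are hypothetical.
import re

def add_or_replace_setting_sketch(contents, key, value, quote_value=True):
    # Format the assignment the way the target config expects it
    formatted = '%s="%s"' % (key, value) if quote_value else "%s=%s" % (key, value)
    pattern = re.compile(r'^\s*%s\s*=.*$' % re.escape(key), re.MULTILINE)
    if pattern.search(contents):
        # Replace only the first existing assignment, leave the rest untouched
        return pattern.sub(formatted, contents, count=1)
    # Setting absent: append it on its own line
    if contents and not contents.endswith("\n"):
        contents += "\n"
    return contents + formatted + "\n"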
def write_htcondor_ce_sentinel(self):
    if self.htcondor_gateway_enabled and utilities.ce_installed():
        contents = utilities.read_file(self.HTCONDOR_CE_CONFIG_FILE,
                                       default="# This file is managed by osg-configure\n")
        contents = utilities.add_or_replace_setting(contents, "OSG_CONFIGURED", "true", quote_value=False)
        utilities.atomic_write(self.HTCONDOR_CE_CONFIG_FILE, contents)
def write_gridmap_to_htcondor_ce_config(self):
    contents = utilities.read_file(HTCONDOR_CE_CONFIG_FILE,
                                   default="# This file is managed by osg-configure\n")
    if self.options['authorization_method'].value == 'xacml':
        # Remove GRIDMAP setting
        contents = re.sub(r'(?m)^\s*GRIDMAP\s*=.*?$[\n]?', "", contents)
    else:
        contents = utilities.add_or_replace_setting(contents, "GRIDMAP", "/etc/grid-security/grid-mapfile",
                                                    quote_value=False)
    utilities.atomic_write(HTCONDOR_CE_CONFIG_FILE, contents)
def configure(self, attributes):
    """
    Setup basic osg/vdt services
    """
    self.log("NetworkConfiguration.configure started")
    status = True

    header = "# This file is automatically generated by osg-configure\n"
    header += "# based on the settings in the [Network] section, please\n"
    header += "# make changes there instead of manually editing this file\n"
    source_settings_sh = ''
    source_settings_csh = ''
    port_settings_sh = ''
    port_settings_csh = ''
    if not utilities.blank(self.options['source_range'].value):
        source_settings_sh = "export GLOBUS_TCP_SOURCE_RANGE_STATE_FILE=%s\n" % \
                             self.options['source_state_file'].value
        source_settings_sh += "export GLOBUS_TCP_SOURCE_RANGE=%s\n" % \
                              self.options['source_range'].value
        source_settings_csh = "setenv GLOBUS_TCP_SOURCE_RANGE_STATE_FILE %s\n" % \
                              self.options['source_state_file'].value
        source_settings_csh += "setenv GLOBUS_TCP_SOURCE_RANGE %s\n" % \
                               self.options['source_range'].value

    if not utilities.blank(self.options['port_range'].value):
        port_settings_sh = "export GLOBUS_TCP_PORT_RANGE_STATE_FILE=%s\n" % \
                           self.options['port_state_file'].value
        port_settings_sh += "export GLOBUS_TCP_PORT_RANGE=%s\n" % \
                            self.options['port_range'].value
        port_settings_csh = "setenv GLOBUS_TCP_PORT_RANGE_STATE_FILE %s\n" % \
                            self.options['port_state_file'].value
        port_settings_csh += "setenv GLOBUS_TCP_PORT_RANGE %s\n" % \
                             self.options['port_range'].value

    contents = "#!/bin/sh\n" + header + source_settings_sh + port_settings_sh
    filename = os.path.join('/', 'var', 'lib', 'osg', 'globus-firewall')
    if not utilities.atomic_write(filename, contents):
        self.log("Error writing to %s" % filename, level=logging.ERROR)
        status = False
    filename = os.path.join('/', 'etc', 'profile.d', 'osg.sh')
    if not utilities.atomic_write(filename, contents):
        self.log("Error writing to %s" % filename, level=logging.ERROR)
        status = False

    contents = "#!/bin/csh\n" + header + source_settings_csh + port_settings_csh
    filename = os.path.join('/', 'etc', 'profile.d', 'osg.csh')
    if not utilities.atomic_write(filename, contents):
        self.log("Error writing to %s" % filename, level=logging.ERROR)
        status = False

    self.log("NetworkConfiguration.configure completed")
    return status
def _write_route_config_vars(self):
    """
    Write condor-ce config attributes for the bosco job route. Sets values for:
    - BOSCO_RMS
    - BOSCO_ENDPOINT
    """
    contents = utilities.read_file(self.HTCONDOR_CE_CONFIG_FILE,
                                   default="# This file is managed by osg-configure\n")
    contents = utilities.add_or_replace_setting(contents, "BOSCO_RMS", self.options['batch'].value,
                                                quote_value=False)
    contents = utilities.add_or_replace_setting(contents, "BOSCO_ENDPOINT", self.options['endpoint'].value,
                                                quote_value=False)
    utilities.atomic_write(self.HTCONDOR_CE_CONFIG_FILE, contents)
def setup_gram_config(self):
    """
    Populate the gram config file with correct values

    Returns True if successful, False otherwise
    """
    buf = open(LSFConfiguration.GRAM_CONFIG_FILE).read()
    for binfile in ['bsub', 'bqueues', 'bjobs', 'bhist', 'bacct', 'bkill']:
        bin_location = os.path.join(self.lsf_bin_location, binfile)
        if validation.valid_file(bin_location):
            buf = utilities.add_or_replace_setting(buf, binfile, bin_location)

    if self.options['seg_enabled'].value:
        if (self.options['log_directory'].value is None or
                not validation.valid_directory(self.options['log_directory'].value)):
            mesg = "%s is not a valid directory location " % self.options['log_directory'].value
            mesg += "for lsf log files"
            self.log(mesg,
                     section=self.config_section,
                     option='log_directory',
                     level=logging.ERROR)
            return False

        buf = utilities.add_or_replace_setting(buf, 'log_path', self.options['log_directory'].value)

    buf = utilities.add_or_replace_setting(buf, 'lsf_profile', self.options['lsf_profile'].value)

    if not utilities.atomic_write(LSFConfiguration.GRAM_CONFIG_FILE, buf):
        return False

    return True
def setup_gram_config(self):
    """
    Populate the gram config file with correct values

    Returns True if successful, False otherwise
    """
    contents = open(PBSConfiguration.GRAM_CONFIG_FILE).read()
    for binfile in ['qsub', 'qstat', 'qdel']:
        bin_location = os.path.join(self.pbs_bin_location, binfile)
        if validation.valid_file(bin_location):
            contents = utilities.add_or_replace_setting(contents, binfile, bin_location)
    if self.options['pbs_server'].value != '':
        contents = utilities.add_or_replace_setting(contents, 'pbs_default', self.options['pbs_server'].value)

    if self.options['seg_enabled'].value:
        if (self.options['log_directory'].value is None or
                not validation.valid_directory(self.options['log_directory'].value)):
            mesg = "%s is not a valid directory location " % self.options['log_directory'].value
            mesg += "for pbs log files"
            self.log(mesg,
                     section=self.config_section,
                     option='log_directory',
                     level=logging.ERROR)
            return False

        contents = utilities.add_or_replace_setting(contents, 'log_path', self.options['log_directory'].value)

    if not utilities.atomic_write(PBSConfiguration.GRAM_CONFIG_FILE, contents):
        return False

    return True
def _write_ce_collector_attributes_file(self, attributes_file):
    """Write config file that contains the osg attributes for the
    CE-Collector to advertise
    """
    schedd_attrs_list = ["$(SCHEDD_ATTRS)"]
    attributes_file_lines = []

    for name, value in [
            ('OSG_Resource', self.osg_resource),
            ('OSG_ResourceGroup', self.osg_resource_group),
            ('OSG_BatchSystems', ",".join(self.enabled_batch_systems))]:
        attributes_file_lines.append("%s = %s" % (name, utilities.classad_quote(value)))
        schedd_attrs_list.append(name)

    if self.resource_catalog:
        attributes_file_lines.append(self.resource_catalog.compose_text())
        schedd_attrs_list.append('OSG_ResourceCatalog')

    attributes_file_contents = (
        "# Do not edit - file generated by osg-configure\n"
        + "\n".join(attributes_file_lines) + "\n"
        + "SCHEDD_ATTRS = " + " ".join(schedd_attrs_list) + "\n"
    )

    return utilities.atomic_write(attributes_file, attributes_file_contents)
def write_binpaths_to_blah_config(self, jobmanager, submit_binpath):
    """
    Change the *_binpath variables in /etc/blah.config for the given jobmanager
    to point to the locations specified by the user in the config for that
    jobmanager.

    Does not do anything if /etc/blah.config is missing (e.g. if blahp is not installed).

    :param jobmanager: The name of a job manager that has a _binpath variable
      in /etc/blah.config
    :param submit_binpath: The fully-qualified path to the submit executables
      for that jobmanager
    """
    if os.path.exists(self.BLAH_CONFIG):
        contents = utilities.read_file(self.BLAH_CONFIG)
        contents = utilities.add_or_replace_setting(contents, jobmanager + "_binpath", submit_binpath,
                                                    quote_value=False)
        utilities.atomic_write(self.BLAH_CONFIG, contents)
def _configure_slurm_probe(self):
    """
    Do SLURM probe specific configuration
    """
    config_location = GRATIA_CONFIG_FILES['slurm']
    buf = file(config_location).read()
    settings = self._probe_config['slurm']
    if not validation.valid_file(settings['db_pass']):
        self.log("Slurm DB password file not present",
                 level=logging.ERROR,
                 option='db_pass',
                 section='SLURM')
        return True
    buf = self.replace_setting(buf, 'SlurmDbHost', settings['db_host'])
    buf = self.replace_setting(buf, 'SlurmDbPort', settings['db_port'])
    buf = self.replace_setting(buf, 'SlurmDbUser', settings['db_user'])
    buf = self.replace_setting(buf, 'SlurmDbPasswordFile', settings['db_pass'])
    buf = self.replace_setting(buf, 'SlurmDbName', settings['db_name'])
    buf = self.replace_setting(buf, 'SlurmCluster', settings['cluster'])
    buf = self.replace_setting(buf, 'SlurmLocation', settings['location'])
    if not utilities.atomic_write(config_location, buf):
        return False
    return True
def write_binpaths_to_blah_config(self, jobmanager, submit_binpath):
    """
    Change the *_binpath variables in /etc/blah.config for the given jobmanager
    to point to the locations specified by the user in the config for that
    jobmanager.

    Does not do anything if /etc/blah.config is missing (e.g. if blahp is not installed).

    :param jobmanager: The name of a job manager that has a _binpath variable
      in /etc/blah.config
    :param submit_binpath: The fully-qualified path to the submit executables
      for that jobmanager
    """
    if os.path.exists(self.BLAH_CONFIG):
        contents = utilities.read_file(self.BLAH_CONFIG)
        contents = utilities.add_or_replace_setting(contents, jobmanager + "_binpath", submit_binpath,
                                                    quote_value=True)
        utilities.atomic_write(self.BLAH_CONFIG, contents)
def _configure_pbs_probe(self):
    """
    Do pbs probe specific configuration
    """
    if (self._probe_config['pbs']['accounting_log_directory'] is None or
            self._probe_config['pbs']['accounting_log_directory'] == ''):
        return True
    accounting_dir = self._probe_config['pbs']['accounting_log_directory']
    if not validation.valid_directory(accounting_dir):
        self.log("PBS accounting log not present, PBS gratia probe not configured",
                 level=logging.ERROR,
                 option='accounting_log_directory',
                 section='PBS')
        return True
    config_location = GRATIA_CONFIG_FILES['pbs']
    buf = file(config_location).read()
    buf = self.replace_setting(buf, 'pbsAcctLogDir', accounting_dir, xml_file=False)
    buf = self.replace_setting(buf, 'lrmsType', 'pbs', xml_file=False)
    if not utilities.atomic_write(config_location, buf):
        return False
    return True
def setup_gram_config(self):
    """
    Populate the gram config file with correct values

    Returns True if successful, False otherwise
    """
    buf = open(SGEConfiguration.GRAM_CONFIG_FILE).read()
    for binfile in ['qsub', 'qstat', 'qdel', 'qconf']:
        bin_location = os.path.join(self.options['sge_bin_location'].value, binfile)
        if validation.valid_file(bin_location):
            buf = utilities.add_or_replace_setting(buf, binfile, bin_location)
    for setting in ['sge_cell', 'sge_root', 'sge_config']:
        buf = utilities.add_or_replace_setting(buf, setting, self.options[setting].value)

    if self.options['seg_enabled'].value:
        buf = utilities.add_or_replace_setting(buf, 'log_path', self.options['log_file'].value)
    if self.options['default_queue'].value != '':
        buf = utilities.add_or_replace_setting(buf, 'default_queue', self.options['default_queue'].value)
    if self.options['validate_queues'].value:
        buf = utilities.add_or_replace_setting(buf, 'validate_queues', 'yes', quote_value=False)
    else:
        buf = utilities.add_or_replace_setting(buf, 'validate_queues', 'no', quote_value=False)
    if self.options['available_queues'].value != '':
        buf = utilities.add_or_replace_setting(buf, 'available_queues', self.options['available_queues'].value)

    if not utilities.atomic_write(SGEConfiguration.GRAM_CONFIG_FILE, buf):
        return False
    return True
def setup_htcondor_ce_config(self):
    """
    Populate the config file that tells htcondor-ce where the condor
    pool is and where the spool directory is.

    Returns True if successful, False otherwise
    """
    if not utilities.rpm_installed('htcondor-ce'):
        self.log("Unable to configure htcondor-ce for Condor: htcondor-ce not installed",
                 level=logging.ERROR)
        return False

    def get_condor_ce_config_val(variable):
        return utilities.get_condor_config_val(variable, executable='condor_ce_config_val',
                                               quiet_undefined=True)

    # Get values for the settings we want to update. We can get the
    # values from condor_config_val; in the case of JOB_ROUTER_SCHEDD2_NAME,
    # we have FULL_HOSTNAME as a fallback in case SCHEDD_NAME is missing.
    # We also get the current / default value from condor_ce_config_val;
    # only update the setting in case the value from
    # condor_config_val is different from the value from condor_ce_config_val.
    condor_ce_config = {}
    for condor_ce_config_key, condor_config_keys in [
            ('JOB_ROUTER_SCHEDD2_NAME', ['SCHEDD_NAME', 'FULL_HOSTNAME']),
            ('JOB_ROUTER_SCHEDD2_POOL', ['COLLECTOR_HOST']),
            ('JOB_ROUTER_SCHEDD2_SPOOL', ['SPOOL'])]:
        condor_config_value = None
        for condor_config_value in (utilities.get_condor_config_val(k, quiet_undefined=True)
                                    for k in condor_config_keys):
            if condor_config_value:
                break

        condor_ce_config_value = get_condor_ce_config_val(condor_ce_config_key)

        if not (condor_config_value or condor_ce_config_value):
            self.log("Unable to determine value for %s from %s and default not set;"
                     " check your Condor config" %
                     (condor_ce_config_key, ' or '.join(condor_config_keys)),
                     level=logging.ERROR)
            return False
        elif not condor_config_value:
            continue  # can't set anything for this

        # Special case for JOB_ROUTER_SCHEDD2_POOL: append port if necessary (SOFTWARE-1744)
        if condor_ce_config_key == 'JOB_ROUTER_SCHEDD2_POOL':
            condor_collector_port = (utilities.get_condor_config_val('COLLECTOR_PORT',
                                                                     quiet_undefined=True)
                                     or '9618')
            condor_config_value = self._add_port_if_necessary(condor_config_value, condor_collector_port)

        if not condor_ce_config_value or condor_ce_config_value != condor_config_value:
            condor_ce_config[condor_ce_config_key] = condor_config_value

    if condor_ce_config:
        buf = utilities.read_file(JobManagerConfiguration.HTCONDOR_CE_CONFIG_FILE,
                                  default="# This file is managed by osg-configure\n")
        for key, value in condor_ce_config.items():
            buf = utilities.add_or_replace_setting(buf, key, value, quote_value=False)

        if not utilities.atomic_write(JobManagerConfiguration.HTCONDOR_CE_CONFIG_FILE, buf):
            return False

    return True
def _configure_sge_probe(self):
    """
    Do SGE probe specific configuration
    """
    accounting_path = self._probe_config['sge']['sge_accounting_file']
    config_location = GRATIA_CONFIG_FILES['sge']
    buf = file(config_location).read()
    buf = self.replace_setting(buf, 'SGEAccountingFile', accounting_path)
    if not utilities.atomic_write(config_location, buf):
        return False
    return True
def _disable_callout(self):
    """
    Enable authorization using gridmap files
    """
    self.log("Updating " + GSI_AUTHZ_LOCATION, level=logging.INFO)
    gsi_contents = "#globus_mapping liblcas_lcmaps_gt4_mapping.so lcmaps_callout\n"
    if not utilities.atomic_write(GSI_AUTHZ_LOCATION, gsi_contents):
        self.log("Error while writing to " + GSI_AUTHZ_LOCATION, level=logging.ERROR)
        raise exceptions.ConfigureError("Error while writing to " + GSI_AUTHZ_LOCATION)
def _set_lcmaps_callout(self, enable):
    self.log("Updating " + GSI_AUTHZ_LOCATION, level=logging.INFO)
    if enable:
        gsi_contents = "globus_mapping liblcas_lcmaps_gt4_mapping.so lcmaps_callout\n"
    else:
        gsi_contents = "#globus_mapping liblcas_lcmaps_gt4_mapping.so lcmaps_callout\n"
    if not utilities.atomic_write(GSI_AUTHZ_LOCATION, gsi_contents):
        msg = "Error while writing to " + GSI_AUTHZ_LOCATION
        self.log(msg, level=logging.ERROR)
        raise exceptions.ConfigureError(msg)
def edit_ssh_config(self, ssh_key_loc, local_user_home, local_user_name):
    # Add a section to .ssh/config for this host
    config_path = os.path.join(local_user_home, ".ssh", "config")
    # Split the entry point by the "@"
    endpoint_user_name, endpoint_host = self.options["endpoint"].value.split('@')
    host_config = """
Host %(endpoint_host)s
    HostName %(endpoint_host)s
    User %(endpoint_user_name)s
    IdentityFile %(ssh_key_loc)s
""" % locals()
    text_to_add = "%s%s%s" % (self.SSH_CONFIG_SECTION_BEGIN, host_config, self.SSH_CONFIG_SECTION_END)

    if not os.path.exists(config_path):
        utilities.atomic_write(config_path, text_to_add)
        return

    config_contents = ""
    with open(config_path, "r", encoding="latin-1") as f:
        config_contents = f.read()

    section_re = re.compile(r"%s.+?%s" % (re.escape(self.SSH_CONFIG_SECTION_BEGIN),
                                          re.escape(self.SSH_CONFIG_SECTION_END)),
                            re.MULTILINE | re.DOTALL)
    host_re = re.compile(r"^\s*Host\s+%s\s*$" % re.escape(endpoint_host), re.MULTILINE)
    if section_re.search(config_contents):
        config_contents = section_re.sub(text_to_add, config_contents)
        self.logger.debug("osg-configure section found in %s", config_path)
    elif host_re.search(config_contents):
        self.logger.info("Host %s already found in %s but not in an osg-configure section."
                         " Not modifying it.",
                         endpoint_host, config_path)
        return
    else:
        config_contents += "\n" + text_to_add

    utilities.atomic_write(config_path, config_contents)
def _configure_htcondor_ce_probe(self):
    """
    Do HTCondor-CE probe specific configuration

    Set to suppress grid local jobs (pre-routed jobs)
    """
    config_location = GRATIA_CONFIG_FILES['htcondor-ce']
    buf = file(config_location).read()
    buf = self.replace_setting(buf, 'SuppressGridLocalRecords', '1')
    if not utilities.atomic_write(config_location, buf):
        return False
    return True
def _make_subscription(self, probe, probe_file, probe_host, site, hostname):
    """
    Check to see if a given probe has the correct subscription and if not
    make it.
    """
    self.log("GratiaConfiguration._make_subscription started")

    if self._subscription_present(probe_file, probe_host):
        self.log("Subscription found %s probe, returning" % probe)
        self.log("GratiaConfiguration._make_subscription completed")
        return True

    if probe == 'gridftp':
        probe = 'gridftp-transfer'

    try:
        buf = open(probe_file).read()
        buf = re.sub(r'(\s*)ProbeName\s*=.*',
                     r'\1ProbeName="' + "%s:%s" % (probe, hostname) + '"',
                     buf, 1)
        buf = re.sub(r'(\s*)SiteName\s*=.*',
                     r'\1SiteName="' + site + '"',
                     buf, 1)
        buf = re.sub(r'(\s*)Grid\s*=.*',
                     r'\1Grid="' + self.grid_group + '"',
                     buf, 1)
        buf = re.sub(r'(\s*)EnableProbe\s*=.*',
                     r'\1EnableProbe="1"',
                     buf, 1)
        for var in ['SSLHost', 'SOAPHost', 'SSLRegistrationHost', 'CollectorHost']:
            buf = re.sub(r'(\s*)' + var + r'\s*=.*',
                         r'\1' + var + '="' + probe_host + '"',
                         buf, 1)

        if not utilities.atomic_write(probe_file, buf, mode=420):
            self.log("Error while configuring gratia probes: " +
                     "can't write to %s" % probe_file,
                     level=logging.ERROR)
            raise exceptions.ConfigureError("Error configuring gratia")
    except (IOError, OSError):
        self.log("Error while configuring gratia probes",
                 exception=True,
                 level=logging.ERROR)
        raise exceptions.ConfigureError("Error configuring gratia")

    self.log("GratiaConfiguration._make_subscription completed")
    return True
def _configure_condor_probe(self):
    """
    Do condor probe specific configuration
    """
    config_location = GRATIA_CONFIG_FILES['condor']
    buf = file(config_location).read()
    settings = self._probe_config['condor']
    buf = self.replace_setting(buf, 'CondorLocation', settings['condor_location'])
    buf = self.replace_setting(buf, 'CondorConfig', settings['condor_config'])
    if not utilities.atomic_write(config_location, buf):
        return False
    return True
def update_gums_client_location(self):
    self.log("Updating " + GUMS_CLIENT_LOCATION, level=logging.INFO)
    location_re = re.compile("^gums.location=.*$", re.MULTILINE)
    authz_re = re.compile("^gums.authz=.*$", re.MULTILINE)
    if not validation.valid_file(GUMS_CLIENT_LOCATION):
        gums_properties = "gums.location=https://%s:8443" % (self.options['gums_host'].value)
        gums_properties += "/gums/services/GUMSAdmin\n"
        gums_properties += "gums.authz=https://%s:8443" % (self.options['gums_host'].value)
        gums_properties += "/gums/services/GUMSXACMLAuthorizationServicePort"
    else:
        gums_properties = open(GUMS_CLIENT_LOCATION).read()
        replacement = "gums.location=https://%s:8443" % (self.options['gums_host'].value)
        replacement += "/gums/services/GUMSAdmin"
        gums_properties = location_re.sub(replacement, gums_properties)
        replacement = "gums.authz=https://%s:8443" % (self.options['gums_host'].value)
        replacement += "/gums/services/GUMSXACMLAuthorizationServicePort"
        gums_properties = authz_re.sub(replacement, gums_properties)
    utilities.atomic_write(GUMS_CLIENT_LOCATION, gums_properties)
def _configure_condor_probe(self):
    """
    Do condor probe specific configuration
    """
    config_location = GRATIA_CONFIG_FILES['condor']
    buf = open(config_location, "r", encoding="latin-1").read()
    settings = self._probe_config['condor']
    buf = self.replace_setting(buf, 'CondorLocation', settings['condor_location'])
    buf = self.replace_setting(buf, 'CondorConfig', settings['condor_config'])
    if not utilities.atomic_write(config_location, buf):
        return False
    return True
def setup_blah_config(self):
    """
    Populate blah.config with correct values

    Return True if successful, False otherwise
    """
    if os.path.exists(self.BLAH_CONFIG):
        contents = utilities.read_file(self.BLAH_CONFIG)
        contents = utilities.add_or_replace_setting(contents, "sge_rootpath", self.options['sge_root'].value,
                                                    quote_value=True)
        contents = utilities.add_or_replace_setting(contents, "sge_cellname", self.options['sge_cell'].value,
                                                    quote_value=True)
        return utilities.atomic_write(self.BLAH_CONFIG, contents)
    return False
def _subscribe_probe_to_remote_host(self, probe, probe_file, remote_host, local_resource, local_host):
    """Subscribe the given probe to the given remote host if necessary -- this means:
    - Enable the probe
    - Set the local host name in the probe config (in ProbeName)
    - Set the local resource name (in SiteName)
    - Set the grid group (in Grid)
    - Set the *Host settings to the remote host

    Check to see if a given probe has the correct subscription and if not
    make it.
    """
    self.log("GratiaConfiguration._subscribe_probe_to_remote_host started")

    # XXX This just checks EnableProbe and SOAPHost; should we check the other *Host
    # settings or are we using SOAPHost as a "don't configure me" sentinel?
    # -mat 2/19/21
    if self._subscription_present(probe_file, remote_host):
        self.log("Subscription found %s probe, returning" % probe)
        self.log("GratiaConfiguration._subscribe_probe_to_remote_host completed")
        return True

    if probe == 'gridftp':
        probe = 'gridftp-transfer'

    try:
        buf = open(probe_file, "r", encoding="latin-1").read()
        buf = self.replace_setting(buf, 'ProbeName', "%s:%s" % (probe, local_host))
        buf = self.replace_setting(buf, 'SiteName', local_resource)
        buf = self.replace_setting(buf, 'Grid', self.grid_group)
        buf = self.replace_setting(buf, 'EnableProbe', '1')
        for var in ['SSLHost', 'SOAPHost', 'SSLRegistrationHost', 'CollectorHost']:
            buf = self.replace_setting(buf, var, remote_host)

        if not utilities.atomic_write(probe_file, buf, mode=0o644):
            self.log("Error while configuring gratia probes: " +
                     "can't write to %s" % probe_file,
                     level=logging.ERROR)
            raise exceptions.ConfigureError("Error configuring gratia")
    except OSError:
        self.log("Error while configuring gratia probes",
                 exception=True,
                 level=logging.ERROR)
        raise exceptions.ConfigureError("Error configuring gratia")

    self.log("GratiaConfiguration._subscribe_probe_to_remote_host completed")
    return True
def _update_lcmaps_file(self):
    """
    Update lcmaps file and give appropriate messages if lcmaps.db.rpmnew exists
    """
    self.log("Updating " + LCMAPS_DB_LOCATION, level=logging.INFO)
    lcmaps_db = open(LCMAPS_DB_LOCATION).read()
    endpoint_re = re.compile(r'^\s*"--endpoint\s+https://.*/gums/services.*"\s*$',
                             re.MULTILINE)
    replacement = " \"--endpoint https://%s:8443" % (self.options['gums_host'].value)
    replacement += "/gums/services/GUMSXACMLAuthorizationServicePort\""
    lcmaps_db = endpoint_re.sub(replacement, lcmaps_db)
    utilities.atomic_write(LCMAPS_DB_LOCATION, lcmaps_db)

    rpmnew_file = LCMAPS_DB_LOCATION + ".rpmnew"
    warning_message = """It appears that you've updated the lcmaps RPM and the configuration has changed.
If you have ever edited /etc/lcmaps.db by hand (most people don't), then you should:
  1. Edit /etc/lcmaps.db.rpmnew to make your changes again
  2. mv /etc/lcmaps.db.rpmnew /etc/lcmaps.db
If you haven't edited /etc/lcmaps.db by hand, then you can just use the new configuration:
  1. mv /etc/lcmaps.db.rpmnew /etc/lcmaps.db"""
    if validation.valid_file(rpmnew_file):
        self.log(warning_message, level=logging.WARNING)
    else:
        return

    lcmaps_db = open(rpmnew_file).read()
    endpoint_re = re.compile(r'^\s*"--endpoint\s+https://.*/gums/services.*"\s*$',
                             re.MULTILINE)
    replacement = " \"--endpoint https://%s:8443" % (self.options['gums_host'].value)
    replacement += "/gums/services/GUMSXACMLAuthorizationServicePort\""
    lcmaps_db = endpoint_re.sub(replacement, lcmaps_db)
    utilities.atomic_write(rpmnew_file, lcmaps_db)
def setup_gram_config(self):
    """
    Populate the gram config file with correct values

    Returns True if successful, False otherwise
    """
    contents = open(SlurmConfiguration.GRAM_CONFIG_FILE).read()
    for binfile in ['qsub', 'qstat', 'qdel']:
        bin_location = os.path.join(self.slurm_bin_location, binfile)
        if validation.valid_file(bin_location):
            contents = utilities.add_or_replace_setting(contents, binfile, bin_location)

    if not utilities.atomic_write(SlurmConfiguration.GRAM_CONFIG_FILE, contents):
        return False

    return True
def _write_ce_collector_file(self, info_services_file):
    """Write CE-Collector configuration file which specifies which host(s)
    to forward ads to
    """
    view_hosts = []
    for host in self.ce_collectors:
        if ':' not in host:
            view_hosts.append("%s:%d" % (host, HTCONDOR_CE_COLLECTOR_PORT))
        else:
            view_hosts.append(host)
    info_services_file_contents = """\
# Do not edit - file generated by osg-configure
CONDOR_VIEW_HOST = %s
""" % ",".join(view_hosts)

    return utilities.atomic_write(info_services_file, info_services_file_contents)
def _write_ce_collector_file(self, info_services_file):
    """Write CE-Collector configuration file which specifies which host(s)
    to forward ads to
    """
    view_hosts = []
    for host in self.ce_collectors:
        if host.find(':') == -1:
            view_hosts.append("%s:%d" % (host, HTCONDOR_CE_COLLECTOR_PORT))
        else:
            view_hosts.append(host)
    info_services_file_contents = """\
# Do not edit - file generated by osg-configure
CONDOR_VIEW_HOST = %s
""" % ",".join(view_hosts)

    return utilities.atomic_write(info_services_file, info_services_file_contents)
def _configure_lsf_probe(self):
    """
    Do lsf probe specific configuration
    """
    if (self._probe_config['lsf']['log_directory'] is None or
            self._probe_config['lsf']['log_directory'] == ''):
        self.log("LSF accounting log directory not given, LSF gratia probe not configured",
                 level=logging.ERROR,
                 option='log_directory',
                 section='LSF')
        return True
    log_directory = self._probe_config['lsf']['log_directory']
    if not validation.valid_directory(log_directory):
        self.log("LSF accounting log not present, LSF gratia probe not configured",
                 level=logging.ERROR,
                 option='log_directory',
                 section='LSF')
        return True
    config_location = GRATIA_CONFIG_FILES['lsf']
    buf = file(config_location).read()
    buf = self.replace_setting(buf, 'lsfAcctLogDir', log_directory, xml_file=False)

    # setup lsfBinDir
    if (self._probe_config['lsf']['lsf_location'] is None or
            self._probe_config['lsf']['lsf_location'] == ''):
        self.log("LSF location not given, lsf gratia probe not configured",
                 level=logging.ERROR,
                 option='lsf_location',
                 section='LSF')
        return True
    lsf_bin_dir = os.path.join(self._probe_config['lsf']['lsf_location'], 'bin')
    buf = self.replace_setting(buf, 'lsfBinDir', lsf_bin_dir, xml_file=False)

    buf = self.replace_setting(buf, 'lrmsType', 'lsf', xml_file=False)
    if not utilities.atomic_write(config_location, buf):
        return False
    return True
def setup_gram_config(self):
    """
    Populate the gram config file with correct values

    Returns True if successful, False otherwise
    """
    buf = open(CondorConfiguration.GRAM_CONFIG_FILE).read()
    for binfile in ['condor_submit', 'condor_rm']:
        bin_location = os.path.join(self.condor_bin_location, binfile)
        if validation.valid_file(bin_location):
            buf = utilities.add_or_replace_setting(buf, binfile, bin_location)
    if not utilities.blank(self.options['condor_config'].value):
        buf = utilities.add_or_replace_setting(buf, 'condor_config', self.options['condor_config'].value)

    if not utilities.atomic_write(CondorConfiguration.GRAM_CONFIG_FILE, buf):
        return False

    return True
def _make_subscription(self, probe, probe_file, probe_host, site, hostname):
    """
    Check to see if a given probe has the correct subscription and if not
    make it.
    """
    self.log("GratiaConfiguration._make_subscription started")

    if self._subscription_present(probe_file, probe_host):
        self.log("Subscription found %s probe, returning" % probe)
        self.log("GratiaConfiguration._make_subscription completed")
        return True

    if probe == 'gridftp':
        probe = 'gridftp-transfer'

    try:
        buf = open(probe_file).read()
        buf = self.replace_setting(buf, 'ProbeName', "%s:%s" % (probe, hostname))
        buf = self.replace_setting(buf, 'SiteName', site)
        buf = self.replace_setting(buf, 'Grid', self.grid_group)
        buf = self.replace_setting(buf, 'EnableProbe', '1')
        for var in ['SSLHost', 'SOAPHost', 'SSLRegistrationHost', 'CollectorHost']:
            buf = self.replace_setting(buf, var, probe_host)

        if not utilities.atomic_write(probe_file, buf, mode=420):
            self.log("Error while configuring gratia probes: " +
                     "can't write to %s" % probe_file,
                     level=logging.ERROR)
            raise exceptions.ConfigureError("Error configuring gratia")
    except (IOError, OSError):
        self.log("Error while configuring gratia probes",
                 exception=True,
                 level=logging.ERROR)
        raise exceptions.ConfigureError("Error configuring gratia")

    self.log("GratiaConfiguration._make_subscription completed")
    return True
def write_blah_disable_wn_proxy_renewal_to_blah_config(self):
    if os.path.exists(self.BLAH_CONFIG):
        contents = utilities.read_file(self.BLAH_CONFIG)
        contents = utilities.add_or_replace_setting(contents, "blah_disable_wn_proxy_renewal", "yes",
                                                    quote_value=False)
        utilities.atomic_write(self.BLAH_CONFIG, contents)
def create_missing_service_cert_key(self, service_cert, service_key, user):
    """Copy the host cert and key to a service cert and key with the appropriate
    permissions if the service cert and key do not already exist.  If they
    already exist, nothing is done.  If only one of them exists, this method
    returns with an error.  Parent directories are created as needed.

    :param service_cert: Path to the service certificate to create
    :type service_cert: str
    :param service_key: Path to the service private key to create
    :type service_key: str
    :param user: The name of the user that will own the cert and key
    :type user: str
    :return: True if service_cert and service_key are both created or already
      present, False otherwise
    """
    user_pwd = pwd.getpwnam(user)
    if not user_pwd:
        self.log("%r user not found, cannot create service cert/key with correct permissions" % user,
                 level=logging.ERROR)
        return False

    if os.path.isfile(service_cert) and os.path.isfile(service_key):
        self.log("%s and %s both exist; not creating them" % (service_cert, service_key),
                 level=logging.INFO)
    elif os.path.isfile(service_cert) and not os.path.isfile(service_key):
        self.log("%s exists but %s does not! Either remove the cert or copy the matching key"
                 % (service_cert, service_key), level=logging.ERROR)
        return False
    elif os.path.isfile(service_key) and not os.path.isfile(service_cert):
        self.log("%s exists but %s does not! Either remove the key or copy the matching cert"
                 % (service_key, service_cert), level=logging.ERROR)
        return False
    else:
        for from_path, to_path, mode in [[HOSTCERT_PATH, service_cert, int('644', 8)],
                                         [HOSTKEY_PATH, service_key, int('600', 8)]]:
            # Create dirs for the cert/key if they don't exist
            parent_dir = os.path.abspath(os.path.dirname(to_path))
            try:
                os.makedirs(parent_dir)
            except OSError, err:
                if err.errno != errno.EEXIST:
                    self.log("Could not create directory %s" % parent_dir,
                             exception=err, level=logging.ERROR)
                    return False
            try:
                os.chown(parent_dir, user_pwd.pw_uid, user_pwd.pw_gid)
            except EnvironmentError, err:
                self.log("Could not set ownership of %s" % parent_dir,
                         exception=err, level=logging.ERROR)
                return False

            from_fh = open(from_path, 'rb')
            success = utilities.atomic_write(to_path, from_fh.read(), mode=mode)
            from_fh.close()
            if not success:
                self.log("Could not copy %s to %s" % (from_path, to_path),
                         level=logging.ERROR)
                return False
            try:
                os.chown(to_path, user_pwd.pw_uid, user_pwd.pw_gid)
            except EnvironmentError, err:
                self.log("Could not set ownership of %s" % to_path,
                         exception=err, level=logging.ERROR)
                return False

    return True
    Returns:
    True if config successfully updated
    """
    if filename is None:
        return False

    try:
        contents = open(filename).read()
    except EnvironmentError, err:
        self.log(self.MISSING_JOBMANAGER_CONF_MSG % (filename, err),
                 level=logging.ERROR)
        return False

    if contents.startswith('accept_limited,'):
        contents = contents.replace('accept_limited,', '', 1)
        if utilities.atomic_write(filename, contents):
            return True
        else:
            self.log('Error disabling accept_limited', level=logging.ERROR)
            return False

    if ',accept_limited' in contents:
        contents = contents.replace(',accept_limited', '', 1)
        if utilities.atomic_write(filename, contents):
            return True
        else:
            self.log('Error disabling accept_limited', level=logging.ERROR)
            return False
def write_htcondor_ce_sentinel(self):
    if self.htcondor_gateway_enabled:
        contents = utilities.read_file(self.HTCONDOR_CE_CONFIG_FILE,
                                       default="# This file is managed by osg-configure\n")
        contents = utilities.add_or_replace_setting(contents, "OSG_CONFIGURED", "true", quote_value=False)
        utilities.atomic_write(self.HTCONDOR_CE_CONFIG_FILE, contents)
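# Illustrative only: every snippet above funnels its file updates through
# utilities.atomic_write, which (as the calls show) accepts optional mode and
# encoding arguments and returns True/False rather than raising.  The sketch
# below shows the write-to-a-temp-file-then-rename pattern such a helper is
# assumed to use so a crash mid-write never leaves a truncated config behind;
# it is not the real osg-configure implementation, and atomic_write_sketch is
# a hypothetical name.
import os
import tempfile

def atomic_write_sketch(filename, contents, mode=0o644):
    # Create the temp file in the destination directory so the final rename
    # stays on one filesystem (rename is atomic on POSIX in that case)
    dirname = os.path.dirname(filename) or "."
    fd, tmp_path = tempfile.mkstemp(dir=dirname)
    try:
        with os.fdopen(fd, "w") as tmp_file:
            tmp_file.write(contents)
        os.chmod(tmp_path, mode)
        os.rename(tmp_path, filename)
        return True
    except EnvironmentError:
        # Best-effort cleanup; report failure to the caller instead of raising
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
        return False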