def find_new_entries_in_infosys(config_xml, source, source_type, skip_disabled, vo_name=''):
    """
    For the given information system, find any new entries that are not already in the config.
    """
    try:
        # Find all config entries not disabled
        config_dom = minidom.parse(config_xml)
        config_entries = infosys_lib.parse_entries(config_dom, skip_missing_ref_id=True, skip_disabled=skip_disabled)
    except:
        print "Error parsing the config file '%s' for entries, exiting the tool." % config_xml
        sys.exit(2)

    # Query the given info system
    if source_type.lower() == 'bdii':
        infosys_entries = infosys_lib.query_bdii(source, vo_name)

    elif source_type.lower() == 'ress':
        # Update path with condor
        condor_path = infosys_lib.parse_condor_path(config_dom)
        os.environ["CONDOR_CONFIG"] = condor_path + "/etc/condor_config"
        condorExe.set_path(condor_path + "/bin", condor_path + "/sbin")

        ress_entries = infosys_lib.query_ress(source, vo_name)

        # Remove duplicate entries
        infosys_entries = remove_duplicates(ress_entries)

    elif source_type.lower() == 'tg':
        infosys_entries = infosys_lib.query_teragrid(source)

    else:
        # Unknown source type; nothing to compare against
        infosys_entries = {}

    # Compare config entries with what is found in the information system
    new_entries = []
    for infosys_id in infosys_entries:
        entry_i = infosys_entries[infosys_id]
        found_match = False

        for config_entry in config_entries:
            entry_c = config_entries[config_entry]

            # Check if ids match
            if entry_i['ref_id'] == entry_c['ref_id']:
                # Check same source types between config and infosys entries
                # TODO do we need to check source url too?
                if entry_c['source_type'].lower() == source_type.lower():
                    found_match = True  # already have this entry
                    break
            else:
                # Check if content matches for other infosys or manual entries
                if entry_i['gatekeeper'] == entry_c['gatekeeper'] and \
                        entry_i['gridtype'] == entry_c['gridtype'] and \
                        entry_i['rsl'] == entry_c['rsl']:
                    found_match = True  # already have this entry
                    # TODO here could add the ability to update ref_ids if we find an
                    #      additional matching entry - not sure if we want to for ress entries tho?
                    break

        if not found_match:
            new_entries.append(entry_i)

    return new_entries
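
# --- Example (sketch) -------------------------------------------------------
# A minimal illustration of driving find_new_entries_in_infosys() to list BDII
# entries that are missing from a factory config.  The config path, BDII host
# and VO name are hypothetical placeholders, and skip_disabled='yes' assumes
# the same yes/no form infosys_lib.parse_entries() expects.
def example_list_new_bdii_entries(config_xml="/path/to/glideinWMS.xml",
                                  bdii_host="exp-bdii.example.org",
                                  vo_name="cms"):
    """ Sketch: report entries present in the BDII but absent from the config. """
    new_entries = find_new_entries_in_infosys(config_xml, bdii_host, 'bdii',
                                              skip_disabled='yes', vo_name=vo_name)
    for entry in new_entries:
        print "New entry not in config: ref_id=%s gatekeeper=%s" % (entry['ref_id'], entry['gatekeeper'])
    return new_entries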
def verify_directories_empty(self):
    """ This method attempts to clean up all directories so a fresh install
        can be accomplished successfully.
        It is consolidated in a single check so as to only ask once and not
        for each directory.
        It also attempts to ensure that if directories are nested, it does
        not create a problem.  Not an easy task.

        When privilege separation is in effect, the condor_root_switchboard
        must be used to clean out the client log and proxy files as the
        owners are different and permissions problems will occur.
    """
    dirs = {}
    if len(os.listdir(self.client_log_dir())) > 0:
        dirs["client_log_dir"] = self.client_log_dir()
    if len(os.listdir(self.client_proxy_dir())) > 0:
        dirs["client_proxy_dir"] = self.client_proxy_dir()
    if len(os.listdir(self.logs_dir())) > 0:
        dirs["logs_dir"] = self.logs_dir()
    for dir in ["monitor", "stage"]:
        subdir = os.path.join(self.glidein.web_location(), dir)
        if os.path.isdir(subdir) and len(os.listdir(subdir)) > 0:
            dirs["web_location/%s" % dir] = subdir
    if len(os.listdir(self.install_location())) > 0:
        if len(os.listdir(self.install_location())) > self.nbr_of_nested_dirs():
            dirs["install_location"] = self.install_location()

    if len(dirs) == 0:
        return  # all directories are empty

    common.logit("""The following directories must be empty for the install to succeed: """)
    for option in dirs.keys():
        common.logit("""  %(option)s: %(dir)s""" % {"option": option, "dir": dirs[option]})
    common.ask_continue("... can we remove their contents")

    for option in dirs.keys():
        if self.wms.privilege_separation() == "y":
            if option in ["client_log_dir", "client_proxy_dir", ]:
                #-- Factory create requires these directories be empty
                #-- when privsep is in effect
                condor_sbin = "%s/sbin" % self.wms.condor_location()
                condor_bin = "%s/bin" % self.wms.condor_location()
                condorExe.set_path(condor_bin, condor_sbin)
                parent_dir = dirs[option]
                subdirs = os.listdir(parent_dir)
                for base_dir in subdirs:
                    if os.path.isdir("%s/%s" % (parent_dir, base_dir)):
                        condorPrivsep.rmtree(parent_dir, base_dir)
            else:
                common.remove_dir_contents(dirs[option])
        else:
            common.remove_dir_contents(dirs[option])

    # this re-validation is performed to resolve problem of nesting some dirs
    self.validate_needed_directories()
def find_entries_with_partial_id_match(config_xml, skip_disabled):
    """
    Finds the bdii, ress and TeraGrid entries with partial matches.
    """
    try:
        # Find all enabled config entries with ref ids
        config_dom = minidom.parse(config_xml)
        config_entries = infosys_lib.parse_entries(config_dom, skip_missing_ref_id=True, skip_disabled=skip_disabled)

        # Create an info systems list from factory config
        infosystems = infosys_lib.parse_info_systems(config_dom)

        has_ress = False
        for infosys in infosystems:
            if infosystems[infosys].lower() == 'ress':
                has_ress = True
                break

        if has_ress:
            # Update path with condor
            condor_path = infosys_lib.parse_condor_path(config_dom)
            os.environ["CONDOR_CONFIG"] = condor_path + "/etc/condor_config"
            condorExe.set_path(condor_path + "/bin", condor_path + "/sbin")
    except:
        print "Error parsing the config file '%s', exiting the tool." % config_xml
        sys.exit(2)

    # Retrieve info systems entries
    bdii_entries = {}
    ress_entries = {}
    tg_entries = {}
    for infosys, type in infosystems.iteritems():
        if type.lower() == 'bdii':
            bdii_entries.update(infosys_lib.query_bdii(infosys))
        elif type.lower() == 'ress':
            ress_entries.update(infosys_lib.query_ress(infosys))
        elif type.lower() == 'tg':
            tg_entries.update(infosys_lib.query_teragrid(infosys))

    partial_match_bdii_entries = find_partial_id_match(bdii_entries, config_entries, 'bdii')
    partial_match_ress_entries = find_partial_id_match(ress_entries, config_entries, 'ress')
    partial_match_tg_entries = find_partial_id_match(tg_entries, config_entries, 'tg')

    return partial_match_bdii_entries, partial_match_ress_entries, partial_match_tg_entries
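
# --- Example (sketch) -------------------------------------------------------
# Hypothetical driver for find_entries_with_partial_id_match(): summarize, per
# information system, how many configured entries only partially match their
# ref_id.  The config path is a placeholder and skip_disabled='yes' assumes the
# same yes/no form used elsewhere in this tool.
def example_report_partial_matches(config_xml="/path/to/glideinWMS.xml"):
    """ Sketch: count partial ref_id matches for each information system. """
    bdii_matches, ress_matches, tg_matches = find_entries_with_partial_id_match(config_xml, skip_disabled='yes')
    for label, matches in [('BDII', bdii_matches), ('ReSS', ress_matches), ('TeraGrid', tg_matches)]:
        print "%s entries with partial ref_id matches: %d" % (label, len(matches))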
def get_ress_data(self):
    common.logit("ReSS host: %s" % self.glidein.ress_host())
    #-- validate host ---
    if not common.url_is_valid(self.glidein.ress_host()):
        common.logerr("ReSS server (%s) in ress_host option is not valid or inaccessible." % self.glidein.ress_host())

    condor_sbin = "%s/sbin" % self.wms.condor_location()
    condor_bin = "%s/bin" % self.wms.condor_location()
    condorExe.set_path(condor_bin, condor_sbin)

    #-- get gatekeeper data from ReSS --
    common.logit("Supported VOs: %s" % self.glidein.entry_vos())
    constraint = self.glidein.ress_vo_constraint()
    common.logit("Constraints: %s" % constraint)
    condor_obj = condorMonitor.CondorStatus(pool_name=self.glidein.ress_host())
    try:
        condor_obj.load(constraint=constraint)
        condor_data = condor_obj.fetchStored()
    except Exception, e:
        common.logerr(e)
    return condor_data
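
# --- Example (sketch) -------------------------------------------------------
# The ReSS query above reduces to the condorMonitor pattern shown here: point
# CondorStatus at the ReSS collector, load with a constraint, then read back
# the stored classads.  The pool host and constraint below are illustrative
# only, and condorExe.set_path() is assumed to have been called already, as
# get_ress_data() does above.
def example_query_ress_collector(pool="osg-ress-1.fnal.gov",
                                 constraint='GlueCEInfoContactString =!= UNDEFINED'):
    """ Sketch: fetch classads from a ReSS collector via condorMonitor. """
    condor_obj = condorMonitor.CondorStatus(pool_name=pool)
    condor_obj.load(constraint=constraint)
    return condor_obj.fetchStored()  # the stored classad data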
def test_query_ress(self):
    """ Test querying RESS """

    # Condor path and config location
    # These will be set correctly as long as the test is run in the same environment
    # as what is needed to run the factory/wms collector
    if not os.environ.has_key("CONDOR_CONFIG"):
        os.environ["CONDOR_CONFIG"] = "/etc/condor/condor_config"
    condorExe.init()
    self.assertTrue(condorExe.condor_bin_path != None and condorExe.condor_sbin_path != None)
    condorExe.set_path(condorExe.condor_bin_path, condorExe.condor_sbin_path)

    # Test that information is retrieved and is populated correctly
    infosys_entries = query_ress("osg-ress-1.fnal.gov", "engage")
    self.assertNotEqual(infosys_entries, {})

    keys = infosys_entries.keys()
    entry = infosys_entries[keys[0]]
    self.assertTrue(entry['site_name'] != '')
    self.assertTrue(entry['gridtype'] != '')
    self.assertTrue(entry['gatekeeper'] != '')
    self.assertTrue(entry['wall_clocktime'] != 0)
    self.assertTrue(entry['ref_id'] != '')
    self.assertTrue(entry['ce_status'] != '')
    self.assertTrue(entry['glexec_bin'] == 'OSG')
    self.assertTrue(entry['work_dir'] == 'OSG')
    self.assertEqual(entry['source'], "osg-ress-1.fnal.gov")
    self.assertEqual(entry['source_type'], 'RESS')
    self.assertTrue(entry['GlueCEUniqueID'] != '')

    # Test bad ress source
    self.assertRaises(Exception, query_ress, "bad.url", "cms")

    # Test bad vo name
    infosys_entries = query_ress("osg-ress-1.fnal.gov", "junk_testing_bad_vo_name_that_is_not_valid")
    self.assertEqual(infosys_entries, {})

    # Test empty vo name
    infosys_entries = query_ress("osg-ress-1.fnal.gov", "")
    self.assertTrue(infosys_entries != {})
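
# --- Example (sketch) -------------------------------------------------------
# A standalone smoke check mirroring the assertions above, for running outside
# the unittest harness.  It uses the same live ReSS host and VO the test does,
# so it only works where that collector is reachable and Condor is configured.
def example_ress_smoke_check():
    """ Sketch: minimal sanity check of query_ress() without unittest. """
    entries = query_ress("osg-ress-1.fnal.gov", "engage")
    assert entries != {}, "no entries returned from ReSS"
    first = entries[entries.keys()[0]]
    for key in ('site_name', 'gatekeeper', 'ref_id', 'GlueCEUniqueID'):
        assert first[key] != '', "empty %s in ReSS entry" % key
    print "ReSS smoke check passed: %d entries" % len(entries)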
def verify_directories_empty(self):
    """ This method attempts to clean up all directories so a fresh install
        can be accomplished successfully.
        It is consolidated in a single check so as to only ask once and not
        for each directory.

        When privilege separation is in effect, the condor_root_switchboard
        must be used to clean out the client log and proxy files as the
        owners are different and permissions problems will occur.
    """
    instance_dir = "glidein_%(instance)s" % {"instance": self.glidein.instance_name(), }

    dirs = {}
    dirs["logs.........."] = os.path.join(self.logs_dir(), instance_dir)
    dirs["install......."] = os.path.join(self.install_location(), instance_dir)
    dirs["config........"] = self.config_dir()
    for frontend in self.wms.frontend_users().keys():
        ## user = "******" + self.wms.frontend_users()[frontend]
        ## dirs["%s client logs..." % user] = os.path.join(self.client_log_dir(),user,instance_dir)
        ## dirs["%s client proxies" % user] = os.path.join(self.client_proxy_dir(),user,instance_dir)
        dirs["client logs"] = self.client_log_dir()
        dirs["client proxies"] = self.client_proxy_dir()
    for subdir in ["monitor", "stage"]:
        dirs["web %s " % subdir] = os.path.join(self.glidein.web_location(), subdir, instance_dir)

    #--- check them --
    for type in dirs.keys():
        if os.path.isdir(dirs[type]):
            if len(os.listdir(dirs[type])) == 0:
                if self.wms.privilege_separation() == "y":
                    if type in ["client logs", "client proxies", ]:
                        del dirs[type]  # remove from dict
                    else:
                        # will have permission to delete it
                        # os.rmdir(dirs[type])
                        del dirs[type]  # remove from dict
                else:
                    # will have permission to delete it
                    # os.rmdir(dirs[type])
                    for rootdir, dirlist, filelist in os.walk(dirs[type], topdown=False):
                        for filename in filelist:
                            os.remove(os.path.join(rootdir, filename))
                    del dirs[type]  # remove from dict
        else:
            # it does not exist, remove from dict
            del dirs[type]

    #--- if all are empty, return
    if len(dirs) == 0:
        os.system("sleep 3")
        return  # all directories are empty

    #--- See if we can remove them ---
    common.logit("""The following directories must be empty for the install to succeed: """)
    types = dirs.keys()
    types.sort()
    for type in types:
        common.logit("""  %(type)s: %(dir)s""" % {"type": type, "dir": dirs[type]})
    common.ask_continue("... can we remove their contents")

    for type in dirs.keys():
        if self.wms.privilege_separation() == "y":
            if type in ["client logs", "client proxies", ]:
                #-- Factory create requires these directories be empty
                #-- when privsep is in effect
                condor_sbin = "%s/sbin" % self.wms.condor_location()
                condor_bin = "%s/bin" % self.wms.condor_location()
                condorExe.set_path(condor_bin, condor_sbin)
                parent_dir = dirs[type]
                subdirs = os.listdir(parent_dir)
                for base_dir in subdirs:
                    if os.path.isdir("%s/%s" % (parent_dir, base_dir)):
                        try:
                            condorPrivsep.rmtree(parent_dir, base_dir)
                        except Exception, e:
                            common.logerr("""Encountered a problem in executing condor_root_switchboard
to remove this client's sub-directories:
  %(dir)s

  %(error)s

Check your /etc/condor/privsep.conf file to verify.
You may need to configure/install your WMS Collector to resolve or correct
the ini file for the %(type)s attribute.  Be careful now.
""" % {"dir": parent_dir, "type": type, "error": e, })
                common.logit("Files in %s deleted" % parent_dir)
            else:  # not client logs or proxies
                common.remove_dir_contents(dirs[type])
                os.rmdir(dirs[type])
        else:  # no privsep in effect
            common.remove_dir_contents(dirs[type])
            os.rmdir(dirs[type])
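
# --- Example (sketch) -------------------------------------------------------
# The privilege-separation branch above reduces to this pattern: every
# first-level sub-directory of a client log/proxy directory is removed through
# the condor_root_switchboard via condorPrivsep.rmtree(), because the factory
# user does not own those files.  It assumes condorExe.set_path() has already
# been pointed at the Condor bin/sbin directories, as the method above does.
# The directory path in the usage comment is a made-up example.
def example_privsep_clean(parent_dir):
    """ Sketch: clear a client directory when privilege separation is on. """
    for base_dir in os.listdir(parent_dir):
        if os.path.isdir(os.path.join(parent_dir, base_dir)):
            condorPrivsep.rmtree(parent_dir, base_dir)

# example_privsep_clean("/var/gfactory/client-logs")   # hypothetical client log dir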