import logging
import urllib

# Assumption: parseDOM is minidom's parse; the original module's import line is not shown here.
from xml.dom.minidom import parse as parseDOM
from xml.parsers.expat import ExpatError

# Tag collector ReleasesXML feed listing all ScramArchs and their releases
TAG_COLLECTOR_URL = "https://cmstags.cern.ch/tc/ReleasesXML/?anytype=1"


def allScramArchsAndVersions():
    """
    _allScramArchs_

    Downloads a list of all ScramArchs and Versions from the tag collector
    """
    result = {}
    try:
        f = urllib.urlopen(TAG_COLLECTOR_URL)
        domDoc = parseDOM(f)
    except ExpatError as ex:
        logging.error("Could not connect to tag collector!")
        logging.error("Not changing anything!")
        return {}

    archDOMs = domDoc.firstChild.getElementsByTagName("architecture")
    for archDOM in archDOMs:
        arch = archDOM.attributes.item(0).value
        releaseList = []
        for node in archDOM.childNodes:
            # Somehow we can get extraneous ('\n') text nodes in
            # certain versions of Linux
            if str(node.__class__) == "xml.dom.minidom.Text":
                continue
            if not node.hasAttributes():
                # Then it's an empty random node created by the XML
                continue
            for i in range(node.attributes.length):
                attr = node.attributes.item(i)
                if str(attr.name) == 'label':
                    releaseList.append(str(attr.value))
        result[str(arch)] = releaseList
    return result
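

# Minimal usage sketch (not part of the original module): fetch the
# ScramArch -> releases map and print a brief summary per architecture.
# Assumes network access to TAG_COLLECTOR_URL; the output is illustrative only.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    archMap = allScramArchsAndVersions()
    for scramArch in sorted(archMap):
        releases = archMap[scramArch]
        print("%s: %d releases" % (scramArch, len(releases)))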