def read_xlsx_file(filename, sheetid):
    """
    Read contents of specified sheet in Excel 2007 (.xlsx) workbook file.

    .xlsx files are actually zip files containing xml files.

    :param filename: path to the .xlsx workbook.
    :param sheetid: zero-based sheet index (as returned by
        read_xlsx_sheet_names), or None for the first sheet.
    :returns: a 2d list of cell values.
    :raises FileError: if the workbook or its shared strings cannot be read.
    """
    if sheetid is None:
        sheetid = 1
    else:
        sheetid += 1  # sheets are numbered starting from 1 in xlsx files

    # Get cell data from specified worksheet.
    try:
        zf = zipfile.ZipFile(filename)
        sheetdata = zf.read('xl/worksheets/sheet%s.xml' % sheetid)
        xml = xml2obj(sheetdata)
        rows = xml.sheetData.row
    except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
        raise FileError("Could not read xlsx file %s, worksheet id %s" % (
            filename, sheetid - 1))

    # Get shared strings xml.  Cell values are given as ordinal index
    # references into sharedStrings.xml:ssi.si elements, whose string-value
    # is found in the node's .t element.
    try:
        stringdata = zf.read('xl/sharedStrings.xml')
        xml = xml2obj(stringdata)
        strings = xml.si
    except Exception:  # narrowed from bare except
        raise FileError("Could not parse sharedStrings.xml of xlsx file")

    # Map strings to row values and return result
    return extract_xlsx_lines(rows, strings)
def listPBSnodes(hostExpression):
    """
    Return the names of all PBS nodes matching a host expression.

    Example:
        listPBSnodes('node01[05-11],node02[01-80]')

    :param hostExpression: hostlist expression understood by expand_hostlist.
    :returns: list of node names reported by pbsnodes (no free/busy
        filtering is applied here -- see freePBSnodes for that).
    """
    nodeNames = expand_hostlist(hostExpression)
    # Pass the node names as an argument list instead of shell=True with a
    # string command: avoids shell interpolation of an externally-supplied
    # host expression.
    pbsOut = check_output(["pbsnodes", "-x"] + nodeNames)
    nodes = xml2obj(pbsOut)
    # Collect every node name from the XML report.
    return [node["name"] for node in nodes["Node"]]
def xmlparseLastResponseWith(*args): try: if restContext.verbose: print "parsing xml response with " + args[0] + "@" + args[1] mod = __import__(args[0]) func = getattr(mod, args[1]) func(xml2obj(restContext.lastContentResponse), restContext.keyvalues) except Exception as inst: raise TwillAssertionError("at " + args[0] + " could not be called " + args[1] + ' due "' + inst.__str__() + '"')
def enablefs(self, keypool, uid, secretKey):
    """Enable filesystem access on *keypool* and mount its export point.

    Switches the bucket to 'readwrite' file access, waits for the switch to
    take effect, fetches the file-access listing, mounts the reported mount
    point, and prints the listing.
    """
    # switch
    self.bucket_switch(keypool, 'readwrite', uid, secretKey)
    # wait
    self.wait(keypool, 'readwrite', uid, secretKey)
    # list
    # NOTE(review): the response is ASCII-encoded with errors ignored before
    # parsing, so any non-ASCII characters in the XML are silently dropped.
    result = x2o.xml2obj(self.bucket_fileaccesslist(keypool, uid, secretKey).encode('ascii', 'ignore'))
    exportpoint = result['fileaccess_response']['mountPoints']
    # mount
    self.domount(exportpoint)
    self.print_list(result)
def read_xlsx_sheet_names(filename):
    """Get a list of sheets and their ids from xlsx file.

    :param filename: path to the .xlsx workbook.
    :returns: list of (sheet_name, zero_based_sheet_id) tuples.
    :raises FileError: if the workbook cannot be opened or its worksheet
        list cannot be parsed.
    """
    try:
        zf = zipfile.ZipFile(filename)
        sheetsdata = zf.read('xl/workbook.xml')
        xml = xml2obj(sheetsdata)
        sheets = xml.sheets.sheet
    except Exception:  # narrowed from bare except
        raise FileError("Could not open '%s' for sheet listing." % filename)
    output = []
    for sheet in sheets:
        # Relationship ids look like 'rId1', 'rId2', ...; expose them 0-based.
        # Raw string so \d is a regex digit class, not a string escape.
        m = re.match(r'rId(\d+)', sheet.r_id)
        if not m:
            raise FileError("Could not read list of xlsx's worksheets.")
        output.append((sheet.name, int(m.group(1)) - 1))
    return output
def call(self):
    """Issue a GET for self.url, storing the response object, its raw body,
    and (for HTTP 200 only) the parsed XML object on self."""
    parsed = urlparse.urlparse(self.url)
    conn_cls = httplib.HTTPConnection if parsed.scheme == 'http' else httplib.HTTPSConnection
    conn = conn_cls(parsed.netloc)
    path = parsed.path
    if parsed.query:
        path = '%s?%s' % (parsed.path, parsed.query)
    conn.request('GET', path)
    self.response = conn.getresponse()
    self.data = self.response.read()
    conn.close()
    # Anything other than a 200 leaves obj as None.
    self.obj = xml2obj.xml2obj(self.data) if self.response.status == 200 else None
    return
def get_metadata(t_name, quality, offset=0): t_name, season, number = get_series_ssn(t_name, offset=offset) fname_base = "S%02dE%02d" % (int(season), int(number)) target_dir = config.wrkdir file_base = os.path.join(target_dir, t_name, fname_base) parser = MetaHTMLParser() page = GetPage.getpage(ssn_url(t_name, season, number))["page"].decode('utf-8') iasid = GetPage.p.check_ssid() parser.feed(page) try: xml_metadata = wb64(unquote(parser.metadata)) except AttributeError: print "No more episodes" sys.exit(0) #print "Got XML" # xml_metadata metadata = xml2obj.xml2obj(xml_metadata) metadata["fetched_quality"] = quality if metadata["sizes"]["hq"] == "0": metadata["fetched_quality"] = "default" quality = "default" metadata["iasid"] = iasid metadata["season"] = season metadata["number"] = number if not os.path.isdir(os.path.dirname(file_base)): os.mkdir(os.path.dirname(file_base)) if not os.path.isfile(file_base + ".meta") or os.stat(file_base + ".meta").st_size == 0: fd = open(file_base + ".meta", "w") fd.write(json.dumps(metadata)) fd.close() metadata.update({ 'bitrate': float(metadata["sizes"][quality]) / float(metadata['duration']) }) #print "bitrate: %s byte/sec" % metadata['bitrate'] return metadata, file_base
def freePBSnodes(hostExpression):
    """
    Create a list of nodes that are free to run jobs.

    A node qualifies when its state is "free", it has no jobs running,
    and its status carries no message.

    Example:
        freePBSnodes('node01[05-11],node02[01-80]')

    :param hostExpression: hostlist expression understood by expand_hostlist.
    :returns: list of free node names.
    """
    nodeNames = expand_hostlist(hostExpression)
    # Pass node names as an argument list instead of shell=True with a
    # string command: avoids shell interpolation of an externally-supplied
    # host expression.
    pbsOut = check_output(["pbsnodes", "-x"] + nodeNames)
    nodes = xml2obj(pbsOut)
    freenodelist = []
    for node in nodes["Node"]:
        if node["state"] != "free":
            continue
        if node["jobs"]:
            # node has a job running
            continue
        if "status" in node:
            # Parse "key=value" pairs from the status string; split on the
            # first '=' only so values containing '=' don't raise.
            status = dict(item.split("=", 1) for item in node["status"].split(","))
            if "message" in status:  # 'in' replaces deprecated has_key
                continue
        freenodelist.append(node["name"])
    return freenodelist
def get_metadata(t_name, quality, offset=0): t_name, season, number = get_series_ssn(t_name, offset=offset) fname_base = "S%02dE%02d" % (int(season), int(number)) target_dir = config.wrkdir file_base = os.path.join(target_dir, t_name, fname_base) parser = MetaHTMLParser() page = GetPage.getpage(ssn_url(t_name, season, number))["page"].decode('utf-8') iasid = GetPage.p.check_ssid() parser.feed(page) try: xml_metadata = wb64(unquote(parser.metadata)) except AttributeError: print "No more episodes" sys.exit(0) #print "Got XML" # xml_metadata metadata = xml2obj.xml2obj(xml_metadata) metadata["fetched_quality"] = quality if metadata["sizes"]["hq"] == "0": metadata["fetched_quality"] = "default" quality = "default" metadata["iasid"] = iasid metadata["season"] = season metadata["number"] = number if not os.path.isdir(os.path.dirname(file_base)): os.mkdir(os.path.dirname(file_base)) if not os.path.isfile(file_base+".meta") or os.stat(file_base+".meta").st_size == 0: fd = open(file_base+".meta", "w") fd.write(json.dumps(metadata)) fd.close() metadata.update({'bitrate': float(metadata["sizes"][quality])/float(metadata['duration'])}) #print "bitrate: %s byte/sec" % metadata['bitrate'] return metadata, file_base
def load(self):
    """Parse self.xml and extract geocodes and info parameters.

    Populates:
      self.alert       -- parsed alert object from xml2obj
      self.FIPS6       -- geocode values whose valueName is 'FIPS6'
      self.UGC         -- geocode values whose valueName is 'UGC'
      self.INFO_PARAMS -- {valueName: value} for each info parameter
    """
    self.alert = xml2obj(self.xml)
    geocodes = self.alert.info.area.geocode
    self.FIPS6 = [g.value for g in geocodes if g.valueName.upper() == 'FIPS6']
    self.UGC = [g.value for g in geocodes if g.valueName.upper() == 'UGC']
    # Plain loop instead of a throwaway list comprehension used only for
    # its side effects.
    self.INFO_PARAMS = {}
    for p in self.alert.info.parameter:
        self.INFO_PARAMS[p.valueName] = p.value
def print_usage_and_exit():
    """Print the command-line usage string and exit with status 1."""
    print "usage: {0} <UNPACKED FONT>".format(sys.argv[0])
    sys.exit(1)

# --- script entry: pack an unpacked XML font definition into a binary .font ---

if len(sys.argv) != 2:
    print_usage_and_exit()
fontfile = sys.argv[1]
if not os.path.exists(fontfile):
    print_usage_and_exit()

glyphs = []
# Read the XML font definition; one tuple per glyph:
# (symbol, offset_x, offset_y, width, height, tx, ty, tx2, ty2, x_advance)
with file(fontfile + ".fontdef.xml", 'r') as f:
    xmlobj = xml2obj(f.read())
    font_y_advance = int(xmlobj.height)
    for g in xmlobj.glyph:
        glyphs.append((unicode(g.symbol), int(g.offset_x), int(g.offset_y),
                       int(g.width), int(g.height), float(g.tx), float(g.ty),
                       float(g.tx2), float(g.ty2), int(g.x_advance)))

# Write the binary font: "MFNT" magic, glyph count, y-advance, then one
# packed record per glyph.  Note the glyph symbol itself (g[0]) is not
# written to this file.
with file(fontfile[:-4] + ".font", 'w') as f:
    f.write("MFNT")
    f.write(struct.pack("<I", len(glyphs)))
    f.write(struct.pack("<I", font_y_advance))
    for g in glyphs:
        f.write(struct.pack("<iiIIffffI", g[1], g[2], g[3], g[4], g[5],
                            g[6], g[7], g[8], g[9]))

unicode_fontcp = []
def stat(self, stream, element):
    """
    Calls the AccuRev Stat command which reports the status of elements
    in the specified stream

    stream : name of the stream used with '-s' option, ex "DVT_System_7.0"
    element: depot-relative path to a file in accurev to fetch
             ex. \.\APM\System\dvt\Docs\hercules.ini
             THIS CANNOT BE A DIRECTORY, IT MUST BE A SINGLE FILE

    @rtype  : C{dict}
    @return returnStat: Dictionary of the element whose status was queried
    """
    returnStat = {}
    # Look up the configured command name/options for 'Status' from the DB.
    _stat = AccuRevCommands.objects.get(name="Status")
    statCmd = [self.exe, _stat.cmd_name, ]
    if _stat.cmd_opts:
        statCmd.append(_stat.cmd_opts)
    # Append the stream
    statCmd.append('-s')
    statCmd.append(str(stream))
    # Append the depot-relative full pathname of the element to populate
    statCmd.append(str(element))
    # Ready to stat, if the logout idle time has passed, re-login
    if (self.idleTime + self.lastAccessTime < datetime.datetime.now()):
        self._login('stat')
    # Fetch the files using the populate command
    logging.info("Command: %s" % " ".join(statCmd))
    statProc = subprocess.Popen(statCmd, env=self.env, stdout=self.stdout,
                                stderr=self.stderr, universal_newlines=True)
    # Wait for the populate command to finish, error out if needed,
    out, err = statProc.communicate()
    if err != '':
        raise Exception('An error has occured while attempting to get file status from the VCS. \nError: %s' % err)
    # Populate command was successful, retrieve the XML response
    statLog = xml2obj.xml2obj(out)
    # If there are no elements, return an empty dict
    if statLog.element is None:
        pass
    # Otherwise, populate the dictionary with all the goodies
    else:
        # AccuRev reports Windows-style separators; normalise to '/'.
        returnStat['real'] = statLog.element.Real.replace('\\','/')
        returnStat['virtual'] = statLog.element.Virtual.replace('\\','/')
        returnStat['dir'] = statLog.element.dir
        returnStat['eid'] = statLog.element.id
        returnStat['modTime'] = datetime.datetime.fromtimestamp(int(statLog.element.modTime))
        returnStat['stream_version'] = statLog.element.namedVersion.replace('\\',r'/')
        returnStat['status'] = statLog.element.status
    return returnStat
def pop(self, stream, localRepo, elementList, recursive=True, override=False):
    """
    Method that calls the AccuRev pop (populate) command to fetch file(s)
    from a particular stream.

    stream    : name of the stream used with '-v' option, ex "DVT_System_7.0"
    localRepo : path to local directory where pop'ed files will be sent
    elementList: depot-relative path to files in accurev to fetch
        ex. \.\APM\System\dvt\Docs\hercules.ini (single file)
        ex. \.\APM\System\dvt\TestProtocols (entire directory, but only
            with recursive = True)
    recursive : sets '-R' so AccuRev recurses into subdirectories
    override  : sets '-O' to override existing files

    @rtype : C{list}
    @return returnList: List of the (full) pathnames in localRepo of the
        files fetched from AccuRev and AccuRev depot-relative pathname in a
        dictionary.  Returns an empty list if nothing was fetched

    Examples:
        returnList[2]['local_path'] = u'C:\\tmp\\APM\\...\\Some_Protocol.xml'
        returnList[2]['depot_path'] = u'\\.\\APM\\...\\Some_Protocol.xml'
    """
    returnList = []
    # Look up the configured command name/options for 'Populate' from the DB.
    _pop = AccuRevCommands.objects.get(name="Populate")
    popCmd = [self.exe, _pop.cmd_name, _pop.cmd_opts, ]
    if recursive:
        popCmd.append('-R')
    if override:
        popCmd.append('-O')
    # Set the stream name
    popCmd.append('-v')
    popCmd.append(str(stream))
    # Set the location on the local machine where the files will be placed
    # Do not error out if the path doesn't exist, only if it cannot be
    # created by the system
    if not os.path.isdir(localRepo):
        try:
            os.makedirs(localRepo)
        except:
            raise Exception("The local repository %s could not be created on the system."
                            % localRepo)
    # Create the dump site
    popCmd.append('-L')
    popCmd.append(str(localRepo))
    # Set the list of elements that are to be populated
    for elements in elementList:
        popCmd.append(elements.replace("\\", os.sep))
    # Ready to fetch files, if the logout idle time has passed, re-login
    if (self.idleTime + self.lastAccessTime < datetime.datetime.now()):
        self._login('pop')
    # Fetch the files using the populate command
    logging.info("Command: %s" % " ".join(popCmd))
    popProc = subprocess.Popen(popCmd, env=self.env, stdout=self.stdout,
                               stderr=self.stderr, universal_newlines=True)
    # Wait for the populate command to finish, error out if needed
    out, err = popProc.communicate()
    if err != '':
        raise Exception('An error has occurred while attempting to populate from the VCS. Error: %s' % err)
    # Populate command was successful, retrieve the XML response
    popLog = xml2obj.xml2obj(out)
    # AccuRev may also report an error inside the XML message element.
    if hasattr(popLog.message, 'error'):
        raise Exception('An error has occurred while attempted to populate from the VCS. Error: %s' % popLog.message.data)
    # If no elements have been popped by AccuRev, return an
    # empty list
    if popLog.element is None:
        return []
    # Else fetch the file locations (including localRepo) and ensure that
    # the file exists in the local repository
    else:
        # popLog.element.location gives the files that were populated
        # popLog.message gives the depot-relative path name
        # NOTE(review): xml2obj returns a list when multiple elements exist
        # and a single object otherwise, hence the len()>1 branching below.
        if len(popLog.element) > 1:
            for i, entry in enumerate(popLog.element):
                localPath = localRepo + entry.location.replace('/', os.sep).replace('\\', os.sep)
                depotPath = popLog.message[i].split(' ')[2]
                returnList.append({'local_path': localPath, 'depot_path': depotPath})
        else:
            returnList.append({'local_path': localRepo + popLog.element.location.replace('/', os.sep).replace('\\', os.sep),
                               'depot_path': popLog.message.split(' ')[2]})
    # Sanity check: every reported file must actually exist locally.
    for item in returnList:
        if not os.path.isfile(item['local_path']):
            raise Exception(item)
    return returnList
def files(self, stream, path):
    """
    Calls the AccuRev files command to grab a list of all of the elements
    in a particular directory

    stream : name of the stream used with '-s' option, ex "DVT_System_7.0"
    element: depot-relative path to a directory in accurev to list

    @rtype : C{list}
    @return returnList: List of dictionaries with data sent by AccuRev
    """
    returnList = []
    # Look up the configured command name/options for 'Files' from the DB.
    _files = AccuRevCommands.objects.get(name="Files")
    filesCmd = [self.exe, _files.cmd_name, _files.cmd_opts, ]
    # Append the stream
    filesCmd.append('-s')
    filesCmd.append(stream)
    # Append the depot-relative full pathname of the element to populate
    for el in path:
        filesCmd.append(el)
    logging.info("Command: %s" % " ".join(filesCmd))
    # Ready to fetch files, if the logout idle time has passed, re-login
    if (self.idleTime + self.lastAccessTime < datetime.datetime.now()):
        self._login('files')
    # Grab a listing of the files using the files command
    filesProc = subprocess.Popen(filesCmd, env=self.env, stdout=self.stdout,
                                 stderr=self.stderr, universal_newlines=True)
    # Wait for the populate command to finish, error out if needed,
    # otherwise reset the last use time
    out, err = filesProc.communicate()
    if err != '':
        raise Exception('An error has occured while attempting to retrieve file information from the VCS. \nError: %s' % err)
    # Populate command was successful, retrieve the XML response
    filesLog = xml2obj.xml2obj(out)
    # If there are no elements that have been popped by AccuRev, return an
    # empty list
    if filesLog.element is None:
        return []
    # Otherwise, populate the dictionary with all the goodies
    else:
        for element in filesLog.element:
            # Elink Handling: when an element is a link, re-run the files
            # command against its target and report the target instead.
            if element.linkTargetLocation == None:
                ltl = None
            else:
                ltl = element.linkTargetLocation.replace('\\', r'/')
                # Fetch the real target
                filesCmd[-1] = element.linkTargetLocation
                # Grab a listing of the files using the files command
                filesProc = subprocess.Popen(filesCmd, env=self.env, stdout=self.stdout,
                                             stderr=self.stderr, universal_newlines=True)
                # Wait for the populate command to finish, error out if needed,
                # otherwise reset the last use time
                out, err = filesProc.communicate()
                if err != '':
                    raise Exception('An error has occured while attempting to retrieve file information from the VCS. Error: %s' % err)
                # Populate command was successful, retrieve the XML response
                filesLogLinkTarget = xml2obj.xml2obj(out)
                element = filesLogLinkTarget.element
            returnList.append({
                'depot_path': element.location,
                'file': element.location.split(os.sep)[-1],
                'real': element.Real.replace('\\', r'/'),
                'virtual': element.Virtual.replace('\\', r'/'),
                'elemType': element.elemType,
                'linkTargetLocation': ltl,
                'dir': element.dir,
                'eid': element.id,
                'modTime': datetime.datetime.fromtimestamp(int(element.modTime)),
                'stream_version': element.namedVersion.replace('\\', r'/'),
                'status': element.status.strip('()')
            })
    return returnList
def print_usage_and_exit():
    """Print the command-line usage string and exit with status 1."""
    print "usage: {0} <UNPACKED FONT>".format(sys.argv[0])
    sys.exit(1)

# NOTE(review): this chunk appears truncated -- the final struct.pack call
# below is cut off mid-argument-list.  Compare with the complete variant of
# this same script earlier in the file.

if len(sys.argv) != 2:
    print_usage_and_exit()
fontfile = sys.argv[1]
if not os.path.exists(fontfile):
    print_usage_and_exit()

glyphs = []
# Read the XML font definition; one tuple per glyph:
# (symbol, offset_x, offset_y, width, height, tx, ty, tx2, ty2, x_advance)
with file(fontfile + ".fontdef.xml", 'r') as f:
    xmlobj = xml2obj(f.read())
    font_y_advance = int(xmlobj.height)
    for g in xmlobj.glyph:
        glyphs.append((unicode(g.symbol), int(g.offset_x), int(g.offset_y),
                       int(g.width), int(g.height), float(g.tx), float(g.ty),
                       float(g.tx2), float(g.ty2), int(g.x_advance)))

# Write the binary font: "MFNT" magic, glyph count, y-advance, then one
# packed record per glyph.
with file(fontfile[:-4] + ".font", 'w') as f:
    f.write("MFNT")
    f.write(struct.pack("<I", len(glyphs)))
    f.write(struct.pack("<I", font_y_advance))
    for g in glyphs:
        f.write(
            struct.pack("<iiIIffffI", g[1], g[2], g[3], g[4], g[5], g[6], g[7],
def getDocFromUrl(self, url, headers=headers_global):
    """GET *url*, parse the XML body, and return the first parsed element."""
    response = self.session.get(url, headers=headers)
    parsed = xml2obj.xml2obj(response.content)
    return list(parsed)[0]
def get_bucket_access(self, namespace, bucket):
    """Fetch and parse the file-access listing for *bucket* in *namespace*,
    returning the 'fileaccess_response' section of the parsed XML."""
    get_file_list = self.fileaccess_ops["getFileList"]
    listing = get_file_list(namespace, bucket, self.key, self.secret)
    body = listing.text.strip().encode('ascii', 'ignore')
    return x2o.xml2obj(body)['fileaccess_response']
def parse(self, protocolStr):
    """
    Method that does the parsing of a Hercules-generated protocol

    @type protocolStr: C{str}
    @param protocolStr: A string containing the contents of a Hercules-
                        generated test protocol file.

    @rtype : dict
    @return protocolDict: dictionary containing the protocol meta, test
                          and case information

    @raise : ProtocolParseError on any parsing failure or when the
             protocol contains no tests.
    """
    # Returnable protocol dictionary, all info will be returned with this
    protocolDict = {}
    # Parse the file and build the python XML mapper object
    try:
        protocol = xml2obj.xml2obj(protocolStr)
    except:
        raise ProtocolParseError('An unknown parsing error has occurred.'+\
                                 '\nEnsure that the XML contents are '+\
                                 'supplied and not the file path.')
    # Check the XML schema (Hercules version) to ensure that it is supported
    protocolDict['herculesVersion'] = self._confirmHerculesVersion(protocol.AppInfo)
    # Header Information
    header = HeaderSection(protocolDict['herculesVersion'], protocol.Header)
    protocolDict['name'] = header.getName()
    protocolDict['authorNum'] = header.getAuthorNumber()
    protocolDict['modTime'] = header.getModTime()
    protocolDict['version'] = header.getVersion()
    protocolDict['archive'] = header.getAccuRevInfo()
    # Body Section
    protocolDict['variability'] = protocol.Body.Variability
    # Test and Case Information, tests will be a list of dictionaries
    # Access the 3rd test, for example, via protocolDict['tests'][2]
    testList = []
    protocolDict['tests'] = testList
    # TestTable Section
    testTable = protocol.Body.TestTable
    if not testTable:
        raise ProtocolParseError("No tests exist for this protocol.")
    # Loop through the protocol tests placing the results in a dictionary in
    # a list for each test
    for i, t in enumerate(testTable.Test):
        testDict = {}
        testDict['number'] = t.Index
        testDict['name'] = self.fixName(t.Name).lstrip('_')
        # Test case name per Hercules conventions
        testDict['status'] = t.Status
        testDict['traceability'] = self._formatReqs(t.Traceability)
        testDict['variability'] = t.Variability
        # Create the list of cases for this test, cases will be a list of
        # dictionaries just like a test. To get a case attribute, for example
        # the case name, do the following:
        # >>> protocolDict['tests'][0]['cases'][1]['name']
        # CaseTable Section
        caseTable = t.CaseTable
        caseList = []
        testDict['cases'] = caseList
        # Loop through the cases in this test and place the info in a dictionary
        for c in caseTable.Case:
            caseDict = {}
            caseDict['number'] = c.Index
            # Case names may come through as either a plain attribute or a
            # wrapped node with a .data member; try both.
            try:
                caseDict['name'] = self.fixName(c.Name).lstrip('_')
            except:
                caseDict['name'] = self.fixName(c.Name.data).lstrip('_')
            # Parse case name per Hercules conventions
            caseDict['status'] = c.Status
            caseDict['traceability'] = self._formatReqs(c.Traceability)
            caseDict['variability'] = c.Variability
            caseList.append(caseDict)
        # Populate the test with the test and case informations for this test
        testList.append(testDict)
    # Return the protocol dictionary
    return protocolDict