def _get_article_categories(tree): """ Given an ElementTree, return (some) article categories. """ categories = [] article_categories = ElementTree(tree).find('.//*article-categories') for subject_group in article_categories.iter('subj-group'): try: if subject_group.attrib['subj-group-type'] == 'heading': continue except KeyError: # no attribute “subj-group-type” pass for subject in subject_group.iter('subject'): if subject.text is None: continue if '/' in subject.text: category_text = subject.text.split('/')[-1] else: category_text = subject.text if ' ' in category_text and not 'and' in category_text and \ category_text not in categories: categories.append(category_text) keywords = [] article_keywords = ElementTree(tree).find('.//*kwd-group') if article_keywords != None: for keyword in article_keywords.iter('kwd'): if keyword.text is None: continue keywords.append(keyword.text) return categories+keywords
def _get_article_categories(tree): """ Given an ElementTree, return (some) article categories. """ categories = [] article_categories = ElementTree(tree).find('.//*article-categories') for subject_group in article_categories.iter('subj-group'): try: if subject_group.attrib['subj-group-type'] == 'heading': continue except KeyError: # no attribute “subj-group-type” pass for subject in subject_group.iter('subject'): if subject.text is None: continue if '/' in subject.text: category_text = subject.text.split('/')[-1] else: category_text = subject.text if ' ' in category_text and not 'and' in category_text and \ category_text not in categories: categories.append(category_text) keywords = [] article_keywords = ElementTree(tree).find('.//*kwd-group') if article_keywords != None: for keyword in article_keywords.iter('kwd'): if keyword.text is None: continue keywords.append(keyword.text) return categories + keywords
def detail(nomor):
    """Look up hymn number *nomor* in apps/data/KidungJemaat.xml.

    :param nomor: hymn number (anything convertible with str())
    :returns: (text, judul) — the TEXT and HEADLINE of the matching DOCNO,
        or ([], []) when the number is not found (original behaviour).
    """
    tree = ElementTree()
    tree.parse("apps/data/KidungJemaat.xml")
    # the three node lists are parallel: entry i of each describes document i
    all_doc_no = [node.text for node in tree.iter("DOCNO")]
    all_headline = [node.text for node in tree.iter("HEADLINE")]
    all_text = [node.text for node in tree.iter("TEXT")]
    text = []
    judul = []
    # renamed from `id`, which shadowed the builtin
    doc_id = str(nomor)
    for i in range(len(all_text)):
        if all_doc_no[i] == doc_id:
            text = all_text[i]
            judul = all_headline[i]
            break  # DOCNO values are assumed unique — TODO confirm
    return text, judul
def find_PubMed_articles_with_supplementary_materials(filename):
    """ This function finds articles having supplementary materials. """
    # *filename* is a tar archive of PubMed Central articles; every .nxml
    # member is parsed as JATS XML and matching download URLs are written to
    # stderr (one per PMC article id).
    with tarfile.open(filename) as archive:
        for item in archive:
            if os.path.splitext(item.name)[1] == '.nxml':
                content = archive.extractfile(item)
                tree = ElementTree()
                tree.parse(content)
                for xref in tree.iter('xref'):
                    try:
                        if xref.attrib['ref-type'] == 'supplementary-material':
                            # resolve the cross-reference to its target element
                            rid = xref.attrib['rid']
                            for sup in tree.iter('supplementary-material'):
                                if sup.attrib['id'] == rid:
                                    media = ElementTree(sup).find('media')
                                    if media.attrib[
                                            'mimetype']:  # in ('audio', 'video'):
                                        href = media.attrib[
                                            '{http://www.w3.org/1999/xlink}href']
                                        for aid in tree.iter('article-id'):
                                            if aid.attrib[
                                                    'pub-id-type'] == 'pmc':
                                                PMCID = aid.text
                                                sys.stderr.write(
                                                    PubMed_absolute_URL(PMCID, href) + '\n')
                                                sys.stderr.flush()
                    except KeyError:
                        # element lacks one of the attributes above — skip it
                        pass
                    except AttributeError:
                        # <supplementary-material> without a <media> child
                        pass
def find_PubMed_articles_with_supplementary_materials(filename):
    """ This function finds articles having supplementary materials. """
    # Scans a PubMed Central tar archive: each .nxml member is JATS XML; for
    # every <xref ref-type="supplementary-material"> whose target has a
    # <media> child, a download URL is written to stderr per PMC article id.
    with tarfile.open(filename) as archive:
        for item in archive:
            if os.path.splitext(item.name)[1] == '.nxml':
                content = archive.extractfile(item)
                tree = ElementTree()
                tree.parse(content)
                for xref in tree.iter('xref'):
                    try:
                        if xref.attrib['ref-type'] == 'supplementary-material':
                            rid = xref.attrib['rid']
                            # find the element the cross-reference points at
                            for sup in tree.iter('supplementary-material'):
                                if sup.attrib['id'] == rid:
                                    media = ElementTree(sup).find('media')
                                    if media.attrib['mimetype']:  # in ('audio', 'video'):
                                        href = media.attrib['{http://www.w3.org/1999/xlink}href']
                                        for aid in tree.iter('article-id'):
                                            if aid.attrib['pub-id-type'] == 'pmc':
                                                PMCID = aid.text
                                                sys.stderr.write(
                                                    PubMed_absolute_URL(PMCID, href) + '\n'
                                                )
                                                sys.stderr.flush()
                    except KeyError:
                        # a required attribute is missing — skip this xref
                        pass
                    except AttributeError:
                        # no <media> child on the supplementary-material
                        pass
def get_mirrors(self, mirror_template=None): """ Provide a set of mirrors where you can get the distribution from """ # the main server is stored in the template self.main_server = self.source_template.base_uri # other used servers for medium in self.used_media: if not medium.startswith("cdrom:"): # seems to be a network source self.used_servers.append(medium) if len(self.main_sources) == 0: self.default_server = self.main_server else: self.default_server = self.main_sources[0].uri # get a list of country codes and real names self.countries = {} fname = "/usr/share/xml/iso-codes/iso_3166.xml" if os.path.exists(fname): et = ElementTree(file=fname) # python2.6 compat, the next two lines can get removed # once we do not use py2.6 anymore if getattr(et, "iter", None) is None: et.iter = et.getiterator it = et.iter('iso_3166_entry') for elm in it: try: descr = elm.attrib["common_name"] except KeyError: descr = elm.attrib["name"] try: code = elm.attrib["alpha_2_code"] except KeyError: code = elm.attrib["alpha_3_code"] self.countries[code.lower()] = gettext.dgettext('iso_3166', descr) # try to guess the nearest mirror from the locale self.country = None self.country_code = None locale = os.getenv("LANG", default="en_UK") a = locale.find("_") z = locale.find(".") if z == -1: z = len(locale) country_code = locale[a + 1:z].lower() if mirror_template: self.nearest_server = mirror_template % country_code if country_code in self.countries: self.country = self.countries[country_code] self.country_code = country_code
def GetDetails(ids,db):
    """Look up each Amazon item ID and collect title/price/shipping details.

    NOTE: legacy Python 2 code (print statements).
    :param ids: iterable of Amazon item IDs (ASINs)
    :param db: unused in this function — kept for the caller's signature
    :returns: list of per-item dicts, sorted ascending by 'Price'
    """
    responses=[]
    start = time()
    for ID in ids:
        #print ID
        # throttle to at most one Amazon API request per second
        request_time = time()-start
        if request_time<1:
            sleep(1-request_time)
            #print "sleeping "+str(1-request_time)
        tree = ElementTree()
        tree.parse(StringIO(amazon.ItemLookup(ItemId=ID,ResponseGroup="OfferFull,ItemAttributes",Condition="New")))
        # root tag is "{namespace}ItemLookupResponse"; keep the "{...}" prefix
        namespace = tree.getroot().tag[:tree.getroot().tag.find("}")+1]
        start=time()
        if len([t for t in tree.iter(namespace+"Errors")])>0:
            print "item missing or other error"
            # NOTE(review): `pass` does not skip this item — control falls
            # through to the offer check below; a `continue` may be intended.
            pass
        if [t for t in tree.iter(namespace+'TotalOffers')][0].text=='0':
            print "no offers for item "+ID
            pass
        else:
            # offers_url = [t for t in tree.iter(namespace+'MoreOffersUrl')][0].text
            url = [t for t in tree.iter(namespace+'DetailPageURL')][0].text
            title = [t for t in tree.iter(namespace+'Title')][0].text
            if len([t for t in tree.iter(namespace+'Merchant')]) > 0:
                merchant = list([t for t in tree.iter(namespace+'Merchant')][0])[0].text
            else:
                merchant = ''
            # NOTE(review): offers_url is never assigned (the line above is
            # commented out), so soupThePrices() raises NameError and the
            # fallback path below is what actually runs.
            try:
                price, shipping_price = soupThePrices(offers_url)
            except Exception as e:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                y=traceback.format_exception(exc_type, exc_value, exc_traceback)
                # fall back to the listed price (reported in cents)
                price = float(list([t for t in tree.iter(namespace+"Price")][0])[0].text)/100.0
                if merchant == "Amazon.com":
                    shipping_price = 0
                else:
                    shipping_price = '???'
            if merchant == "Amazon.com":
                prime = True
            else:
                prime = False
            data = {'Title':title,'Price':price,'URL':url,'ID':ID,'Shipping':shipping_price,'Prime': prime, 'SS': getSS(url)}
            responses.append(data)
    responses.sort(key=lambda student: student['Price'])
    return responses
def append_ir_info(file: str,
                   meta_info: "dict | None" = None,
                   mean_data: "list | None" = None,
                   input_names: "list | None" = None):
    """Append meta information (and an optional mean-image pre-process block)
    to an OpenVINO IR, rewriting the .xml pretty-printed.

    :param file: IR path without extension; "<file>.xml"/"<file>.bin" are used
    :param meta_info: metadata passed to add_meta_data (defaults to empty)
    :param mean_data: optional mean image; when given it is serialized into
        the .bin file and referenced from a pre-process block
    :param input_names: names of the network inputs the mean data applies to
    """
    path_to_xml = file + ".xml"
    path_to_bin = file + ".bin"
    # BUG FIX: the original default was the mutable `dict()`, shared across
    # calls; use None and create a fresh dict per call instead.
    if meta_info is None:
        meta_info = {}
    et = ElementTree()
    et.parse(path_to_xml)
    net = et.getroot()
    if mean_data:
        mean_offset, mean_size = serialize_mean_image(path_to_bin,
                                                      mean_data=mean_data)
        create_pre_process_block_for_image(net, input_names, mean_offset,
                                           mean_size)
    add_meta_data(net, meta_info)
    # strip whitespace-only text so toprettyxml does not double up blank lines
    for elem in et.iter():
        if elem.text:
            elem.text = elem.text.strip()
        if elem.tail:
            elem.tail = elem.tail.strip()
    pretty_xml_as_string = parseString(tostring(net)).toprettyxml()
    # renamed from `file`, which shadowed the parameter of the same name
    with open(path_to_xml, 'wb') as xml_file:
        xml_file.write(bytes(pretty_xml_as_string, "UTF-8"))
def test_image():
    """Visual sanity check: draw every annotated bounding box from
    output/annotations/ onto its image and display it scaled to the screen."""
    anno_dir = "output/annotations/"
    for anno_file in os.listdir(anno_dir):
        anno_tree = ElementTree()
        anno_tree.parse(os.path.join(anno_dir, anno_file))
        # np.fromfile + imdecode also handles paths cv.imread chokes on
        img = cv.imdecode(
            np.fromfile(os.path.join("output", anno_tree.findtext("path")),
                        dtype=np.uint8), -1)
        boxes = []
        for obj in anno_tree.iter(tag="object"):
            box_node = obj.find("bndbox")
            boxes.append([
                int(box_node.findtext("xmin")),
                int(box_node.findtext("ymin")),
                int(box_node.findtext("xmax")),
                int(box_node.findtext("ymax")),
                obj.findtext("name")
            ])
        # BUG FIX: the original `for box in box:` reused the list's own name
        # as the loop variable; a distinct list name avoids the shadowing.
        for box in boxes:
            cv.rectangle(img, (box[0], box[1]), (box[2], box[3]),
                         (0, 255, 0), 4)
            cv.putText(img, box[4], (box[0], box[1] - 5),
                       cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
        # scale to fit a 1080x720 window
        _s = min(1080.0 / img.shape[1], 720.0 / img.shape[0])
        resized = cv.resize(img, None, fx=_s, fy=_s,
                            interpolation=cv.INTER_AREA)
        cv.imshow("resized", resized)
        cv.waitKey()
        cv.destroyAllWindows()
class XmlReader(object):
    """Read a certain XML File and return a metadict"""

    def __init__(self, xmlpath):
        """
        Convert any valid data.xml to a metadict

        :xmlpath: path to data.xml
        """
        self.__xmlpath = xmlpath
        self.__tree = ElementTree()
        self.__tree.parse(xmlpath)

    def parse(self):
        """
        Do actual parsing

        :returns: a new metadict merging the <meta> and <commitTag> attributes
        """
        metadict = {}
        tagdict = {}
        for node in self.__tree.iter():
            tag = actual_name(node.tag)
            if tag == 'meta':
                metadict = makecleandict(node.attrib)
            elif tag == 'commitTag':
                tagdict = makecleandict(node.attrib)
        # commitTag attributes win over meta attributes on key clashes
        return dict(metadict, **tagdict)
def __init__(self):
    """Load the ISO 3166 country table and guess the user's country from $LANG."""
    # get a list of country codes and real names
    self.countries = {}
    fname = "/usr/share/xml/iso-codes/iso_3166.xml"
    if os.path.exists(fname):
        et = ElementTree(file=fname)
        for elm in et.iter('iso_3166_entry'):
            # prefer the shorter common name when present
            if "common_name" in elm.attrib:
                descr = elm.attrib["common_name"]
            else:
                descr = elm.attrib["name"]
            # prefer the two-letter code, fall back to the three-letter one
            if "alpha_2_code" in elm.attrib:
                code = elm.attrib["alpha_2_code"]
            else:
                code = elm.attrib["alpha_3_code"]
            self.countries[code] = gettext.dgettext('iso_3166',descr)
    self.country = None
    self.code = None
    # NOTE(review): the fallback "en.UK" looks like a typo for "en_UK" —
    # with no "_" present, find() returns -1 and the slice below starts at 0.
    # LANG normally looks like "de_DE.UTF-8": code sits between "_" and "."
    locale = os.getenv("LANG", default="en.UK")
    a = locale.find("_")
    z = locale.find(".")
    if z == -1:
        z = len(locale)
    self.code = locale[a+1:z]
    self.country = self.get_country_name(self.code)
def _thumb(args):
    """Fill in <thumb> URLs for known actors in XML files whose root element
    is <tvshow> or <movie>.

    Reads a JSON mapping of actor name -> thumb URL from args.cast, rewrites
    each matching *.xml file in place (optionally only files changed within
    the last args.day days), and prints every actor name with no mapping.
    """
    if args is None:
        args = ask_inputs(_arguments)
    with open(args.cast, mode="r", encoding="utf-8") as casts_file:
        casts = json.load(casts_file)
    undefined = set()  # actor names with no thumb mapping
    day_diff = args.day
    if day_diff < 0:
        day_diff = None  # negative -> no modification-time filter
    for file in find_files(".", "*.xml", day_diff):
        # cheap pre-filter on the first line before parsing the whole file
        with open(file, mode="r", encoding="utf-8") as xml:
            first_line = xml.readline()
            if "tvshow" not in first_line and "movie" not in first_line:
                continue
        tree = ElementTree(file=file)
        root = tree.getroot()  # type: Element
        if root.tag not in ["tvshow", "movie"]:
            continue
        # drop whitespace-only text so the rewritten file stays tidy
        for element in tree.iter():  # type: Element
            if element.text is not None and element.text.isspace():
                element.text = None
                element.tail = None
        for actor in tree.findall("actor"):  # type: Element
            name_element = actor.find("name")  # type: Element
            if name_element.text in casts:
                thumb_element = actor.find("thumb")  # type: Element
                if thumb_element is None:
                    thumb_element = SubElement(actor, "thumb")
                thumb_element.text = casts[name_element.text]
            else:
                undefined.add(name_element.text)
        tree.write(file, encoding="utf-8", short_empty_elements=False)
    for actor in undefined:
        print(actor)
def parse_entry_element(self, entry: ET.ElementTree) -> dict:
    """Converts the XML entry element into a python dictionary.

    Arguments:
    ----
    entry {ET.ElementTree} -- An entry element, that contains filing
        information.

    Returns:
    ----
    dict -- A dictionary version of the entry element: element text keyed by
        the (namespace-stripped, underscored) tag name, and attribute values
        keyed by "<tag>_<attribute>".
    """
    entry_element_dict = {}
    replace_tag = self.entries_namespace['atom_with_quote']
    # BUG FIX: the loop variable used to be `entry`, shadowing the parameter;
    # renamed to `child` (behaviour unchanged).
    for child in entry.findall("./", namespaces=self.entries_namespace):
        for element in child.iter():
            name = element.tag.replace(replace_tag, '')
            if element.text:
                name = name.replace('-', '_')
                entry_element_dict[name] = element.text.strip()
            if element.attrib:
                # NOTE(review): for elements with attributes but no text,
                # `name` still contains hyphens here — confirm intended.
                for key, value in element.attrib.items():
                    key = key.replace('-', '_')
                    entry_element_dict[name + "_{}".format(key)] = value
    return entry_element_dict
def _get_article_contrib_authors(tree): """ Given an ElementTree, returns article authors in a format suitable for citation. """ authors = [] front = ElementTree(tree).find('front') for contrib in front.iter('contrib'): if contrib.attrib['contrib-type'] != 'author': continue contribTree = ElementTree(contrib) try: surname = contribTree.find('name/surname').text except AttributeError: # author is not a natural person try: citation_name = contribTree.find('collab').text if citation_name is not None: authors.append(citation_name) continue except AttributeError: # name has no immediate text node continue try: given_names = contribTree.find('name/given-names').text citation_name = ' '.join([surname, given_names[0]]) except AttributeError: # no given names citation_name = surname except TypeError: # also no given names citation_name = surname if citation_name is not None: authors.append(citation_name) return ', '.join(authors)
def removeXmlElement(name, directory, file_pattern, logger=None):
    """Strip every <name> element (Salesforce metadata namespace) from each
    file under *directory* whose basename matches *file_pattern*."""
    ns = 'http://soap.sforce.com/2006/04/metadata'
    for dirpath, _, filenames in os.walk(os.path.abspath(directory)):
        for fname in fnmatch.filter(filenames, file_pattern):
            full_path = os.path.join(dirpath, fname)
            tree = ElementTree()
            tree.parse(full_path)
            matches = tree.getroot().findall('.//{%s}%s' % (ns, name))
            if not matches:
                continue
            if logger:
                logger.info(
                    'Modifying {} to remove <{}> elements'.format(
                        full_path,
                        name,
                    )
                )
            # ElementTree has no parent pointers; build a child->parent map
            parents = {child: parent for parent in tree.iter() for child in parent}
            for doomed in matches:
                parents[doomed].remove(doomed)
            tree.write(
                full_path,
                encoding="UTF-8",
                default_namespace=ns,
            )
class pybWidgets():
    # base class for all pyBuilder Widgets

    # static variables
    # maps tag names from the layout XML onto widget classes
    switch = {
        "lineedit": pybLineEdit,
        "checkbox": pybCheckBox,
        "list": pybList,
    }

    # instance methods
    def generateLayout(self, descriptor, verticalLayout):
        """Build the widgets described by the XML *descriptor* file and add
        each widget's layout to *verticalLayout*."""
        self.defaults = {}  # container of all 'default' descriptors
        self.wList = []     # contains all of the widgets
        self._layout = verticalLayout
        self._descriptor = descriptor
        self._layoutTree = ET(file=self._descriptor)
        for node in self._layoutTree.iter():
            if node.tag in pybWidgets.switch:
                widget = pybWidgets.switch[node.tag](node)
                verticalLayout.addLayout(widget.getLayout())
                self.wList.append(widget)
            elif node.tag == "default":
                # default value handler: each child supplies one default
                for child in node:
                    self.defaults[child.tag] = child.text

    def __init__(self):
        pass
def _get_pmid(tree): """ Given an ElementTree, returns PubMed Central ID. """ front = ElementTree(tree).find('front') for article_id in front.iter('article-id'): if article_id.attrib['pub-id-type'] == 'pmid': return article_id.text
def scale_size(tree: ElementTree, body_name: str, scale: float) -> str:
    """Scale the MuJoCo body *body_name* in a copy of *tree*.

    Work in progress: only the torso branch dispatches to update_torso, and
    the function unconditionally raises NotImplementedError at the end.
    """
    tree = copy.deepcopy(tree)
    body: Element = tree.find(f".//body[@name='{body_name}']")
    # child -> parent lookup (ElementTree has no parent pointers)
    parents: Dict[Element, Element] = {
        child: parent for parent in tree.iter() for child in parent
    }
    if body_name == "torso":
        update_torso(tree, torso_body=body, size_scaling_factor=scale)
    raise NotImplementedError(f"WIP")
def parse_tree(self, tree: ElementTree.ElementTree) -> List[Recipe]:
    """Parse every <recipe> element found anywhere in *tree* (tag matching
    is case-insensitive) and return the resulting Recipe objects."""
    return [
        self.parse_recipe(node)
        for node in tree.iter()
        if to_lower(node.tag) == "recipe"
    ]
def get_file_actual_date(tree: ET.ElementTree) -> Optional[datetime.date]:
    """Return the file's validity date taken from the first <ДатаФайл>
    element, or None when no such element exists."""
    for node in tree.iter():
        if node.tag != "ДатаФайл":
            continue
        # text is expected in DD.MM.YYYY format
        actual_date = _validate_period_string(node.text, '%d.%m.%Y')
        logger.info("Актуальная дата файла {0}".format(actual_date))
        return actual_date
    return None
def __init__(self, filename, mesh=None, deepcopy=False):
    """
    Initialize a the reader with a pvd or a vtu filename

    :param filename: path to a .pvd collection file or a single .vtu file
    :param mesh: optional Mesh the data is defined on
    :param deepcopy: whether returned data should be deep-copied
    NOTE: legacy Python 2 code (``raise TypeError, msg`` syntax below).
    """
    if not os.path.isfile(filename):
        # NOTE(review): "excist" is a typo, but it is a runtime message —
        # left unchanged here.
        raise IOError("File '%s' does not excist"%filename)
    filetype = filename.split(".")[-1]
    self._name = ".".join(filename.split(".")[0:-1])
    if filetype not in ["pvd", "vtu"]:
        raise TypeError("Expected a 'pvd' or a 'vtu' file")
    # Get dirname
    dirname = os.path.dirname(filename)
    # Check mesh argument
    if mesh is not None and not isinstance(mesh, Mesh):
        raise TypeError, "Expected a 'Mesh' for the mesh arguments"
    # Store deepcopy argument
    self._deepcopy = deepcopy
    # Store mesh
    self._mesh = mesh
    # Initialize the filename cache
    self._filenames = []
    if filetype == "vtu":
        # a single vtu file carries no time information
        self._filenames.append(filename)
        self._times = np.array([])
    else:
        # Parse pvd file: collect the referenced vtu files and timesteps
        tree = ElementTree(file=filename)
        times = []
        for item in tree.iter():
            if item.tag == "DataSet":
                self._filenames.append(os.path.join(\
                    dirname,item.attrib["file"]))
                times.append(float(item.attrib["timestep"]))
        times = np.array(times, dtype='d')
        # If there are no time data stored in the file use an empty array
        # (a uniform step of exactly 1 is taken to mean "just frame indices")
        if np.all(np.diff(times)==1):
            times = np.array([])
        # Store time data
        self._times = times
    # Construct file reader
    self.reader = vtk.vtkXMLUnstructuredGridReader()
    # Read in data from file
    self._update_vtk_data()
    # Init dolfin structures (Function, FunctionSpace)
    self._init_dolfin_data()
def __init__(self, filename, mesh=None, deepcopy=False):
    """
    Initialize a the reader with a pvd or a vtu filename

    :param filename: path to a .pvd collection file or a single .vtu file
    :param mesh: optional Mesh the data is defined on
    :param deepcopy: whether returned data should be deep-copied
    NOTE: legacy Python 2 code (``raise TypeError, msg`` syntax below).
    """
    if not os.path.isfile(filename):
        # NOTE(review): "excist" is a typo, but it is a runtime message —
        # left unchanged here.
        raise IOError("File '%s' does not excist" % filename)
    filetype = filename.split(".")[-1]
    self._name = ".".join(filename.split(".")[0:-1])
    if filetype not in ["pvd", "vtu"]:
        raise TypeError("Expected a 'pvd' or a 'vtu' file")
    # Get dirname
    dirname = os.path.dirname(filename)
    # Check mesh argument
    if mesh is not None and not isinstance(mesh, Mesh):
        raise TypeError, "Expected a 'Mesh' for the mesh arguments"
    # Store deepcopy argument
    self._deepcopy = deepcopy
    # Store mesh
    self._mesh = mesh
    # Initialize the filename cache
    self._filenames = []
    if filetype == "vtu":
        # a single vtu file carries no time information
        self._filenames.append(filename)
        self._times = np.array([])
    else:
        # Parse pvd file: collect the referenced vtu files and timesteps
        tree = ElementTree(file=filename)
        times = []
        for item in tree.iter():
            if item.tag == "DataSet":
                self._filenames.append(os.path.join(\
                    dirname,item.attrib["file"]))
                times.append(float(item.attrib["timestep"]))
        times = np.array(times, dtype='d')
        # If there are no time data stored in the file use an empty array
        # (a uniform step of exactly 1 is taken to mean "just frame indices")
        if np.all(np.diff(times) == 1):
            times = np.array([])
        # Store time data
        self._times = times
    # Construct file reader
    self.reader = vtk.vtkXMLUnstructuredGridReader()
    # Read in data from file
    self._update_vtk_data()
    # Init dolfin structures (Function, FunctionSpace)
    self._init_dolfin_data()
def get_payers_from_xml(tree: ET.ElementTree, list_with_lines_of_payers: List[int]) -> Dict:
    """Collect payer elements keyed by their source line numbers.

    The k-th <Плательщик> element in document order is stored under
    ``list_with_lines_of_payers[k] + 1``.
    """
    payers: Dict[int, Element] = {}
    index = 0
    for node in tree.iter():
        if node.tag == 'Плательщик':
            payers[list_with_lines_of_payers[index] + 1] = node
            index += 1
    return payers
def _get_article_doi(tree): """ Given an ElementTree, returns DOI. """ front = ElementTree(tree).find('front') for article_id in front.iter('article-id'): try: if article_id.attrib['pub-id-type'] == 'doi': return article_id.text except KeyError: pass
def extract_text(hocr_filename):
    """Extract per-word text boxes from an hOCR file.

    :param hocr_filename: path of the hOCR (XML/XHTML) file to parse
    :returns: list of dicts with keys x, y, width, height, rotation, text and
        direction — one per ocrx_word element with non-empty text and a
        parsable bbox.
    """
    bbox_regex = re.compile(r"bbox((\s+\d+){4})")
    textangle_regex = re.compile(r"textangle(\s+\d+)")
    hocr = ElementTree()
    hocr.parse(hocr_filename)
    texts = []
    for line in hocr.iter():
        if line.attrib.get("class") != "ocr_line":
            continue
        # a missing/garbled textangle in the title falls back to 0 degrees
        try:
            textangle = textangle_regex.search(line.attrib["title"]).group(1)
        except Exception:
            logging.info("Can't extract textangle from ocr_line: %s"
                         % line.attrib.get("title"))
            logging.debug("Exception occurred:\n%s" % traceback.format_exc())
            textangle = 0
        textangle = int(textangle)
        for word in line.iter():
            if word.attrib.get("class") != "ocrx_word":
                continue
            text = ""
            # Sometimes word has children like "<strong>text</strong>"
            for e in word.iter():
                if e.text:
                    text += e.text
            text = text.strip()
            if not text:
                logging.info("ocrx_word with empty text found")
                continue
            # words without a parsable bbox are skipped entirely
            try:
                box = bbox_regex.search(word.attrib["title"]).group(1).split()
            except Exception:
                logging.info("Can't extract bbox from ocrx_word: %s"
                             % word.attrib.get("title"))
                logging.debug("Exception occurred:\n%s" % traceback.format_exc())
                continue
            box = [int(i) for i in box]
            # unknown directions degrade to left-to-right
            textdirection = word.get("dir", "ltr")
            if textdirection not in ("ltr", "rtl", "ttb"):
                logging.info("ocrx_word with unknown textdirection found: %s"
                             % textdirection)
                textdirection = "ltr"
            texts.append({
                "x": box[0],
                "y": box[1],
                "width": box[2] - box[0],
                "height": box[3] - box[1],
                "rotation": textangle,
                "text": text,
                "direction": textdirection
            })
    return texts
def _get_journal_title(tree): """ Given an ElementTree, returns journal title. """ front = ElementTree(tree).find('front') for journal_meta in front.iter('journal-meta'): for journal_title in journal_meta.iter('journal-title'): title = journal_title.text # take only the part before the colon, strip whitespace title = title.split(':')[0].strip() title = title.replace('PLoS', 'PLOS').replace('PloS', 'PLOS') return title
def readExecCountFromMapping(mapfile):
    """Parse a Diablo mapping XML file and accumulate the guessed training
    execution count per original function.

    :param mapfile: path of the mapping XML file (one <ins> per instruction)
    :returns: dict mapping original function start address -> summed count
    """
    tree = ElementTree()
    tree.parse(mapfile)
    # renamed from `map`, which shadowed the builtin
    exec_counts = {}
    for ins in tree.iter("ins"):
        orig_function = int(ins.attrib['orig_function_first'], base=16)
        execcnt = int(ins.attrib['guessed_train_exec_count'])
        # address 0 marks instructions with no original function
        if orig_function != 0:
            utils.add_or_initialize_to(exec_counts, orig_function, execcnt)
    return exec_counts
def extract_text(hocr_filename):
    """Extract per-word text boxes from an hOCR file.

    :param hocr_filename: path of the hOCR (XML/XHTML) file to parse
    :returns: list of dicts with keys x, y, width, height, rotation, text and
        direction — one per ocrx_word with non-empty text and a parsable bbox.
    """
    # BUG FIX: the patterns were plain strings, so "\s"/"\d" were invalid
    # escape sequences (a DeprecationWarning today, a SyntaxError eventually);
    # raw strings make the regexes explicit.
    bbox_regex = re.compile(r"bbox((\s+\d+){4})")
    textangle_regex = re.compile(r"textangle(\s+\d+)")
    hocr = ElementTree()
    hocr.parse(hocr_filename)
    texts = []
    for line in hocr.iter():
        if line.attrib.get("class") != "ocr_line":
            continue
        # a missing/garbled textangle in the title falls back to 0 degrees
        try:
            textangle = textangle_regex.search(line.attrib["title"]).group(1)
        except Exception:
            logging.info("Can't extract textangle from ocr_line: %s"
                         % line.attrib.get("title"))
            logging.debug("Exception occurred:\n%s" % traceback.format_exc())
            textangle = 0
        textangle = int(textangle)
        for word in line.iter():
            if word.attrib.get("class") != "ocrx_word":
                continue
            text = ""
            # Sometimes word has children like "<strong>text</strong>"
            for e in word.iter():
                if e.text:
                    text += e.text
            text = text.strip()
            if not text:
                logging.info("ocrx_word with empty text found")
                continue
            # words without a parsable bbox are skipped entirely
            try:
                box = bbox_regex.search(word.attrib["title"]).group(1).split()
            except Exception:
                logging.info("Can't extract bbox from ocrx_word: %s"
                             % word.attrib.get("title"))
                logging.debug(
                    "Exception occurred:\n%s" % traceback.format_exc())
                continue
            box = [int(i) for i in box]
            # unknown directions degrade to left-to-right
            textdirection = word.get("dir", "ltr")
            if textdirection not in ("ltr", "rtl", "ttb"):
                logging.info("ocrx_word with unknown textdirection found: %s"
                             % textdirection)
                textdirection = "ltr"
            texts.append({
                "x": box[0],
                "y": box[1],
                "width": box[2] - box[0],
                "height": box[3] - box[1],
                "rotation": textangle,
                "text": text,
                "direction": textdirection
            })
    return texts
def __walk_xml_regs(
        xmltree: ElementTree.ElementTree
) -> Iterator[Tuple[int, str, int]]:
    """Yield (regnum, name, bitsize) for every <reg> element in *xmltree*."""
    last_regnum = -1
    for reg in xmltree.iter('reg'):
        # an absent regnum attribute means "previous register's number + 1"
        last_regnum = int(reg.get('regnum', last_regnum + 1))
        yield last_regnum, reg.attrib['name'], int(reg.attrib['bitsize'])
def transformed_to_orig_map(mapfile):
    """Build an address map from a Diablo mapping file.

    TODO(review): as the original TODO notes, the direction may be the wrong
    way around — keys are the original addresses, values the transformed
    instruction addresses. Multiple source addresses are not handled yet.
    """
    tree = ElementTree()
    tree.parse(mapfile)
    mapping = {}
    for ins in tree.iter("ins"):
        origaddr = int(ins.attrib['address'], base=16)
        # address 0 marks instructions with no original counterpart
        if origaddr != 0:
            mapping[origaddr] = int(ins.text, base=16)
    return mapping
def _read_modules(self, xml: ElementTree) -> None:
    """Reads all modules from the DeCoP system model.

    Args:
        xml (ElementTree): The DeCoP system model.
    """
    for element in xml.iter(tag='module'):
        module_name = element.get('name')
        module_node = Module(module_name)
        # modules expose their parameters at the NORMAL user level
        self._read_params(element, module_node, UserLevel.NORMAL)
        self._read_cmds(element, module_node)
        self.modules[module_name] = module_node
def dependencies(self):
    """Parse the DAG topology from self.xml_file and return a list of
    {'child': <ref>, 'parents': [<ref>, ...]} relations."""
    tree = ElementTree(file=self.xml_file)
    deps = []
    for child in tree.iter(tag=self.child_tag):
        parent_ids = [parent.get('ref')
                      for parent in child.findall(self.parent_tag)]
        deps.append({'child': child.get('ref'), 'parents': parent_ids})
    return deps
def orig_addr_to_diablo_fun(mapfile):
    """Parse a Diablo mapping XML file and map each original instruction
    address to the start address of its original function.

    :param mapfile: path of the mapping XML file (one <ins> per instruction)
    :returns: dict {original address -> orig_function_first address}
    """
    xml = ElementTree()
    xml.parse(mapfile)
    mapping = {}
    # iterate directly — no need to materialise the node list first
    for ins in xml.iter("ins"):
        orig_address = int(ins.text, base=16)
        # address 0 marks instructions with no original counterpart
        if orig_address != 0:
            mapping[orig_address] = int(ins.attrib['orig_function_first'],
                                        base=16)
    return mapping
def import_xml(xmlfile):
    """Parse a calibration XML file into a dict keyed by frequency
    (38 kHz and 120 kHz).

    Tags listed in XML_TO_EL_TRANSLATION are renamed and converted with their
    registered conversion function. Tags encountered outside a <frequency>
    block are buffered in temp_dict and attached to the next block. After
    parsing, the sa correction matching the configured pulse length is
    selected and a default offset of 0 is filled in.
    """
    global XML_TO_EL_TRANSLATION
    et = ElementTree()
    et.parse(xmlfile)
    calibration_dict = {38e3: {}, 120e3: {}}
    temp_dict = {}  # parameters seen before/between <frequency> blocks
    freq = None  # frequency currently being filled; None outside a block
    for tag in et.iter():
        name = tag.tag
        # NOTE(review): assumes every element has text — tag.text.strip()
        # raises AttributeError on empty elements; confirm the input format.
        value = tag.text.strip()
        if name == 'frequency':
            value = int(value)
            freq_dict = calibration_dict[value]
            freq_dict['frequency'] = value
            freq = value
            # attach any parameters buffered before this block
            freq_dict.update(temp_dict)
            temp_dict = {}
        elif name in list(XML_TO_EL_TRANSLATION.keys()):
            translated_name, conv_func = XML_TO_EL_TRANSLATION[name]
            value = conv_func(value)
            if freq is None:
                temp_dict[translated_name] = value
            else:
                if translated_name in list(freq_dict.keys()):
                    # a duplicate key means a new block started without a
                    # <frequency> tag yet: close this one, start buffering
                    freq = None
                    temp_dict = {translated_name: value}
                else:
                    freq_dict[translated_name] = value
    for freq in [38e3, 120e3]:
        # pick the sa correction matching the configured pulse length
        indx = calibration_dict[freq]['pulse_length_table'].index(calibration_dict[freq]['pulse_length'])
        calibration_dict[freq]['sa_correction'] = float(calibration_dict[freq]['sa_correction_table'][indx])
        del calibration_dict[freq]['sa_correction_table']
        if 'offset' not in list(calibration_dict[freq].keys()):
            calibration_dict[freq]['offset'] = 0
    return calibration_dict
def _read_xtypedefs(self, xml: ElementTree) -> None:
    """Reads all typedefs from the DeCoP system model.

    Args:
        xml (ElementTree): The DeCoP system model.
    """
    for element in xml.iter(tag='xtypedef'):
        typedef_name = element.get('name')
        # is_atomic arrives as a string; treat anything but "true" as False
        atomic = str(element.get('is_atomic')).lower() == 'true'
        typedef_node = Typedef(typedef_name, atomic)
        self._read_params(element, typedef_node, None)
        self._read_cmds(element, typedef_node)
        self.typedefs[typedef_name] = typedef_node
def orig_to_transformed_map(mapfile):
    """For every instruction in a Diablo mapping file, map each original
    address (the <ins> text, space-separated) to the transformed address
    (the 'address' attribute). One original address can map to several
    transformed addresses, hence the list values."""
    tree = ElementTree()
    tree.parse(mapfile)
    mapping = {}
    for ins in tree.iter("ins"):
        transformed = int(ins.attrib['address'], base=16)
        for token in ins.text.split(" "):
            if token:
                utils.add_to_map_of_list(mapping, int(token, base=16),
                                         transformed)
    return mapping
def read_session_file(path, simulation_state):
    """Reads the session file to get a list of simulations.

    Only <simulation> elements whose in_batch attribute equals
    *simulation_state* are returned (the text of their first child).
    The parsed root is also stashed in the module-global session_element.
    """
    global session_element
    root = ET().parse(path)
    session_element = root
    matching = []
    for sim in root.iter('simulation'):
        if int(sim.attrib['in_batch']) == simulation_state:
            matching.append(list(sim)[0].text)
    return matching
def graphml_generate(self, node_list, edge_list, out_name, debug_flag=False): # node & edge tuple list sample #[('n1', 'v_node_a'), ('n2', 'v_node_b')] #[('e0', 'NONE', 'n0', 'n1'), ('e1', 'e_home', 'n2', 'n1')] ns_list = {'gns': '{http://graphml.graphdrawing.org/xmlns}', 'yns': '{http://www.yworks.com/xml/graphml}'} elm_root = ET.Element('graphml', xmlns=ns_list['gns'].strip('{}')) elm_root.set('xmlns:y', ns_list['yns'].strip('{}')) elm_share_key = ET.SubElement(elm_root, 'key', id="d6") elm_share_key.set('yfiles.type', "nodegraphics") elm_share_key = ET.SubElement(elm_root, 'key', id="d10") elm_share_key.set('for', "edge") elm_share_key.set('yfiles.type', "edgegraphics") elm_graph = ET.SubElement(elm_root, 'graph', edgedefault="directed", id="G") for n in node_list: elm_node_tmp = ET.SubElement(elm_graph, 'node', id=n[0]) elm_node_data = ET.SubElement(elm_node_tmp, 'data', key="d6") elm_node_data_SN = ET.SubElement(elm_node_data, 'y:ShapeNode') elm_node_data_NL = ET.SubElement(elm_node_data_SN, 'y:NodeLabel') elm_node_data_NL.text = n[1] for n in edge_list: elm_edge_tmp = ET.SubElement(elm_graph, 'edge', id=n[0], source=n[2], target=n[3]) elm_edge_data = ET.SubElement(elm_edge_tmp, 'data', key="d10") elm_edge_data_PLE = ET.SubElement(elm_edge_data, 'y:PolyLineEdge') if n[1] is not "NONE": elm_edge_data_PLE_EL = ET.SubElement(elm_edge_data_PLE, 'y:EdgeLabel') elm_edge_data_PLE_EL.text = n[1] tree_root = ElementTree() tree_root._setroot(elm_root) tree_root.write(out_name, 'UTF-8', 'True') if debug_flag is True: print 'Node_List:' for n in node_list: print n print '\nEdge_List:' for n in edge_list: print n print '\nElementTree Dump:' ET.dump(elm_root) print '\nTree Dump:' for child in tree_root.iter(): print child.tag, child.attrib
class XMLFileReader(BaseReader):
    """BaseReader that yields the text of every element in an XML file."""

    def __init__(self, filename, yieldkv=True):
        BaseReader.__init__(self, filename, yieldkv)
        self.curelem = 0
        self.tree = ElementTree()
        self.tree.parse(filename)
        # count elements up front so progress() can report a percentage
        self.numelems = len(list(self.tree.iter()))

    def read(self):
        """Yield element text, optionally keyed by the source filename."""
        for node in self.tree.iter():
            self.curelem += 1
            if self.yieldkv:
                yield self.filename, node.text
            else:
                yield node.text
        self.complete = True

    def progress(self):
        """Percentage of elements consumed so far."""
        return float(float(self.curelem) / self.numelems) * 100

    def close(self):
        pass
def parseMedia(self, key):
    """Fetch a Plex library URL and wrap its contents.

    :param key: Plex path such as "/library/sections" or a section key
    :returns: (Info for the MediaContainer, list of wrapped child items)
    NOTE: legacy Python 2 code (urllib2).
    """
    # open library url
    # NOTE(review): `library` is assigned but never used below
    library = "http://" + config.server + ":32400" + key
    xml = urllib2.urlopen("http://" + config.server + ":32400" + key)
    # parse xml
    tree = ElementTree()
    tree.parse(xml)
    # store info from MediaContainer tag
    MediaContainer = tree.getroot()
    info = Info(MediaContainer)
    # store items from Directory/Video tags
    xmlItems = list(tree.iter())
    items = []
    for i in xmlItems:
        kind = i.get("type")
        if i.tag == "Directory":
            if key[:17] == "/library/sections":
                # Don't interpret sections as media items
                item = Directory(i, prefix=key)
                items.append(item)
            else:
                if kind == "season":
                    item = Season(i)
                    items.append(item)
                elif kind == "show":
                    item = Show(i)
                    items.append(item)
                elif kind == None:
                    # untyped directories are plain folders
                    item = Directory(i, prefix=key)
                    items.append(item)
        if i.tag == "Video":
            if kind == "episode":
                item = Episode(i)
                items.append(item)
            elif kind == "movie":
                item = Movie(i)
                items.append(item)
    if key == "/library/recentlyAdded":
        info.title = "Recently Added"
    return info, items
def load_simulation(sensor_xml_path):
    """Loads a simulation file and returns a dictionary mapping sensors to
    simulated variables.

    Each sensor location maps to its name, the colour list held by the
    sensor's first child, and a list of "<simulator name><variable>" strings
    taken from the child at index NUMBER_OF_BASE_PLUGINS (when present).
    """
    root = ET().parse(sensor_xml_path)
    sensor_map = {}
    for sensor in root.iter('sensor'):
        entry = {
            'name': sensor.attrib['name'],
            'colors': list(sensor)[0].text,
            'variables': [],
        }
        sensor_map[sensor.attrib['location']] = entry
        children = list(sensor)
        if len(children) > NUMBER_OF_BASE_PLUGINS:
            for simulator in children[NUMBER_OF_BASE_PLUGINS]:
                for variable in simulator:
                    entry['variables'].append(simulator.attrib['name'] + variable.text)
    return sensor_map
def main(self, accountXML):
    """Collect account credentials from *accountXML*.

    Each <account> element's name/password/protocol children become one dict;
    accounts without a <name> are dropped. The result is keyed "user1",
    "user2", ... and wrapped under this class's name.
    """
    tree = ElementTree()
    tree.parse(accountXML)
    values = {}
    count = 0
    for account_node in tree.iter('account'):
        entry = {}
        for field in list(account_node):
            if field.tag in ("name", "password", "protocol"):
                entry[field.tag] = field.text
        if "name" in entry:
            count += 1
            values["user%d" % count] = entry
    return {self.__class__.__name__:values}
def createOffsetFiles(file, directory_prefix='new_files'):
    """"
    Reads one xml file, searches all document-elements and writes them to
    pmid specific files.

    :param file: path of the XML file to read
    :param directory_prefix: subdirectory (under cwd) receiving the output
    """
    tree = ElementTree()
    tree.parse(file)
    # BUG FIX: Element.getchildren() was removed in Python 3.9; len(elem) and
    # direct iteration are the supported replacements.
    # get all the documents with child elements:
    documents = [document for document in tree.iter('document')
                 if len(document)]
    for document in documents:
        # renamed from `id`, which shadowed the builtin
        pmid = document.get('pmid')
        offset_list = []
        for offset in document:
            offset_string = offset.get('id') + '\t' + offset.get('charOffset') + '\n'
            offset_list.append(offset_string)
        directory_path = os.path.join(os.getcwd(), directory_prefix,
                                      getPath(pmid))
        file_path = os.path.join(directory_path, pmid + '.txt.soffsets')
        createDirectory(directory_path)
        # context manager guarantees the handle is closed even on errors
        with open(file_path, 'w') as output_file:
            output_file.writelines(offset_list)
def removeXmlElement(name,directory,file_pattern,logger=None):
    """Remove every <name> element (Salesforce metadata namespace) from all
    files under *directory* whose basename matches *file_pattern*."""
    xpath = './/{{http://soap.sforce.com/2006/04/metadata}}{}'.format(name)
    for path, dirs, files in os.walk(os.path.abspath(directory)):
        for filename in fnmatch.filter(files, file_pattern):
            filepath = os.path.join(path, filename)
            tree = ElementTree()
            tree.parse(filepath)
            doomed = tree.getroot().findall(xpath)
            if not doomed:
                continue
            if logger:
                logger.info('Modifying {} to remove <{}> elements'.format(filepath, name))
            # ElementTree has no parent pointers; build a child->parent map
            parent_of = {child: parent for parent in tree.iter() for child in parent}
            for element in doomed:
                parent_of[element].remove(element)
            tree.write(filepath, encoding="UTF-8", default_namespace='http://soap.sforce.com/2006/04/metadata')
    def get(self):
        """Fetch one substitution-plan page from the page worker, parse its
        HTML table into Substitution objects and merge them into self.data.

        Returns self.data on success, or None when the worker did not
        produce a Page instance.
        """
        page = self.pageWorker.get()
        if not isinstance(page, Page):
            _log.error('somethings wrong')
            return
        tree = ElementTree()
        tree.parse(StringIO(page.table))
        substitutions = self.data.substitutions
        # Record which teachers were listed on this day's page.
        self.data.teachers.append({
            page.date.isoformat(): page.teachers
        })
        self.data.datetime = datetime.datetime.now()
        current = None
        l = 0     # row index at which the current rowspan group started
        next = 0  # remaining rows covered by the current rowspan (NOTE: shadows the builtin)
        trs = list(tree.iter('tr'))
        for a in range(len(trs)):
            tds = list(trs[a].iter('td'))
            if next > 0:
                next -= 1
            for b in range(len(tds)):
                # A cell in column 0 while no rowspan is active starts a new
                # substitution group; its 'rowspan' attribute tells how many
                # following rows belong to it.
                if b == 0 and next == 0:
                    next = int(tds[b].attrib['rowspan'])
                    l = a
                    current = Substitution(tds[b].text.split(', '))
                    substitutions.append( current )
                    continue
                # Rows inside an active rowspan are missing the first cell,
                # so every column index is shifted left by one — hence the
                # paired "b == X and l == a or b == X-1 and l != a" tests.
                if b == 1 and l == a or b == 0 and l != a:
                    current.hour = int(tds[b].text)
                    current.date = page.date
                    continue
                if b == 2 and l == a or b == 1 and l != a:
                    current.teacher = tds[b].text
                if b == 3 and l == a or b == 2 and l != a:
                    current.parseLesson(tds[b].text)
                # here can be a change ->
                if b == 4 and l == a or b == 3 and l != a:
                    current.parseStatus(tds[b].text)
                if b == 5 and l == a or b == 4 and l != a:
                    current.parseRoom(tds[b].text)
                if b == 6 and l == a or b == 5 and l != a:
                    current.notice = tds[b].text
        return self.data
#! /usr/bin/env python
"""Print the run numbers contained in a GRL (Good Run List) XML file.

Usage: get_grl_runs.pt <grl-file>
"""
import sys
from xml.etree.ElementTree import ElementTree

if len(sys.argv) < 2:
    # print(...) with a single argument is valid in Python 2 and 3 alike;
    # the original Python 2 'print x' statements were syntax errors on 3.
    print('usage: get_grl_runs.pt [grl-file]')
    sys.exit()

tree = ElementTree()
tree.parse(sys.argv[1])

grl_runs = []
for metadata in tree.iter("Metadata"):
    # The run list is stored as a comma-separated string in the
    # <Metadata Name="RunList"> element.
    if metadata.get('Name') == 'RunList':
        grl_runs = metadata.text.split(',')

for run in grl_runs:
    print(run)
        # Tail of a function whose definition starts before this chunk;
        # 'items', 'child' and 't' come from that earlier context.
        add_item(items, child, t)
    except KeyError:
        # A referenced child key may be absent from the export; skip it.
        pass


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert a list of JIRA items in the XML format into an XMind document.')
    parser.add_argument('source', help='xml source file created from a Jira export')
    parser.add_argument('destination', help='XMind destination file')
    args = parser.parse_args()

    tree = ElementTree()
    tree.parse(args.source)
    items = {}
    root_items = []

    # Create all the items
    for i in tree.iter('item'):
        key = i.find('key').text
        summary = i.find('summary').text
        status = i.find('status').text
        description = i.find('description').text
        url = i.find('link').text
        # issuelinktype id '10000' selects one specific JIRA link type;
        # inward links are treated as children and outward links as
        # parents. NOTE(review): id->semantics mapping is instance-specific
        # — confirm against the exporting JIRA server.
        children = [k.text for k in i.findall("issuelinks/issuelinktype[@id='10000']/inwardlinks/issuelink/issuekey")]
        parents = [k.text for k in i.findall("issuelinks/issuelinktype[@id='10000']/outwardlinks/issuelink/issuekey")]
        items[key] = {
            'summary': summary,
            'status': status,
            'url': url,
            'description': description,
            'children': children,
            'parents': parents
        }
# exaqmples: http://www.doughellmann.com/PyMOTW/xml/etree/ElementTree/parse.html#parsing-an-entire-document from xml.etree.ElementTree import ElementTree def getDevice(devicesTree, deviceid): for device in devicesTree.findall('device'): if device.get('id') == deviceid: return device return None thisTree = ElementTree() thisTree.parse("tinajaconfig.xml") for node in thisTree.iter(): # print the whole tree # print node.tag, node.attrib pass print "" #Get the root node tinajaRoot = thisTree.getroot() deviceslist = tinajaRoot.find('./devices') thisRadio = 1 thisDevice = getDevice(deviceslist, str(thisRadio)) if thisDevice != None:
################################################# JSON tree = ElementTree().parse('twitter-output.xml') # parse out xml file into a tree jsonwrite=open("twitter-output.json", "w+", encoding="utf-8") #open json file jsonwrite.write('{') # beging printing json format elements jsonwrite.write('\n \"documents\":{') jsonwrite.write('\n \"sentences": [') print('{', file=f1) print('\n \"documents\":{', file=f1) print('\n \"sentences": [', file=f1) i = 0 last = 0 for e in tree.iter('sentence'): # find the last element in the file last +=1 for sent in tree.iter('sentence'): # main loop if i != last-1: #if not the last element jsonwrite.write('\n {') jsonwrite.write('\n \"text\": \"'+sent[0].text+'\",') jsonwrite.write('\n \"avg\": \"'+sent[1].text+'\",') jsonwrite.write('\n \"id\": ') jsonwrite.write(sent.get('id')) jsonwrite.write('\n },') print('\n {', file=f1) print('\n \"text\": \"'+sent[0].text+'\",', file=f1) print('\n \"avg\": \"'+sent[1].text+'\",', file=f1) print('\n \"id\": ', file=f1)
class CmsisPack(object):
    """! @brief Wraps a CMSIS Device Family Pack.

    This class provides a top-level interface for extracting device information from CMSIS-Packs.
    After an instance is constructed, a list of the devices described within the pack is available
    from the `devices` property. Each item in the list is a CmsisPackDevice object.

    The XML element hierarchy that defines devices is as follows.
    ```
    family [-> subFamily] -> device [-> variant]
    ```

    Internally, this class is responsible for collecting the device-related XML elements from each
    of the levels of the hierarchy described above. It determines which elements belong to each
    defined device and passes those to CmsisPackDevice. It is then CmsisPackDevice that performs
    the parsing of each element type into pyOCD-compatible data.
    """

    def __init__(self, file_or_path):
        """! @brief Constructor.

        Opens the CMSIS-Pack and builds instances of CmsisPackDevice for all the devices
        and variants defined within the pack.

        @param self
        @param file_or_path The .pack file to open. May be a string that is the path to the pack,
            or may be a ZipFile, or a file-like object that is already opened.

        @exception MalformedCmsisPackError The pack is not a zip file, or the .pdsc file is missing
            from within the pack.
        """
        if isinstance(file_or_path, zipfile.ZipFile):
            self._pack_file = file_or_path
        else:
            try:
                self._pack_file = zipfile.ZipFile(file_or_path, 'r')
            except BadZipFile as err:
                # six.raise_from preserves the exception cause on Python 2 and 3.
                six.raise_from(MalformedCmsisPackError("Failed to open CMSIS-Pack '{}': {}".format(
                    file_or_path, err)), err)

        # Find the .pdsc file. (for/else: the else branch runs only when no
        # .pdsc name was found, i.e. the loop was not broken out of.)
        for name in self._pack_file.namelist():
            if name.endswith('.pdsc'):
                self._pdscName = name
                break
        else:
            raise MalformedCmsisPackError("CMSIS-Pack '{}' is missing a .pdsc file".format(file_or_path))

        # Convert PDSC into an ElementTree.
        with self._pack_file.open(self._pdscName) as pdscFile:
            self._pdsc = ElementTree(file=pdscFile)

        # Stack of _DeviceInfo built while walking family->subFamily->device->variant.
        self._state_stack = []
        # Accumulates one CmsisPackDevice per device/variant element.
        self._devices = []

        # Extract devices.
        for family in self._pdsc.iter('family'):
            self._parse_devices(family)

    @property
    def pdsc(self):
        """! @brief Accessor for the ElementTree instance for the pack's PDSC file."""
        return self._pdsc

    @property
    def devices(self):
        """! @brief A list of CmsisPackDevice objects for every part number defined in the pack."""
        return self._devices

    def _parse_devices(self, parent):
        """! @brief Recursively collect device-defining XML elements.

        Pushes a _DeviceInfo for `parent` onto the state stack, creates a
        CmsisPackDevice when `parent` itself defines a device or variant
        (merging memory/algorithm/debug elements inherited from enclosing
        levels), then recurses into subFamily/device/variant children.
        """
        # Extract device description elements we care about.
        newState = _DeviceInfo(element=parent)
        children = []
        for elem in parent:
            if elem.tag == 'memory':
                newState.memories.append(elem)
            elif elem.tag == 'algorithm':
                newState.algos.append(elem)
            elif elem.tag == 'debug':
                newState.debugs.append(elem)
            # Save any elements that we will recurse into.
            elif elem.tag in ('subFamily', 'device', 'variant'):
                children.append(elem)

        # Push the new device description state onto the stack.
        self._state_stack.append(newState)

        # Create a device object if this element defines one.
        if parent.tag in ('device', 'variant'):
            # Build device info from elements applying to this device.
            deviceInfo = _DeviceInfo(element=parent,
                                        families=self._extract_families(),
                                        memories=self._extract_memories(),
                                        algos=self._extract_algos(),
                                        debugs=self._extract_debugs()
                                        )

            dev = CmsisPackDevice(self, deviceInfo)
            self._devices.append(dev)

        # Recursively process subelements.
        for elem in children:
            self._parse_devices(elem)

        self._state_stack.pop()

    def _extract_families(self):
        """! @brief Collect vendor/family/subFamily names from the current state stack."""
        families = []
        for state in self._state_stack:
            elem = state.element
            if elem.tag == 'family':
                families += [elem.attrib['Dvendor'], elem.attrib['Dfamily']]
            elif elem.tag == 'subFamily':
                families += [elem.attrib['DsubFamily']]
        return families

    def _extract_items(self, state_info_name, filter):
        """! @brief Generic merge of one element category across the state stack.

        Walks outermost-to-innermost state, letting `filter` fold each element
        into a dict so inner (more specific) definitions can override outer
        ones. KeyError/ValueError raised by `filter` is logged and the element
        skipped. (NOTE: 'filter' and 'map' shadow builtins — kept for
        compatibility with the original code.)
        """
        map = {}
        for state in self._state_stack:
            for elem in getattr(state, state_info_name):
                try:
                    filter(map, elem)
                except (KeyError, ValueError) as err:
                    LOG.debug("error parsing CMSIS-Pack: " + str(err))
        return list(map.values())

    def _extract_memories(self):
        """! @brief Merge memory regions, keyed by 'name' (or 'id') attribute."""
        def filter(map, elem):
            if 'name' in elem.attrib:
                name = elem.attrib['name']
            elif 'id' in elem.attrib:
                name = elem.attrib['id']
            else:
                # Neither option for memory name was specified, so skip this region.
                LOG.debug("skipping unnamed memmory region")
                return
            map[name] = elem

        return self._extract_items('memories', filter)

    def _extract_algos(self):
        """! @brief Merge flash algorithms, keyed by (start, size) address range."""
        def filter(map, elem):
            # We only support Keil FLM style flash algorithms (for now).
            if ('style' in elem.attrib) and (elem.attrib['style'] != 'Keil'):
                LOG.debug("skipping non-Keil flash algorithm")
                return None, None

            # Both start and size are required. base=0 accepts hex ("0x...") or decimal.
            start = int(elem.attrib['start'], base=0)
            size = int(elem.attrib['size'], base=0)
            memrange = (start, size)

            # An algo with the same range as an existing algo will override the previous.
            map[memrange] = elem

        return self._extract_items('algos', filter)

    def _extract_debugs(self):
        """! @brief Merge debug elements, keyed by processor name (+unit) or '*' for all."""
        def filter(map, elem):
            if 'Pname' in elem.attrib:
                name = elem.attrib['Pname']
                unit = elem.attrib.get('Punit', 0)
                name += str(unit)

                # A processor-specific element supersedes any previous
                # all-processors ('*') element.
                if '*' in map:
                    map.clear()
                map[name] = elem
            else:
                # No processor name was provided, so this debug element applies to
                # all processors.
                map.clear()
                map['*'] = elem

        return self._extract_items('debugs', filter)

    def get_file(self, filename):
        """! @brief Return file-like object for a file within the pack.

        @param self
        @param filename Relative path within the pack. May use forward or back slashes.
        @return A BytesIO object is returned that contains all of the data from the file
            in the pack. This is done to isolate the returned file from how the pack was
            opened (due to particularities of the ZipFile implementation).
        """
        filename = filename.replace('\\', '/')
        return io.BytesIO(self._pack_file.read(filename))
class DataSetEvaluator(object):
    """Drive templated data generation from an XML description.

    The XML file defines <dataset> elements (each with mandatory 'name' and
    'type' attributes), an 'iterations' attribute on the root element, and a
    <template> element whose ``#{name}`` placeholders are substituted with
    successive dataset values on each iteration.
    """

    def __init__(self, xml_filename):
        # Build element tree
        self.__elem_tree = ElementTree()
        self.__elem_tree.parse(xml_filename)
        # Initialize class attributes
        self.instances = self.init_instances()
        self.instances_values = self.update_iterations_values()
        # init_iterations() already returns an int; the original wrapped it
        # in a second, redundant int().
        self.iterations = self.init_iterations()
        self.template = self.init_template()

    def init_instances(self):
        '''
        Parse __elem_tree to determine and new the data set objects
        present in the XML file.
        '''
        # Obtain all the <dataset> elements from the XML file
        instances = {}
        dsb = DataSetBuilder()
        dataset_list = list(self.__elem_tree.iter("dataset"))
        if not dataset_list:
            sys.stderr.write('Warning: Data sets not defined.\n')
        for dataset in dataset_list:
            # 'name' and 'type' are both mandatory; abort loudly when absent.
            if 'name' in dataset.attrib:
                dataset_name = dataset.attrib['name']
            else:
                sys.stderr.write('Error: Unnamed data set. Aborting .\n')
                sys.stderr.write('Exiting (-1)')
                sys.exit(-1)
            if 'type' in dataset.attrib:
                dataset_type = dataset.attrib['type']
            else:
                sys.stderr.write('Error: Untyped data set. Aborting. \n')
                sys.stderr.write('Exiting (-1)')
                sys.exit(-1)
            # Create the ds_dict for the Abstract Data Set subclasses:
            # every attribute except 'type' is forwarded to the constructor.
            ds_dict = {key: value for (key, value) in dataset.attrib.items()
                       if key != 'type'}
            # Build instances of Data Sets
            if dataset_type not in dsb.classes:
                sys.stderr.write('Error: Unknown type: \'%s\'. Aborting. \n' % dataset_type)
                sys.stderr.write('Exiting (-1) .\n')
                sys.exit(-1)
            instances[dataset_name] = dsb.new(dataset_type, ds_dict)
        return instances

    def init_iterations(self):
        '''
        Returns the number of iterations (how many subsequent lines
        to generate)
        '''
        return int(self.__elem_tree.getroot().attrib['iterations'])

    def init_template(self):
        '''
        Retrieves the template string from the XML file
        '''
        return self.__elem_tree.findall('template')[0].text

    def update_iterations_values(self):
        '''
        Keep a dictionary with instances values to preserve next_value()
        across iterations.
        '''
        return {key: instance.next_value()
                for (key, instance) in self.instances.items()}

    def write_output(self, output=sys.stdout):
        '''
        Parse the template and write the output to a stream.
        The default stream is sys.stdout.
        '''
        # Find all #{data_set_names} and replace them with
        # self.instances[data_set_name].next_value().
        # Raw string fixes the invalid '\w' escape of the original; the
        # lookbehind skips placeholders preceded by '#' (presumably an
        # escape mechanism — TODO confirm).
        regex = r'(?:^|(?<=[^#]))#{\w+}'

        def inner_subst(matchobj):
            # Removing unneeded characters from key
            key = matchobj.group(0)
            for c in ['{', '#', '}']:
                key = key.replace(c, '')
            # replace #{word} with instance values .
            return str(self.instances_values[key])

        for i in range(self.iterations):
            if i % 10000 == 0:
                # Progress marker; was a Python 2 print statement, which is a
                # SyntaxError on Python 3.
                print(i)
            output.write(re.sub(regex, inner_subst, self.template))
            self.instances_values = self.update_iterations_values()
    def __init__(self, languagelist_file, datadir):
        """Build language/country lookup tables from the system iso-codes
        XML databases and read the distro 'languagelist' mapping file.

        :param languagelist_file: basename of the languagelist file, found
            under ``<datadir>/data/``.
        :param datadir: base data directory.
        """
        self._datadir = datadir
        LANGUAGELIST = os.path.join(datadir, 'data', languagelist_file)
        # map language to human readable name, e.g.:
        # "pt"->"Portuguise", "de"->"German", "en"->"English"
        self._lang = {}
        # map country to human readable name, e.g.:
        # "BR"->"Brasil", "DE"->"Germany", "US"->"United States"
        self._country = {}
        # map locale (language+country) to the LANGUAGE environment, e.g.:
        # "pt_PT"->"pt_PT:pt:pt_BR:en_GB:en"
        self._languagelist = {}

        # read lang file
        et = ElementTree(file="/usr/share/xml/iso-codes/iso_639_3.xml")
        it = et.iter('iso_639_3_entry')
        for elm in it:
            # Prefer the common name over the formal one when present.
            if "common_name" in elm.attrib:
                lang = elm.attrib["common_name"]
            else:
                lang = elm.attrib["name"]
            # Prefer the two-letter ISO 639-1 code; fall back to the 639-3 id.
            if "part1_code" in elm.attrib:
                code = elm.attrib["part1_code"]
            else:
                code = elm.attrib["id"]
            self._lang[code] = lang
        # Hack for Chinese langpack split
        # Translators: please translate 'Chinese (simplified)' and 'Chinese (traditional)' so that they appear next to each other when sorted alphabetically.
        self._lang['zh-hans'] = _("Chinese (simplified)")
        # Translators: please translate 'Chinese (simplified)' and 'Chinese (traditional)' so that they appear next to each other when sorted alphabetically.
        self._lang['zh-hant'] = _("Chinese (traditional)")
        # end hack

        # read countries
        et = ElementTree(file="/usr/share/xml/iso-codes/iso_3166.xml")
        it = et.iter('iso_3166_entry')
        for elm in it:
            if "common_name" in elm.attrib:
                descr = elm.attrib["common_name"]
            else:
                descr = elm.attrib["name"]
            # Prefer the two-letter country code; fall back to the three-letter one.
            if "alpha_2_code" in elm.attrib:
                code = elm.attrib["alpha_2_code"]
            else:
                code = elm.attrib["alpha_3_code"]
            self._country[code] = descr

        # read the languagelist (semicolon-separated fields; '#' lines and
        # blank lines are comments). Field 7 (w[6]) holds the LANGUAGE
        # fallback chain; its first entry names the locale.
        with open(LANGUAGELIST) as f:
            for line in f:
                tmp = line.strip()
                if tmp.startswith("#") or tmp == "":
                    continue
                w = tmp.split(";")
                # FIXME: the latest localechoosers "languagelist" does
                # no longer have this field for most languages, so
                # deal with it and don't set LANGUAGE then
                # - the interessting question is what to do
                # if LANGUAGE is already set and the new
                localeenv = w[6].split(":")
                #print(localeenv)
                self._languagelist[localeenv[0]] = '%s' % w[6]
        # Tail of a distance-matrix function whose definition starts before
        # this chunk; 'element', 'times', 'distances', 'result', 'i', 'j'
        # come from that earlier context. This part of the file is Python 2
        # (print statement, urllib2 below).
        if element['status']=='ZERO_RESULTS':
            print 'ZERO_RESULTS for ' + result['origin_addresses'][i] + ' to ' + result['destination_addresses'][j]
            # -1 marks an unreachable origin/destination pair.
            times[i][j]=-1
            distances[i][j]=-1
        else:
            times[i][j] = element['duration']['value']
            distances[i][j] = element['distance']['value']
    return times,distances,result


# Download the Hubway station feed and build a Station object per <station>.
stations_file = urllib2.urlopen('http://thehubway.com/data/stations/bikeStations.xml')
tree = ElementTree()
tree.parse(stations_file)
stations = list()
for s in tree.iter('station'):
    station = Station()
    station.id = int(s.find('id').text)
    station.lat = s.find('lat').text
    station.long = s.find('long').text
    station.name = s.find('name').text
    stations.append(station)

##################### Change These #######################
number_of_hackers=2 # This partitions the problem
this_hacker=0 # hacker number 0 of 2
debug=2000 #debug is a limit for testing, make it very big for a full run

# Create the output directory if it does not already exist.
data_dir = 'data'
os.path.exists(data_dir) or os.mkdir(data_dir)
assert this_hacker < number_of_hackers
class Project():
    """Manage a set of git sub-projects tracked in an XML config file.

    Each sub-project is a <SubProject name=... branch=... uri=...> element
    under the <Project> root. Sub-project directory names are also appended
    to .gitignore so the umbrella repository ignores them.
    """

    def __init__(self):
        self.tree = ElementTree()
        self.git = Git()
        if not os.path.exists(PROJECT_CONFIG_PATH):
            os.mkdir(PROJECT_CONFIG_PATH)
        try:
            self.tree.parse(PROJECT_CONFIG_FILE)
        except Exception:
            # Config missing or unparsable: start from an empty <Project>
            # root named after the current directory. (Was a bare except,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            root = Element('Project', {'name': os.path.basename(os.getcwd())})
            self.tree._setroot(root)

    def save(self):
        """Persist the XML tree back to the project config file."""
        self.tree.write(PROJECT_CONFIG_FILE, xml_declaration=True, method="xml")

    def iter(self):
        """Iterate over all <SubProject> elements."""
        return self.tree.iter('SubProject')

    def find(self, name):
        """Return the <SubProject> element with the given name, or None."""
        for node in self.iter():
            if node.get('name') == name:
                return node

    def inSubProject(self):
        """Generator: chdir into each sub-project directory, print a banner,
        yield control, then print a closing banner."""
        cwd = os.getcwd()
        for node in self.iter():
            name = node.get('name')
            print("")
            print("On:%s" % name)
            print("***************************************")
            os.chdir("/".join([cwd, name]))
            yield
            print("***************************************")

    def clone(self):
        """Clone every tracked sub-project."""
        for module in self.iter():
            self.git.clone(module)

    def __init_module(self, module):
        """Build a <SubProject> element describing ``module``'s git state,
        or return None when the directory is missing or not a repository."""
        if not os.path.exists(module):
            print("module %s not exists." % module)
            return None
        cwd = os.getcwd()
        os.chdir("/".join([cwd, module]))
        if self.git.is_repo():
            node = Element('SubProject')
            node.set("name", module)
            current_branch = self.git.current_branch()
            if current_branch is not None:
                node.set("branch", current_branch)
                remote_uri = self.git.current_remote_uri(branch=current_branch)
                if remote_uri is not None:
                    node.set("uri", remote_uri)
            else:
                node = None
        else:
            print("fatal: Not a git repository")
            node = None
        # Always restore the original working directory.
        os.chdir(cwd)
        return node

    def __append_ignore_file(self, module):
        """Add ``module`` to .gitignore unless it is already listed."""
        if os.path.exists(".gitignore"):
            # 'with' fixes the original's leaked handle on the early return.
            with open(".gitignore", "r") as ignore_file:
                for line in ignore_file:
                    if module == line.strip():
                        return
        with open(".gitignore", "a") as ignore_file:
            ignore_file.write(module + "\n")

    def __remove_ignore_file(self, modules):
        """Remove every entry in ``modules`` (a list of names) from .gitignore."""
        if os.path.exists(".gitignore"):
            with open(".gitignore", "r") as ignore_file:
                # Was a Python 2 'print modules' statement (SyntaxError on 3).
                print(modules)
                data = [line.strip() for line in ignore_file
                        if line.strip() not in modules]
            # Reopen for writing only after the read handle is closed
            # (the original reopened while the read handle was still open).
            with open(".gitignore", "w") as ignore_file:
                ignore_file.write("\n".join(data) + "\n")

    def append(self, module):
        """Register ``module`` as a sub-project. Returns 0 on success, -1 on failure."""
        if module is None:
            return -1
        # Replace any existing entry of the same name.
        node = self.find(module)
        root = self.tree.getroot()
        if node is not None:
            root.remove(node)
        node = self.__init_module(module)
        if node is None:
            return -1
        root.append(node)
        self.__append_ignore_file(module)
        self.save()
        return 0

    def remove(self, module=None):
        """Unregister ``module``; with no argument, drop all .gitignore entries."""
        if module is not None:
            node = self.find(module)
            root = self.tree.getroot()
            if node is not None:
                root.remove(node)
            # BUG FIX: pass a list. With a bare string, the helper's
            # ``line.strip() in modules`` test was a substring check and
            # could drop unrelated .gitignore entries.
            self.__remove_ignore_file([module])
            self.save()
        else:
            data = [node.get('name') for node in self.iter()]
            self.__remove_ignore_file(data)