def print_error_reports_from_report_file(file_path):
    tree = ElementTree()
    try:
        tree.parse(file_path)
    except:
        print "-" * 50
        print "Error parsing {0!s}".format(file_path)
        f = open(file_path, "r");
        print f.read();
        print "-" * 50
        return

    testcases = tree.findall(".//testcase")
    for testcase in testcases:
        error = testcase.find("error")
        if error is not None:
            print_detail_information(testcase, error)

        fail = testcase.find("fail")
        if fail is not None:
            print_detail_information(testcase, fail)

        failure = testcase.find("failure")
        if failure is not None:
            print_detail_information(testcase, failure)
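
The three near-identical find/report blocks above can be collapsed into a single loop over the child tag names; a minimal sketch, assuming the same print_detail_information helper:

from xml.etree.ElementTree import ElementTree

def print_error_reports_compact(file_path):
    # Same behaviour as above, but one loop over the three possible child tags.
    tree = ElementTree()
    try:
        tree.parse(file_path)
    except Exception:
        return
    for testcase in tree.findall(".//testcase"):
        for tag in ("error", "fail", "failure"):
            child = testcase.find(tag)
            if child is not None:
                print_detail_information(testcase, child)
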
def writeFile(path, element, verbose=False):
    
    if verbose:
        if os.path.exists(path):
            # TODO  Determine if the element has not changed
            print("## Overwrite existing file: ", path)
        else:
            print("@@ Creating new file: ", path)
    
    # Get the namespace of this element
    namespace = element.getNamespace()
    # Register this namespace with the parser as the default namespace
    xml.etree.ElementTree.register_namespace('', namespace)
    e = element.getElement()
    # Fix up the element so it will print nicely
    OvalDocument.indent(e)
    # Create a new ElementTree with this element as the root
    tree = ElementTree(e)
    # And finally, write the full tree to a file including the xml declaration
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        try :
            os.makedirs(parent, 0o755, True)
        except:
            return False
    
    tree.write(path, "UTF-8", True)
#     xml.etree.ElementTree.dump(tree)
    return True
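
The namespace-registration and write steps in writeFile can be reproduced with the standard library alone; a minimal standalone sketch that skips the OvalDocument wrapper objects used above:

import xml.etree.ElementTree as ET

def write_tree(path, root, namespace):
    # Register the namespace as the default so no "ns0:" prefixes are emitted.
    ET.register_namespace('', namespace)
    tree = ET.ElementTree(root)
    # Equivalent to tree.write(path, "UTF-8", True) above: include the xml declaration.
    tree.write(path, encoding="UTF-8", xml_declaration=True)
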
Example #3
    def _get_build_metadata(self, dir_name):
        if os.path.exists(os.path.join(dir_name, 'jenkins_build.tar.gz')):
            raise Skip('the build has already been archived', dir_name)

        # Read the build.xml metadata file that Jenkins generates
        build_metadata = os.path.join(dir_name, 'build.xml')

        if not os.access(build_metadata, os.R_OK):
            self.log.debug("Can't read build file at %s" % (build_metadata))
            raise Exception("Can't access build.xml at %s" % (build_metadata))
        else:
            tree = ElementTree()
            tree.parse(build_metadata)

            keys = ['result', 'number', 'duration']

            kv_pairs = ((k, tree.find(k)) for k in keys)
            d = dict([(k, v.text) for k, v in kv_pairs if v is not None])

            try:
                d['branch'] = tree.find('actions') \
                    .find('hudson.plugins.git.util.BuildData') \
                    .find('buildsByBranchName') \
                    .find('entry') \
                    .find('hudson.plugins.git.util.Build') \
                    .find('revision') \
                    .find('branches') \
                    .find('hudson.plugins.git.Branch') \
                    .find('name') \
                    .text
            except Exception:
                pass
            return d
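
The chain of .find() calls guarded by a broad except can also be expressed as a single path query; findtext() returns None when any step of the path is missing. A sketch of the same branch lookup, assuming the same Jenkins build.xml layout:

branch_path = ('actions/hudson.plugins.git.util.BuildData/buildsByBranchName/'
               'entry/hudson.plugins.git.util.Build/revision/branches/'
               'hudson.plugins.git.Branch/name')
branch = tree.findtext(branch_path)
if branch is not None:
    d['branch'] = branch
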
Example #4
def main():
    parser = argparse.ArgumentParser(description="convert Mitsuba scenes to appleseed format.")
    parser.add_argument("input_file", metavar="input-file", help="Mitsuba scene (*.xml)")
    parser.add_argument("output_file", metavar="output-file", help="appleseed scene (*.appleseed)")
    args = parser.parse_args()

    # Create a log target that outputs to stderr, and binds it to the renderer's global logger.
    # Eventually you will want to redirect log messages to your own target.
    # For this you will need to subclass appleseed.ILogTarget.
    log_target = asr.ConsoleLogTarget(sys.stderr)

    # It is important to keep log_target alive, as the global logger does not
    # take ownership of it. In this example, we do that by removing the log target
    # when no longer needed, at the end of this function.
    asr.global_logger().add_target(log_target)
    asr.global_logger().set_verbosity_level(asr.LogMessageCategory.Warning)

    tree = ElementTree()
    try:
        tree.parse(args.input_file)
    except IOError:
        fatal("Failed to load {0}".format(args.input_file))

    project = convert(tree)

    asr.ProjectFileWriter().write(project, args.output_file,
                                  asr.ProjectFileWriterOptions.OmitHandlingAssetFiles)
Example #5
    def add_text_layer(self,pdf, hocrfile, page_num,height, dpi):
      """Draw an invisible text layer for OCR data"""
      p1 = re.compile('bbox((\s+\d+){4})')
      p2 = re.compile('baseline((\s+[\d\.\-]+){2})')
      hocr = ElementTree()
      hocr.parse(hocrfile)
      logging.debug(xml.etree.ElementTree.tostring(hocr.getroot()))
      for c in hocr.getroot():  # Find the <body> tag
          if c.tag != 'body':
              continue
      for page in c: # Each child in the body is a page tag
          if (page.attrib['class'] != "ocr_page"):
              assert ("Why is this hocr not paging properly??")
          if page.attrib['id'] == 'page_%d' %(page_num):
              break

      for line in page.findall(".//{http://www.w3.org/1999/xhtml}span"):
      #for line in page.findall(".//span"):
        if line.attrib['class'] != 'ocr_line':
          continue
        linebox = p1.search(line.attrib['title']).group(1).split()

        try:
          baseline = p2.search(line.attrib['title']).group(1).split()
        except AttributeError:
          baseline = [ 0, 0 ]

        linebox = [float(i) for i in linebox]
        baseline = [float(i) for i in baseline]

        for word in line:
          if word.attrib['class'] != 'ocrx_word':
            continue
          word_text = []
          for child in word.iter():
              if child.text:
                  word_text.append(child.text)
          word.text = ' '.join(word_text)
          logging.debug(word.text)
          #for child in word:
             #if child.tag:
                 #word.text = child.text

          if word.text is None:
            continue
          font_width = pdf.stringWidth(word.text.strip(), 'invisible', 8)
          if font_width <= 0:
            continue
          box = p1.search(word.attrib['title']).group(1).split()
          box = [float(i) for i in box]
          b = self.polyval(baseline, (box[0] + box[2]) / 2 - linebox[0]) + linebox[3]
          text = pdf.beginText()
          text.setTextRenderMode(3)  # double invisible
          text.setFont('invisible', 8)
          text.setTextOrigin(box[0] * 72 / dpi, height - b * 72 / dpi)
          box_width = (box[2] - box[0]) * 72 / dpi
          text.setHorizScale(100.0 * box_width / font_width)
          text.textLine(word.text.strip())
          #logging.debug( "Pg%s: %s" % (page_num,word.text.strip()))
          pdf.drawText(text)
Example #6
def determine_subtype_element(in_file, delete_extra = True):
    hits = defaultdict(int)
    try:    
        tree = ElementTree(file = in_file)

        for it in tree.getiterator('Iteration'):
            hit_list = it.getiterator('Hit')
            if len(hit_list) > 0:
                hit = hit_list[0].find('Hit_def').text
                hits[hit.split('_')[1]] += 1
                if delete_extra:
                    for hit in hit_list[1:]:
                        hit.clear()
        if delete_extra:
            tree.write(in_file)
    except ExpatError:
        return None
    
    count = sum(hits.values())
    if count < 5:
        return None
    elif all([x < count*0.6 for x in hits.values()]):
        print 'too heterogeneous %s' % ','.join(map(str,hits.items()))
        return None
    else:
        for key, val in hits.items():
            if val > count*0.6:
                return key
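
getiterator() as used above was deprecated and removed in Python 3.9; on current interpreters the equivalent is iter(). A minimal sketch of the same iteration, assuming the same BLAST XML layout:

for it in tree.iter('Iteration'):
    hit_list = list(it.iter('Hit'))
    if hit_list:
        hit_def = hit_list[0].findtext('Hit_def')
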
Example #7
def guess_location(in_xml, in_fasta, write_out = False):
    """Tries to guess the genomic location of a fragment."""
    
    name, seq = fasta_iter(in_fasta).next()
    parts = name.split('|')
    if len(parts) == 3:
        return
    
    starts = []
    seq_len = len(seq)
    try:
        tree = ElementTree(file = in_xml)
        for it in tree.getiterator('Iteration'):
            start_elem = it.find('Iteration_hits/Hit/Hit_hsps/Hsp/Hsp_hit-from')
            name_elem = it.find('Iteration_query-def')
            if start_elem is not None:
                tstart = int(name_elem.text.split('_')[1])
                starts.append(int(start_elem.text)-tstart)
    except ExpatError:
        return None
    if starts:
        start = max(sum(starts)/len(starts), 1)
        if write_out:
            with open(in_fasta, 'w') as handle:
                handle.write('>%s|%i|%i\n%s\n' % (name, start, start+seq_len, seq))
        return start     
Example #8
 def __init__(self, scraperPath = None):
     self.xml = ElementTree()
     self.addonxml = ElementTree()
     self.settingsxml= ElementTree()
     self.requires = []
     self.deps = []
     if scraperPath:
         scraperPath = os.path.normpath(scraperPath)
         self.addonxml.parse( os.path.join(scraperPath, "addon.xml") )
         xmlpath = self.addonxml.find("extension").attrib["library"]
         self.xml.parse( os.path.join(scraperPath, xmlpath) )
         if os.path.exists(os.path.join(scraperPath, "resources/settings.xml")):
             self.settingsxml.parse( os.path.join(scraperPath, "resources/settings.xml") )
         requires = self.addonxml.find("requires")
         if requires is not None:  # an Element with no children is falsy, so test against None explicitly
             for require in requires.findall("import"):
                 self.requires.append({})
                 self.requires[-1]["scraper"] = require.attrib["addon"]
                 self.requires[-1]["version"] = require.attrib["version"]
         else:
             logging.warning("could not find <requires> in %s/addon.xml" % scraperPath)
         self.basepath = os.path.split(scraperPath)[0]
     self.path = scraperPath
     if hasattr(self, "__stage2__"):
         self.__stage2__()
def qrcode_render(content, size, padding, version, em, ec, newline, parent):
    
    if newline:
        content = content.replace("\\n", "\n")
        content = content.replace("\\r", "\r")
    
    # Generate QR Code - call web service
    qrcode = qrcode_generate(content, size, padding, version, em, ec)
    #if not result:
    #    return
    
    # Parse SVG and draw elements to the workspace
    tree = ElementTree()
    tree.parse(StringIO(qrcode))
    root = tree.getroot()
    xmlns = "{http://www.w3.org/2000/svg}"
    modules = list(root.getiterator(xmlns + "rect"))
    for m in modules:
        # <rect x="32" y="32" width="8" height="8" style="fill:rgb(0,0,0);" />
        x = m.attrib["x"]
        y = m.attrib["y"]
        w = m.attrib["width"]
        h = m.attrib["height"]
        style = m.attrib["style"]
        qrcode_draw_module((w,h), (x,y), style, parent)
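
Because the web service already returns the SVG as a string, the StringIO round-trip is optional: fromstring() yields the root element directly, and the namespaced rects can be matched with iter(). A short sketch under the same assumptions as the function above:

import xml.etree.ElementTree as ET

root = ET.fromstring(qrcode)
for m in root.iter("{http://www.w3.org/2000/svg}rect"):
    qrcode_draw_module((m.attrib["width"], m.attrib["height"]),
                       (m.attrib["x"], m.attrib["y"]),
                       m.attrib["style"], parent)
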
Example #10
    def __init__(self, xmlfile = None):
        tree = ElementTree()
        tree.parse(xmlfile)
        root = tree.getroot()

        self._serial = root[1].text
        self._sdate = root[2].text
        self._tdate = email.utils.parsedate(self._sdate)
        uccgrp = root[3]
        grpreg = root[4]

        for ucc in uccgrp:
            prefix = ucc[0].text
            for grp in ucc[2]:
                length = int(grp[1].text)
                start, end = grp[0].text.split('-')
                self._range_grp[prefix + start] = length
                self._range_grp[prefix + end] = length

        for grp in grpreg:
            prefix = grp[0].text.replace('-','')
            for reg in grp[2]:
                length = int(reg[1].text)
                start, end = reg[0].text.split('-')
                self._range_reg[prefix + start] = length
                self._range_reg[prefix + end] = length
Example #11
def read_config(config_file):
    tree = ElementTree()
    tree.parse(config_file)
    root = tree.getroot()
    server = root.attrib['name']
    server_vm = root.attrib['virtual_machine']
    protocals = root.getchildren()
    acnt = [0,0,0,0]
    cur_datetime = datetime.now()
    guacamole_client_list=[]
    for protocal in protocals:
        pro_name = protocal.attrib['name']
        clients = protocal.getchildren()
        cnt = 0
        for client in clients:
            cnt+=1
            client_name = client.attrib['name']
            client_host = client[0].text
            client_vm = client[1].text
            guacamoleClientInfo = GuacamoleClientInfo('','',server,client_name,pro_name,client_host,client_vm,0,cur_datetime)
            guacamole_client_list.append(guacamoleClientInfo)
        if pro_name=='vnc':
            acnt[0] = cnt
        elif pro_name=='vnc-read-only':
            acnt[1] = cnt
        elif pro_name=='ssh':
            acnt[2] = cnt
        else:
            acnt[3] = cnt
    
    guacamoleServerLoad = GuacamoleServerLoad(server,server_vm,acnt[0],acnt[1],acnt[2],acnt[3],sum(acnt),cur_datetime,0)
    guacamoleServerLoad.guacamole_client_info = guacamole_client_list
    return guacamoleServerLoad
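
getchildren() in read_config was removed in Python 3.9; iterating an element directly (or calling list(element)) is the portable form. A minimal sketch of the same traversal over the root obtained above:

for protocal in root:                 # was root.getchildren()
    pro_name = protocal.attrib['name']
    for client in protocal:           # was protocal.getchildren()
        client_name = client.attrib['name']
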
def prepareLCSIMFile(inputlcsim, outputlcsim, numberofevents,
                     trackingstrategy, inputslcio, jars = None,
                     cachedir = None, outputFile = None,
                     outputRECFile = None, outputDSTFile = None,
                     debug = False):
  """Writes out a lcsim file for LCSIM
  
  Takes the parameters passed from :mod:`~ILCDIRAC.Workflow.Modules.LCSIMAnalysis`
  
  :param string inputlcsim: name of the provided lcsim
  :param string outputlcsim: name of the lcsim file on which LCSIM is going to run, defined in :mod:`~ILCDIRAC.Workflow.Modules.LCSIMAnalysis`
  :param int numberofevents: Number of events to process
  :param string trackingstrategy: trackingstrategy file to use, can be empty
  :param inputslcio: list of slcio files on which LCSIM should run
  :type inputslcio: list of strings
  :param jars: list of jar files that should be added in the classpath definition
  :type jars: list of strings
  :param string cachedir: folder that holds the cache directory, instead of Home
  :param string outputFile: File name of the output
  :param string outputDSTFile: filename of the DST file
  :param string outputRECFile: filename of the REC file
  :param bool debug: By default set verbosity to true
  
  :return: S_OK(string)
  """
  printtext = ''

  tree = ElementTree()
  try:
    tree.parse(inputlcsim)
  except Exception, x:
    print "Found Exception %s %s" % (Exception, x)
    return S_ERROR("Found Exception %s %s" % (Exception, x))
Example #13
def patch(pom_file, version):
  '''Updates the version in a POM file.  We need to locate //project/parent/version, //project/version and 
  //project/properties/project-version and replace the contents of these with the new version'''
  if settings['verbose']:
    prettyprint("Patching %s" % pom_file, Levels.DEBUG)
  tree = ElementTree()
  tree.parse(pom_file)    
  need_to_write = False
  
  tags = []
  tags.append(get_parent_version_tag(tree))
  tags.append(get_project_version_tag(tree))
  tags.append(get_properties_version_tag(tree))
  
  for tag in tags:
    if tag != None and "-SNAPSHOT" in tag.text:
      if settings['verbose']:
        prettyprint("%s is %s.  Setting to %s" % (str(tag), tag.text, version), Levels.DEBUG)
      tag.text=version
      need_to_write = True
    
  if need_to_write:
    # write to file again!
    write_pom(tree, pom_file)
    return True
  else:
    if settings['verbose']:
      prettyprint("File doesn't need updating; nothing replaced!", Levels.DEBUG)
    return False
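
The get_*_version_tag helpers are not shown here; with the Maven POM namespace (also used in Example #15 below), each one can be a single namespaced find. A hypothetical sketch of what the parent-version helper might look like:

POM_NS = '{http://maven.apache.org/POM/4.0.0}'

def get_parent_version_tag(tree):
    # Hypothetical helper: returns the <version> element under <parent>, or None.
    return tree.find(POM_NS + 'parent/' + POM_NS + 'version')
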
Example #14
File: error.py Project: edsilv/iiif
    def as_xml(self):
        """XML representation of the error to be used in HTTP response.

        This XML format follows the IIIF Image API v1.0 specification,
        see <http://iiif.io/api/image/1.0/#error>
        """
        # Build tree
        spacing = ("\n" if (self.pretty_xml) else "")
        root = Element('error', {'xmlns': I3F_NS})
        root.text = spacing
        e_parameter = Element('parameter', {})
        e_parameter.text = self.parameter
        e_parameter.tail = spacing
        root.append(e_parameter)
        if (self.text):
            e_text = Element('text', {})
            e_text.text = self.text
            e_text.tail = spacing
            root.append(e_text)
        # Write out as XML document to return
        tree = ElementTree(root)
        xml_buf = io.BytesIO()
        if (sys.version_info < (2, 7)):
            tree.write(xml_buf, encoding='UTF-8')
        else:
            tree.write(xml_buf, encoding='UTF-8',
                       xml_declaration=True, method='xml')
        return(xml_buf.getvalue().decode('utf-8'))
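
On Python 3.8 and later the same serialization can be done without the explicit BytesIO buffer, because tostring() accepts xml_declaration; a minimal sketch, assuming the same root element:

import xml.etree.ElementTree as ET

xml_str = ET.tostring(root, encoding='UTF-8', xml_declaration=True).decode('utf-8')
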
Example #15
def _validate_pomdesc(fd):
    """check Maven POM for title, description and organization"""

    NS = '{http://maven.apache.org/POM/4.0.0}'
    PROJECT_NAME_REGEX = re.compile(r'^[a-z][a-z0-9-]*$')
    tree = ElementTree()
    try:
        elem = tree.parse(fd)
    except Exception as e:
        _detail('%s: %s' % (e.__class__.__name__, e))
        return False
    # group = elem.findtext(NS + 'groupId')
    name = elem.findtext(NS + 'artifactId')
    # ver = elem.findtext(NS + 'version')
    title = elem.findtext(NS + 'name')
    if title == '${project.artifactId}':
        title = name
    description = elem.findtext(NS + 'description')
    organization = elem.findtext(NS + 'organization/' + NS + 'name')

    if not name or not PROJECT_NAME_REGEX.match(name):
        _detail('has invalid name (does not match %s)' % PROJECT_NAME_REGEX.pattern)
    if not title:
        _detail('is missing title (<name>...</name>)')
    elif title.lower() == name.lower():
        _detail('has same title as name/artifactId')
    if not description:
        _detail('is missing description (<description>..</description>)')
    elif len(description.split()) < 3:
        _detail('has a too short description')
    if not organization:
        _detail('is missing organization (<organization><name>..</name></organization>)')
    return not VALIDATION_DETAILS
Example #16
def parse_test_objects(category, feature_name, percentile, trial, paths, feature_paths):
 
  info_file = "/Users/isa/Experiments/bof_bmvc12/trial_" + str(trial) + "/bof_category_test_info.xml"
 
  info_tree = ElementTree();
  info_tree.parse(info_file);

  
  scene_elms = info_tree.findall('scene');
  print 'Found: ' + str(len(scene_elms)) + ' scenes'


  for scene in scene_elms:
    
    site_name = scene.get('site_name');
  
    obj_elms = scene.findall('object');
    
    if not obj_elms:  # findall() returns a (possibly empty) list, never None
      print "Invalid scene info file: No objects element"
      sys.exit(-1);
      
    print 'Found: ' + str(len(obj_elms)) + ' objects'
    
    for elm in obj_elms:
        class_name = elm.get('class_name');
        if(class_name==category):
          mesh_name = elm.get('ply_name')
          ply_path = "/data/helicopter_providence_3_12/" + site_name + "/objects_with_aux/" + category + "_" + str(percentile) + "/" + mesh_name + ".ply";
          feature_path = "/Users/isa/Experiments/shape_features_bmvc12/" + site_name + "/" + feature_name + "/" + category + "_" + str(percentile) + "/" + mesh_name + ".npy";
          paths.append(ply_path);
          feature_paths.append(feature_path)
def parse_shed_tool_conf(file):
    """
    Parses a shed_tool_conf xml file and returns a dictionary in the following format:
    {
        section_id: {
            "repository_name|installed_changeset_revision": {
                'name': ...,
                'owner': ...,
                'revision': ...,
                'tool_shed_url': ...,
            }
        }
    }
    """
    sections = defaultdict(lambda: {})
    doc = ElementTree(file=file)
    for section in doc.findall(".//section"):  # './/' is the supported "anywhere in the tree" prefix
        for tool in section.findall('tool'):
            sections[
                section.get('id')][
                tool.find('repository_name').text +
                '|' +
                tool.find('installed_changeset_revision').text] = {
                'name': tool.find('repository_name').text,
                'owner': tool.find('repository_owner').text,
                'revision': tool.find('installed_changeset_revision').text,
                'tool_shed_url': 'https://' +
                tool.find('tool_shed').text}
    return sections
Example #18
    def vm_read_config(self):
        """
        This method parses the libvirt xml config file and fills the
        cfg_details dictionary. It returns the dictionary or raises an
        exception if the xml is not valid.
        """
        
        domain = ET().parse(self.cfg_file)
        vm_type = domain.get('type') 
        self.cfg_details['vm_type'] = HVM_LIBVIRT_NAMEMAP[vm_type]
        self.cfg_details['vm_type_str'] = vm_type
        self.cfg_details['displayName'] = domain.find('name').text
        self.cfg_details['memsize'] = int(domain.find('memory').text) >> 10

        primary_disk_list = []
        for disk in domain.findall('devices/disk'):
            disk_details = self.get_disk_details(disk)
            if disk.get('type') == 'file' and \
               disk.get('device') == 'disk' and \
               disk_details['dev'] in ('sda', 'hda', 'vda') and \
               disk_details['bus'] in ('ide', 'scsi', 'virtio'):
                primary_disk_list.append(os.path.basename(disk_details['file']))
                break

        self.cfg_details['primary_disk'] = primary_disk_list
        self.cfg_details['primary_disk_str'] = ','.join(primary_disk_list)

        if not self.cfg_details:
            raise config_file_invalid()
        else:
            return self.cfg_details
Example #19
def read_xml(in_path):
  '''Read and parse an xml file
    in_path: path to the xml file
    return: ElementTree'''
  tree = ElementTree()
  tree.parse(in_path)
  return tree
Example #20
def read_xml(in_path):
    # Read and parse an xml file
    # in_path: path to the xml file
    # return: ElementTree
    tree = ElementTree()
    tree.parse(in_path)
    return tree
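
Examples #19 and #20 are the same helper; a typical call site looks like this (the filename is a placeholder):

tree = read_xml('config.xml')   # placeholder path
root = tree.getroot()
for child in root:
    print(child.tag, child.attrib)
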
Example #21
def creating_xml(file1,file2,path_for_reference_xsd, xsd_path,schema_url,outputfile):
    value_list,template_id,templatestring,last_value1=test(file1,file2,path_for_reference_xsd, xsd_path)
    root=Element("template")
    
    p=templatestring.split("-",1)[1]
    tree=ElementTree(root)
    root.set("xsi:schemaLocation","https://appliedbroadband.com/ipdr/template_block template_block.xsd")
    root.set("xmlns","https://appliedbroadband.com/ipdr/template_block")
    root.set("xmlns:xsi","http://www.w3.org/2001/XMLSchema-instance")
    templateId=Element("templateId")
    schemaName=Element("schemaName")
    typeName=Element("typeName")
    root.append(templateId)
    root.append(schemaName)
    root.append(typeName)
    templateId.text=str(template_id)
    schemaName.text=str(schema_url)+str(last_value1)
    typeName.text=str(templatestring)+":"+str(templatestring.split("-",1)[1])
    for i in value_list:
        field=Element("field")
        root.append(field)
        typeId=Element("typeId")
        fieldId=Element("fieldId")
        fieldName=Element("fieldName")
        isEnabled=Element("isEnabled")
        field.append(typeId)
        field.append(fieldId)
        field.append(fieldName)
        field.append(isEnabled)
        typeId.text=str(i[0])
        fieldId.text=str(i[1])
        fieldName.text=str(i[2])
        isEnabled.text=str(i[3])
    print Etree.tostring(root)
    tree.write(open(r'%s'%outputfile,"w+"))   
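
The repeated Element()/append() pairs above can be written more compactly with SubElement, which creates the child and attaches it to its parent in one call. A short sketch of the per-field loop, assuming each entry in value_list is a 4-item tuple as the indexing above implies:

from xml.etree.ElementTree import SubElement

for type_id, field_id, field_name, is_enabled in value_list:
    field = SubElement(root, "field")
    SubElement(field, "typeId").text = str(type_id)
    SubElement(field, "fieldId").text = str(field_id)
    SubElement(field, "fieldName").text = str(field_name)
    SubElement(field, "isEnabled").text = str(is_enabled)
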
Example #22
    def process(self, request):
        """
        Process SOAP request
        """
        if request.body is not None and len(request.body) > 0:
            body = urllib.unquote_plus(request.body)
            tree = ElementTree(file=StringIO(body))
            envelope = tree.getroot()
            if envelope is None:
                raise InvalidRequestException('Invalid syntax')
            body = envelope.find("q:Body", namespaces=namespaces)
            if body is None:
                raise InvalidRequestException('Invalid syntax')

            soap_req = body.find("m:GetUserOofSettingsRequest",
                                 namespaces=namespaces)
            if soap_req is not None:
                return self.process_get_request(soap_req)

            soap_req = body.find("m:SetUserOofSettingsRequest",
                                 namespaces=namespaces)
            if soap_req is not None:
                return self.process_set_request(soap_req)

            raise InvalidRequestException('Unknown SOAP request')

        raise InvalidRequestException('No body in request')
    def test_separate_timer_test_case(self):
        """Check that the elapsed time for each test is set separately.
        
        This test encodes a bug in which the elapsed time of the most recently
        run test was reported as the elapsed time for each test.
        """

        # reset runner to record elapsed times
        self.runner = xmlrunner.XMLTestRunner(output=self.stream,
            stream=self.fake_stream, elapsed_times=True)

        self.runner.run(unittest.makeSuite(testcases.SeparateTimerTestCase))
        f = StringIO(self.stream.getvalue())
        try:
            tree = ElementTree(file=f)
            (first, second) = tree.findall('testcase')

            # allow 25ms beyond the sleep() time for garbage collection

            self.assertEqual('test_run_for_100ms', first.attrib['name'])
            first_time = float(first.attrib['time'])
            self.assertTrue(0.100 <= first_time < 0.125,
                'expected about 0.1s. actual: %ss' % first_time)

            self.assertEqual('test_run_for_50ms', second.attrib['name'])
            second_time = float(second.attrib['time'])
            self.assertTrue(0.050 <= second_time < 0.075,
                'expected about 0.05s. actual: %ss' % second_time)
        finally:
            f.close()
Example #24
    def getWeatherData(self, location):
        """
        """
        ## if cached 
        yymmddhh = datetime.datetime.now().replace(tzinfo=UTC).astimezone(JST).strftime('%Y%m%d%H')
        key = str(yymmddhh + "_" + location)
        mc = Caching.MemcacheStore()
        data = mc.get(key)
        if data:
            return data

        data = []
        patt = re.compile(u"^.* \[ ([0-9]+).* \] ([0-9.]+).*$")
        cur = self._rdb.cursor(MySQLdb.cursors.DictCursor)
        cur.execute("""SELECT sht.prefid AS prefid FROM area a 
                        JOIN area_has_state ahs ON (a.areaid=ahs.areaid) 
                        JOIN state s ON (s.stateid=ahs.stateid) 
                        JOIN state_has_tenkijpPref sht ON (s.stateid=sht.stateid) 
                        WHERE a.location=%s""", (location,))
        row = cur.fetchone()

        cur.close()

        ## temperature
        fh    = urllib2.urlopen(WEATHERAPI % row['prefid'])
        rss   = ElementTree(file=fh)
        items = rss.findall('.//item')
        for item in items:
            title = item.find('title').text
            data.append({'title':title})

        mc.set(key, data)
        return data
    def parse_fzp(self, fzp_file):
        """ Parse the Fritzing component file """

        tree = ElementTree(file=fzp_file)

        try:
            prefix = tree.find('label').text
        except AttributeError:
            pass
        else:
            self.component.add_attribute('_prefix', prefix)

        symbol = Symbol()
        self.component.add_symbol(symbol)

        self.body = SBody()
        symbol.add_body(self.body)

        self.cid2termid.update(self.parse_terminals(tree))
        self.terminals.update(self.cid2termid.values())

        layers = tree.find('views/schematicView/layers')
        if layers is None:
            self.image = None
        else:
            self.image = layers.get('image')
Example #26
File: neko.py Project: mpmedia/neko
def import_opml(opml_file):
    tree = ElementTree()    
    tree.parse(opml_file)
    outlines = tree.findall(".//outline")
    tag = None
    # TODO: fix this for all opml formats
    for o in outlines:
        xmlurl = None
        try:
            xmlurl = o.attrib['xmlUrl']
        except:
            tag = o.attrib['text']

        if xmlurl:
            try:
#                print "inserting ", tag, o.attrib['xmlUrl'], o.attrib['htmlUrl'], o.attrib['text'], tag
                f = {
                    '_id': str(uuid.uuid1()),
                    'title': o.attrib['text'],
                    'url': o.attrib['xmlUrl'],
                    'web_url': o.attrib['htmlUrl'],
                    'tag': tag,                
                    }
                db.feeds.update({'url': f['url']}, f, True)
            except:
                pass
Example #27
def parse_config(name):
    tree = ElementTree()
    tree.parse(name)
    items = []
    for item in list(tree.getroot()):
        items.append(process_element(item))
    return items
Example #28
 def __init__(self):
   #Running gtk.Window's init method
   super(MainWindow, self).__init__() 
   
   self.set_size_request(280,700)
   #connect gui close button to quit signal
   self.connect("destroy", gtk.main_quit)
   
   #The table is the real gui, the window just holds it.  
   #Gizmos are added to the table, not the window.  
   self.table = gtk.Table(12,4,True)
   
   #----------------------------------
   etree = ElementTree()
   etree.parse("launchers.xml")
   #XML insists on nesting everything a dozen times
   launchers = etree.find("launchers")
   for i, launcherConfig in enumerate(launchers.getchildren()):
     launcher = gtk.Button()
     launcher.set_label(launcherConfig.find("name").text)
     self.table.attach(launcher, 0, 1, i, i+1)
   #-----------------------------------
     
   #add the table to the window
   self.add(self.table)
   #if you don't show or show_all, no gui
   self.show_all()
def parse_scenes_info(scenes_info_file, model_dirs, ply_paths):
    
    print 'Parsing: ' + scenes_info_file
    
    #parse xml file
    bof_tree = ElementTree();
    bof_tree.parse(scenes_info_file);
    
    scenes_elm = bof_tree.getroot().findall('scene');
    
    if not scenes_elm:  # findall() returns a (possibly empty) list, never None
      print "Invalid bof info file: No scenes element"
      sys.exit(-1);
         
 
    #find scene paths
    for s in range(0, len(scenes_elm)):
        path = scenes_elm[s].get("output_dir");
        ply_file = scenes_elm[s].get("ply_path");
        
        if path is None:
            print "Invalid info file: Error parsing scene path"
            sys.exit(-1);
            
        if ply_file is None:
            print "Invalid info file: Error parsing ply_file"
            sys.exit(-1);
        
        model_dirs.append(path); 
        ply_paths.append(ply_file);  
	def _getTVDBThumbnail(self):
		import os, time
		if self.id:
			# check if the file already exists
			if os.path.isfile(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml'):
				# if it is older than config['cacherenew'] days, delete the files and download again
				if os.path.getctime(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml') < time.time()-(Config['cacherenew']*86400):
					os.remove(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml')
			if not os.path.isfile(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml'):
				URL('http://www.thetvdb.com/api/'+Config['tvdbapikey']+'/series/'+self.id+'/all/'+Config['tvdblang']+'.xml').download(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml')
			from xml.etree.ElementTree import ElementTree
			tree = ElementTree()
			try:
				tree.parse(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml')
				if Config['posterforpilot'] == True and self.season == 1 and self.episode == 1:
					series = tree.find('Series')
					if series.find('poster').text:
						self.thumbnail =  'http://www.thetvdb.com/banners/'+series.find('poster').text
						return True
				for episode in tree.findall('Episode'):
					if int(episode.find('SeasonNumber').text) == self.season and int(episode.find('EpisodeNumber').text) == self.episode:			
						if episode.find('filename').text:		
							self.thumbnail =  'http://www.thetvdb.com/banners/'+episode.find('filename').text
							return True
			except:
				pass
		return False
Example #31
from ndspy.rom import NintendoDSRom

from xml.etree.ElementTree import Element, ElementTree
from skytemple_files.common.xml_util import prettify
from skytemple_files.common.types.file_types import FileType
from skytemple_files.common.util import get_files_from_rom_with_extension, get_ppmdu_config_for_rom
from skytemple_files.graphics.fonts.font_sir0.handler import FontSir0Handler

from PIL import Image

base_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..')
out_dir = os.path.join(os.path.dirname(__file__), 'dbg_output')
os.makedirs(out_dir, exist_ok=True)

rom = NintendoDSRom.fromFile(os.path.join(base_dir, 'skyworkcopy_us.nds'))

for fn in ["FONT/kanji.dat", "FONT/kanji_b.dat", "FONT/unknown.dat"]:
    font_ref = rom.getFileByName(fn)
    font = FontSir0Handler.deserialize(font_ref)
    tree = ElementTree()
    xml = tree.parse(os.path.join(out_dir, fn.replace('/', '_') + f'.xml'))
    tables = dict()
    for i in range(256):
        path = os.path.join(out_dir, fn.replace('/', '_') + f'.{i}.png')
        if os.path.exists(path):
            tables[i] = Image.open(path, 'r')
            
    font.import_from_xml(xml, tables)
    assert FontSir0Handler.serialize(font)==font_ref
Example #32
    def __init__(self, root: Form) -> None:
        root.set("relation", "root")

        ElementTree.__init__(self, root)
Example #33
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser(
        description='Run the test command passed as an argument and ensure '
                    'that the expected result file is generated.')
    parser.add_argument(
        'result_file', help='The path to the xunit result file')
    parser.add_argument(
        '--command',
        nargs='+',
        help='The test command to execute. '
             'It must be passed after other arguments since it collects all '
             'following options.')
    parser.add_argument(
        '--env',
        nargs='+',
        help='Extra environment variables to set when running, e.g. FOO=foo BAR=bar')
    parser.add_argument(
        '--append-env',
        nargs='+',
        help='Extra environment variables to append, or set, when running, e.g. FOO=foo BAR=bar')
    parser.add_argument(
        '--output-file',
        help='The path to the output log file')
    parser.add_argument(
        '--generate-result-on-success',
        action='store_true',
        default=False,
        help='Generate a result file if the command returns with code zero')

    if '--command' in argv:
        index = argv.index('--command')
        argv, command = argv[0:index + 1] + ['dummy'], argv[index + 1:]
    args = parser.parse_args(argv)
    args.command = command

    # if result file exists remove it before test execution
    if os.path.exists(args.result_file):
        os.remove(args.result_file)

    # create folder if necessary
    if not os.path.exists(os.path.dirname(args.result_file)):
        try:
            os.makedirs(os.path.dirname(args.result_file))
        except OSError as e:
            # catch case where folder has been created in the mean time
            if e.errno != errno.EEXIST:
                raise

    # generate result file with one failed test
    # in case the command segfaults or timeouts and does not generate one
    failure_result_file = _generate_result(
        args.result_file,
        'The test did not generate a result file.'
    )
    with open(args.result_file, 'w') as h:
        h.write(failure_result_file)

    # collect output / exception to generate more detailed result file
    # if the command fails to generate it
    output = ''
    output_handle = None
    if args.output_file:
        output_path = os.path.dirname(args.output_file)
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        output_handle = open(args.output_file, 'wb')

    def log(msg, **kwargs):
        print(msg, **kwargs)
        if output_handle:
            output_handle.write((msg + '\n').encode())
            output_handle.flush()

    env = None
    if args.env or args.append_env:
        env = dict(os.environ)
        if args.env:
            log('-- run_test.py: extra environment variables:')
            for env_str in args.env:
                key, value = separate_env_vars(env_str, 'env', parser)
                log(' - {0}={1}'.format(key, value))
                env[key] = value
        if args.append_env:
            log('-- run_test.py: extra environment variables to append:')
            for env_str in args.append_env:
                key, value = separate_env_vars(env_str, 'append-env', parser)
                log(' - {0}={1}'.format(key, value))
                if key not in env:
                    env[key] = ''
                if not env[key].endswith(os.pathsep):
                    env[key] += os.pathsep
                env[key] += value

    log("-- run_test.py: invoking following command in '%s':\n - %s" %
        (os.getcwd(), ' '.join(args.command)))
    if output_handle:
        output_handle.write('\n'.encode())
        output_handle.flush()

    try:
        proc = subprocess.Popen(
            args.command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            env=env
        )
        while True:
            line = proc.stdout.readline()
            if not line:
                break
            decoded_line = line.decode()
            print(decoded_line, end='')
            output += decoded_line
            if output_handle:
                output_handle.write(line)
                output_handle.flush()
        proc.wait()
        rc = proc.returncode
        if output_handle:
            # separate progress of this script from subprocess output
            output_handle.write('\n\n'.encode())
        log('-- run_test.py: return code ' + str(rc), file=sys.stderr if rc else sys.stdout)
    except Exception as e:
        if output_handle:
            # separate subprocess output from progress of this script
            output_handle.write('\n\n'.encode())
        log('-- run_test.py: invocation failed: ' + str(e), file=sys.stderr)
        output += str(e)
        rc = 1

    if not rc and args.generate_result_on_success:
        # generate result file with one passed test
        # if it was expected that no result file was generated
        # and the command returned with code zero
        log("-- run_test.py: generate result file '%s' with successful test" % args.result_file)
        success_result_file = _generate_result(args.result_file)
        with open(args.result_file, 'w') as h:
            h.write(success_result_file)

    elif os.path.exists(args.result_file):
        # check if content of result file has actually changed
        with open(args.result_file, 'r') as h:
            not_changed = h.read() == failure_result_file

        if not_changed:
            log("-- run_test.py: generate result file '%s' with failed test" % args.result_file,
                file=sys.stderr)
            # regenerate result file to include output / exception of the invoked command
            failure_result_file = _generate_result(
                args.result_file,
                'The test did not generate a result file:\n\n' + output
            )
            with open(args.result_file, 'w') as h:
                h.write(failure_result_file)

        log("-- run_test.py: verify result file '%s'" % args.result_file)
        # if result file exists ensure that it contains valid xml
        # unit test suites are not good about screening out
        # illegal unicode characters
        tree = None
        try:
            tree = ElementTree(None, args.result_file)
        except ParseError as e:
            modified = _tidy_xml(args.result_file)
            if not modified:
                log("Invalid XML in result file '%s': %s" %
                    (args.result_file, str(e)), file=sys.stderr)
            else:
                try:
                    tree = ElementTree(None, args.result_file)
                except ParseError as e:
                    log("Invalid XML in result file '%s' (even after trying to tidy it): %s" %
                        (args.result_file, str(e)), file=sys.stderr)

        if not tree:
            # set error code when result file is not parsable
            rc = 1
        else:
            # set error code when result file contains errors or failures
            root = tree.getroot()
            num_errors = int(root.attrib.get('errors', 0))
            num_failures = int(root.attrib.get('failures', 0))
            if num_errors or num_failures:
                rc = 1

    # ensure that a result file exists at the end
    if not rc and not os.path.exists(args.result_file):
        log('-- run_test.py: override return code since no result file was '
            'generated', file=sys.stderr)
        rc = 1

    return rc
Example #34
    def loadSystems(self, XMLFolder=None):
        """
        Load and pre-process the specified punctuation systems.
        """
        if not self._XMLSystems:  # Only ever do this once
            if XMLFolder == None:
                XMLFolder = os.path.join(
                    os.path.dirname(__file__), "DataFiles",
                    "PunctuationSystems")  # Relative to module, not cwd
            self.__XMLFolder = XMLFolder
            if BibleOrgSysGlobals.verbosityLevel > 2:
                print(
                    _("Loading punctuations systems from {}…").format(
                        self.__XMLFolder))
            filenamePrefix = "BIBLEPUNCTUATIONSYSTEM_"
            for filename in os.listdir(self.__XMLFolder):
                filepart, extension = os.path.splitext(filename)

                if extension.upper() == '.XML' and filepart.upper().startswith(
                        filenamePrefix):
                    punctuationSystemCode = filepart[len(filenamePrefix):]
                    if BibleOrgSysGlobals.verbosityLevel > 3:
                        print(
                            _("Loading {} punctuation system from {}…").format(
                                punctuationSystemCode, filename))
                    self._XMLSystems[punctuationSystemCode] = {}
                    self._XMLSystems[punctuationSystemCode][
                        'tree'] = ElementTree().parse(
                            os.path.join(self.__XMLFolder, filename))
                    assert self._XMLSystems[punctuationSystemCode][
                        'tree']  # Fail here if we didn't load anything at all

                    # Check and remove the header element
                    if self._XMLSystems[punctuationSystemCode][
                            'tree'].tag == self.treeTag:
                        header = self._XMLSystems[punctuationSystemCode][
                            'tree'][0]
                        if header.tag == self.headerTag:
                            self._XMLSystems[punctuationSystemCode][
                                "header"] = header
                            self._XMLSystems[punctuationSystemCode][
                                'tree'].remove(header)
                            BibleOrgSysGlobals.checkXMLNoText(header, "header")
                            BibleOrgSysGlobals.checkXMLNoTail(header, "header")
                            BibleOrgSysGlobals.checkXMLNoAttributes(
                                header, "header")
                            if len(header) > 1:
                                logging.info(
                                    _("Unexpected elements in header"))
                            elif len(header) == 0:
                                logging.info(
                                    _("Missing work element in header"))
                            else:
                                work = header[0]
                                BibleOrgSysGlobals.checkXMLNoText(
                                    work, "work in header")
                                BibleOrgSysGlobals.checkXMLNoTail(
                                    work, "work in header")
                                BibleOrgSysGlobals.checkXMLNoAttributes(
                                    work, "work in header")
                                if work.tag == "work":
                                    self._XMLSystems[punctuationSystemCode][
                                        'version'] = work.find('version').text
                                    self._XMLSystems[punctuationSystemCode][
                                        "date"] = work.find("date").text
                                    self._XMLSystems[punctuationSystemCode][
                                        "title"] = work.find("title").text
                                else:
                                    logging.warning(
                                        _("Missing work element in header"))
                        else:
                            logging.warning(
                                _("Missing header element (looking for {!r} tag)"
                                  ).format(self.headerTag))
                    else:
                        logging.error(
                            _("Expected to load {!r} but got {!r}").format(
                                self.treeTag,
                                self._XMLSystems[punctuationSystemCode]
                                ['tree'].tag))
                    bookCount = 0  # There must be an easier way to do this
                    for subelement in self._XMLSystems[punctuationSystemCode][
                            'tree']:
                        bookCount += 1
                    if BibleOrgSysGlobals.verbosityLevel > 2:
                        print(
                            _("    Loaded {} books for {}").format(
                                bookCount, punctuationSystemCode))
                    logging.info(
                        _("    Loaded {} books for {}").format(
                            bookCount, punctuationSystemCode))

                    if BibleOrgSysGlobals.strictCheckingFlag:
                        self._validateSystem(
                            self._XMLSystems[punctuationSystemCode]['tree'],
                            punctuationSystemCode)
        return self
Example #35
class GraphMLReader(GraphML):
    """Read a GraphML document.  Produces NetworkX graph objects."""
    def __init__(self,
                 node_type=str,
                 edge_key_type=int,
                 force_multigraph=False):
        self.node_type = node_type
        self.edge_key_type = edge_key_type
        self.multigraph = force_multigraph  # If False, test for multiedges
        self.edge_ids = {}  # dict mapping (u,v) tuples to edge id attributes

    def __call__(self, path=None, string=None):
        if path is not None:
            self.xml = ElementTree(file=path)
        elif string is not None:
            self.xml = fromstring(string)
        else:
            raise ValueError("Must specify either 'path' or 'string' as kwarg")
        (keys, defaults) = self.find_graphml_keys(self.xml)
        for g in self.xml.findall(f"{{{self.NS_GRAPHML}}}graph"):
            yield self.make_graph(g, keys, defaults)

    def make_graph(self, graph_xml, graphml_keys, defaults, G=None):
        # set default graph type
        edgedefault = graph_xml.get("edgedefault", None)
        if G is None:
            if edgedefault == "directed":
                G = nx.MultiDiGraph()
            else:
                G = nx.MultiGraph()
        # set defaults for graph attributes
        G.graph["node_default"] = {}
        G.graph["edge_default"] = {}
        for key_id, value in defaults.items():
            key_for = graphml_keys[key_id]["for"]
            name = graphml_keys[key_id]["name"]
            python_type = graphml_keys[key_id]["type"]
            if key_for == "node":
                G.graph["node_default"].update({name: python_type(value)})
            if key_for == "edge":
                G.graph["edge_default"].update({name: python_type(value)})
        # hyperedges are not supported
        hyperedge = graph_xml.find(f"{{{self.NS_GRAPHML}}}hyperedge")
        if hyperedge is not None:
            raise nx.NetworkXError("GraphML reader doesn't support hyperedges")
        # add nodes
        for node_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}node"):
            self.add_node(G, node_xml, graphml_keys, defaults)
        # add edges
        for edge_xml in graph_xml.findall(f"{{{self.NS_GRAPHML}}}edge"):
            self.add_edge(G, edge_xml, graphml_keys)
        # add graph data
        data = self.decode_data_elements(graphml_keys, graph_xml)
        G.graph.update(data)

        # switch to Graph or DiGraph if no parallel edges were found
        if self.multigraph:
            return G

        G = nx.DiGraph(G) if G.is_directed() else nx.Graph(G)
        # add explicit edge "id" from file as attribute in NX graph.
        nx.set_edge_attributes(G, values=self.edge_ids, name="id")
        return G

    def add_node(self, G, node_xml, graphml_keys, defaults):
        """Add a node to the graph.
        """
        # warn on finding unsupported ports tag
        ports = node_xml.find(f"{{{self.NS_GRAPHML}}}port")
        if ports is not None:
            warnings.warn("GraphML port tag not supported.")
        # find the node by id and cast it to the appropriate type
        node_id = self.node_type(node_xml.get("id"))
        # get data/attributes for node
        data = self.decode_data_elements(graphml_keys, node_xml)
        G.add_node(node_id, **data)
        # get child nodes
        if node_xml.attrib.get("yfiles.foldertype") == "group":
            graph_xml = node_xml.find(f"{{{self.NS_GRAPHML}}}graph")
            self.make_graph(graph_xml, graphml_keys, defaults, G)

    def add_edge(self, G, edge_element, graphml_keys):
        """Add an edge to the graph.
        """
        # warn on finding unsupported ports tag
        ports = edge_element.find(f"{{{self.NS_GRAPHML}}}port")
        if ports is not None:
            warnings.warn("GraphML port tag not supported.")

        # raise error if we find mixed directed and undirected edges
        directed = edge_element.get("directed")
        if G.is_directed() and directed == "false":
            msg = "directed=false edge found in directed graph."
            raise nx.NetworkXError(msg)
        if (not G.is_directed()) and directed == "true":
            msg = "directed=true edge found in undirected graph."
            raise nx.NetworkXError(msg)

        source = self.node_type(edge_element.get("source"))
        target = self.node_type(edge_element.get("target"))
        data = self.decode_data_elements(graphml_keys, edge_element)
        # GraphML stores edge ids as an attribute
        # NetworkX uses them as keys in multigraphs too if no key
        # attribute is specified
        edge_id = edge_element.get("id")
        if edge_id:
            # self.edge_ids is used by `make_graph` method for non-multigraphs
            self.edge_ids[source, target] = edge_id
            try:
                edge_id = self.edge_key_type(edge_id)
            except ValueError:  # Could not convert.
                pass
        else:
            edge_id = data.get("key")

        if G.has_edge(source, target):
            # mark this as a multigraph
            self.multigraph = True

        # Use add_edges_from to avoid error with add_edge when `'key' in data`
        # Note there is only one edge here...
        G.add_edges_from([(source, target, edge_id, data)])

    def decode_data_elements(self, graphml_keys, obj_xml):
        """Use the key information to decode the data XML if present."""
        data = {}
        for data_element in obj_xml.findall(f"{{{self.NS_GRAPHML}}}data"):
            key = data_element.get("key")
            try:
                data_name = graphml_keys[key]["name"]
                data_type = graphml_keys[key]["type"]
            except KeyError as e:
                raise nx.NetworkXError(
                    f"Bad GraphML data: no key {key}") from e
            text = data_element.text
            # assume anything with subelements is a yfiles extension
            if text is not None and len(list(data_element)) == 0:
                if data_type == bool:
                    # Ignore cases.
                    # http://docs.oracle.com/javase/6/docs/api/java/lang/
                    # Boolean.html#parseBoolean%28java.lang.String%29
                    data[data_name] = self.convert_bool[text.lower()]
                else:
                    data[data_name] = data_type(text)
            elif len(list(data_element)) > 0:
                # Assume yfiles as subelements, try to extract node_label
                node_label = None
                for node_type in ["ShapeNode", "SVGNode", "ImageNode"]:
                    pref = f"{{{self.NS_Y}}}{node_type}/{{{self.NS_Y}}}"
                    geometry = data_element.find(f"{pref}Geometry")
                    if geometry is not None:
                        data["x"] = geometry.get("x")
                        data["y"] = geometry.get("y")
                    if node_label is None:
                        node_label = data_element.find(f"{pref}NodeLabel")
                if node_label is not None:
                    data["label"] = node_label.text

                # check all the different types of edges available in yEd.
                for e in [
                        "PolyLineEdge",
                        "SplineEdge",
                        "QuadCurveEdge",
                        "BezierEdge",
                        "ArcEdge",
                ]:
                    pref = f"{{{self.NS_Y}}}{e}/{{{self.NS_Y}}}"
                    edge_label = data_element.find(f"{pref}EdgeLabel")
                    if edge_label is not None:
                        break

                if edge_label is not None:
                    data["label"] = edge_label.text
        return data

    def find_graphml_keys(self, graph_element):
        """Extracts all the keys and key defaults from the xml.
        """
        graphml_keys = {}
        graphml_key_defaults = {}
        for k in graph_element.findall(f"{{{self.NS_GRAPHML}}}key"):
            attr_id = k.get("id")
            attr_type = k.get("attr.type")
            attr_name = k.get("attr.name")
            yfiles_type = k.get("yfiles.type")
            if yfiles_type is not None:
                attr_name = yfiles_type
                attr_type = "yfiles"
            if attr_type is None:
                attr_type = "string"
                warnings.warn(f"No key type for id {attr_id}. Using string")
            if attr_name is None:
                raise nx.NetworkXError(f"Unknown key for id {attr_id}.")
            graphml_keys[attr_id] = {
                "name": attr_name,
                "type": self.python_type[attr_type],
                "for": k.get("for"),
            }
            # check for "default" subelement of key element
            default = k.find(f"{{{self.NS_GRAPHML}}}default")
            if default is not None:
                graphml_key_defaults[attr_id] = default.text
        return graphml_keys, graphml_key_defaults
Example #36
 def dump(self, stream):
     if self.prettyprint:
         self.indent(self.xml)
     document = ElementTree(self.xml)
     document.write(stream, encoding=self.encoding, xml_declaration=True)
Example #37
 def load_fixtures(self, commands=None):
     """ loading data """
     config_file = 'pfsense_rule_config.xml'
     self.parse.return_value = ElementTree(
         fromstring(load_fixture(config_file)))
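
The ElementTree(fromstring(...)) pattern above is the usual way to turn an XML string into a tree object that exposes the same API as a parsed file; a minimal standalone sketch with a placeholder document:

from xml.etree.ElementTree import ElementTree, fromstring

xml_text = "<config><rule id='1'/></config>"   # placeholder XML
tree = ElementTree(fromstring(xml_text))
rule = tree.find('rule')
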
Example #38
    def add_text_layer(self,pdf, hocrfile, page_num,height, dpi):
      """Draw an invisible text layer for OCR data.

        This function really needs to get cleaned up
        
      """
      hocr = ElementTree()
      try: 
        # It's possible tesseract has failed and written garbage to this hocr file, so we need to catch any exceptions
          hocr.parse(hocrfile)
      except Exception:
          logging.info("Error loading hocr, not adding any text")
          return 

      logging.debug(xml.etree.ElementTree.tostring(hocr.getroot()))
      for c in hocr.getroot():  # Find the <body> tag
          if c.tag != 'body':
              continue
      for page in c: # Each child in the body is a page tag
          if (page.attrib['class'] != "ocr_page"):
              assert ("Why is this hocr not paging properly??")
          if page.attrib['id'] == 'page_%d' %(page_num):
              break

      for line in page.findall(".//{http://www.w3.org/1999/xhtml}span"):
      #for line in page.findall(".//span"):
        if line.attrib['class'] != 'ocr_line':
          continue
        linebox = self.regex_bbox.search(line.attrib['title']).group(1).split()
        textangle = self.regex_textangle.search(line.attrib['title'])
        if textangle:
            textangle = self._atoi(textangle.group(1))
        else:
            textangle = 0

        try:
          baseline = self.regex_baseline.search(line.attrib['title']).group(1).split()
        except AttributeError:
          baseline = [ 0, 0 ]

        linebox = [float(i) for i in linebox]
        baseline = [float(i) for i in baseline]

        for word in line:
          if word.attrib['class'] != 'ocrx_word':
            continue
          word_text = []
          for child in word.iter():
              if child.text:
                  word_text.append(child.text)
          word.text = ' '.join(word_text)
          if word.text is None:
            continue
          logging.debug("word: %s, angle: %d" % ( word.text.strip(), textangle))


          box = self.regex_bbox.search(word.attrib['title']).group(1).split()
          #b = self.polyval(baseline, (box[0] + box[2]) / 2 - linebox[0]) + linebox[3]
          box = [float(i) for i in box]

          # Transform angle to x,y co-ords needed for proper text placement
          # We only support 0, 90, 180 and 270; anything else just uses the normal orientation for now

          coords = { 0: (box[0], box[1]),
                    90: (box[0], box[3]),  # facing right
                    180: (box[2], box[3]), # upside down
                    270: (box[2], box[1]), # facing left
                    }
          x,y = coords.get(textangle, (box[0], box[1]))

          style = getSampleStyleSheet()
          normal = style["BodyText"]
          normal.alignment = TA_LEFT
          normal.leading = 0
          font_name, font_size = self._get_font_spec(word.attrib['title'])
          normal.fontName = "Helvetica"
          normal.fontSize = font_size

          para = RotatedPara(escape(word.text.strip()), normal, textangle)
          para.wrapOn(pdf, para.minWidth(), 100)  # Not sure what to use as the height  here
          para.drawOn(pdf, x*72/dpi['x'], height - y*72/dpi['y'])
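The regular expressions referenced above (self.regex_bbox, self.regex_baseline, self.regex_textangle) are not part of this excerpt. A plausible sketch of patterns that work on typical hOCR title strings, offered as an assumption rather than the original definitions:

import re

# Assumed patterns: capture the four bbox integers, the two baseline numbers,
# and the textangle value from an hOCR "title" attribute.
regex_bbox = re.compile(r"bbox((\s+\d+){4})")
regex_baseline = re.compile(r"baseline((\s+[-\d.]+){2})")
regex_textangle = re.compile(r"textangle (\d+)")

title = "bbox 36 92 618 128; baseline 0.005 -6; textangle 0"
print(regex_bbox.search(title).group(1).split())      # ['36', '92', '618', '128']
print(regex_baseline.search(title).group(1).split())  # ['0.005', '-6']
print(regex_textangle.search(title).group(1))         # 0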
Example #39
    def run(self):
        log_progress(__modname__,
                     "QC report generation start",
                     f=self._log_file)
        ### root element
        qc_report = Element("qc_report")

        ### main page
        main_page = self.main_page()
        qc_report.append(main_page)

        ### header contents
        header_contents = self.header_contents()
        qc_report.append(header_contents)

        ### contents
        # Step 1. analysis information
        analysis_information = self.analysis_information()
        qc_report.append(analysis_information)
        # Step 2. sequencing information
        sequencing_information = self.sequencing_information()
        qc_report.append(sequencing_information)
        # Step 3. Raw fastq format
        raw_fastq_format = self.raw_fastq_format()
        qc_report.append(raw_fastq_format)
        # Step 4. Data summary
        data_summary = self.data_summary()
        qc_report.append(data_summary)
        # Step 5. Raw read quality
        raw_read_quality = self.raw_read_quality()
        qc_report.append(raw_read_quality)
        # Step 6. mapping statistics
        mapping_statistics = self.mapping_statistics()
        qc_report.append(mapping_statistics)
        # Step 7. coverage statistics
        coverage_statistics = self.coverage_statistics()
        qc_report.append(coverage_statistics)
        # Step 8. Warning
        warning = self.warning()
        qc_report.append(warning)
        # Step 9. software and databases
        tools_and_databases = self.tools_and_databases()
        qc_report.append(tools_and_databases)

        ### generate xml file
        self.indent(qc_report)
        ElementTree(qc_report).write(self._xml_file)

        ### Generate pdf file
        exec_cmd = [
            "/NGENEBIO/workflow-app/fop-2.2/fop/fop", "-c",
            "/NGENEBIO/workflow/utils/fop/fop_config.xml", "-xml",
            self._xml_file, "-xsl",
            "/NGENEBIO/workflow/utils/fop/qc_report_template_v3.xsl", "-pdf",
            self._pdf_file
        ]
        proc = subprocess.Popen(exec_cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        print(stderr)
        log_progress(__modname__,
                     "QC report generation finished",
                     f=self._log_file)
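self.indent() is defined elsewhere in that project and is not shown here. A minimal sketch following the widely used ElementTree pretty-printing recipe (Python 3.9+ also ships xml.etree.ElementTree.indent for this):

def indent(elem, level=0):
    # Inject newlines and two-space indentation into .text/.tail so that
    # ElementTree(elem).write(...) produces human-readable output.
    i = "\n" + level * "  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for child in elem:
            indent(child, level + 1)
        # the last child's tail dedents back to the parent's level
        if not child.tail or not child.tail.strip():
            child.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i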
Example #40
class USFMMarkersConverter:
    """
    Class for reading, validating, and converting USFMMarkers.
    This is only intended as a transitory class (used at start-up).
    The USFMMarkers class has functions more generally useful.
    """
    def __init__(
            self):  # We can't give this any parameters because of the singleton
        """
        Constructor: expects the filepath of the source XML file.
        Loads (and crudely validates the XML file) into an element tree.
        """
        self._filenameBase = "USFMMarkers"

        # These fields are used for parsing the XML
        self._treeTag = "USFMMarkers"
        self._headerTag = "header"
        self._mainElementTag = "USFMMarker"

        # These fields are used for automatically checking/validating the XML
        self._compulsoryAttributes = ()
        self._optionalAttributes = ()
        self._uniqueAttributes = self._compulsoryAttributes + self._optionalAttributes
        self._compulsoryElements = (
            "nameEnglish",
            "marker",
            "compulsory",
            "level",
            "numberable",
            "nests",
            "hasContent",
            "printed",
            "closed",
            "occursIn",
            "deprecated",
        )
        self._optionalElements = ("description", )
        #self._uniqueElements = self._compulsoryElements + self.optionalElements
        self._uniqueElements = (
            "nameEnglish",
            "marker",
        )

        # These are fields that we will fill later
        self._XMLheader, self._XMLtree = None, None
        self.__DataDicts = {}  # Used for import
        self.titleString = self.ProgVersion = self.dateString = ''

    # end of __init__

    def loadAndValidate(self, XMLFilepath=None):
        """
        Loads (and crudely validates the XML file) into an element tree.
            Allows the filepath of the source XML file to be specified, otherwise uses the default.
        """
        if self._XMLtree is None:  # We mustn't have already loaded the data
            if XMLFilepath is None:
                XMLFilepath = os.path.join(
                    os.path.dirname(__file__), "DataFiles",
                    self._filenameBase + ".xml")  # Relative to module, not cwd
            self.__load(XMLFilepath)
            if BibleOrgSysGlobals.strictCheckingFlag:
                self.__validate()
        else:  # The data must have been already loaded
            if XMLFilepath is not None and XMLFilepath != self.__XMLFilepath:
                logging.error(
                    _("Bible books codes are already loaded -- your different filepath of {!r} was ignored"
                      ).format(XMLFilepath))
        return self

    # end of loadAndValidate

    def __load(self, XMLFilepath):
        """
        Load the source XML file and remove the header from the tree.
        Also, extracts some useful elements from the header element.
        """
        assert XMLFilepath
        self.__XMLFilepath = XMLFilepath
        assert self._XMLtree is None or len(
            self._XMLtree) == 0  # Make sure we're not doing this twice

        if BibleOrgSysGlobals.verbosityLevel > 2:
            print(
                _("Loading USFMMarkers XML file from {!r}…").format(
                    self.__XMLFilepath))
        self._XMLtree = ElementTree().parse(self.__XMLFilepath)
        assert self._XMLtree  # Fail here if we didn't load anything at all

        if self._XMLtree.tag == self._treeTag:
            header = self._XMLtree[0]
            if header.tag == self._headerTag:
                self._XMLheader = header
                self._XMLtree.remove(header)
                BibleOrgSysGlobals.checkXMLNoText(header, "header")
                BibleOrgSysGlobals.checkXMLNoTail(header, "header")
                BibleOrgSysGlobals.checkXMLNoAttributes(header, "header")
                if len(header) > 1:
                    logging.info(_("Unexpected elements in header"))
                elif len(header) == 0:
                    logging.info(_("Missing work element in header"))
                else:
                    work = header[0]
                    BibleOrgSysGlobals.checkXMLNoText(work, "work in header")
                    BibleOrgSysGlobals.checkXMLNoTail(work, "work in header")
                    BibleOrgSysGlobals.checkXMLNoAttributes(
                        work, "work in header")
                    if work.tag == "work":
                        self.ProgVersion = work.find('version').text
                        self.dateString = work.find("date").text
                        self.titleString = work.find("title").text
                    else:
                        logging.warning(_("Missing work element in header"))
            else:
                logging.warning(
                    _("Missing header element (looking for {!r} tag)".format(
                        self._headerTag)))
            if header.tail is not None and header.tail.strip():
                logging.error(
                    _("Unexpected {!r} tail data after header").format(
                        header.tail))
        else:
            logging.error(
                _("Expected to load {!r} but got {!r}").format(
                    self._treeTag, self._XMLtree.tag))

    # end of __load

    def __validate(self):
        """
        Check/validate the loaded data.
        """
        assert self._XMLtree

        uniqueDict = {}
        for elementName in self._uniqueElements:
            uniqueDict["Element_" + elementName] = []
        for attributeName in self._uniqueAttributes:
            uniqueDict["Attribute_" + attributeName] = []

        expectedID = 1
        for j, element in enumerate(self._XMLtree):
            if element.tag == self._mainElementTag:
                BibleOrgSysGlobals.checkXMLNoText(element, element.tag)
                BibleOrgSysGlobals.checkXMLNoTail(element, element.tag)
                if not self._compulsoryAttributes and not self._optionalAttributes:
                    BibleOrgSysGlobals.checkXMLNoAttributes(
                        element, element.tag)
                if not self._compulsoryElements and not self._optionalElements:
                    BibleOrgSysGlobals.checkXMLNoSubelements(
                        element, element.tag)

                # Check compulsory attributes on this main element
                for attributeName in self._compulsoryAttributes:
                    attributeValue = element.get(attributeName)
                    if attributeValue is None:
                        logging.error(
                            _("Compulsory {!r} attribute is missing from {} element in record {}"
                              ).format(attributeName, element.tag, j))
                    if not attributeValue:
                        logging.warning(
                            _("Compulsory {!r} attribute is blank on {} element in record {}"
                              ).format(attributeName, element.tag, j))

                # Check optional attributes on this main element
                for attributeName in self._optionalAttributes:
                    attributeValue = element.get(attributeName)
                    if attributeValue is not None:
                        if not attributeValue:
                            logging.warning(
                                _("Optional {!r} attribute is blank on {} element in record {}"
                                  ).format(attributeName, element.tag, j))

                # Check for unexpected additional attributes on this main element
                for attributeName in element.keys():
                    attributeValue = element.get(attributeName)
                    if attributeName not in self._compulsoryAttributes and attributeName not in self._optionalAttributes:
                        logging.warning(
                            _("Additional {!r} attribute ({!r}) found on {} element in record {}"
                              ).format(attributeName, attributeValue,
                                       element.tag, j))

                # Check the attributes that must contain unique information (in that particular field -- doesn't check across different attributes)
                for attributeName in self._uniqueAttributes:
                    attributeValue = element.get(attributeName)
                    if attributeValue is not None:
                        if attributeValue in uniqueDict["Attribute_" +
                                                        attributeName]:
                            logging.error(
                                _("Found {!r} data repeated in {!r} field on {} element in record {}"
                                  ).format(attributeValue, attributeName,
                                           element.tag, j))
                        uniqueDict["Attribute_" +
                                   attributeName].append(attributeValue)

                # Get the marker to use as a record ID
                marker = element.find("marker").text

                # Check compulsory elements
                for elementName in self._compulsoryElements:
                    if element.find(elementName) is None:
                        logging.error(
                            _("Compulsory {!r} element is missing in record with marker {!r} (record {})"
                              ).format(elementName, marker, j))
                    elif not element.find(elementName).text:
                        logging.warning(
                            _("Compulsory {!r} element is blank in record with marker {!r} (record {})"
                              ).format(elementName, marker, j))

                # Check optional elements
                for elementName in self._optionalElements:
                    if element.find(elementName) is not None:
                        if not element.find(elementName).text:
                            logging.warning(
                                _("Optional {!r} element is blank in record with marker {!r} (record {})"
                                  ).format(elementName, marker, j))

                # Check for unexpected additional elements
                for subelement in element:
                    if subelement.tag not in self._compulsoryElements and subelement.tag not in self._optionalElements:
                        logging.warning(
                            _("Additional {!r} element ({!r}) found in record with marker {!r} (record {})"
                              ).format(subelement.tag, subelement.text, marker,
                                       j))

                # Check the elements that must contain unique information (in that particular element -- doesn't check across different elements)
                for elementName in self._uniqueElements:
                    if element.find(elementName) is not None:
                        text = element.find(elementName).text
                        if text in uniqueDict["Element_" + elementName]:
                            logging.error(
                                _("Found {!r} data repeated in {!r} element in record with marker {!r} (record {})"
                                  ).format(text, elementName, marker, j))
                        uniqueDict["Element_" + elementName].append(text)
            else:
                logging.warning(
                    _("Unexpected element: {} in record {}").format(
                        element.tag, j))
            if element.tail is not None and element.tail.strip():
                logging.error(
                    _("Unexpected {!r} tail data after {} element in record {}"
                      ).format(element.tail, element.tag, j))
        if self._XMLtree.tail is not None and self._XMLtree.tail.strip():
            logging.error(
                _("Unexpected {!r} tail data after {} element").format(
                    self._XMLtree.tail, self._XMLtree.tag))

    # end of __validate

    def __str__(self):
        """
        This method returns the string representation of this USFMMarkersConverter object.

        @return: a summary of the loaded markers formatted as a string
        @rtype: string
        """
        indent = 2
        result = "USFMMarkersConverter object"
        if self.titleString:
            result += ('\n' if result else
                       '') + ' ' * indent + _("Title: {}").format(
                           self.titleString)
        if self.ProgVersion:
            result += ('\n' if result else
                       '') + ' ' * indent + _("Version: {}").format(
                           self.ProgVersion)
        if self.dateString:
            result += ('\n' if result else ''
                       ) + ' ' * indent + _("Date: {}").format(self.dateString)
        if self._XMLtree is not None:
            result += ('\n' if result else
                       '') + ' ' * indent + _("Number of entries = {}").format(
                           len(self._XMLtree))
        return result

    # end of __str__

    def __len__(self):
        """ Returns the number of SFM markers loaded. """
        return len(self._XMLtree)

    # end of __len__

    def importDataToPython(self):
        """
        Loads (and pivots) the data (not including the header) into suitable Python containers to use in a Python program.
        (Of course, you can just use the elementTree in self._XMLtree if you prefer.)
        """
        assert self._XMLtree
        if self.__DataDicts:  # We've already done an import/restructuring -- no need to repeat it
            return self.__DataDicts

        # Load and validate entries and create the dictionaries and lists
        # Note that the combined lists include the numbered markers, e.g., s as well as s1, s2, …
        rawMarkerDict, numberedMarkerList, combinedMarkerDict, = OrderedDict(
        ), [], {}
        conversionDict, backConversionDict = {}, {}
        newlineMarkersList, numberedNewlineMarkersList, combinedNewlineMarkersList = [], [], []
        internalMarkersList, numberedInternalMarkersList, combinedInternalMarkersList = [], [], []
        noteMarkersList, deprecatedMarkersList = [], []
        for element in self._XMLtree:
            # Get the required information out of the tree for this element
            # Start with the compulsory elements
            nameEnglish = element.find(
                'nameEnglish'
            ).text  # This name is really just a comment element
            marker = element.find('marker').text
            if marker.lower() != marker:
                logging.error(
                    _("Marker {!r} should be lower case").format(marker))
            compulsory = element.find('compulsory').text
            if compulsory not in ('Yes', 'No'):
                logging.error(
                    _("Unexpected {!r} compulsory field for marker {!r}").
                    format(compulsory, marker))
            level = element.find('level').text
            compulsoryFlag = compulsory == 'Yes'
            if level == 'Newline':
                newlineMarkersList.append(marker)
                combinedNewlineMarkersList.append(marker)
            elif level == 'Internal':
                internalMarkersList.append(marker)
            elif level == 'Note':
                noteMarkersList.append(marker)
            else:
                logging.error(
                    _("Unexpected {!r} level field for marker {!r}").format(
                        level, marker))
            numberable = element.find('numberable').text
            if numberable not in ('Yes', 'No'):
                logging.error(
                    _("Unexpected {!r} numberable field for marker {!r}").
                    format(numberable, marker))
            numberableFlag = numberable == "Yes"
            if numberableFlag and level == "Character":
                logging.error(
                    _("Unexpected {!r} numberable field for character marker {!r}"
                      ).format(numberable, marker))
            nests = element.find("nests").text
            if nests not in ('Yes', 'No'):
                logging.error(
                    _("Unexpected {!r} nests field for marker {!r}").format(
                        nests, marker))
            nestsFlag = nests == 'Yes'
            hasContent = element.find('hasContent').text
            if hasContent not in ('Always', 'Never', 'Sometimes'):
                logging.error(
                    _("Unexpected {!r} hasContent field for marker {!r}").
                    format(hasContent, marker))
            printed = element.find('printed').text
            if printed not in ('Yes', 'No'):
                logging.error(
                    _("Unexpected {!r} printed field for marker {!r}").format(
                        printed, marker))
            printedFlag = printed == 'Yes'
            closed = element.find('closed').text
            if closed not in ('No', 'Always', 'Optional'):
                logging.error(
                    _("Unexpected {!r} closed field for marker {!r}").format(
                        closed, marker))
            occursIn = element.find('occursIn').text
            if occursIn not in ('Header', 'Introduction', 'Numbering', 'Text',
                                'Canonical Text', 'Poetry', 'Text, Poetry',
                                'Acrostic verse', 'Table row', 'Footnote',
                                'Cross-reference', 'Front and back matter'):
                logging.error(
                    _("Unexpected {!r} occursIn field for marker {!r}").format(
                        occursIn, marker))
            deprecated = element.find('deprecated').text
            if deprecated not in ('Yes', 'No'):
                logging.error(
                    _("Unexpected {!r} deprecated field for marker {!r}").
                    format(deprecated, marker))
            deprecatedFlag = deprecated == 'Yes'

            # The optional elements are set to None if they don't exist
            #closed = None if element.find("closed") is None else element.find("closed").text
            #if closed is not None and closed not in ( "No", "Always", "Optional" ): logging.error( _("Unexpected {!r} closed field for marker {!r}").format( closed, marker ) )
            #if level=="Character" and closed is None: logging.error( _("Entry for character marker {!r} doesn't have a \"closed\" field").format( marker ) )
            description = None if element.find(
                "description") is None else element.find("description").text
            if description is not None: assert description

            # Now put it into my dictionaries and lists for easy access
            #   The marker is lowercase by definition
            if "marker" in self._uniqueElements:
                assert marker not in rawMarkerDict  # Shouldn't be any duplicates
            rawMarkerDict[marker] = {
                "compulsoryFlag": compulsoryFlag,
                "level": level,
                "numberableFlag": numberableFlag,
                "nestsFlag": nestsFlag,
                "hasContent": hasContent,
                "occursIn": occursIn,
                "printedFlag": printedFlag,
                "closed": closed,
                "deprecatedFlag": deprecatedFlag,
                "description": description,
                "nameEnglish": nameEnglish
            }
            combinedMarkerDict[marker] = marker
            if numberableFlag:  # We have some extra work to do
                conversionDict[marker] = marker + '1'
                for suffix in '1234':  # These are the suffix digits that we allow
                    numberedMarker = marker + suffix
                    backConversionDict[numberedMarker] = marker
                    numberedMarkerList.append(numberedMarker)
                    combinedMarkerDict[numberedMarker] = marker
                    if marker in newlineMarkersList:
                        numberedNewlineMarkersList.append(numberedMarker)
                        combinedNewlineMarkersList.append(numberedMarker)
                    else:
                        numberedInternalMarkersList.append(numberedMarker)
                        combinedInternalMarkersList.append(numberedMarker)
                    if deprecatedFlag:
                        deprecatedMarkersList.append(numberedMarker)
            else:  # it's not numberable
                numberedMarkerList.append(marker)
                if marker in newlineMarkersList:
                    numberedNewlineMarkersList.append(marker)
                else:
                    numberedInternalMarkersList.append(marker)
                if deprecatedFlag: deprecatedMarkersList.append(marker)

        #print( conversionDict ); print( backConversionDict )
        #print( "newlineMarkersList", len(newlineMarkersList), newlineMarkersList )
        #print( "numberedNewlineMarkersList", len(numberedNewlineMarkersList), numberedNewlineMarkersList )
        #print( "combinedNewlineMarkersList", len(combinedNewlineMarkersList), combinedNewlineMarkersList )
        #print( "internalMarkersList", len(internalMarkersList), internalMarkersList )
        #print( "deprecatedMarkersList", len(deprecatedMarkersList), deprecatedMarkersList )
        self.__DataDicts = {
            "rawMarkerDict": rawMarkerDict,
            "numberedMarkerList": numberedMarkerList,
            "combinedMarkerDict": combinedMarkerDict,
            "conversionDict": conversionDict,
            "backConversionDict": backConversionDict,
            "newlineMarkersList": newlineMarkersList,
            "numberedNewlineMarkersList": numberedNewlineMarkersList,
            "combinedNewlineMarkersList": combinedNewlineMarkersList,
            "internalMarkersList": internalMarkersList,
            "numberedInternalMarkersList": numberedInternalMarkersList,
            "combinedInternalMarkersList": combinedInternalMarkersList,
            "noteMarkersList": noteMarkersList,
            "deprecatedMarkersList": deprecatedMarkersList,
        }
        return self.__DataDicts  # Just delete any of the dictionaries that you don't need

    # end of importDataToPython

    def pickle(self, filepath=None):
        """
        Writes the information tables to a .pickle file that can be easily loaded into a Python3 program.
        """
        import pickle

        assert self._XMLtree
        self.importDataToPython()
        assert self.__DataDicts

        if not filepath:
            folder = os.path.join(
                os.path.split(self.__XMLFilepath)[0], "DerivedFiles/")
            if not os.path.exists(folder): os.mkdir(folder)
            filepath = os.path.join(folder,
                                    self._filenameBase + "_Tables.pickle")
        if BibleOrgSysGlobals.verbosityLevel > 1:
            print(_("Exporting to {}…").format(filepath))
        with open(filepath, 'wb') as myFile:
            pickle.dump(self.__DataDicts, myFile)

    # end of pickle

    def exportDataToPython(self, filepath=None):
        """
        Writes the information tables to a .py file that can be cut and pasted into a Python program.
        """
        def exportPythonDict(theFile, theDict, dictName, keyComment,
                             fieldsComment):
            """Exports theDict to theFile."""
            assert isinstance(theDict, dict)
            for dictKey in theDict.keys():  # Have to iterate this :(
                fieldsCount = len(theDict[dictKey]) if isinstance(
                    theDict[dictKey], (tuple, dict, list)) else 1
                break  # We only check the first (random) entry we get
            theFile.write(
                "{} = {{\n  # Key is {}\n  # Fields ({}) are: {}\n".format(
                    dictName, keyComment, fieldsCount, fieldsComment))
            for dictKey in sorted(theDict.keys()):
                theFile.write('  {}: {},\n'.format(repr(dictKey),
                                                   repr(theDict[dictKey])))
            theFile.write("}}\n# end of {} ({} entries)\n\n".format(
                dictName, len(theDict)))

        # end of exportPythonDict

        def exportPythonOrderedDict(theFile, theDict, dictName, keyComment,
                                    fieldsComment):
            """Exports theDict to theFile."""
            assert isinstance(theDict, OrderedDict)
            for dictKey in theDict.keys():  # Have to iterate this :(
                fieldsCount = len(theDict[dictKey]) if isinstance(
                    theDict[dictKey], (tuple, dict, list)) else 1
                break  # We only check the first (random) entry we get
            theFile.write(
                '{} = OrderedDict([\n    # Key is {}\n    # Fields ({}) are: {}\n'
                .format(dictName, keyComment, fieldsCount, fieldsComment))
            for dictKey in theDict.keys():
                theFile.write('  ({}, {}),\n'.format(repr(dictKey),
                                                     repr(theDict[dictKey])))
            theFile.write("]), # end of {} ({} entries)\n\n".format(
                dictName, len(theDict)))

        # end of exportPythonOrderedDict

        def exportPythonList(theFile, theList, listName, dummy, fieldsComment):
            """Exports theList to theFile."""
            assert isinstance(theList, list)
            fieldsCount = len(theList[0]) if isinstance(
                theList[0], (tuple, dict, list)) else 1
            theFile.write('{} = [\n    # Fields ({}) are: {}\n'.format(
                listName, fieldsCount, fieldsComment))
            for j, entry in enumerate(theList):
                theFile.write('  {}, # {}\n'.format(repr(entry), j))
            theFile.write("], # end of {} ({} entries)\n\n".format(
                listName, len(theList)))

        # end of exportPythonList

        assert self._XMLtree
        self.importDataToPython()
        assert self.__DataDicts

        if not filepath:
            filepath = os.path.join(
                os.path.split(self.__XMLFilepath)[0], "DerivedFiles",
                self._filenameBase + "_Tables.py")
        if BibleOrgSysGlobals.verbosityLevel > 1:
            print(_("Exporting to {}…").format(filepath))
        with open(filepath, 'wt', encoding='utf-8') as myFile:
            myFile.write("# {}\n#\n".format(filepath))
            myFile.write(
                "# This UTF-8 file was automatically generated by USFMMarkers.py V{} on {}\n#\n"
                .format(ProgVersion, datetime.now()))
            if self.titleString:
                myFile.write("# {} data\n".format(self.titleString))
            if self.ProgVersion:
                myFile.write("#  Version: {}\n".format(self.ProgVersion))
            if self.dateString:
                myFile.write("#  Date: {}\n#\n".format(self.dateString))
            myFile.write(
                "#   {} {} loaded from the original XML file.\n#\n\n".format(
                    len(self._XMLtree), self._treeTag))
            myFile.write("from collections import OrderedDict\n\n")
            dictInfo = {
                "rawMarkerDict":
                (exportPythonOrderedDict,
                 "rawMarker (in the original XML order)", "specified"),
                "numberedMarkerList":
                (exportPythonList, "marker", "rawMarker"),
                "combinedMarkerDict":
                (exportPythonDict, "marker", "rawMarker"),
                "conversionDict":
                (exportPythonDict, "rawMarker", "numberedMarker"),
                "backConversionDict": (exportPythonDict, "numberedMarker",
                                       "rawMarker"),
                "newlineMarkersList": (exportPythonList, "", "rawMarker"),
                "numberedNewlineMarkersList": (exportPythonList, "",
                                               "rawMarker"),
                "combinedNewlineMarkersList": (exportPythonList, "",
                                               "rawMarker"),
                "internalMarkersList": (exportPythonList, "", "rawMarker"),
                "numberedInternalMarkersList": (exportPythonList, "",
                                                "rawMarker"),
                "combinedInternalMarkersList": (exportPythonList, "",
                                                "rawMarker"),
                "noteMarkersList": (exportPythonList, "", "rawMarker"),
                "deprecatedMarkersList": (exportPythonList, "", "rawMarker")
            }
            for dictName in self.__DataDicts:
                exportFunction, keyComment, fieldsComment = dictInfo[dictName]
                exportFunction(myFile, self.__DataDicts[dictName], dictName,
                               keyComment, fieldsComment)
            myFile.write("# end of {}".format(os.path.basename(filepath)))

    # end of exportDataToPython

    def exportDataToJSON(self, filepath=None):
        """
        Writes the information tables to a .json file that can be easily loaded into a Java program.

        See http://en.wikipedia.org/wiki/JSON.
        """
        import json

        assert self._XMLtree
        self.importDataToPython()
        assert self.__DataDicts

        if not filepath:
            filepath = os.path.join(
                os.path.split(self.__XMLFilepath)[0], "DerivedFiles",
                self._filenameBase + "_Tables.json")
        if BibleOrgSysGlobals.verbosityLevel > 1:
            print(_("Exporting to {}…").format(filepath))
        with open(filepath, 'wt', encoding='utf-8') as myFile:
            json.dump(self.__DataDicts, myFile, indent=2)

    # end of exportDataToJSON

    def exportDataToC(self, filepath=None):
        """
        Writes the information tables to .h and .c files that can be included in C and C++ programs.

        NOTE: The (optional) filepath should not have the file extension specified -- this is added automatically.
        """
        def exportPythonDict(hFile, cFile, theDict, dictName, sortedBy,
                             structure):
            """ Exports theDict to the .h and .c files. """
            def convertEntry(entry):
                """ Convert special characters in an entry… """
                result = ""
                if isinstance(entry, tuple):
                    for field in entry:
                        if result: result += ", "  # Separate the fields
                        if field is None: result += '""'
                        elif isinstance(field, str):
                            result += '"' + str(field).replace('"',
                                                               '\\"') + '"'
                        elif isinstance(field, int):
                            result += str(field)
                        else:
                            logging.error(
                                _("Cannot convert unknown field type {!r} in entry {!r}"
                                  ).format(field, entry))
                elif isinstance(entry, dict):
                    for key in sorted(entry.keys()):
                        field = entry[key]
                        if result: result += ", "  # Separate the fields
                        if field is None: result += '""'
                        elif isinstance(field, str):
                            result += '"' + str(field).replace('"',
                                                               '\\"') + '"'
                        elif isinstance(field, int):
                            result += str(field)
                        else:
                            logging.error(
                                _("Cannot convert unknown field type {!r} in entry {!r}"
                                  ).format(field, entry))
                else:
                    logging.error(
                        _("Can't handle this type of entry yet: {}").format(
                            repr(entry)))
                return result

            # end of convertEntry

            for dictKey in theDict.keys():  # Have to iterate this :(
                fieldsCount = len(
                    theDict[dictKey]
                ) + 1  # Add one since we include the key in the count
                break  # We only check the first (random) entry we get

            #hFile.write( "typedef struct {}EntryStruct { {} } {}Entry;\n\n".format( dictName, structure, dictName ) )
            hFile.write("typedef struct {}EntryStruct {{\n".format(dictName))
            for declaration in structure.split(';'):
                adjDeclaration = declaration.strip()
                if adjDeclaration:
                    hFile.write("    {};\n".format(adjDeclaration))
            hFile.write("}} {}Entry;\n\n".format(dictName))

            cFile.write(
                "const static {}Entry\n {}[{}] = {{\n  // Fields ({}) are {}\n  // Sorted by {}\n"
                .format(dictName, dictName, len(theDict), fieldsCount,
                        structure, sortedBy))
            for dictKey in sorted(theDict.keys()):
                if isinstance(dictKey, str):
                    cFile.write("  {{\"{}\", {}}},\n".format(
                        dictKey, convertEntry(theDict[dictKey])))
                elif isinstance(dictKey, int):
                    cFile.write("  {{{}, {}}},\n".format(
                        dictKey, convertEntry(theDict[dictKey])))
                else:
                    logging.error(
                        _("Can't handle this type of key data yet: {}").format(
                            dictKey))
            cFile.write("]}}; // {} ({} entries)\n\n".format(
                dictName, len(theDict)))

        # end of exportPythonDict

        assert self._XMLtree
        self.importDataToPython()
        assert self.__DataDicts

        raise Exception("C export not written yet, sorry.")
        if not filepath:
            filepath = os.path.join(
                os.path.split(self.__XMLFilepath)[0], "DerivedFiles",
                self._filenameBase + "_Tables")
        hFilepath = filepath + '.h'
        cFilepath = filepath + '.c'
        if BibleOrgSysGlobals.verbosityLevel > 1:
            print(_("Exporting to {}…").format(
                cFilepath))  # Don't bother telling them about the .h file
        ifdefName = self._filenameBase.upper() + "_Tables_h"

        with open( hFilepath, 'wt', encoding='utf-8' ) as myHFile, \
             open( cFilepath, 'wt', encoding='utf-8' ) as myCFile:
            myHFile.write("// {}\n//\n".format(hFilepath))
            myCFile.write("// {}\n//\n".format(cFilepath))
            lines = "// This UTF-8 file was automatically generated by USFMMarkers.py V{} on {}\n//\n".format(
                ProgVersion, datetime.now())
            myHFile.write(lines)
            myCFile.write(lines)
            if self.titleString:
                lines = "// {} data\n".format(self.titleString)
                myHFile.write(lines)
                myCFile.write(lines)
            if self.ProgVersion:
                lines = "//  Version: {}\n".format(self.ProgVersion)
                myHFile.write(lines)
                myCFile.write(lines)
            if self.dateString:
                lines = "//  Date: {}\n//\n".format(self.dateString)
                myHFile.write(lines)
                myCFile.write(lines)
            myCFile.write(
                "//   {} {} loaded from the original XML file.\n//\n\n".format(
                    len(self._XMLtree), self._treeTag))
            myHFile.write("\n#ifndef {}\n#define {}\n\n".format(
                ifdefName, ifdefName))
            myCFile.write('#include "{}"\n\n'.format(
                os.path.basename(hFilepath)))

            CHAR = "const unsigned char"
            BYTE = "const int"
            dictInfo = {
                "referenceNumberDict":
                ("referenceNumber (integer 1..255)",
                 "{} referenceNumber; {}* ByzantineAbbreviation; {}* CCELNumberString; {}* NETBibleAbbreviation; {}* OSISAbbreviation; {} ParatextAbbreviation[3+1]; {} ParatextNumberString[2+1]; {}* SBLAbbreviation; {}* SwordAbbreviation; {}* nameEnglish; {}* numExpectedChapters; {}* possibleAlternativeBooks; {} marker[3+1];"
                 .format(BYTE, CHAR, CHAR, CHAR, CHAR, CHAR, CHAR, CHAR, CHAR,
                         CHAR, CHAR, CHAR, CHAR)),
                "rawMarkerDict":
                ("marker",
                 "{} marker[3+1]; {}* ByzantineAbbreviation; {}* CCELNumberString; {} referenceNumber; {}* NETBibleAbbreviation; {}* OSISAbbreviation; {} ParatextAbbreviation[3+1]; {} ParatextNumberString[2+1]; {}* SBLAbbreviation; {}* SwordAbbreviation; {}* nameEnglish; {}* numExpectedChapters; {}* possibleAlternativeBooks;"
                 .format(CHAR, CHAR, CHAR, BYTE, CHAR, CHAR, CHAR, CHAR, CHAR,
                         CHAR, CHAR, CHAR, CHAR)),
                "CCELDict":
                ("CCELNumberString",
                 "{}* CCELNumberString; {} referenceNumber; {} marker[3+1];".
                 format(CHAR, BYTE, CHAR)),
                "SBLDict":
                ("SBLAbbreviation",
                 "{}* SBLAbbreviation; {} referenceNumber; {} marker[3+1];".
                 format(CHAR, BYTE, CHAR)),
                "EnglishNameDict":
                ("nameEnglish",
                 "{}* nameEnglish; {} referenceNumber; {} marker[3+1];".format(
                     CHAR, BYTE, CHAR))
            }

            for dictName, dictData in self.__DataDicts.items():
                exportPythonDict(myHFile, myCFile, dictData, dictName,
                                 dictInfo[dictName][0], dictInfo[dictName][1])

            myHFile.write("#endif // {}\n\n".format(ifdefName))
            myHFile.write("// end of {}".format(os.path.basename(hFilepath)))
            myCFile.write("// end of {}".format(os.path.basename(cFilepath)))
Example #41
 def _tree(self):
     try:
         return ElementTree(file=self.xml)
     except IOError as e:
         raise DataFileNotAvailableException(e)
Example #42
def createXML(sources, startFrame, endFrame, fps, timecode, audioRate,
              ardourBasename, audiosFolder):
    '''Creates full Ardour XML to be written to a file'''
    global idCounter
    sources, repeated, tracks, idCounter = getAudioTimeline(audioRate, fps)
    tracks = sorted(set(tracks))[::-1]
    sampleFormat = checkSampleFormat()
    ardourStart = toSamples((startFrame - 1), audioRate, fps)
    ardourEnd = toSamples((endFrame - 1), audioRate, fps)

    ######## ------------------------------------------------------------------
    ######## STATIC XML SECTIONS
    ######## ------------------------------------------------------------------

    Session = Element("Session")  # XML root = Session
    tree = ElementTree(Session)

    # Create Session Elements + Attributes
    xmlSections = [
        "Config", "Metadata", "Sources", "Regions", "Locations", "Bundles",
        "Routes", "Playlists", "UnusedPlaylists", "RouteGroups", "Click",
        "Speakers", "TempoMap", "ControlProtocols", "Extra"
    ]

    for section in xmlSections:
        Session.append(Element(section))

    # Create Option, IO, Tempo and Meter + Attributes
    for counter in range(
            valLength(atOption(audiosFolder, sampleFormat, timecode))):
        Option = SubElement(Session[0], "Option")  # Session > Config > Option
        createSubElementsMulti(Option,
                               atOption(audiosFolder, sampleFormat, timecode),
                               counter)

    Location = SubElement(Session[4],
                          "Location")  # Session > Locations > Location
    IO = SubElement(Session[10], "IO")  # Session > Click > IO
    Tempo = SubElement(Session[12], "Tempo")  # Session > TempoMap > Tempo
    Meter = SubElement(Session[12], "Meter")  # Session > TempoMap > Meter

    createSubElements(Session, atSession(audioRate, ardourBasename, idCounter))
    createSubElements(Location, atLocation(ardourStart, ardourEnd, idCounter))
    idCounter += 1
    createSubElements(IO, atIO(idCounter))
    idCounter += 1
    createSubElements(Tempo, atTempo())
    createSubElements(Meter, atMeter())

    Port = ""
    for counter in range(valLength(atPort())):
        Port = SubElement(IO, "Port")  # Session > Click > IO > Port
        createSubElementsMulti(Port, atPort(), counter)

    ######## ------------------------------------------------------------------
    ######## DYNAMIC XML SECTIONS
    ######## ------------------------------------------------------------------

    # create sources and sources' regions
    for source in sources:
        createAudioSources(Session, source)

    # create another sources' entry for stereo files
    stereoSources = []
    for source in sources:
        if (source['channels'] == 1):
            source['id'] = int(source['id'] + idCounter)
            createAudioSources(Session, source, 1)
            stereoSources.append(source)
            idCounter += 1

    # create playlists (tracks)
    for track in tracks:
        createPlaylists(Session, idCounter, track)

    # correct reference to master-source-0 and source-0 in repeated audios
    for rep in repeated:
        for sour in sources:
            if (rep['name'] == sour['name']):
                rep['sourceID'] = sour['id']

    # create playlists regions (timeline)
    for audio in (sources + repeated):
        track = tracks.index(audio['track'])
        if (audio['channels'] == 0):
            createPlaylistRegions(Session, idCounter, audio, track)
        else:
            for stereos in stereoSources:
                if (audio['name'] == stereos['name']):
                    audio['master-source-1'] = stereos['id']
                    audio['source-1'] = stereos['id']
                    createPlaylistRegions(Session, idCounter, audio, track)

    Session.set('id-counter', str(idCounter))

    return Session, sources
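createSubElements() and the at*() attribute helpers are defined elsewhere in that add-on. A minimal sketch of the behaviour this code implies, copying a name/value mapping onto an element as attributes (an assumption about the helper, not its actual implementation):

from xml.etree.ElementTree import Element

def createSubElements(element, attributes):
    # Assumed behaviour: stringify each value and set it as an XML attribute.
    for name, value in attributes.items():
        element.set(name, str(value))

session = Element("Session")
createSubElements(session, {"version": 3001, "name": "my-session", "sample-rate": 48000})
print(session.attrib)  # {'version': '3001', 'name': 'my-session', 'sample-rate': '48000'}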
Example #43
0
def get_module_name(pom_file):
  tree = ElementTree()
  tree.parse(pom_file)
  return tree.findtext("./{%s}artifactId" % maven_pom_xml_namespace)
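A minimal usage sketch; maven_pom_xml_namespace is defined elsewhere, and for a standard POM it would be the Maven 4.0.0 model namespace shown below (the pom content is illustrative):

from xml.etree.ElementTree import ElementTree, fromstring

maven_pom_xml_namespace = "http://maven.apache.org/POM/4.0.0"
pom = (
    '<project xmlns="http://maven.apache.org/POM/4.0.0">'
    '  <artifactId>my-module</artifactId>'
    '</project>'
)
tree = ElementTree(fromstring(pom))
print(tree.findtext("./{%s}artifactId" % maven_pom_xml_namespace))  # my-module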
Example #44
def _run_test(parser, args, failure_result_file, output_handle):
    output = ''

    def log(msg, **kwargs):
        print(msg, **kwargs)
        if output_handle:
            output_handle.write((msg + '\n').encode())
            output_handle.flush()

    env = None
    if args.env or args.append_env:
        env = dict(os.environ)
        if args.env:
            log('-- run_test.py: extra environment variables:')
            previous_key = None
            updated_env_keys = set()
            for env_str in args.env:
                # if CMake has split a single value containing semicolons
                # into multiple arguments they are put back together here
                if previous_key and '=' not in env_str:
                    key = previous_key
                    value = env[key] + ';' + env_str
                else:
                    key, value = separate_env_vars(env_str, 'env', parser)
                env[key] = value
                updated_env_keys.add(key)
                previous_key = key
            for key in sorted(updated_env_keys):
                log(' - {0}={1}'.format(key, env[key]))
        if args.append_env:
            log('-- run_test.py: extra environment variables to append:')
            previous_key = None
            for env_str in args.append_env:
                # if CMake has split a single value containing semicolons
                # into multiple arguments they are put back together here
                if previous_key and '=' not in env_str:
                    key = previous_key
                    value = env[key] + ';' + env_str
                    log(' - {0}+={1}'.format(key, env_str))
                else:
                    key, value = separate_env_vars(env_str, 'append-env', parser)
                    log(' - {0}+={1}'.format(key, value))
                if key not in env:
                    env[key] = ''
                if not env[key].endswith(os.pathsep):
                    env[key] += os.pathsep
                env[key] += value
                previous_key = key

    log("-- run_test.py: invoking following command in '%s':\n - %s" %
        (os.getcwd(), ' '.join(args.command)))
    if output_handle:
        output_handle.write('\n'.encode())
        output_handle.flush()

    encodings = ['utf-8']
    if locale.getpreferredencoding(False) not in encodings:
        encodings.append(locale.getpreferredencoding(False))

    try:
        proc = subprocess.Popen(
            args.command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            env=env)
        while True:
            line = proc.stdout.readline()
            if not line:
                break
            for i, encoding in enumerate(encodings):
                try:
                    decoded_line = line.decode(encoding)
                except UnicodeDecodeError:
                    if i == len(encodings) - 1:
                        raise
                else:
                    break
            print(decoded_line, end='')
            output += decoded_line
            if output_handle:
                output_handle.write(decoded_line.encode())
                output_handle.flush()
        proc.wait()
        rc = proc.returncode
        if output_handle:
            # separate progress of this script from subprocess output
            output_handle.write('\n\n'.encode())
        log('-- run_test.py: return code ' + str(rc), file=sys.stderr if rc else sys.stdout)
    except Exception as e:
        if output_handle:
            # separate subprocess output from progress of this script
            output_handle.write('\n\n'.encode())
        log('-- run_test.py: invocation failed: ' + str(e), file=sys.stderr)
        output += str(e)
        rc = 1

    if not rc and args.generate_result_on_success:
        # generate result file with one passed test
        # if it was expected that no result file was generated
        # and the command returned with code zero
        log("-- run_test.py: generate result file '%s' with successful test" % args.result_file)
        success_result_file = _generate_result(args.result_file)
        with open(args.result_file, 'w') as h:
            h.write(success_result_file)

    elif os.path.exists(args.result_file):
        # check if content of result file has actually changed
        with open(args.result_file, 'r', encoding='utf-8') as h:
            content = h.read()

        if content == failure_result_file:
            log("-- run_test.py: generate result file '%s' with failed test" % args.result_file,
                file=sys.stderr)
            # regenerate result file to include output / exception of the invoked command
            failure_result_file = _generate_result(
                args.result_file,
                failure_message='The test did not generate a result file:\n\n' + output)
            with open(args.result_file, 'w') as h:
                h.write(failure_result_file)
        else:
            # prefix classname attributes
            if args.result_file.endswith('.gtest.xml') and args.package_name:
                prefix = ' classname="'
                pattern = '%s(?!%s)' % (prefix, args.package_name)
                new_content = re.sub(
                    pattern, prefix + args.package_name + '.', content)
                if new_content != content:
                    log(
                        '-- run_test.py: inject classname prefix into gtest '
                        "result file '%s'" % args.result_file)
                    with open(args.result_file, 'w') as h:
                        h.write(new_content)

        log("-- run_test.py: verify result file '%s'" % args.result_file)
        # if result file exists ensure that it contains valid xml
        # unit test suites are not good about screening out
        # illegal unicode characters
        tree = None
        try:
            tree = ElementTree(None, args.result_file)
        except ParseError as e:
            modified = _tidy_xml(args.result_file)
            if not modified:
                log("Invalid XML in result file '%s': %s" %
                    (args.result_file, str(e)), file=sys.stderr)
            else:
                try:
                    tree = ElementTree(None, args.result_file)
                except ParseError as e:
                    log("Invalid XML in result file '%s' (even after trying to tidy it): %s" %
                        (args.result_file, str(e)), file=sys.stderr)

        if not tree:
            # set error code when result file is not parsable
            rc = 1
        else:
            # set error code when result file contains errors or failures
            root = tree.getroot()
            num_errors = int(root.attrib.get('errors', 0))
            num_failures = int(root.attrib.get('failures', 0))
            if num_errors or num_failures:
                rc = 1

    # ensure that a result file exists at the end
    if not rc and not os.path.exists(args.result_file):
        log('-- run_test.py: override return code since no result file was '
            'generated', file=sys.stderr)
        rc = 1

    return rc
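A minimal sketch of the final result-file check above: a JUnit-style root element whose errors/failures attributes decide the return code (the file content is illustrative):

from xml.etree.ElementTree import fromstring

result = '<testsuite tests="3" errors="0" failures="1"><testcase name="a"/></testsuite>'
root = fromstring(result)
num_errors = int(root.attrib.get('errors', 0))
num_failures = int(root.attrib.get('failures', 0))
rc = 1 if num_errors or num_failures else 0
print(rc)  # 1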
Example #45
def scheme_to_etree(scheme, data_format="literal", pickle_fallback=False):
    """
    Return an `xml.etree.ElementTree` representation of the `scheme`.
    """
    builder = TreeBuilder(element_factory=Element)
    builder.start(
        "scheme", {
            "version": "2.0",
            "title": scheme.title or "",
            "description": scheme.description or ""
        })

    # Nodes
    node_ids = defaultdict(count().__next__)
    builder.start("nodes", {})
    for node in scheme.nodes:  # type: SchemeNode
        desc = node.description
        attrs = {
            "id": str(node_ids[node]),
            "name": desc.name,
            "qualified_name": desc.qualified_name,
            "project_name": desc.project_name or "",
            "version": desc.version or "",
            "title": node.title,
        }
        if node.position is not None:
            attrs["position"] = str(node.position)

        if type(node) is not SchemeNode:
            attrs["scheme_node_type"] = "%s.%s" % (type(node).__name__,
                                                   type(node).__module__)
        builder.start("node", attrs)
        builder.end("node")

    builder.end("nodes")

    # Links
    link_ids = defaultdict(count().__next__)
    builder.start("links", {})
    for link in scheme.links:  # type: SchemeLink
        source = link.source_node
        sink = link.sink_node
        source_id = node_ids[source]
        sink_id = node_ids[sink]
        attrs = {
            "id": str(link_ids[link]),
            "source_node_id": str(source_id),
            "sink_node_id": str(sink_id),
            "source_channel": link.source_channel.name,
            "sink_channel": link.sink_channel.name,
            "enabled": "true" if link.enabled else "false",
        }
        builder.start("link", attrs)
        builder.end("link")

    builder.end("links")

    # Annotations
    annotation_ids = defaultdict(count().__next__)
    builder.start("annotations", {})
    for annotation in scheme.annotations:
        annot_id = annotation_ids[annotation]
        attrs = {"id": str(annot_id)}
        data = None
        if isinstance(annotation, SchemeTextAnnotation):
            tag = "text"
            attrs.update({"type": annotation.content_type})
            attrs.update({"rect": repr(annotation.rect)})

            # Save the font attributes
            font = annotation.font
            attrs.update({
                "font-family": font.get("family", None),
                "font-size": font.get("size", None)
            })
            attrs = [(key, value) for key, value in attrs.items()
                     if value is not None]
            attrs = dict((key, str(value)) for key, value in attrs)
            data = annotation.content
        elif isinstance(annotation, SchemeArrowAnnotation):
            tag = "arrow"
            attrs.update({
                "start": repr(annotation.start_pos),
                "end": repr(annotation.end_pos),
                "fill": annotation.color
            })
            data = None
        else:
            log.warning("Can't save %r", annotation)
            continue
        builder.start(tag, attrs)
        if data is not None:
            builder.data(data)
        builder.end(tag)

    builder.end("annotations")

    builder.start("thumbnail", {})
    builder.end("thumbnail")

    # Node properties/settings
    builder.start("node_properties", {})
    for node in scheme.nodes:
        data = None
        if node.properties:
            try:
                data, format = dumps(node.properties,
                                     format=data_format,
                                     pickle_fallback=pickle_fallback)
            except Exception:
                log.error("Error serializing properties for node %r",
                          node.title,
                          exc_info=True)
            if data is not None:
                builder.start("properties", {
                    "node_id": str(node_ids[node]),
                    "format": format
                })
                builder.data(data)
                builder.end("properties")

    builder.end("node_properties")
    builder.start("session_state", {})
    builder.start("window_groups", {})

    for g in scheme.window_group_presets():
        builder.start("group", {
            "name": g.name,
            "default": str(g.default).lower()
        })
        for node, data in g.state:
            if node not in node_ids:
                continue
            builder.start("window_state", {"node_id": str(node_ids[node])})
            builder.data(base64.encodebytes(data).decode("ascii"))
            builder.end("window_state")
        builder.end("group")
    builder.end("window_group")
    builder.end("session_state")
    builder.end("scheme")
    root = builder.close()
    tree = ElementTree(root)
    return tree
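
A hedged usage sketch for the function above: the scheme object and the output filename are assumptions; the write call itself is the standard ElementTree API.

# Hypothetical usage: serialize a scheme description and persist it.
tree = scheme_to_etree(scheme, data_format="literal", pickle_fallback=False)
tree.write("workflow.ows", encoding="utf-8", xml_declaration=True)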
                    name = SubElement(ob, 'name')
                    # name.text = str(bboxes[Row][4])
                    name.text = str(class_dict[(int(bboxes[Row][4]))])
                    pose = SubElement(ob, 'pose')
                    pose.text = 'Unspecified'
                    truncated = SubElement(ob, 'truncated')
                    truncated.text = '0'
                    difficult = SubElement(ob, 'difficult')
                    difficult.text = '0'

                    bndbox = SubElement(ob, 'bndbox')
                    xmin = SubElement(bndbox, 'xmin')
                    xmin.text = str(int(bboxes[Row][0]))
                    ymin = SubElement(bndbox, 'ymin')
                    ymin.text = str(int(bboxes[Row][1]))
                    xmax = SubElement(bndbox, 'xmax')
                    xmax.text = str(int(bboxes[Row][2]))
                    ymax = SubElement(bndbox, 'ymax')
                    ymax.text = str(int(bboxes[Row][3]))
                tree = ElementTree(annotation)
                tree.write(out_xml_path, encoding='utf-8')
                

                # end of saving the output XML file

                # save the output img file

                cv2.imwrite(out_image_path, img)
                print('Writing XML:',out_xml_path)
                print('Writing Image:',out_image_path)
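
Since the snippet above is truncated, here is a self-contained sketch of the same idea: building a minimal Pascal-VOC style annotation with ElementTree. The element names follow the VOC convention; the function name, box values and class name are made up.

from xml.etree.ElementTree import Element, SubElement, ElementTree

def make_voc_annotation(filename, boxes, out_path):
    # boxes: iterable of (xmin, ymin, xmax, ymax, class_name)
    annotation = Element('annotation')
    SubElement(annotation, 'filename').text = filename
    for xmin, ymin, xmax, ymax, name in boxes:
        ob = SubElement(annotation, 'object')
        SubElement(ob, 'name').text = str(name)
        SubElement(ob, 'pose').text = 'Unspecified'
        SubElement(ob, 'truncated').text = '0'
        SubElement(ob, 'difficult').text = '0'
        bndbox = SubElement(ob, 'bndbox')
        SubElement(bndbox, 'xmin').text = str(int(xmin))
        SubElement(bndbox, 'ymin').text = str(int(ymin))
        SubElement(bndbox, 'xmax').text = str(int(xmax))
        SubElement(bndbox, 'ymax').text = str(int(ymax))
    ElementTree(annotation).write(out_path, encoding='utf-8')

# make_voc_annotation('img_0001.jpg', [(10, 20, 110, 220, 'person')], 'img_0001.xml')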
Example #47
0
 def load_config(self):
     print('loading ->', self.path)
     self.tree = ET()
     self.tree.parse(self.path)
Example #48
0
def build_appt_xml(appts, maxed_appts=[], write=False):
    '''Take a list of Appt objects from the same Regional Center and, if write is true, write an XML invoice file to the static docs directory.'''

    maxed_length = len(maxed_appts)

    appts = maxed_appts + appts

    appts_by_client = build_billing_obj(appts, maxed_length=maxed_length)

    invoices = []

    xml_invoice_id = None

    for regional_center_id in appts_by_client:
        regional_center = models.RegionalCenter.query.get(regional_center_id)
        for billing_month in appts_by_client[regional_center_id]:
            total_appts = []
            notes = []
            tai = Element('TAI')
            invoice = ElementTree(element=tai)
            current_month = datetime.datetime.strptime(billing_month,
                                                       '%Y-%m-%d')

            appts_by_rc_by_month = appts_by_client[regional_center_id][
                billing_month]

            for client_id in appts_by_rc_by_month:
                client = models.Client.query.get(client_id)

                for appt_type_id in appts_by_rc_by_month[client_id]:
                    list_of_appts = appts_by_rc_by_month[client_id][
                        appt_type_id]
                    appt_type = models.ApptType.query.get(appt_type_id)

                    if appt_type.name.lower() == 'evaluation':
                        client_auths = client.auths.filter_by(
                            is_eval_only=1).order_by(
                                models.ClientAuth.id).all()
                    else:
                        client_auths = client.auths.filter_by(
                            is_eval_only=0).order_by(
                                models.ClientAuth.id).all()

                    current_auth = None

                    # need to be able to handle auths starting midway through the month
                    # Billing error occurred
                    for auth in client_auths:
                        if current_month >= auth.auth_start_date.replace(
                                day=1) and current_month <= auth.auth_end_date:
                            current_auth = auth

                    if not current_auth:  # add logic to check if billing month is >6 and auth[:2] == billing year [3:]
                        for appt in list_of_appts:
                            note = models.BillingNote()
                            note.note = 'No valid auth for {} as of {}'.format(
                                (client.first_name + ' ' + client.last_name),
                                datetime.datetime.now().strftime('%b %d, %Y'))
                            note.client_appt_id = appt.id
                            notes.append(note)
                        continue

                    invoice_data = SubElement(tai, 'invoicedata')

                    RecType = SubElement(invoice_data, 'RecType')
                    RecType.text = 'D'
                    RCID = SubElement(invoice_data, 'RCID')
                    RCID.text = str(regional_center.rc_id)
                    ATTN = SubElement(invoice_data, 'AttnOnlyFlag')
                    SPNID = SubElement(invoice_data, 'SPNID')
                    SPNID.text = str(client.regional_center.company.vendor_id)
                    UCI = SubElement(invoice_data, 'UCI')
                    UCI.text = str(client.uci_id)
                    lastname = SubElement(invoice_data, 'lastname')
                    lastname.text = client.last_name.upper()
                    firstname = SubElement(invoice_data, 'firstname')
                    firstname.text = client.first_name.upper()
                    auth_number = SubElement(invoice_data, 'AuthNumber')
                    auth_number.text = str(current_auth.auth_id)
                    svc_code = SubElement(invoice_data, 'SVCCode')
                    svc_code.text = str(appt_type.service_code)
                    svcs_code = SubElement(invoice_data, 'SVCSCode')
                    svcs_code.text = appt_type.service_type_code
                    svc_mn_yr = SubElement(invoice_data, 'SVCMnYr')
                    svc_mn_yr.text = current_month.strftime('%Y-%m-%d')
                    industry_type = SubElement(invoice_data, 'IndustryType')
                    wage_amt = SubElement(invoice_data, 'WageAmt')
                    wage_type = SubElement(invoice_data, 'WageType')

                    # If the number of appts exceeds the auth's monthly visit limit, truncate the extras before processing

                    if len(list_of_appts) > current_auth.monthly_visits:
                        unbilled_appts = list_of_appts[current_auth.
                                                       monthly_visits:]
                        list_of_appts = list_of_appts[:current_auth.
                                                      monthly_visits]

                        for unbilled_appt in unbilled_appts:
                            note = models.BillingNote()
                            note.note = 'Max Number of Appts Reached: ' + str(
                                current_auth.monthly_visits
                            ) + ' Not Billing for: ' + ' '.join([
                                unbilled_appt.client.first_name,
                                unbilled_appt.client.last_name
                            ]) + ' on ' + unbilled_appt.start_datetime.strftime(
                                '%b %d, %y')
                            note.client_appt_id = unbilled_appt.id
                            notes.append(note)

                    # Find duplicate days and move the later appt to the next open day, wrapping to the start of the month if needed
                    appt_days = [d.start_datetime.day for d in list_of_appts]

                    new_days = []

                    for i, day in enumerate(appt_days):
                        moved_day = False

                        if current_month.month != list_of_appts[
                                i].start_datetime.month:
                            day = 1
                            moved_day = True
                        # Date should have moved on Eval to start of authorization... why didn't it for Audrielle??
                        if list_of_appts[
                                i].start_datetime < current_auth.auth_start_date:
                            day = current_auth.auth_start_date.day
                            moved_day = True

                        if list_of_appts[
                                i].start_datetime > current_auth.auth_end_date.replace(
                                    hour=23, minute=59):
                            day = current_auth.auth_end_date.day
                            moved_day = True

                        eom = calendar.monthrange(current_month.year,
                                                  current_month.month)[1]
                        if day in new_days:
                            while day in appt_days[i:] or day in new_days:
                                day = (day + 1) % eom
                                if day == 0:
                                    day = eom
                            moved_day = True

                        if moved_day:
                            moved_to_date = list_of_appts[
                                i].start_datetime.replace(day=day)

                            if list_of_appts[
                                    i].start_datetime.month != current_month.month:
                                moved_to_date = moved_to_date.replace(
                                    month=current_month.month)

                            while moved_to_date.weekday() > 4:
                                day = (day + 1) % eom
                                if day == 0:
                                    day = eom
                                moved_to_date = moved_to_date.replace(day=day)

                            moved_to_date_string = moved_to_date.strftime(
                                '%b %d, %Y')

                            note = models.BillingNote()
                            note.note = 'Appt moved from ' + list_of_appts[
                                i].start_datetime.strftime(
                                    '%b %d, %Y'
                                ) + ' to ' + moved_to_date_string
                            note.client_appt_id = list_of_appts[i].id
                            notes.append(note)

                        new_days.append(day)

                    total_appts += list_of_appts

                    appts_total = 0
                    for i in range(1, 32):
                        day = SubElement(invoice_data, 'Day' + str(i))
                        if i in new_days:
                            day.text = '1'
                            appts_total += 1
                    appt_total = SubElement(invoice_data, 'EnteredUnits')
                    appt_total.text = str(appts_total)
                    total_amount = SubElement(invoice_data, 'EnteredAmount')
                    total_amount.text = str(appts_total * appt_type.rate)

            if write and total_appts:
                xml_invoice = models.BillingXml(
                    regional_center_id=regional_center_id,
                    billing_month=current_month)
                xml_invoice.appts = total_appts
                xml_invoice.notes = notes
                db.session.add(xml_invoice)
                db.session.commit()
                file_name = 'invoice_%s_%s_%s.xml' % (
                    regional_center_id, xml_invoice.id, billing_month)

                file_directory_path = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), '..', 'docs',
                    str(xml_invoice.regional_center.company_id), 'billing/')

                if not os.path.exists(file_directory_path):
                    os.makedirs(file_directory_path)

                file_path = os.path.join(file_directory_path, file_name)
                invoice.write(file_path,
                              xml_declaration=True,
                              encoding='UTF-8')
                xml_invoice.file_name = file_name
                db.session.add(xml_invoice)
                xml_invoice_id = xml_invoice.id
                db.session.commit()

            if write:
                if xml_invoice_id:
                    invoices.append({
                        'invoice': invoice,
                        'notes': notes,
                        'xml_invoice_id': xml_invoice_id
                    })
            else:
                invoices.append({
                    'invoice': invoice,
                    'notes': notes,
                    'xml_invoice_id': xml_invoice_id
                })

    return invoices
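
The duplicate-day handling above is easier to follow in isolation. A simplified sketch of the same idea, assuming only a list of day-of-month integers and the number of days in the month (weekend and auth-window handling omitted; the function name is made up):

def spread_duplicate_days(appt_days, days_in_month):
    # Move each colliding day to the next free day, wrapping past the month end.
    new_days = []
    for i, day in enumerate(appt_days):
        if day in new_days:
            while day in appt_days[i:] or day in new_days:
                day = (day + 1) % days_in_month
                if day == 0:
                    day = days_in_month
        new_days.append(day)
    return new_days

# spread_duplicate_days([3, 3, 3, 31], 31) -> [3, 4, 5, 31]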
Example #49
0
class importOMS_RAI():
    def __init__(self, userProfile, dProject):

        self.domaff_modele = dProject
        self.__filename = ""
        self.fileTree = None

        self.userProfile = userProfile

        # Logging setup
        self.__logger = protoLog(userProfile.user, userProfile.userTeam, 'RAI')

        # Errors Constants
        self.OK = 0
        self.ERROR_OPEN_FILE = 1
        self.ERROR_PARSE_XML = 2
        self.OPERATIONAL_ERROR = 3
        self.ADDING_ERROR = 4
        self.ERROR = 5

        # Equivalence mappings (RIA data-model fields, OMS fields)
        self.MODELE = {
            'code': 'nom_modele',
            'idModel': 'idModel',
            'idRef': 'idRef',
        }

        self.ENTITE = {
            'code': 'nom_entite',
            'description': 'description_entite',
            'physicalName': 'physical_name'
        }

        self.ELEMENT_DONNEE = {
            # 'entite'            : 'entite_elem',
            'code': 'nom_element_donnee',
            'alias': 'numero_elem_cn',
            'description': 'description',
        }

        self.ELEMENT_DONNEE_PP = {
            'FORMAT': 'type_de_base',
            'DEFINITION': 'definition',
            'ELEMENTTRANSFORME': 'element_transforme',
            'GABARIT': 'gabarit',
            'ELEMENTTRANSMIS': 'element_transmis',
            'DOMAINEDEVALEURS': 'domaine_valeurs',
            'ENTREEENVIGUEUR': 'date_entree_vigueur',
            'DATEDELADERNIEREMODIFICATION': 'date_derniere_modification',
            'DESCRIPTIONCN': 'consignes_saisie',
            'PRECISIONS': 'pratiques_acceptees',
            'VALIDATIONSSURELEMENT': 'validation_sur_element',
            'VALIDATIONSINTERELEMENT': 'validations_inter_elements',
            'VALIDATIONINTERENREGISTREMENT':
            'validations_inter_enregistrement',
            'REQUISPAR': 'requis_par'
        }

        self.RELATION = {
            # 'entite'            : 'entite_rela1',
            # 'ref'               : 'entite_rela2',
            'code': 'nom_relation',
            'baseConcept': 'tmp_foreign',
            'alias': 'tmp_alias',
            'description': 'description',
            'baseMin': 'baseMin',
            'baseMax': 'baseMax',
            'refMin': 'refMin',
            'refMax': 'refMax',
        }

        self.MODELE_RACCORDEMENT = {
            'code': 'nom_modele_raccordement',
            'source': 'tmp_modrac1',
            'destination': 'tmp_modrac2',
        }

        self.RACCORDEMENT = {
            # modrac_rac
            'code': 'no_raccordement',
            'sourceCol': 'tmp_rac1',
            'destinationCol': 'tmp_rac2',
            'alias': 'tmp_alias',
            'destinationText': 'tmp_destt',
        }

    def doImport(self):

        #
        dictWrite = self.writeDb()
        if (dictWrite['state'] != self.OK):
            return dictWrite

        # Delete working file
        import os
        os.remove(self.__filename)

        return {'state': self.OK, 'message': 'Database write completed'}

    # filename must be an XML file
    def loadFile(self, filename):
        # In order to keep the parsed file around
        self.fileTree = ElementTree()

        # Logging info
        self.__logger.info("Chargement du fichier...")

        # self.fileTree.parse(filename)
        try:
            self.fileTree.parse(filename)
            self.__filename = filename

        except IOError:
            self.__logger.error("Impossible d ouvrir le fichier...")
            return self.ERROR_OPEN_FILE
        except:
            self.__logger.error("Erreur de traitement fichier...")
            return self.ERROR

        # Logging info
        self.__logger.info("Chargement du fichier effectue...")

        return self.OK

    def writeDb(self):

        # Logging info
        self.__logger.info("Ecriture dans la base de donnee...")

        # need for setSecurityInfo
        data = {}

        # We populate the database
        if (self.fileTree != None):  # A file has been loaded

            xProjects = self.fileTree.getiterator("domain")

            # ------------------------------------------------------------------------------
            xModels = xProjects[0].getiterator("model")
            for xModel in xModels:
                dModel = Modele()
                dModel.domaff_modele = self.domaff_modele

                for child in xModel:
                    if child.tag in self.MODELE:
                        setattr(dModel, self.MODELE[child.tag], child.text)

                try:
                    setSecurityInfo(dModel, data, self.userProfile, True)
                    dModel.save()
                except Exception, e:
                    self.__logger.info("Error dModel.save " + str(e))

                self.__logger.info("Model..." + dModel.__str__())

                # ------------------------------------------------------------------------------
                xEntitys = xModel.getiterator("concept")
                for xEntity in xEntitys:
                    dEntity = Entite()
                    dEntity.entite_mod = dModel

                    for child in xEntity:
                        if (child.tag in self.ENTITE):
                            if child.text is not None:
                                setattr(dEntity, self.ENTITE[child.tag],
                                        child.text)

                            elif type(child.attrib
                                      ) == dict and 'text' in child.attrib:
                                setattr(dEntity, self.ENTITE[child.tag],
                                        child.get('text'))

                    try:
                        setSecurityInfo(dEntity, data, self.userProfile, True)
                        dEntity.save()
                    except Exception, e:
                        self.__logger.info("Error dEntity.save" + str(e))

                    self.__logger.info("Entity..." + dEntity.__str__())

                    # ------------------------------------------------------------------------------
                    xProperties = xEntity.getiterator("property")
                    for xProperty in xProperties:

                        dProperty = ElementDonnee()
                        dProperty.entite_elem = dEntity

                        for child in xProperty:
                            if child.tag in self.ELEMENT_DONNEE:
                                if (child.text is not None):
                                    setattr(dProperty,
                                            self.ELEMENT_DONNEE[child.tag],
                                            child.text)

                                elif type(child.attrib
                                          ) == dict and 'text' in child.attrib:
                                    setattr(dProperty,
                                            self.ELEMENT_DONNEE[child.tag],
                                            child.get('text'))

                            elif child.tag == 'udps':
                                for xUdp in child:
                                    if xUdp.tag in self.ELEMENT_DONNEE_PP:
                                        setattr(
                                            dProperty,
                                            self.ELEMENT_DONNEE_PP[xUdp.tag],
                                            xUdp.get('text'))

                        try:
                            setSecurityInfo(dProperty, data, self.userProfile,
                                            True)
                            dProperty.save()
                        except Exception, e:
                            self.__logger.info("Error prpDom.save" + str(e))

                    # Relationship -------------------------------------------------------------------
                    xForeigns = xEntity.getiterator("foreign")
                    for xForeign in xForeigns:
                        dForeign = Relation()

                        dForeign.entite_rela1 = dEntity
                        dForeign.entite_rela2 = dEntity

                        for child in xForeign:
                            if child.tag in self.RELATION:
                                setattr(dForeign, self.RELATION[child.tag],
                                        child.text)

                        try:
                            setSecurityInfo(dForeign, data, self.userProfile,
                                            True)
                            dForeign.save()
                        except Exception, e:
                            self.__logger.info("Error dForeign.save" + str(e))
Example #50
0
    def _ep_data(self, ep_obj):
        """
        Creates an elementTree XML structure for a WDTV style episode.xml
        and returns the resulting data object.

        ep_obj: a TVEpisode instance to create the NFO for
        """

        eps_to_write = [ep_obj] + ep_obj.relatedEps

        indexer_lang = ep_obj.show.lang

        try:
            lINDEXER_API_PARMS = srIndexerApi(
                ep_obj.show.indexer).api_params.copy()

            lINDEXER_API_PARMS['actors'] = True

            if indexer_lang and not indexer_lang == sickrage.srCore.srConfig.INDEXER_DEFAULT_LANGUAGE:
                lINDEXER_API_PARMS['language'] = indexer_lang

            if ep_obj.show.dvdorder != 0:
                lINDEXER_API_PARMS['dvdorder'] = True

            t = srIndexerApi(ep_obj.show.indexer).indexer(**lINDEXER_API_PARMS)
            myShow = t[ep_obj.show.indexerid]
        except indexer_shownotfound as e:
            raise ShowNotFoundException(e.message)
        except indexer_error as e:
            sickrage.srCore.srLogger.error(
                "Unable to connect to " +
                srIndexerApi(ep_obj.show.indexer).name +
                " while creating meta files - skipping - {}".format(e.message))
            return False

        rootNode = Element("details")

        # write a WDTV XML containing info for all matching episodes
        for curEpToWrite in eps_to_write:

            try:
                myEp = myShow[curEpToWrite.season][curEpToWrite.episode]
            except (indexer_episodenotfound, indexer_seasonnotfound):
                sickrage.srCore.srLogger.info(
                    "Unable to find episode %dx%d on %s... has it been removed? Should I delete from db?"
                    % (curEpToWrite.season, curEpToWrite.episode,
                       srIndexerApi(ep_obj.show.indexer).name))
                return None

            if ep_obj.season == 0 and not getattr(myEp, 'firstaired', None):
                myEp["firstaired"] = str(datetime.date.fromordinal(1))

            if not (getattr(myEp, 'episodename', None)
                    and getattr(myEp, 'firstaired', None)):
                return None

            if len(eps_to_write) > 1:
                episode = SubElement(rootNode, "details")
            else:
                episode = rootNode

            # TODO: get right EpisodeID
            episodeID = SubElement(episode, "id")
            episodeID.text = str(curEpToWrite.indexerid)

            title = SubElement(episode, "title")
            title.text = ep_obj.prettyName()

            if getattr(myShow, 'seriesname', None):
                seriesName = SubElement(episode, "series_name")
                seriesName.text = myShow["seriesname"]

            if curEpToWrite.name:
                episodeName = SubElement(episode, "episode_name")
                episodeName.text = curEpToWrite.name

            seasonNumber = SubElement(episode, "season_number")
            seasonNumber.text = str(curEpToWrite.season)

            episodeNum = SubElement(episode, "episode_number")
            episodeNum.text = str(curEpToWrite.episode)

            firstAired = SubElement(episode, "firstaired")

            if curEpToWrite.airdate != datetime.date.fromordinal(1):
                firstAired.text = str(curEpToWrite.airdate)

            if getattr(myShow, 'firstaired', None):
                try:
                    year_text = str(
                        datetime.datetime.strptime(myShow["firstaired"],
                                                   dateFormat).year)
                    if year_text:
                        year = SubElement(episode, "year")
                        year.text = year_text
                except Exception:
                    pass

            if curEpToWrite.season != 0 and getattr(myShow, 'runtime', None):
                runtime = SubElement(episode, "runtime")
                runtime.text = myShow["runtime"]

            if getattr(myShow, 'genre', None):
                genre = SubElement(episode, "genre")
                genre.text = " / ".join([
                    x.strip() for x in myShow["genre"].split('|') if x.strip()
                ])

            if getattr(myEp, 'director', None):
                director = SubElement(episode, "director")
                director.text = myEp['director']

            if getattr(myShow, '_actors', None):
                for actor in myShow['_actors']:
                    if not ('name' in actor and actor['name'].strip()):
                        continue

                    cur_actor = SubElement(episode, "actor")

                    cur_actor_name = SubElement(cur_actor, "name")
                    cur_actor_name.text = actor['name']

                    if 'role' in actor and actor['role'].strip():
                        cur_actor_role = SubElement(cur_actor, "role")
                        cur_actor_role.text = actor['role'].strip()

            if curEpToWrite.description:
                overview = SubElement(episode, "overview")
                overview.text = curEpToWrite.description

        # Make it purdy
        indentXML(rootNode)
        data = ElementTree(rootNode)
        return data
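
For completeness, a hedged sketch of what a caller might do with the returned tree; the provider object and output path are assumptions, while write() with an XML declaration is the standard ElementTree call.

# Hypothetical caller side: persist the generated metadata if it was built.
data = metadata_provider._ep_data(ep_obj)   # returns an ElementTree, or None/False on error
if data:
    data.write('episode.xml', encoding='utf-8', xml_declaration=True)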
Example #51
0
 def openXml(filename):
     tree = ElementTree()
     tree.parse(filename)
     return tree
Example #52
0
class Config:
    def __init__(self, path):
        self.path = path

    def load_config(self):
        print('loading ->', self.path)
        self.tree = ET()
        self.tree.parse(self.path)

    def get_resid(self):
        node_resid = self.tree.find('ResID')
        if node_resid != None:
            print('>>', node_resid.text)
            return node_resid.text
        return None

    def get_slaves(self):
        ret = list()
        xml_list = self.tree.findall('Config/Slave')
        #print('>>',len(xml_list))
        if xml_list != None:
            for xml_slave in xml_list:
                slave = Slave(xml_slave)
                ret.append(slave)
        return ret

    def get_master(self):
        master = None
        xml_master = self.tree.find('Config/Master')
        if xml_master != None:
            master = Master(xml_master)
        return master

    def get_slaves_names(self):
        ret = list()
        xml_list = self.tree.findall('Config/Slave')
        #print('>>',len(xml_list))
        if xml_list != None:
            for xml_slave in xml_list:
                slave = Slave(xml_slave)
                ret.append(slave.device_name)
        return ret

    def get_xml_slave_by_name(self, slave_Name):
        print('get_xml_slave_by_name')
        xml_slave_list = self.tree.findall('Config/Slave')
        if xml_slave_list != None:
            print('xml_slave_list not None')
            for xml_slave in xml_slave_list:
                xml_nameInres = xml_slave.find('Info/NameInResource')
                if xml_nameInres != None:
                    if xml_nameInres.text == slave_Name:
                        print('find slave with name=', slave_Name)
                        #print(ET.tostring(xml_slave))
                        return xml_slave
                else:
                    print('xml_slave has no Info/NameInResource')
        return None

    def decode_slave_InitCmds(self, slave_name):
        #print('decode_slave_InitCmds')
        ret = list()
        xml_slave = self.get_xml_slave_by_name(slave_name)
        if xml_slave != None:
            print('Slave found:')
            print(YoUtil.get_xml_content(xml_slave))
            pass
            # get the list of InitCmds
            # build the list
            # get the list of mailbox\CoE\InitCmds
            # build the list
        return ret

    def get_elmospecial(self):
        return None
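
A hedged usage sketch for the class above; the file name is an assumption, and Slave/Master are defined elsewhere in the original module.

cfg = Config('device_config.xml')    # path is illustrative
cfg.load_config()
print('ResID:', cfg.get_resid())
for name in cfg.get_slaves_names():  # relies on the Slave class from the original module
    print('slave:', name)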
Example #53
0
class MainFrame(gui.CeFrame):
    def __init__(self, fn=''):
        self.title = "Albert's XML Editor"
        gui.CeFrame.__init__(self,
                             title=self.title,
                             action=("About", self.about),
                             menu="Menu")
        self.xmlfn = fn
        self.sipp = gui.SIPPref(self)
        self.tree = gui.Tree(self)

        self.filemenu = gui.PopupMenu()
        self.filemenu.append("New", callback=self.newxml)
        self.filemenu.append("Open", callback=self.openxml)
        self.filemenu.append('Save', callback=self.savexml)
        self.filemenu.append('Save As', callback=self.savexmlas)
        self.filemenu.append_separator()
        self.filemenu.append('Exit', callback=self.quit)
        self.editmenu = gui.PopupMenu()
        self.editmenu.append("Edit", callback=self.edit)
        self.editmenu.append_separator()
        self.editmenu.append("Cut", callback=self.cut)
        self.editmenu.append("Copy", callback=self.copy)
        self.pastebeforeitem = self.editmenu.append("Paste Before",
                                                    callback=self.paste)
        self.pasteafteritem = self.editmenu.append("Paste After",
                                                   callback=self.paste_aft)
        self.editmenu.append_separator()
        self.editmenu.append("Insert Attribute", callback=self.add_attr)
        self.editmenu.append('Insert Element Before', callback=self.insert)
        self.editmenu.append('Insert Element After', callback=self.ins_aft)
        self.editmenu.append('Insert Element Under', callback=self.ins_chld)
        self.pastebeforeitem.set_title = "Nothing to Paste"
        self.pastebeforeitem.enable(False)
        self.pasteafteritem.set_title = " "
        self.pasteafteritem.enable(False)
        ## self.helpmenu.append('About', callback = self.about)

        sizer = gui.VBox(border=(2, 2, 2, 2), spacing=2)
        sizer.add(self.tree)
        self.sizer = sizer

        if self.xmlfn == '':
            self.rt = Element('New')
            if DESKTOP:
                self.openxml()
            else:
                self.init_tree("(untitled)")
        else:
            self.rt = ElementTree(file=self.xmlfn).getroot()
            self.init_tree()

        # context menu doesn't work in PC version, cb_menu doesn't in WM2003
        if DESKTOP:
            self.cb_menu.append_menu("File", self.filemenu)
            self.cb_menu.append_menu("Edit", self.editmenu)
        else:
            self.tree.bind(lbdown=self.on_bdown)

    def newxml(self, ev=None):
        h = gui.Dialog.askstring("AXE",
                                 "Enter a name (tag) for the root element")
        if h is not None:
            self.init_tree("(untitled)")

    def openxml(self, ev=None):
        self.openfile()
        self.init_tree()

    def openfile(self, ev=None):
        h = gui.FileDialog.open(wildcards={"XML files": "*.xml"})
        if h:
            try:
                rt = ElementTree(file=h).getroot()
            except:
                h = gui.Message.ok(
                    self.title, 'parsing error, probably not well-formed xml')
            else:
                self.rt = rt
                self.xmlfn = h

    def savexmlfile(self, ev=None):
        def expandnode(node, root):
            for el in node:
                name, value = el.data
                # print name, value
                if el.text.startswith(ELSTART):
                    sub = SubElement(root, name)
                    if value:
                        sub.text = value
                    expandnode(el, sub)
                else:
                    root.set(name, value)

        ## print self.xmlfn
        try:
            shutil.copyfile(self.xmlfn, self.xmlfn + '.bak')
        except IOError, mld:
            ## print mld
            pass
        rt = self.tree.roots[1]
        print rt.text, rt.data
        root = Element(rt.data[0])  # .split(None,1)
        expandnode(rt, root)
        h = ElementTree(root).write(self.xmlfn, encoding="iso-8859-1")
Example #54
0
def read_xml(in_path):
    '''Read and parse an XML file.'''
    tree = ElementTree()
    tree.parse(in_path)
    return tree
Example #55
0
    "create table definings (seq integer, defining text, truekanji integer, notes text, freq text)"
)
c.execute(
    "create table readings (seq integer, reading text, freq text, restr text)")
c.execute(
    "create table senses (seq integer, pos text, misc text, gloss text, lang text)"
)

c.execute("create index senseseq on senses (seq)")
c.execute("create index reading on readings (reading)")
c.execute("create index readingseq on readings (seq)")
c.execute("create index defining on definings (defining)")
c.execute("create index definingseq on definings (seq)")
c.execute("create index glosses on senses(gloss)")

tree = ElementTree()
jmdict = tree.parse(sys.argv[1])

entries = jmdict.getiterator("entry")
nentries = 0
for entry in entries:
    nentries += 1
    if nentries % 10000 == 0:
        print nentries
    seq = entry.find("ent_seq").text
    writings = list(entry.getiterator("k_ele"))
    readings = list(entry.getiterator("r_ele"))
    senses = list(entry.getiterator("sense"))
    for writing in writings:
        kebs = list(writing.getiterator("keb"))
        ke_inf = [p.text for p in list(writing.getiterator("ke_inf"))]
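
Once the tables are populated (the loading loop is cut off above), lookups are plain SQL. A hedged example query against the schema created above; the search term is made up.

# Hypothetical lookup: all glosses for entries with a given reading.
c.execute("select senses.gloss from senses "
          "join readings on readings.seq = senses.seq "
          "where readings.reading = ?", (u'ねこ',))
for (gloss,) in c.fetchall():
    print(gloss)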
Example #56
0
    def __init__(self, f):
        """Data parsing"""

        self.trackers = []  #: tracker properties and data
        self.groups = []  #: groups []
        self.priorities = []  #: priorities used
        self.resolutions = []  #: resolutions (index, name)
        self.tickets = []  #: all tickets
        self.statuses = []  #: status (idx, name)

        self.used_resolutions = {}  #: id:name
        self.used_categories = {}  #: id:name
        # id '100' means no category
        self.used_categories['100'] = None
        self.users = {}  #: id:name

        root = ElementTree().parse(f)

        self.users = {
            FlatXML(u).userid: FlatXML(u).username
            for u in root.find('referenced_users')
        }

        for tracker in root.find('trackers'):
            tr = Tracker(tracker)
            self.trackers.append(tr)

            # groups-versions
            for grp in tr.groups:
                # group ids are tracker-specific even if names match
                g = (grp.id, grp.group_name)
                if g not in self.groups:
                    self.groups.append(g)

            # resolutions
            for res in tr.resolutions:
                r = (res.id, res.name)
                if r not in self.resolutions:
                    self.resolutions.append(r)

            # statuses
            self.statuses = [(s.id, s.name) for s in tr.statuses]

            # tickets
            for tck in tr.tracker_items:
                if type(tck) == str:
                    print(repr(tck))
                self.tickets.append(tck)
                if int(tck.priority) not in self.priorities:
                    self.priorities.append(int(tck.priority))
                res_id = getattr(tck, "resolution_id", None)
                if res_id is not None and res_id not in self.used_resolutions:
                    for idx, name in self.resolutions:
                        if idx == res_id: break
                    self.used_resolutions[res_id] = \
                            dict(self.resolutions)[res_id]
                # used categories
                categories = dict(self.get_categories(tr, noowner=True))
                if tck.category_id not in self.used_categories:
                    self.used_categories[tck.category_id] = \
                            categories[tck.category_id]

        # sorting everything
        self.trackers.sort(key=lambda x: x.name)
        self.groups.sort()
        self.priorities.sort()
Example #57
0
 def __init__(self):
     if self.__TREE: return
     self.__TREE = ElementTree()
     self.__TREE.parse(self.__CONF_FILE)
     self.__ROOT = self.__TREE.getroot()
     self.__ENGINE = self.__TREE.findall('Service/Engine')[0]
Example #58
0
                gender = xml.findtext('gender')
                gender_ = SubElement(DOC, 'gender')
                gender_.text = gender

                minimum_age = xml.findtext('minimum_age')
                minimum_age_ = SubElement(DOC, 'minimum_age')
                minimum_age_.text = minimum_age

                maximum_age = xml.findtext('maximum_age')
                maximum_age_ = SubElement(DOC, 'maximum_age')
                maximum_age_.text = maximum_age

                '''verification_date'''
                verification_date = xml.findtext('verification_date')
                # print(verification_date)
                verification_date_ = SubElement(DOC, 'verification_date')
                verification_date_.text = verification_date

                '''keyword'''
                for keyword in xml.findall('keyword'):
                    # print(keyword.text)
                    keyword_ = SubElement(DOC, 'keyword')
                    keyword_.text = pre_process(keyword.text)

                tree = ElementTree(DOC)
                root = tree.getroot()  # get the root element (an Element instance)
                prettyXml(root, '\t', '\n')  # apply the pretty-printing helper

                tree.write(root_store_pre_cli_xml, encoding='utf-8')
                # print(root_store_pre_cli_xml)
        print(root_store_pre_cli_1 + ' Pre-processing has completed!')
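
The prettyXml helper called above is not shown in the snippet; a typical ElementTree indentation helper of that shape looks roughly like this (a sketch, not necessarily the original implementation):

def prettyXml(element, indent, newline, level=0):
    # Recursively pad text/tail with whitespace so the serialized XML is indented.
    if len(element):
        if element.text is None or not element.text.strip():
            element.text = newline + indent * (level + 1)
        for i, child in enumerate(element):
            prettyXml(child, indent, newline, level + 1)
            if child.tail is None or not child.tail.strip():
                # The last child closes back to the parent's indentation level.
                child.tail = newline + indent * (level if i == len(element) - 1 else level + 1)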
Example #59
0
class tomcat:
    __TREE = None
    __ENGINE = None
    __ROOT = None
    __CONF_FILE = '/www/server/tomcat/conf/server.xml'

    # Open the configuration file
    def __init__(self):
        if self.__TREE: return
        self.__TREE = ElementTree()
        self.__TREE.parse(self.__CONF_FILE)
        self.__ROOT = self.__TREE.getroot()
        self.__ENGINE = self.__TREE.findall('Service/Engine')[0]

    # Get the list of virtual hosts
    def GetVhosts(self):
        Hosts = self.__ENGINE.getchildren()
        data = []
        for host in Hosts:
            if host.tag != 'Host': continue
            tmp = host.attrib
            ch = host.getchildren()
            tmp['item'] = {}
            for c in ch:
                tmp['item'][c.tag] = c.attrib
            data.append(tmp)
        return data

    # Add a virtual host
    def AddVhost(self, path, domain):
        if self.GetVhost(domain): return False
        if not os.path.exists(path): return False
        attr = {
            "autoDeploy": "true",
            "name": domain,
            "unpackWARs": "true",
            "xmlNamespaceAware": "false",
            "xmlValidation": "false"
        }
        Host = Element("Host", attr)
        attr = {
            "docBase": path,
            "path": "",
            "reloadable": "true",
            "crossContext": "true",
        }
        Context = Element("Context", attr)
        Host.append(Context)
        self.__ENGINE.append(Host)
        self.Save()
        return True

    # Delete a virtual host
    def DelVhost(self, name):
        host = self.GetVhost(name)
        if not host: return False
        self.__ENGINE.remove(host)
        self.Save()
        return True

    # Get a specific virtual host by name
    def GetVhost(self, name):
        Hosts = self.__ENGINE.getchildren()
        for host in Hosts:
            if host.tag != 'Host': continue
            if host.attrib['name'] == name:
                return host
        return None

    # Change the document root
    def SetPath(self, name, path):
        if not os.path.exists(path): return False
        host = self.GetVhost(name)
        if not host: return False
        #host.attrib['appBase'] = path;
        host.getchildren()[0].attrib['docBase'] = path
        self.Save()
        return True

    # Modify a virtual host attribute
    def SetVhost(self, name, key, value):
        host = self.GetVhost(name)
        if not host: return False
        host.attrib[key] = value
        self.Save()
        return True

    # Save the configuration
    def Save(self):
        self.format(self.__ROOT)
        self.__TREE.write(self.__CONF_FILE, 'utf-8')

    # Re-indent the configuration file
    def format(self, em, level=0):
        i = "\n" + level * "  "
        if len(em):
            if not em.text or not em.text.strip():
                em.text = i + "  "
            for e in em:
                self.format(e, level + 1)
            if not e.tail or not e.tail.strip():
                e.tail = i
        if level and (not em.tail or not em.tail.strip()):
            em.tail = i
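
A hedged usage sketch for the class above; the domain and directory are made up, and the hard-coded server.xml path must exist and be writable.

t = tomcat()
t.AddVhost('/www/wwwroot/demo', 'demo.example.com')    # False if the host already exists or the path is missing
for host in t.GetVhosts():
    print(host['name'])
t.SetPath('demo.example.com', '/www/wwwroot/demo2')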
Example #60
0
#!/usr/bin/env python
# encoding: utf-8
"""
grenzen2python.py - convert OpenStreetMap data to python code

Created by Maximillian Dornseif on 2010-01-17.
Copyright (c) 2010 HUDORA. All rights reserved.
"""

import sys
import os
import unittest
from pprint import pprint

from xml.etree.ElementTree import ElementTree
tree = ElementTree()
tree.parse("data/de_landmasse_osm_relation_62781.gpx")
root = tree.getroot()

tracks = []
for trkseg in root.findall(".//trkseg"):
    track = []
    for trkpt in trkseg.findall('trkpt'):
        track.append((float(trkpt.attrib['lon']), float(trkpt.attrib['lat'])))
    tracks.append(track)

sys.stdout.write("# autogenerated - do not edit\n")
sys.stdout.write("deutschgrenzen = ")
pprint(tracks, sys.stdout)