import copy  # needed for copy.deepcopy below; ht (providing dict2et) and prettify come from elsewhere in the source module


def serializeGraph(g, xmlfile, write=False):
    res = {}
    for attr in g.attributes():
        res[attr] = g[attr]

    pipeline = []
    for v in g.vs:
        n = copy.deepcopy(v["varSet"])
        n["name"] = v["name"]
        pipeline.append(n)

    res['pipeline'] = pipeline

    root = "LocalisationTest"
    xmlTree = ht.dict2et(res,
                         root,
                         listnames={
                             "pipeline": "filter",
                             "dependencies": "dependency",
                             "paramInts": "paramInt",
                             "paramFloats": "ParamFloat"
                         })
    xmlString = prettify(xmlTree)
    if not write:
        return xmlString
    else:
        with open(xmlfile, "w") as text_file:
            text_file.write(xmlString)
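A note on the dict2et call used above and in the examples below: it converts a nested dict to an ElementTree, with listnames mapping a list-valued key to the tag used for each list entry (entries default to "item" when no mapping is given, which example #3 relies on). The following is a minimal stand-in sketch, not the library's implementation, just to illustrate that mapping:

# Minimal stand-in sketch of the dict2et behaviour assumed by these examples
# (NOT the real implementation; the real library may differ in detail).
from xml.etree import ElementTree as ET

def dict2et_sketch(data, roottag, listnames=None):
    listnames = listnames or {}
    root = ET.Element(roottag)
    _fill(root, data, listnames)
    return root

def _fill(parent, value, listnames):
    if isinstance(value, dict):
        for key, val in value.items():
            if isinstance(val, list):
                container = ET.SubElement(parent, key)
                itemtag = listnames.get(key, "item")  # default tag for list entries
                for entry in val:
                    _fill(ET.SubElement(container, itemtag), entry, listnames)
            else:
                _fill(ET.SubElement(parent, key), val, listnames)
    else:
        parent.text = str(value)

demo = dict2et_sketch({"pipeline": [{"name": "a"}, {"name": "b"}]},
                      "LocalisationTest", listnames={"pipeline": "filter"})
print(ET.tostring(demo, encoding="unicode"))
# -> <LocalisationTest><pipeline><filter><name>a</name></filter>
#    <filter><name>b</name></filter></pipeline></LocalisationTest>  (one line in reality)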
Example #2
File: piy.py  Project: pombreda/piy
def transform_pom_yaml_to_xml(path=None, fileName="pom.yaml", modelVersion="4.0.0", indent="    ", encoding="utf-8"):
    if path is None:
        path = os.getcwd()
    path = os.path.join(path, fileName)
    try:
        with open(path, "r") as f:
            content = f.read()
    except IOError:
        sys.exit("POM not fount at '%s'" % path)
    pom = yaml.safe_load(content)  # safe_load: a plain POM needs no arbitrary YAML tags
    if "project" not in pom:
        sys.exit("'project' root node not found")
    else:
        pom = __preprocess_attributes(pom)
        if not "modelVersion" in pom["project"]:
            pom["project"]["modelVersion"] = modelVersion
        elif not pom["project"]["modelVersion"] == modelVersion:
            sys.exit("Unsupported modelVersion " + pom["project"]["modelVersion"])
        pom["project"]["@xmlns"] = "http://maven.apache.org/POM/%s" % modelVersion
        pom["project"]["@xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
        pom["project"]["@xsi:schemaLocation"] = "http://maven.apache.org/POM/%s http://maven.apache.org/maven-v%s.xsd" % (modelVersion, modelVersion.replace(".", "_"))
        xml = dict2et(pom)
        dom = minidom.parseString(ElementTree.tostring(xml[0], encoding))
    print(dom.toprettyxml(indent))
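A hypothetical usage sketch of the function above, assuming it is run from the directory holding pom.yaml and that the module's helpers (__preprocess_attributes, dict2et) are available; the Maven coordinates below are made up for illustration:

# Hypothetical usage: create a minimal pom.yaml, then convert it.
import textwrap

with open("pom.yaml", "w") as f:
    f.write(textwrap.dedent("""\
        project:
          groupId: com.example
          artifactId: demo
          version: 1.0.0
    """))

transform_pom_yaml_to_xml()  # pretty-prints the generated pom.xml to stdout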
Example #3
def processFile(l):
    
    [js_file_path] = l
    
    try:
        # Compute AST but don't store, it's huge
        try:
            parser = Acorn()
            (acorn_ast, acorn_ok) = parser.run(os.path.join(corpus_root, js_file_path))
        except Exception:
            return (js_file_path, None, 'Parser fail')
        
        if not acorn_ok:
            return (js_file_path, None, 'Parser fail')
        
        functions = []
        ast_ok = True
        try:
            # Read ast in json format
            ast = json.loads(acorn_ast)
    
            # Convert to ElementTree xml
            root = dict2et(ast)
            
            # Find all function declarations 
            # Record character positions in original file 
            for fd in root.findall("*//item[type='FunctionDeclaration']"):
                functions.append((fd.find('start').text, 
                                  fd.find('end').text))
        except Exception:
            ast_ok = False
        
        if not ast_ok:
            return (js_file_path, None, 'JSON fail')

        # FIXME: FunctionExpression is different from FunctionDeclaration
        # FIXME: Can there be nested FunctionDeclarations?

        try:
            js = open(os.path.join(corpus_root, js_file_path), 'r').read().decode("utf-8")
        except UnicodeDecodeError:
            js = open(os.path.join(corpus_root, js_file_path), 'r').read()

        # Extract functions
        for (start, end) in functions:
            # extract function body
            f_body = js[int(start):int(end)]
            
            num_lines = f_body.count('\n')
    
            # write body to file
            f_file = os.path.basename(js_file_path).split('.')[0] + '_' + start + '_' + end + '.js'
            f_path = os.path.join(output_path, f_file)
                
            with open(f_path, 'wb') as f:
                try:
                    f.write(f_body)
                except UnicodeEncodeError:
                    f.write(f_body.encode("utf-8"))
        
        return (js_file_path, 'OK')
            
#             # check if not too short and not too long
#             statinfo_js = os.stat(f_path)
#             num_bytes = statinfo_js.st_size
#              
#             # Num tokens
#             try:
#                 tokens = Lexer(f_path).tokenList
#             except:
#                 return (js_file_path, None, 'Lexer fail')
#         
#             names = [t for (tt,t) in tokens if is_token_subtype(tt, Token.Name)]
#             nameCounter = Counter(names)
#              
#             if len(nameCounter.keys()):
#                 num_names = len(nameCounter.keys())
#                 mean_name_occurences = mean(nameCounter.values())
#                 median_name_occurences = median(nameCounter.values())
#             else:
#                 num_names = 0
#                 mean_name_occurences = 0.0
#                 median_name_occurences = 0.0
#         
#             return (js_file_path, f_file, num_lines, 
#                     num_bytes, num_names, 
#                     '%.2f' % float(mean_name_occurences), 
#                     '%.2f' % float(median_name_occurences))
        
    except Exception as e:
        return (js_file_path, None, str(e))
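The findall query in processFile works because dict2et, called without listnames, tags every JSON-array entry as "item", so each AST node carries its fields as child elements. A small self-contained sketch of the same lookup, with hand-written XML standing in for the converted AST (the real output shape may differ slightly):

from xml.etree import ElementTree as ET

# Hand-built stand-in for the XML produced from an Acorn AST; list entries are
# assumed to become <item> elements when no listnames mapping is supplied.
doc = ET.fromstring(
    "<ast><body>"
    "<item><type>FunctionDeclaration</type><start>0</start><end>42</end></item>"
    "<item><type>ExpressionStatement</type><start>43</start><end>60</end></item>"
    "</body></ast>"
)

# Same predicate style as above: match <item> nodes whose <type> text equals
# 'FunctionDeclaration', then read their character offsets.
for fd in doc.findall(".//item[type='FunctionDeclaration']"):
    print(fd.find("start").text, fd.find("end").text)  # -> 0 42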
Example #4
                count += 1
                v = possibleValue['code']  if possibleValue else None
                p = praw.replace("[x]", v or "" )
                if v: 
                    v =  re.sub("\(.*\)", "", v).replace("@", "").replace("*", "")
                if len(p.split(".")) == 1: v = p
                parent = ".".join(p.split(".")[:-1])
                predicate =  p.replace(".", ":", 1).replace(".","_")
                paths[p] = {
                    'fhir_path': p,
                    'type': v,
                    'subs': []
                }
                if parent in paths:
                    paths[parent]['subs'].append({
                        'predicate': predicate,
                        'fhir_path': p,
                        'relative_xpath': "/".join(['f:'+x for x in p.replace(paths[parent]['fhir_path']+".", "").split(".")]),
                        'type': v
                        })
    for f in FILES: process(f)
    return {'fhirdefs': paths.values()}
t = tree(glob.glob(PROFILE_DIR + "/*.profile.json"))
#print json.dumps(t, sort_keys=True,indent=2)
v = ElementTree.tostring(dict2et(t, None, listnames = {'fhirdefs': 'path', 'subs': 'sub'}))
import pprint
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(t)
dom = parseString(v)
print("\n".join(dom.toprettyxml().split("\n")[1:])).replace("<fhirdefs>", "<l:fhirdefs xmlns:l=\"http://local-mods\">").replace("</fhirdefs>", "</l:fhirdefs>")
Example #5
                        p,
                        'relative_xpath':
                        "/".join([
                            'f:' + x for x in p.replace(
                                paths[parent]['fhir_path'] +
                                ".", "").split(".")
                        ]),
                        'type':
                        v
                    })

    for f in FILES:
        process(f)
    return {'fhirdefs': paths.values()}


t = tree(glob.glob(PROFILE_DIR + "/*.profile.json"))
#print json.dumps(t, sort_keys=True,indent=2)
v = ElementTree.tostring(
    dict2et(t, None, listnames={
        'fhirdefs': 'path',
        'subs': 'sub'
    }))
import pprint
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(t)
dom = parseString(v)
print("\n".join(dom.toprettyxml().split("\n")[1:])).replace(
    "<fhirdefs>", "<l:fhirdefs xmlns:l=\"http://local-mods\">").replace(
        "</fhirdefs>", "</l:fhirdefs>")