def all_url_test():
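    """Walk the global RDF data graph and generate one Python module per ontology.

    Pass 1: count predicates, resolve each namespace prefix and wrap it in an
    Ontology object.  Pass 2: fetch every ontology, build class and alias
    definitions with the ast module, and write the modules under gentest/.

    Assumes the surrounding module provides classgenerator,
    test_global_data_files, Counter, ast (plus Module, ImportFrom, alias),
    Ontology, rebind, resolve, convert and convert2.
    """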
    l = classgenerator.Library()
    l2 = classgenerator.Library()  # second library rooted under gentest/ (configured but not used below)
    l2.base_path = ["gentest", "lib", "ontologies"]
    #print ("test of getting global data")
    g = test_global_data_files.get_global_data()
    #print (g)
    # tally how often each subject, predicate and object appears in the graph
    predicates = Counter()
    subjects = Counter()
    predicate_types = Counter()   # currently unused
    predicate_types2 = Counter()  # currently unused
    objects = Counter()
    rebind(g)
    for (s, p, o) in g:
        predicates[p] += 1
        subjects[s] += 1
        objects[o] += 1
    print "predicates"

    # first pass: map every namespace prefix seen in the predicates to an Ontology
    seen = {}
    libs = {}
    # consider up to 4230 of the most common predicates
    for (p, v) in predicates.most_common(4230):
        # skip hosts we do not want to generate modules for
        if 'openlinksw.com' in p:
            continue
        if 'cc.rww.io' in p:
            continue

        p2 = g.namespace_manager.qname(p)
        (ns, term) = p2.split(':')
        m = g.namespace_manager.store.namespace(ns)

        # skip the XHTML vocabulary
        if str(m) == 'http://www.w3.org/1999/xhtml/vocab#':
            continue

        if ns not in seen:
            #print "NS",ns, m
            seen[ns] = 1
            # print a bind() suggestion for prefixes that contain "ns" (typically auto-generated)
            if 'ns' in ns:
                print "g.namespace_manager.bind(\"{prefix}\",\"{url}\",True)  ".format(
                    prefix=ns, url=m)
            path = l.get_module(ns, m)
            #print ns, m, path
            if path:
                importl = l.get_import_path(path)
                prefix = l.get_import_path(ns)

                #l.createpath(path)
                #l.create_module(path,prefix,url=m)
                #print "import {module} as {prefix}".format(module=importl,prefix=ns)
                # some namespaces are fetched from an alternate document URL
                replace = {
                    'http://purl.org/dc/dcam/':
                    'https://raw.githubusercontent.com/dcmi/vocabtool/master/build/dcam.rdf'
                }

                if str(m) in replace:
                    m = replace[str(m)]

                # choose a parse format: default to guessing, with overrides
                # for ontologies known to be served as Turtle or RDF/XML
                _format = 'guess'
                turtles = [
                    'http://www.w3.org/ns/ldp#',
                    'http://usefulinc.com/ns/doap#',
                ]
                if str(m) in turtles:
                    _format = 'turtle'

                xmls = [
                    'http://xmlns.com/foaf/0.1/',
                    'http://www.w3.org/ns/auth/cert#',
                    'http://www.w3.org/ns/auth/acl#',
                    'http://www.w3.org/2000/10/swap/pim/doc#',
                    'http://www.w3.org/2003/06/sw-vocab-status/ns#',
                ]
                if str(m) in xmls:
                    _format = 'xml'

                # register the ontology for the second (code generation) pass
                o = Ontology(url=m, prefix=prefix, _format=_format)
                o.set_path(path)
                #print "prefix", prefix, m
                libs[prefix] = o

    ## second pass: revisit each ontology, fetch it and generate its linked module
    #pprint.pprint(libs)

    for p in libs:
        o = libs[p]
        prefix = o.prefix

        #print "Lib", p, o.path
        og = o.fetch(g.namespace_manager)
        rebind(og)
        od = o.extract_graph(og, l, libs)

        ours = od[0]     # terms defined in this ontology's namespace
        prefixs = od[2]  # prefixes that may need to be imported
        code = []        # generated class / alias AST nodes
        importcode = []  # generated import AST nodes

        # generate a class (plus a short alias instance) for every term this
        # ontology defines, using its rdf:type values as base classes
        used_prefixes = {}
        for x in ours:
            if 'http' in x:
                continue  # full URIs belong to other namespaces; skip them

            types = []
            attrs = ours[x]
            for y in attrs:
                p = y[0]
                s = y[1]
                p1 = resolve(p)
                s1 = resolve(s)
                # every rdf:type except owl:Restriction becomes a base class
                if p1 == 'rdf.type' and s1 != 'owl.Restriction':
                    types.append(s1)
                    # remember the prefix so its module gets imported below
                    used_prefixes[s[0]] = 1

            if not types:
                continue

            caml = convert2(x)
            short = convert(x)
            if caml.startswith('Ub'):
                continue  # skip 'Ub...' names (appear to be generated identifiers)

            classcode = ast.parse(
                "class {_class}({base}):\n    term=\"{name}\"\n".format(
                    _class=caml, name=x, base=",".join(types)))
            used_prefixes[prefix] = 1
            alias_code = ast.parse(
                "{alias} = {_class}()\n".format(alias=short, _class=caml))

            code.append(classcode)
            code.append(alias_code)

        ##### create import statements for the prefixes the generated classes use
        for x in prefixs:
            m = prefixs[x]

            if x not in used_prefixes:
                continue  # drop prefixes no generated class refers to

            if x == o.prefix:
                continue  # never import the module we are generating

            # "from <module> import ontology as <prefix>" as an AST node
            import_module = Module(body=[
                ImportFrom(module=m.module_name(),
                           names=[alias(name='ontology', asname=x)],
                           level=0)
            ])
            importcode.append(import_module)

        ### write the generated module under gentest/
        npath = "gentest/" + o.path
        #path = l.get_module(ns,m)
        #print ns, m, path
        importl = l.get_import_path(npath)
        #prefix = l.get_import_path(ns)
        l.createpath(npath)
        print "npath", npath
        #print code
        l.create_module(npath,
                        o.prefix,
                        url=o.base,
                        members=code,
                        imports=importcode)