Example #1
    def VisualizeToHTML(self):
        var_nodes = ',\n'.join(
            ["            {{id: {}, label: '{}', group: '{}'}}".format(node.id, node.name, node.type)   #.type[1:-1]
             for node in self.tree.GetAllNodes()])

        var_edges = ',\n'.join(
            ["            {{from: {}, to: {}}}".format(str(i), str(j)) for (i, j) in self.tree.GetAllEdges()])

        code = '<html>\n<head>\n<script type="text/javascript" src="lib/vis/dist/vis.js"></script>\n' \
            '<link href="lib/vis/dist/vis.css" rel="stylesheet" type="text/css" />\n\n<style type="text/css">' \
            '\n\t#mynetwork {\n\t\twidth: 100%;\n\t\theight: 100%;\n\t\tborder: 1px solid lightgray;\n\t}\n</style>' \
            '\n</head>\n<body>\n<div id="mynetwork"></div>\n\n<script type="text/javascript">\nvar nodes = new vis.DataSet([\n' + \
            var_nodes + '\n\t\t]);\n\n\t\tvar edges = new vis.DataSet([\n' + var_edges + \
            '\n\t\t]);\n\n// create a network\nvar container = document.getElementById(\'mynetwork\');' + \
            '\nvar data = {\nnodes: nodes,\nedges: edges\n};\nvar options = {\n\t\tnodes: {\n\t\t\tfont: {size: 10},' + \
            '\n\t\t\tborderWidth: 0.3,\n\t\t\tborderWidthSelected: 0.5,\n\t\t\tcolor: {background:\'#F0F5FA\'},\n\t\t\tshape: ' + \
            '\'dot\',\n\t\t\tsize: 10\n\t\t},\n\t\tedges: {\n\t\t\twidth: 0.5,\n\t\t\tarrows: {\n\t\t\t\tto: {\n\t\t\t\t\tenabled: ' + \
            'true,\n\t\t\t\t\tscaleFactor: 0.3\n\t\t\t\t}\n\t\t\t},\n\t\t\tselectionWidth: 0.5\n\t\t},\n\t\tgroups: {\n\t\t\ti: ' + \
            '{\n\t\t\t\tcolor: {background:\'#fddddd\'},\n\t\t\t\tedges: {dashes:true}\n\t\t\t},\n\t\t\tc: {\n\t\t\t\tcolor: ' + \
            '{background:\'#c7e3f9\'},\n\t\t\t\tshape: \'dot\',\n\t\t\t\tsize: 10\n\t\t\t},\n\t\t\ta: {\n\t\t\t\tcolor: {\n\t\t\t\t' + \
            '\tbackground:\'#f5fafe\',\n\t\t\t\t\topacity: 0.5\n\t\t\t\t},\n\t\t\t},\n\t\t\te: {\n\t\t\t\tcolor: {background:\'#e6eeee\'},' + \
            '\n\t\t\t\tshape: \'dot\',\n\t\t\t\tsize: 10\n\t\t\t}\n\t\t}\n\t};\nvar network = new vis.Network(container, data, options);' + \
            '\n</script>\n\n<script src="../googleAnalytics.js"></script>\n</body>\n</html>\n\t\t'

        html_file_name = 'treee.html'
        with c_open(html_file_name, 'w', 'utf-8') as f:
            f.write(code)
        # Open the page only after the with block has flushed and closed the file.
        try:
            webbrowser.get('windows-default').open('file://{}'.format(os.path.abspath(html_file_name)))
        except Exception:
            webbrowser.open('file://{}'.format(os.path.abspath(html_file_name)))
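The chained string concatenation in Example #1 is hard to follow; as a minimal sketch (not the project's code, with hypothetical node/edge inputs and a pared-down options object), the same vis.js page can be produced from one template string:

from codecs import open as c_open

# Hypothetical standalone helper: nodes is an iterable of (id, label, group)
# tuples, edges an iterable of (from_id, to_id) tuples.
HTML_TEMPLATE = """<html>
<head>
<script type="text/javascript" src="lib/vis/dist/vis.js"></script>
<link href="lib/vis/dist/vis.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="mynetwork"></div>
<script type="text/javascript">
var nodes = new vis.DataSet([
{nodes}
]);
var edges = new vis.DataSet([
{edges}
]);
var container = document.getElementById('mynetwork');
var network = new vis.Network(container, {{nodes: nodes, edges: edges}}, {{}});
</script>
</body>
</html>
"""

def render_graph_html(nodes, edges, file_name='tree.html'):
    var_nodes = ',\n'.join(
        "    {{id: {}, label: '{}', group: '{}'}}".format(i, label, group)
        for (i, label, group) in nodes)
    var_edges = ',\n'.join(
        "    {{from: {}, to: {}}}".format(i, j) for (i, j) in edges)
    with c_open(file_name, 'w', 'utf-8') as f:
        f.write(HTML_TEMPLATE.format(nodes=var_nodes, edges=var_edges))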
Example #2
 def __init__(self):
     json_file = c_open("maps.json", encoding='utf-8')
     file_maps = load(json_file)
     json_file.close()
     self.maps = {}
     for ids in file_maps:
         if ids != '_template':
             self.maps[ids] = Map(file_maps[ids])
Example #3
 def __init__(self):
     json_file = c_open("mobs.json", encoding='utf-8')
     file_mobs = load(json_file)
     json_file.close()
     self.mobs = {}
     for ids in file_mobs:
         if ids != '_template':
             self.mobs[ids] = TypeMob(file_mobs[ids])
Example #4
    def __init__(self, projectRoot='', sourceRoot='', classRoot='', libRoot='', destination='', saveClasstreeTo='', saveInhtreeTo='', verbose=False):

        self.destination = os.path.abspath(destination)
        self.classDict = defaultdict(list)

        classlist = []
        print('[.] searching for files, loading classes, please wait ...')
        if projectRoot:
            classlist = [f.replace(os.path.sep, '.').split('.') for f in
                         self.__recursiveSearch(projectRoot, ['.java','.class','.jar'], verbose=verbose)]
        else:
            if sourceRoot:
                classlist = [f.replace(os.path.sep, '.').split('.') for f in
                            self.__recursiveSearch(sourceRoot, ['.java'], verbose=verbose)]
            if classRoot:
                classlist += [f.replace(os.path.sep, '.').split('.') for f in
                            self.__recursiveSearch(classRoot, ['.class'], verbose=verbose)]
            if libRoot:
                classlist += [f.replace(os.path.sep, '.').split('.') for f in
                            self.__recursiveSearch(libRoot, ['.jar'], verbose=verbose)]

        tree = defaultdict(list)
        for node in classlist:
            if len(node) == 1:
                tree[''].append(node[0])
            elif len(node) > 1:
                tree['.'.join(node[:len(node) - 1])].append(node[-1])
        self.classDict = tree
        print('[+] all classes loaded, decompiled or copied to {}'.format(self.destination))

        if not saveClasstreeTo:
            saveClasstreeTo = os.path.join(self.destination, 'classtree.json')
        s = json.dumps(self.classDict, indent=4, sort_keys=True, separators=(',',':'))
        with c_open(saveClasstreeTo, 'w', 'utf-8') as f:
            f.write(s)
            print('[+] serialized class tree has been saved to {}'.format(os.path.abspath(saveClasstreeTo)))

        if not saveInhtreeTo:
            saveInhtreeTo = os.path.join(self.destination, 'inhtree.json')
        print('[.] analyzing copied files, please wait ...')
        self.inhTree = Tree()
        self.notFoundImports = list()  # list of fqdn-names
        self.notResolvedParents = list()  # list of .java-files whose parents could not be resolved
        self.brokenFiles = list()  # list of .java-files which caused error while parsing
        for klasse in [os.sep.join(c)+'.java' for c in classlist]:
            try:
                jc = self.parse_Code(os.path.join(self.destination, klasse), verbose)
                # TO-DO: nested classes
                #if '$' not in classfullname:
                self.inhTree.add_node(jc.fullclassname, type=jc.classtype, extends=jc.parent_classes, implements=jc.parent_ifaces)
            except (ParseException, AssertionError) as e:
                self.brokenFiles.append(klasse)
                continue
        self.notFoundImports = self.__CleanInnerList(self.notFoundImports)
        self.notResolvedParents = self.__CleanInnerList(self.notResolvedParents)
        SerializeAndSaveInhtree(self.inhTree, saveInhtreeTo)
Example #5
def about(item=None):
    _about = {}
    here = os.path.abspath(os.path.dirname(__file__))
    with c_open(os.path.join(here, "certgenerator", "__version__.py"), 'r',
                'utf-8') as version:
        exec(version.read(), _about)

    if item:
        return _about[item]
    return _about
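Examples #5, #15 and #30 all exec a small __version__.py module into a dict to single-source package metadata; for reference, such a file typically looks like the following sketch (the exact fields and values here are assumptions, not taken from the projects):

# __version__.py (hypothetical contents)
__title__ = 'certgenerator'
__version__ = '0.1.0'
__author__ = 'Jane Doe'
__long_description__ = 'README.md'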
Example #6
def read_migration_sql_file(filename):
    print("Reading migration .sql file: {}...".format(filename))
    try:
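        # 'utf-8-sig' transparently skips a leading UTF-8 byte-order mark (BOM) if present.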
        with c_open("migrations/scripts/" + filename,
                    mode='r',
                    encoding='utf-8-sig') as f:
            migration_file = f.read()
    except Exception:
        print("Couldn't open migrations/scripts/{}".format(filename))
        exit(-1)
    return migration_file
Example #7
    def gng_importer(self, corpus_file):
        """Fill in self.ngcorpus from a Google NGram corpus file.

        :param file corpus_file: The Google NGram file from which to
            initialize the n-gram corpus
        """
        with c_open(corpus_file, 'r', encoding='utf-8') as gng:
            for line in gng:
                line = line.rstrip().split('\t')
                words = line[0].split()

                self._add_to_ngcorpus(self.ngcorpus, words, int(line[2]))
Example #8
 def parse_Code(self, filename, verbose=False):
     j = Javaclass()
     with c_open(filename, 'r', 'utf-8') as src:
         code = src.read()
     (j.classtype, j.fullclassname) = self.parse_classname(code, (filename))
     assert j.classtype, j.fullclassname
     j.package = j.fullclassname[:j.fullclassname.rfind('.')]
     assert j.package
     j.classname = j.fullclassname[j.fullclassname.rfind('.') + 1:]
     j.imports = self.parse_imports(code)
     (j.parent_classes, j.parent_ifaces) = self.parse_parents(code, (j.classtype, j.classname, j.imports, j.package))
     j.set_short_type(j.classtype)
     return j
Example #9
 def __init__(self):
     json_file = c_open("classes.json", encoding='utf-8')
     file_classes = load(json_file)
     json_file.close()
     self.classes = {}
     for ids in file_classes:
         if ids != '_template':
             self.classes[ids] = Classe(
                 file_classes[ids]['NAME'], file_classes[ids]['SPELLS'],
                 file_classes[ids]['BASEHP'], file_classes[ids]['XHP'],
                 file_classes[ids]['MOVEMENTPOINTS'],
                 file_classes[ids]["ACTIONPOINTS"], ids)
Example #10
def write_file(location, data):
    '''
    Write string data into files

    :param location: filename where to write to
    :param data: content to write into ``filename``
    :return: ``data`` if successful
    '''
    location = check_file_location(location)
    if location and (data is not None):
        with c_open(location, 'w', encoding='utf-8') as wl:
            wl.write(data)
            return data
Example #12
 def __init__(self):
     json_file = c_open("spells.json", encoding='utf-8')
     file_spells = load(json_file)
     json_file.close()
     self.spells = {}
     for ids in file_spells:
         if ids != '_template':
             if file_spells[ids]['SHAPE']["SHAPE"] == "LINE":
                 self.spells[ids] = LineSpell(file_spells[ids]['NAME'],
                                              file_spells[ids]['COST'],
                                              file_spells[ids]['TYPE'],
                                              file_spells[ids]['RELOAD'],
                                              file_spells[ids]['EFFECTS'],
                                              file_spells[ids]['SHAPE'])
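Examples #2, #3, #9 and #12 repeat the same load-and-filter pattern; a generic sketch (the load_registry helper and its factory parameter are hypothetical, not part of the project) that also lets a with block close the file:

from codecs import open as c_open
from json import load

def load_registry(json_path, factory):
    # Every entry except the '_template' placeholder is passed to `factory`.
    with c_open(json_path, encoding='utf-8') as json_file:
        raw = load(json_file)
    return {ids: factory(raw[ids]) for ids in raw if ids != '_template'}

# e.g. self.maps = load_registry("maps.json", Map)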
Example #13
def read_file(location, fallback=None):
    '''
    Read string data from files

    :param location: filename where to write to
    :param fallback: data to return in case of read failure
    :return: read data from ``location`` if successful else ``fallback``
    '''
    location = check_file_location(location, must_exist=True)
    if location:
        with c_open(location, 'r', encoding='utf-8') as rl:
            data = rl.read()
            if data is not None:
                return data
    return fallback
Example #15
 def get_app_info(item=None):
     """
     Return app info
     :param item:
     :return:
     """
     about = {}
     with c_open(os.path.join(here, "__version__.py"), 'r', 'utf-8') as f:
         exec(f.read(), about)
     for i in about:
         if "__long_description__" in i:
             try:
                 with c_open(about[i], 'r', 'utf-8') as ld:
                     about[i] = ld.read()
             except IOError:
                 about[i] = ""
     if item:
         return about[item]
     return about
Example #16
    def gng_importer(self, corpus_file):
        """Fill in self.ngcorpus from a Google NGram corpus file.

        Parameters
        ----------
        corpus_file : file
            The Google NGram file from which to initialize the n-gram corpus


        .. versionadded:: 0.3.0

        """
        with c_open(corpus_file, 'r', encoding='utf-8') as gng:
            for line in gng:
                line = line.rstrip().split('\t')
                words = line[0].split()

                self._add_to_ngcorpus(self.ngcorpus, words, int(line[2]))
Example #17
    def gng_importer(self, corpus_file: str) -> None:
        """Fill in self.corpus from a Google NGram corpus file.

        Parameters
        ----------
        corpus_file : file
            The Google NGram file from which to initialize the n-gram corpus


        .. versionadded:: 0.4.0

        """
        with c_open(corpus_file, 'r', encoding='utf-8') as gng:
            for line in gng:
                word, _, count, doc_count = line.rstrip().split('\t')
                if '_' in word:
                    word = word[:word.find('_')]

                self._add_word(word, int(count), int(doc_count))
            self._update_doc_count()
Example #18
    def gng_importer(self, corpus_file):
        """Fill in self.corpus from a Google NGram corpus file.

        Parameters
        ----------
        corpus_file : file
            The Google NGram file from which to initialize the n-gram corpus


        .. versionadded:: 0.4.0

        """
        with c_open(corpus_file, 'r', encoding='utf-8') as gng:
            for line in gng:
                line = line.rstrip().split('\t')
                word = line[0]
                if '_' in word:
                    word = word[: word.find('_')]

                self._add_word(word, int(line[2]), int(line[3]))
            self._update_doc_count()
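For context, the corpus lines consumed by the gng_importer variants above are tab-separated; a self-contained sketch of the same parsing into a plain dict (field layout inferred from the indices used above, not from project documentation):

from codecs import open as c_open
from collections import defaultdict

def load_ngram_counts(corpus_file):
    # Assumed layout per line: word<TAB>(ignored)<TAB>match_count<TAB>volume_count,
    # matching line[0], line[2] and line[3] in the examples above.
    counts = defaultdict(int)
    with c_open(corpus_file, 'r', encoding='utf-8') as gng:
        for line in gng:
            fields = line.rstrip().split('\t')
            word = fields[0].split('_')[0]  # drop a POS suffix such as "run_VERB"
            counts[word] += int(fields[2])
    return counts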
Example #19
def SerializeAndSaveInhtree(inhTree, file):
    with c_open(file, 'w', 'utf-8') as f:
        f.write('{\n')
        i, num_nodes = 0, len(inhTree.nodes)
        for node_name in inhTree.nodes.keys():
            node = inhTree[node_name]
            if node.name in inhTree.roots:
                name = '<root>' + node.name
            else:
                name = node.name
            if node.children:
                f.write('\t"{}": [\n\t\t"{}"\n\t]'.format(
                    '[{}]{}'.format(node.type, name),
                    '",\n\t\t"'.join([ch for ch in sorted(node.children, key=str.lower)])
                ))
            else:
                f.write('\t"[{}]{}": []'.format(node.type, name))
            if i < num_nodes - 1:
                f.write(',\n')
            i += 1
        f.write('\n}')
    print('[+] inheritance tree has been saved to {}'.format(os.path.abspath(file)))
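SerializeAndSaveInhtree writes its JSON by hand; a sketch of the same output built with json.dumps instead (assuming the tree interface used above: .nodes, .roots, and per-node .name, .type, .children), noting that key order and line layout may differ slightly from the handwritten writer:

from codecs import open as c_open
import json
import os

def serialize_inhtree_json(inh_tree, file):
    out = {}
    for node_name in inh_tree.nodes:
        node = inh_tree[node_name]
        name = '<root>' + node.name if node.name in inh_tree.roots else node.name
        key = '[{}]{}'.format(node.type, name)
        out[key] = sorted(node.children, key=str.lower) if node.children else []
    with c_open(file, 'w', 'utf-8') as f:
        f.write(json.dumps(out, indent=4, ensure_ascii=False))
    print('[+] inheritance tree has been saved to {}'.format(os.path.abspath(file)))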
Example #20
    elif not os.path.isfile(inhTreeFile):
        parser.print_help()
        print('\n[-] Error: json-file {} containing previously built class tree not found.'.format(
            os.path.abspath(inhTreeFile)))
        exit(3)

    inhTree = InheritanceTree(inhTreeFile)

    saveTo = args.saveTo
    if saveTo is not None:
        SerializeAndSaveInhtree(inhTree.tree, saveTo)

    printBreadthFirst = args.printBreadthFirst
    saveBreadthFirst = args.saveBreadthFirst
    if saveBreadthFirst is not None:
        with c_open(saveBreadthFirst, 'w', encoding='utf-8') as fileDescr:
            inhTree.WriteTreeBreadthFirst(fileDescr, verbose)
    elif printBreadthFirst:
        inhTree.DisplayTreeBreadthFirst()

    printRecursively = args.printRecursively
    saveRecursively = args.saveRecursively
    withoutInterfaces = args.withoutInterfaces
    if saveRecursively is not None:
        with c_open(saveRecursively, 'w', encoding='utf-8') as fileDescr:
            inhTree.WriteTreeRecursively(fileDescr, withoutInterfaces, verbose=verbose)
    elif printRecursively:
        inhTree.DisplayTreeRecursively()

    visualize = args.visualize
    if visualize:
Example #21
def readfile(location):
    if path.exists(location):
        with c_open(location, 'r') as fl:
            return fl.read()
Example #22
def writefile(location, content):
    with c_open(location, 'w') as fl:
        return fl.write(content)
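Examples #21 and #22 pass no encoding, in which case codecs.open simply returns the built-in file object; a sketch of the same helpers with an explicit encoding (the utf-8 default is an assumption):

from codecs import open as c_open
from os import path

def readfile(location, encoding='utf-8'):
    if path.exists(location):
        with c_open(location, 'r', encoding=encoding) as fl:
            return fl.read()

def writefile(location, content, encoding='utf-8'):
    with c_open(location, 'w', encoding=encoding) as fl:
        return fl.write(content)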
Example #23
try:
    from importlib.util import module_from_spec, spec_from_file_location
except ImportError:  # Python < 3.5
    try:
        from importlib.machinery import SourceFileLoader
    except ImportError:  # Python < 3.3 - treat as Python 2 (otherwise unsupported).
        from imp import load_source
        METADATA = load_source('metadata', METADATA_PATH)
    else:  # Python 3.3 or 3.4
        LOADER = SourceFileLoader('metadata', METADATA_PATH)
        METADATA = LOADER.load_module('metadata')  # pylint: disable=deprecated-method
else:
    SPEC = spec_from_file_location('metadata', METADATA_PATH)
    METADATA = module_from_spec(SPEC)
    SPEC.loader.exec_module(METADATA)

with c_open(join(CWD, 'README.rst'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

SETUP_REQS = [
    'setuptools_scm',
]

SPHINX_REQ = 'Sphinx < 2'

DOCS_REQS = [SPHINX_REQ]

TEST_REQS = [
    'hypothesis < 4',
    'hypothesis-pytest < 1',
    'py < 2',
    'pytest < 5',
Example #24
def load_migrations_json():
    print("Loading migrations file...")
    with c_open("/root/migrate/migrations/target_steps.json") as json_data:
        data = json.load(json_data)
    return data
Example #25
'''
My DOCSTRING
'''

from codecs import open as c_open
from os import path
from setuptools import setup

here = path.abspath(path.dirname(__file__))

with c_open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='gforms_automation',
    version='1.0a1',
    description='A Python3 module that uses Selenium to provide '
                'a programmatic interface to Google Forms',
    author='Vijay Pillai',
    author_email='*****@*****.**',
    py_modules=["gform"],
    install_requires=['selenium'])
Example #26
File: setup.py Project: jab/bidict
except ImportError:  # Python < 3.5
    try:
        from importlib.machinery import SourceFileLoader
    except ImportError:  # Python < 3.3 - treat as Python 2 (otherwise unsupported).
        from imp import load_source
        METADATA = load_source('metadata', METADATA_PATH)
    else:  # Python 3.3 or 3.4
        LOADER = SourceFileLoader('metadata', METADATA_PATH)
        METADATA = LOADER.load_module('metadata')  # pylint: disable=deprecated-method
else:
    SPEC = spec_from_file_location('metadata', METADATA_PATH)
    METADATA = module_from_spec(SPEC)
    SPEC.loader.exec_module(METADATA)


with c_open(join(CWD, 'README.rst'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()


# Manually keep these version pins in sync with those in .travis.yml and .pre-commit-config.yaml.

SETUP_REQS = [
    'setuptools_scm < 4',
]

SPHINX_REQ = 'Sphinx < 2'

DOCS_REQS = [SPHINX_REQ]

TEST_REQS = [
    'hypothesis < 5',
Example #27
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setuptools setup for MRBLEs package."""

from os import path
# To use a consistent encoding
from codecs import open as c_open
# Always prefer setuptools over distutils
from setuptools import setup, find_packages

ABS_PATH = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with c_open(path.join(ABS_PATH, 'README.rst'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

setup(
    name='mrbles',

    # Versions should comply with PEP440.  For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='1.0.1',
    description='MRBLEs decoding and analysis package',
    long_description=LONG_DESCRIPTION,

    # The project's main homepage.
    url='https://github.com/FordyceLab/MRBLEs',

    # Author details
    author='Björn Harink',
Example #28
    with c_open(os.path.join(here, "certgenerator", "__version__.py"), 'r',
                'utf-8') as version:
        exec(version.read(), _about)

    if item:
        return _about[item]
    return _about


def basedir():
    return os.path.abspath(os.path.dirname(__file__))


requirements_file = os.path.join(basedir(), 'requirements.txt')
if os.path.exists(requirements_file):
    with c_open(requirements_file) as f:
        requirements = f.read().splitlines()


def long_description():
    try:
        return open(about('__long_description__')).read()
    except IOError:
        return ""


setup(
    name=about('__title__'),
    version=about('__version__'),
    author=about('__author__'),
    author_email=about('__author_email__'),
Example #29
from re import search
from codecs import open as c_open
from os import path

from setuptools import setup

name = "kedro-doorstop"
here = path.abspath(path.dirname(__file__))

# get package version
package_name = name.replace("-", "_")
with c_open(path.join(here, package_name, "__init__.py"), encoding="utf-8") as f:
    version = search(r'__version__ = ["\']([^"\']+)', f.read()).group(1)

# get the dependencies and installs
with c_open("requirements.txt", "r", encoding="utf-8") as f:
    requires = [x.strip() for x in f if x.strip()]

# get test dependencies and installs
with c_open("test_requirements.txt", "r", encoding="utf-8") as f:
    test_requires = [x.strip() for x in f if x.strip() and not x.startswith("-r")]

# Get the long description from the README file
with c_open(path.join(here, "README.md"), encoding="utf-8") as f:
    readme = f.read()

setup(
    name=name,
    version=version,
    description="Kedro-Doorstop makes it easy to manage software requirements in Kedro projects",
    long_description=readme,
Example #30
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open as c_open
import os

here = os.path.abspath(os.path.dirname(__file__))

about = {}
with c_open(os.path.join(here, 'pytree', '__version__.py'), 'r', 'utf-8') as f:
    exec(f.read(), about)

with c_open('README.md') as f:
    _readme = f.read()

with c_open('LICENSE') as f:
    _license = f.read()

setup(
    name='pytree',
    version=about['__version__'],
    description='list contents of directories in a tree-like format.',
    long_description=_readme,
    author='Luke Du',
    author_email='*****@*****.**',
    url='https://github.com/adu-21/pytree',
    license=_license,
    # include all packages under pytree
    packages=find_packages(exclude=('tests', 'docs')),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",