示例#1
0
def get_categories(category=None):
    """List entries under the packaged ``data`` directory.

    With no argument, return the top-level names in ``data``; given a
    category name, return only the sub-directories inside it.
    """
    if category is None:
        return resource_listdir(__name__, "data")
    base = "data/" + category
    return [entry for entry in resource_listdir(__name__, base)
            if resource_isdir(__name__, base + "/" + entry)]
示例#2
0
    def setUp(self):
        """Prepare an isolated working directory for each test.

        Chdirs into a fresh temp dir so tests don't clutter the real
        cwd; falls back to the original cwd for the test methods that
        need a working pkg_resources.resource_listdir (see below).
        """
        # Compare OrderedDicts like plain dicts in assertEqual.
        self.addTypeEqualityFunc(OrderedDict, self.assertDictEqual)
        self.maxDiff = None
        self.modulename = "example"
        # When testing, we want to avoid cluttering cwd with temporary
        # directories, so we chdir to a temp dir, but in some cases,
        # particularly when testing on travis-ci, changing the wd
        # makes subsequent calls to pkg_resources.resource_listdir
        # fail (at least for python <= 3.2). It also happens sometimes
        # when testing locally. pkg_resources is too dense for me to
        # find out why, so we just detect if that is the case and
        # adapt for those test methods that need a working
        # pkg_resources.resource_listdir
        self.tempdir = tempfile.mkdtemp()
        self.orig_cwd = os.getcwd()
        os.chdir(self.tempdir)
        try:
            # Probe whether pkg_resources still works after the chdir.
            import pkg_resources
            pkg_resources.resource_listdir('ferenda', 'res')
        except OSError:  # No such file or directory, thrown by
                         # os.listdir() call in
                         # pkg_resources/__init__.py
            # Only these tests depend on resource_listdir; for them,
            # stay in the original cwd instead of the temp dir.
            if self.id() in ('testManager.Run.test_run_single_allmethods',
                             'testManager.Run.test_run_makeresources_defaultconfig',
                             'testManager.Run.test_run_all_allmethods'):
                print("%s: couldn't change to tempdir, running in %s instead" %
                      (self, self.orig_cwd))
                os.chdir(self.orig_cwd)
                self.tempdir = os.getcwd()
                # NOTE(review): None presumably signals "no chdir to
                # undo" to tearDown — confirm against tearDown.
                self.orig_cwd = None

        self._setup_files(self.tempdir,  self.modulename)
        # Make the generated module importable from the tests.
        sys.path.append(self.tempdir)
示例#3
0
def _fuzzdb_get_strings(max_len=0):
    """Yield all fuzz strings bundled with fuzzdb.

    Args:
        max_len: if non-zero, skip strings longer than this many
            characters (measured on the decoded, stripped string).

    Yields:
        str: one non-empty, non-comment line per fuzzdb ``.txt`` file.
    """
    ignored = ['integer-overflow']

    for subdir in pkg_resources.resource_listdir('protofuzz', BASE_PATH):
        if subdir in ignored:
            continue

        dir_path = '{}/{}'.format(BASE_PATH, subdir)
        listing = pkg_resources.resource_listdir('protofuzz', dir_path)
        for filename in listing:
            if not filename.endswith('.txt'):
                continue

            file_path = '{}/{}/{}'.format(BASE_PATH, subdir, filename)
            source = _open_fuzzdb_file(file_path)
            for line in source:
                string = line.decode('utf-8').strip()
                if not string or string.startswith('#'):
                    continue
                # BUG FIX: measure the decoded, stripped string, not the
                # raw bytes line, which included the trailing newline and
                # counted UTF-8 bytes rather than characters.
                if max_len != 0 and len(string) > max_len:
                    continue

                yield string
示例#4
0
    def get_package_contents(self):
        """Copy every bundled test file into the custom directory.

        Walks the entries under ``TESTS_PACKAGES`` (skipping dunder
        names) and copies each non-``.pyc`` file found in that entry's
        ``TESTS_PACKAGES_SUBDIR`` folder.
        """
        packages = [
            name for name in pkg_resources.resource_listdir(self.TESTS_PACKAGES, "")
            if not name.startswith("__")
        ]

        for package in packages:
            rel_dir = os.path.join(*(package, self.TESTS_PACKAGES_SUBDIR))
            for entry in pkg_resources.resource_listdir(self.TESTS_PACKAGES, rel_dir):
                if entry.endswith(".pyc"):
                    continue
                self.copy_to_custom_dir(rel_dir, entry)
示例#5
0
    def _copy_static_resources(html_path):
        """Copy the plugin's bundled static assets next to the report.

        Each sub-directory of ``resources/static`` is mirrored into a
        directory of the same name beside ``html_path``.
        """
        module = "avocado_result_html"
        base_path = "resources/static"
        out_root = os.path.dirname(os.path.abspath(html_path))

        for top_dir in pkg_resources.resource_listdir(module, base_path):
            rsrc_dir = base_path + "/%s" % top_dir
            if not pkg_resources.resource_isdir(module, rsrc_dir):
                continue
            for rsrc_file in pkg_resources.resource_listdir(module, rsrc_dir):
                source = pkg_resources.resource_filename(
                    module, rsrc_dir + "/%s" % rsrc_file)
                dest = os.path.join(out_root, top_dir, os.path.basename(source))
                pkg_resources.ensure_directory(dest)
                shutil.copy(source, dest)
示例#6
0
    def load_collection(self, package, path, keyname=None, display_name=None):
        """Load a marker collection from SVG resources bundled in *package*.

        Walks ``path`` inside the package: each sub-directory becomes a
        MarkerCategory and each ``.svg`` file inside it becomes a Marker
        whose image is (re)loaded from the resource stream.

        :param package: package name holding the marker resources
        :param path: resource path of the collection inside the package
        :param keyname: collection key; defaults to the last path segment
        :param display_name: collection display name; defaults to keyname
        """
        if not keyname:
            keyname = path.split("/")[-1]
        if not display_name:
            display_name = keyname

        # Get-or-create the collection row.
        try:
            collection = MarkerCollection.filter_by(keyname=keyname).one()
        except NoResultFound:
            collection = MarkerCollection(keyname=keyname, display_name=display_name).persist()

        for catname in resource_listdir(package, path):
            # Only sub-directories are categories; plain files are skipped.
            if not resource_isdir(package, path + "/" + catname):
                continue

            # Get-or-create the category row.
            try:
                category = MarkerCategory.filter_by(keyname=catname).one()
            except NoResultFound:
                category = MarkerCategory(keyname=catname, display_name=catname).persist()

            for fn in resource_listdir(package, path + "/" + catname):
                if not fn.endswith(".svg"):
                    continue

                if resource_isdir(package, path + "/" + catname + "/" + fn):
                    continue

                # Marker key is the file name without its .svg suffix.
                mkeyname = re.sub(r"\.svg$", "", fn)

                try:
                    marker = Marker.filter_by(keyname=mkeyname).one()

                    # An existing marker must already belong to this
                    # collection and category; a duplicate key elsewhere
                    # is treated as a hard error (AssertionError is not
                    # caught by the NoResultFound handler below).
                    assert marker.collection == collection, "Marker '%s' found in collection '%s'!" % (
                        mkeyname,
                        marker.collection.keyname,
                    )

                    assert marker.category == category, "Marker '%s' found in category '%s'!" % (
                        mkeyname,
                        marker.category.keyname,
                    )

                except NoResultFound:
                    marker = Marker(
                        collection=collection, category=category, keyname=mkeyname, display_name=mkeyname
                    ).persist()

                # (Re)load the SVG content whether the marker is new or not.
                marker.load_file(resource_stream(package, path + "/" + catname + "/" + fn))
示例#7
0
    def __init__(self):
        """Initialize the project manager.

        Scans the logic resource package for project directories, reads
        each ``.project`` file to collect metadata (internal name and
        dependencies), then builds Project objects in a second pass so
        the internal names stay hidden from the user.
        """
        _logger.info("Initializing project manager")
        self._projects = {}

        metadata = {}           # internal name -> Metadata(project name, dependencies)
        logic_resource_pkg = settings.LOGIC_RESOURCE_PKG

        # Iterate over all project files
        for project in resource_listdir(logic_resource_pkg, '.'):
            # Skip empty resource paths (apparently, that can happen!!)
            if not project:
                continue

            # Skip ordinary files
            if not resource_isdir(logic_resource_pkg, project):
                continue

            # Construct project path
            project_dir = project

            # Find the .project file inside the directory
            for resource in resource_listdir(logic_resource_pkg, project_dir):
                if resource.endswith(".project"):
                    # Compute path to resource
                    path_to_resource = path.join(project_dir, resource)
                    path_to_file = FileManager().mktemp()

                    # Copy the resource stream into a real file.
                    # BUG FIX: open in binary mode — resource_stream
                    # yields bytes, which a text-mode file rejects on
                    # Python 3.
                    with open(path_to_file, 'wb') as f:
                        for chunk in resource_stream(logic_resource_pkg, path_to_resource):
                            f.write(chunk)

                    # Extract metadata from project file
                    internal_name, deps = self.__extract_metadata(path_to_file)
                    metadata[internal_name] = Metadata(project, deps)

                    break

        # 2nd pass to create and store projects. This way the internal
        # names are entirely hidden from the user.
        # (items() instead of the Python-2-only iteritems().)
        for (i, (project, deps)) in metadata.items():
            p = Project(project, *[metadata[d].project for d in deps])
            p.internal_name = i
            self._projects[project] = p
            setattr(self, project.replace('-', '_'), p)
            _logger.info("Found project %s that depends on: %s", project, p.dependencies)
示例#8
0
def _classpath():
	"""Return filesystem paths of every .jar bundled in sklearn2pmml.resources."""
	listing = pkg_resources.resource_listdir("sklearn2pmml.resources", "")
	return [
		pkg_resources.resource_filename("sklearn2pmml.resources", entry)
		for entry in listing
		if entry.endswith(".jar")
	]
 def list_test_resources(cls, root, test_name):
     """Map resource module names to their dotted paths under a test package."""
     test_root = '{}.{}'.format(root, test_name)
     resources = {}
     for entry in resource_listdir(test_root, ''):
         if not cls.is_resource(entry):
             continue
         short_name = entry.split('.py')[0]
         resources[short_name] = '{}.{}'.format(test_root, short_name)
     return resources
示例#10
0
文件: common.py 项目: 08haozi/uliweb
def extract_dirs(mod, path, dst, verbose=False, exclude=None, exclude_ext=None, recursion=True, replace=True):
    """Extract package resources from ``mod``/``path`` into ``dst``.

    Args:
        mod: package (module) name holding the resources
        path: resource path inside the package
        dst: output directory on the filesystem
        verbose: print progress messages
        exclude: extra entry names to skip (on top of VCS dirs)
        exclude_ext: extra file extensions to skip
        recursion: descend into sub-directories when True
        replace: overwrite existing files (passed through to extract_file)
    """
    default_exclude = [".svn", "_svn", ".git"]
    default_exclude_ext = [".pyc", ".pyo", ".bak", ".tmp"]
    exclude = exclude or []
    exclude_ext = exclude_ext or []
    if not os.path.exists(dst):
        os.makedirs(dst)
        if verbose:
            # BUG FIX: function-call form so the module also parses
            # under Python 3 (the old `print` statement was py2-only).
            print("Make directory %s" % dst)
    for r in pkg.resource_listdir(mod, path):
        if r in exclude or r in default_exclude:
            continue
        fpath = os.path.join(path, r)
        if pkg.resource_isdir(mod, fpath):
            if recursion:
                extract_dirs(mod, fpath, os.path.join(dst, r), verbose, exclude, exclude_ext, recursion, replace)
        else:
            ext = os.path.splitext(fpath)[1]
            if ext in exclude_ext or ext in default_exclude_ext:
                continue
            extract_file(mod, fpath, dst, verbose, replace)
示例#11
0
    def get_jvm_args():
        """Build the JVM argument list for launching embedded Neo4j.

        The classpath comes from NEO4J_PYTHON_CLASSPATH if set,
        otherwise from the .jar files bundled in the ``javalib``
        resource directory, falling back to '.'. Extra arguments come
        from NEO4J_PYTHON_JVMARGS, and debug flags are appended when
        DEBUG is set to "true".
        """
        # Classpath set by environment var
        classpath = os.getenv('NEO4J_PYTHON_CLASSPATH', None)
        if classpath is None:
            # Classpath set by finding bundled jars
            jars = []
            from pkg_resources import resource_listdir, resource_filename
            for name in resource_listdir(__name__, 'javalib'):
                if name.endswith('.jar'):
                    jars.append(resource_filename(__name__, "javalib/%s" % name))
            if len(jars) > 0:
                divider = ';' if sys.platform == "win32" else ':'
                classpath = divider.join(jars)
            else:
                # Last resort
                classpath = '.'

        # BUG FIX: drop empty entries — splitting an unset/empty env
        # var used to yield [''], passing a bogus empty arg to the JVM.
        jvmargs = [arg for arg in os.getenv('NEO4J_PYTHON_JVMARGS', "").split(" ") if arg]
        jvmargs = jvmargs + ['-Djava.class.path=' + classpath]

        # BUG FIX: the original used `is "true"`, which compares object
        # identity, not equality, and is never reliably True.
        if os.getenv('DEBUG', None) == "true":
            jvmargs = jvmargs + ['-Xdebug', '-Xnoagent', '-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8000']

        return jvmargs
示例#12
0
    def _unpack_dir(self, resource_dir, dst_dir):
        """Unpack every resource under ``resource_dir`` into ``dst_dir``.

        Existing destination files are left untouched. Raises Exception
        when the source resource is missing, is not a directory, or the
        destination directory cannot be created.
        """
        if not pkg_resources.resource_exists(__name__, resource_dir):
            raise Exception(
                "Cannot unpack directory: {0} doesn't exist in the package".format(
                    resource_dir
                )
            )

        if not pkg_resources.resource_isdir(__name__, resource_dir):
            raise Exception(
                "Cannot unpack directory: {0} is not a directory".format(resource_dir)
            )

        self.__create_dir(dst_dir)
        if not os.path.exists(dst_dir):
            raise Exception(
                "Cannot unpack directory: cannot create directory {0}".format(dst_dir)
            )

        for entry in pkg_resources.resource_listdir(__name__, resource_dir):
            target = os.path.join(dst_dir, entry)
            if not os.path.exists(target):
                # Resource names use "/" separators regardless of OS.
                self._unpack_resource(target, "/".join((resource_dir, entry)), False)
def generateReportFromCatalogue(catalogue, directory):
    """Given a RequirementsCatalogue object containing test results,
    generate a report in the given directory
    """
    # Mirror the static report template into the output directory.
    for resource in pkg_resources.resource_listdir('corejet.visualization', 'report-template'):
        src = pkg_resources.resource_filename(
            'corejet.visualization',
            os.path.join('report-template', resource),
        )
        if os.path.isdir(src):
            shutil.copytree(src, os.path.join(directory, os.path.basename(src)))
        else:
            shutil.copy(src, directory)

    # Load the corejet-to-JIT XSLT stylesheet.
    with pkg_resources.resource_stream(
        'corejet.visualization', os.path.join('xslt', 'corejet-to-jit.xsl')
    ) as stream:
        xslt_tree = lxml.etree.parse(stream)
    transform = lxml.etree.XSLT(xslt_tree)

    # Transform the serialized catalogue into JIT-consumable JavaScript.
    target_tree = transform(catalogue.serialize())

    with open(os.path.join(directory, 'corejet-requirements.js'), 'w') as output:
        output.write(str(target_tree))
示例#14
0
def generate_pkg_resources(pkg, module_name, re_filename=None):
    """Yield filesystem paths of the resources in ``pkg``/``module_name``.

    When ``re_filename`` is given, only filenames matching that pattern
    are yielded; yields nothing if the resource is not a directory.
    """
    pkg_dirname = os.path.dirname(importlib.import_module(pkg).__file__)
    if not resource_isdir(pkg, module_name):
        return
    for filename in resource_listdir(pkg, module_name):
        if re_filename is not None and not re_filename.match(filename):
            continue
        yield os.path.join(pkg_dirname, module_name, filename)
示例#15
0
    def _unpack_resource(self, resource_path, resource_name, resource_executable):
        """Unpack one package resource to the filesystem at ``resource_path``.

        Directories are created and unpacked recursively; files are
        written through a temp file (optionally marked executable) and
        then copied into place. Missing resources are silently skipped.
        """
        if not pkg_resources.resource_exists(__name__, resource_name):
            return

        if pkg_resources.resource_isdir(__name__, resource_name):
            self.__create_dir(resource_path)
            for f in pkg_resources.resource_listdir(__name__, resource_name):
                if f == "":
                    # TODO(beng): Figure out why this happens
                    continue
                # TODO: Handle executable resources in directory
                self._unpack_resource(
                    os.path.join(resource_path, f),
                    # BUG FIX: pkg_resources resource names always use
                    # "/" separators (as _unpack_dir already does);
                    # os.path.join would produce "\\" on Windows.
                    "/".join((resource_name, f)),
                    False,
                )
        else:
            with closable_named_temporary_file(
                prefix=resource_path + os.extsep
            ) as outf:
                outf.write(pkg_resources.resource_string(__name__, resource_name))
                if resource_executable and hasattr(os, "fchmod"):
                    st = os.fstat(outf.fileno())
                    os.fchmod(outf.fileno(), st.st_mode | stat.S_IXUSR)
                outf.close()
                shutil.copy(outf.name, resource_path)
示例#16
0
def load_graphs():
    '''load graphs from mavgraphs.xml

    Collects graph definitions from mavgraphs.xml, any *.xml under
    ~/.mavproxy, and the graphs bundled with MAVProxy, then sorts them
    by name into mestate.graphs.
    '''
    mestate.graphs = []
    gfiles = ['mavgraphs.xml']
    if 'HOME' in os.environ:
        for dirname, dirnames, filenames in os.walk(os.path.join(os.environ['HOME'], ".mavproxy")):
            for filename in filenames:
                if filename.lower().endswith('.xml'):
                    gfiles.append(os.path.join(dirname, filename))
    # (renamed loop variable: `file` shadowed the builtin)
    for fname in gfiles:
        if not os.path.exists(fname):
            continue
        # BUG FIX: close the file handle explicitly instead of leaking
        # it to the garbage collector.
        with open(fname) as f:
            graphs = load_graph_xml(f.read(), fname)
        if graphs:
            mestate.graphs.extend(graphs)
            mestate.console.writeln("Loaded %s" % fname)
    # also load the built in graphs
    dlist = pkg_resources.resource_listdir("MAVProxy", "tools/graphs")
    for f in dlist:
        raw = pkg_resources.resource_stream("MAVProxy", "tools/graphs/%s" % f).read()
        graphs = load_graph_xml(raw, None)
        if graphs:
            mestate.graphs.extend(graphs)
            mestate.console.writeln("Loaded %s" % f)
    mestate.graphs = sorted(mestate.graphs, key=lambda g: g.name)
示例#17
0
文件: setup.py 项目: plq/soaplib
    def loadTestsFromModule(self, module):
        """Load unit test (skip 'interop' package).

        Hacked from the version in 'setuptools.command.test.ScanningLoader'.
        """
        tests = [TestLoader.loadTestsFromModule(self, module)]

        if hasattr(module, '__path__'):
            for entry in resource_listdir(module.__name__, ''):
                if entry == 'interop':
                    # These tests require installing a bunch of extra
                    # code:  see 'src/soaplib/test/README'.
                    continue

                if entry.endswith('.py') and entry != '__init__.py':
                    submodule = module.__name__ + '.' + entry[:-3]
                elif resource_exists(module.__name__, entry + '/__init__.py'):
                    # Sub-package: recurse into it by dotted name.
                    submodule = module.__name__ + '.' + entry
                else:
                    continue
                tests.append(self.loadTestsFromName(submodule))

        return self.suiteClass(tests)
示例#18
0
def install_manpages(man_dir):
    """Install COT's manual pages.

    Args:
      man_dir (str): Base directory where manpages should be installed.
    Returns:
      tuple: (result, message)
    """
    installed_any = False
    some_preinstalled = False

    try:
        for filename in resource_listdir("COT", "docs/man"):
            src_path = resource_filename("COT", os.path.join("docs/man",
                                                             filename))
            prev, new = _install_manpage(src_path, man_dir)
            some_preinstalled = some_preinstalled or prev
            installed_any = installed_any or new
    except (IOError, OSError, HelperError) as exc:
        return False, "INSTALLATION FAILED: " + str(exc)

    # Pick the most specific success message.
    if not some_preinstalled:
        msg = "successfully installed to {0}".format(man_dir)
    elif installed_any:
        msg = "successfully updated in {0}".format(man_dir)
    else:
        msg = "already installed, no updates needed"

    return True, msg
示例#19
0
def verify_manpages(man_dir):
    """Verify installation of COT's manual pages.

    Args:
      man_dir (str): Base directory where manpages should be found.
    Returns:
      tuple: (result, message)
    """
    for filename in resource_listdir("COT", "docs/man"):
        src_path = resource_filename("COT", os.path.join("docs/man", filename))
        # The file extension names the man section this page belongs in.
        section = os.path.splitext(filename)[1][1:]
        section_dir = os.path.join(man_dir, "man{0}".format(section))
        if not os.path.exists(section_dir):
            return True, "DIRECTORY NOT FOUND: {0}".format(section_dir)

        dest_path = os.path.join(section_dir, filename)
        if not os.path.exists(dest_path):
            return True, "NOT FOUND"
        if not filecmp.cmp(src_path, dest_path):
            logger.verbose("File %s needs to be updated", dest_path)
            return True, "NEEDS UPDATE"
        logger.verbose("File %s does not need to be updated",
                       dest_path)

    return True, "already installed, no updates needed"
示例#20
0
	def __init__(self):
		"""Imports all plugin modules based on platform"""
		for modname in pkg_resources.resource_listdir(__name__, 'plugins'):
			# Only real plugin packages (with an __init__.py) qualify.
			if not pkg_resources.resource_exists(__name__, 'plugins/%s/__init__.py' % modname):
				continue
			plugin = __import__('plugins.%s' % modname, globals(), locals(), ['']).get_plugin()
			if plugin.platform in ('All', howl.platform):
				self[modname] = plugin
示例#21
0
def __place_template_folder(group, src, dst, gbp=False):
    """Recursively copy the ``src`` template tree of package ``group`` into ``dst``."""
    for template_file in pkg_resources.resource_listdir(group, src):
        template_path = os.path.join(src, template_file)
        template_dst = os.path.join(dst, template_file)

        if pkg_resources.resource_isdir(group, template_path):
            debug("Recursing on folder '{0}'".format(template_path))
            __place_template_folder(group, template_path, template_dst, gbp)
            continue

        try:
            debug("Placing template '{0}'".format(template_path))
            template = pkg_resources.resource_string(group, template_path)
            template_abs_path = pkg_resources.resource_filename(group, template_path)
        except IOError as err:
            error("Failed to load template "
                  "'{0}': {1}".format(template_file, str(err)), exit=True)

        if not os.path.exists(dst):
            os.makedirs(dst)
        if os.path.exists(template_dst):
            debug("Removing existing file '{0}'".format(template_dst))
            os.remove(template_dst)

        if not isinstance(template, str):
            template = template.decode('utf-8')
        with open(template_dst, 'w') as out:
            out.write(template)
        # Preserve the template's permission/time metadata.
        shutil.copystat(template_abs_path, template_dst)
示例#22
0
 def _write_report(self, receipt_files):
     """Render the HTML report index and copy the static assets.

     ``receipt_files`` maps :class:`task.TastReceipt` objects to lists
     of file paths. Returns the path of the written index.html.
     """
     # Render the index page from the bundled mako template.
     template = Template(text=pkg_resources.resource_string(
         "cass_check", "templates/report.mako"))
     index_path = os.path.join(self.report_dir, "index.html")
     self.log.info("Writing report index to {index_path}".format(
         index_path=index_path))
     with open(index_path, "w") as out:
         out.write(template.render(receipt_files=receipt_files))

     # Copy every bundled asset next to the report.
     for asset_name in pkg_resources.resource_listdir("cass_check", "assets/"):
         res_name = "assets/{asset_name}".format(asset_name=asset_name)
         dest = os.path.join(self.report_dir, res_name)
         self.log.info("Copying report asset {asset_name} to "
             "{dest}".format(asset_name=asset_name, dest=dest))
         with pkg_resources.resource_stream("cass_check", res_name) as src:
             file_util.ensure_dir(os.path.dirname(dest))
             with open(dest, "w") as out:
                 out.write(src.read())

     return index_path
示例#23
0
def list_data_dir(filename):
    """List the bundled HTML resource files.

    Frozen (e.g. PyInstaller) builds read the data directory straight
    from disk; normal runs go through pkg_resources.
    """
    if getattr(sys, 'frozen', False):
        return os.listdir(find_data_file("templates/html/resources"))
    return resource_listdir(__name__, 'templates/html/resources')
示例#24
0
文件: common.py 项目: damonchen/onion
def extract_dirs(mod, path, dst, verbose=False, exclude=None, exclude_ext=None, recursion=True, replace=True):
    """Extract package resources from ``mod``/``path`` into ``dst``.

    Args:
        mod: package (module) name holding the resources
        path: resource path inside the package
        dst: output directory on the filesystem
        verbose: print progress messages
        exclude: extra entry names to skip (on top of '.git')
        exclude_ext: extra file extensions to skip
        recursion: descend into sub-directories when True
        replace: overwrite existing files (passed through to extract_file)
    """
    default_exclude = ['.git']
    default_exclude_ext = ['.pyc', '.pyo', '.bak', '.tmp', '.swp']

    exclude = exclude or []
    exclude_ext = exclude_ext or []

    if not os.path.exists(dst):
        os.makedirs(dst)
        if verbose:
            # BUG FIX: function-call form so the module also parses
            # under Python 3 (the old `print` statement was py2-only).
            print('Make directory %s' % dst)

    for r in pkg.resource_listdir(mod, path):
        if r in exclude or r in default_exclude:
            continue

        fpath = os.path.join(path, r)
        if pkg.resource_isdir(mod, fpath):
            if recursion:
                extract_dirs(mod, fpath, os.path.join(dst, r), verbose, exclude, exclude_ext, recursion, replace)
        else:
            ext = os.path.splitext(fpath)[1]
            if ext in exclude_ext or ext in default_exclude_ext:
                continue
            extract_file(mod, fpath, dst, verbose, replace)
def suiteFromPackage(name):
    """Build a doctest suite from the modules in tests/base/<name>.

    Skips non-Python files, *_fixture.py modules, and __init__.py.
    (A leftover debug print of each filename was removed.)
    """
    layer_dir = 'base'
    files = resource_listdir(__name__, '{}/{}'.format(layer_dir, name))
    suite = unittest.TestSuite()
    for filename in files:
        if not filename.endswith('.py'):
            continue
        if filename.endswith('_fixture.py'):
            continue
        if filename == '__init__.py':
            continue

        dottedname = 'grokcore.catalog.tests.%s.%s.%s' % (
            layer_dir, name, filename[:-3])
        test = doctest.DocTestSuite(
            dottedname,
            setUp=setUpZope,
            tearDown=cleanUpZope,
            checker=checker,
            optionflags=(
                doctest.ELLIPSIS +
                doctest.NORMALIZE_WHITESPACE +
                renormalizing.IGNORE_EXCEPTION_MODULE_IN_PYTHON2))

        suite.addTest(test)

    return suite
示例#26
0
文件: common.py 项目: victorlv/uliweb
def extract_dirs(mod, path, dst, verbose=False, exclude=None, exclude_ext=None, recursion=True):
    """Extract package resources from ``mod``/``path`` into ``dst``.

    mod name
    path mod path
    dst output directory
    recursion True will extract all sub module of mod
    """
    skip_names = (exclude or []) + ['.svn', '_svn', '.git']
    skip_exts = (exclude_ext or []) + ['.pyc', '.pyo', '.bak', '.tmp']
    log = logging.getLogger('uliweb.console')

    if not os.path.exists(dst):
        os.makedirs(dst)
        if verbose:
            log.info('Make directory %s' % dst)

    for entry in pkg.resource_listdir(mod, path):
        if entry in skip_names:
            continue
        entry_path = os.path.join(path, entry)
        if pkg.resource_isdir(mod, entry_path):
            if recursion:
                extract_dirs(mod, entry_path, os.path.join(dst, entry), verbose, exclude, exclude_ext)
        else:
            if os.path.splitext(entry_path)[1] in skip_exts:
                continue
            extract_file(mod, entry_path, dst, verbose)
示例#27
0
文件: cnv.py 项目: OlyDLG/seabird
    def load_rule(self):
        """ Load the adequate rules to parse the data

            It should try all available rules, one by one, and use the one
              which fits.
        """
        rules_dir = 'rules'
        rule_files = pkg_resources.resource_listdir(__name__, rules_dir)
        rule_files = [f for f in rule_files if re.match('^cnv.*yaml$', f)]
        for rule_file in rule_files:
            text = pkg_resources.resource_string(__name__,
                    os.path.join(rules_dir, rule_file))
            # safe_load: the rule files are plain YAML data; yaml.load
            # without an explicit Loader is unsafe and rejected by
            # modern PyYAML.
            rule = yaml.safe_load(text)
            r = rule['header'] + rule['sep'] + rule['data']
            content_re = re.compile(r, re.VERBOSE)
            # Search once with the compiled pattern; the original
            # recompiled and searched the raw pattern a second time.
            match = content_re.search(self.raw_text)
            if match:
                logging.debug("Using rules from: %s" % rule_file)
                self.rule = rule
                self.parsed = match.groupdict()
                return

        # If haven't returned a rule by this point, raise an exception.
        logging.error("No rules able to parse it")
        raise CNVError(tag='noparsingrule')
示例#28
0
def load_xml_layouts():
    """Scan every loaded module for layouts/*.xml and cache the parsed metadata."""
    print("loading layouts...")
    _xml_layouts.clear()
    for m in module.get_loaded_modules():
        if not pkg_resources.resource_exists(m, "layouts"):
            continue
        for fname in pkg_resources.resource_listdir(m, "layouts"):
            if not fname.endswith("xml"):
                continue
            data = pkg_resources.resource_string(m, "layouts/" + fname)
            try:
                root = etree.fromstring(data)
                vals = {
                    "module": m,
                    "name": fname.replace(".xml", ""),
                    "type": root.tag.lower(),
                }
                # Optional attributes, copied only when present.
                if root.attrib.get("model"):
                    vals["model"] = root.attrib["model"]
                if root.attrib.get("inherit"):
                    vals["inherit"] = root.attrib["inherit"]
                if root.attrib.get("priority"):
                    vals["priority"] = int(root.attrib["priority"])
                vals["layout"] = data.decode()
                _xml_layouts[vals["name"]] = vals
            except Exception as e:
                print("ERROR: Failed to load XML layout: %s/%s (%s)" % (m, fname, e))
    print("  %d layouts loaded" % len(_xml_layouts))
示例#29
0
def make_css(minify=False):
    """Concatenate all modules' css files into a content-hashed bundle under static/css."""
    print("building css...")
    global _css_file
    data = []
    for m in module.get_loaded_modules():
        if not pkg_resources.resource_exists(m, "css"):
            continue
        for fname in sorted(pkg_resources.resource_listdir(m, "css")):
            if fname.endswith("css"):
                data.append(pkg_resources.resource_string(m, "css/" + fname).decode("utf-8"))
    print("  %d css files loaded" % len(data))
    if not data:
        return
    buf = ("\n".join(data)).encode("utf-8")
    # Short content hash used to version the bundle filename.
    h = hashlib.md5(buf).hexdigest()[:8]
    if not os.path.exists("static/css"):
        os.makedirs("static/css")
    with open("static/css/netforce-%s.css" % h, "wb") as out:
        out.write(buf)
    if minify:
        if not os.path.exists("static/css/netforce-%s-min.css" % h):
            print("  minifying css...")
            os.system("yui-compressor --type css static/css/netforce-%s.css > static/css/netforce-%s-min.css" % (h, h))
        _css_file = "netforce-%s-min.css" % h
    else:
        _css_file = "netforce-%s.css" % h
    print("  => static/css/%s" % _css_file)
示例#30
0
    def loadTestsFromModule(self, module):
        """Return a suite of all tests cases contained in the given module

        If the module is a package, load tests from all the modules in it.
        If the module has an ``additional_tests`` function, call it and add
        the return value to the tests.
        """
        tests = []
        if module.__name__ != 'setuptools.tests.doctest':  # ugh
            tests.append(TestLoader.loadTestsFromModule(self, module))

        if hasattr(module, "additional_tests"):
            tests.append(module.additional_tests())

        if hasattr(module, '__path__'):
            for entry in resource_listdir(module.__name__, ''):
                if entry.endswith('.py') and entry != '__init__.py':
                    submodule = module.__name__ + '.' + entry[:-3]
                elif resource_exists(module.__name__, entry + '/__init__.py'):
                    # Sub-package: recurse into it by dotted name.
                    submodule = module.__name__ + '.' + entry
                else:
                    continue
                tests.append(self.loadTestsFromName(submodule))

        if len(tests) == 1:
            return tests[0] # don't create a nested suite for only one return
        return self.suiteClass(tests)
示例#31
0
文件: asset.py 项目: slacy/pyramid
 def listdir(self, resource_name):
     """Return the first directory listing found along the search path, else None."""
     for package, rname in self.search_path(resource_name):
         if not pkg_resources.resource_exists(package, rname):
             continue
         return pkg_resources.resource_listdir(package, rname)
示例#32
0
文件: copydir.py 项目: ronnix/pyramid
def copy_dir(source, dest, vars, verbosity, simulate, indent=0,
             sub_vars=True, interactive=False, overwrite=True,
             template_renderer=None, out_=sys.stdout):
    """
    Copies the ``source`` directory to the ``dest`` directory.

    ``source``: Either a filesystem path, or a ``(package_name,
    resource_path)`` tuple to copy out of a package via pkg_resources.

    ``vars``: A dictionary of variables to use in any substitutions.

    ``verbosity``: Higher numbers will show more about what is happening.

    ``simulate``: If true, then don't actually *do* anything.

    ``indent``: Indent any messages by this amount.

    ``sub_vars``: If true, variables in ``_tmpl`` files and ``+var+``
    in filenames will be substituted.

    ``overwrite``: If false, then don't ever overwrite anything.

    ``interactive``: If you are overwriting a file and interactive is
    true, then ask before overwriting.

    ``template_renderer``: This is a function for rendering templates (if you
    don't want to use string.Template).  It should have the signature
    ``template_renderer(content_as_string, vars_as_dict,
    filename=filename)``.
    """
    def out(msg):
        out_.write(msg)
        out_.write('\n')
        out_.flush()
    # This allows you to use a leading +dot+ in filenames which would
    # otherwise be skipped because leading dots make the file hidden:
    vars.setdefault('dot', '.')
    vars.setdefault('plus', '+')
    # A tuple source means (package_name, resource_path) via pkg_resources.
    use_pkg_resources = isinstance(source, tuple)
    if use_pkg_resources:
        names = sorted(pkg_resources.resource_listdir(source[0], source[1]))
    else:
        names = sorted(os.listdir(source))
    pad = ' '*(indent*2)
    if not os.path.exists(dest):
        if verbosity >= 1:
            out('%sCreating %s/' % (pad, dest))
        if not simulate:
            makedirs(dest, verbosity=verbosity, pad=pad)
    elif verbosity >= 2:
        out('%sDirectory %s exists' % (pad, dest))
    for name in names:
        if use_pkg_resources:
            # Resource paths always use "/", regardless of platform.
            full = '/'.join([source[1], name])
        else:
            full = os.path.join(source, name)
        reason = should_skip_file(name)
        if reason:
            if verbosity >= 2:
                reason = pad + reason % {'filename': full}
                out(reason)
            continue # pragma: no cover
        if sub_vars:
            dest_full = os.path.join(dest, substitute_filename(name, vars))
        else:
            # BUG FIX: dest_full was previously left unbound when
            # sub_vars was false, raising NameError a few lines below.
            dest_full = os.path.join(dest, name)
        sub_file = False
        if dest_full.endswith('_tmpl'):
            dest_full = dest_full[:-5]
            sub_file = sub_vars
        if use_pkg_resources and pkg_resources.resource_isdir(source[0], full):
            if verbosity:
                out('%sRecursing into %s' % (pad, os.path.basename(full)))
            copy_dir((source[0], full), dest_full, vars, verbosity, simulate,
                     indent=indent+1,
                     sub_vars=sub_vars, interactive=interactive,
                     template_renderer=template_renderer, out_=out_)
            continue
        elif not use_pkg_resources and os.path.isdir(full):
            if verbosity:
                out('%sRecursing into %s' % (pad, os.path.basename(full)))
            copy_dir(full, dest_full, vars, verbosity, simulate,
                     indent=indent+1,
                     sub_vars=sub_vars, interactive=interactive,
                     template_renderer=template_renderer, out_=out_)
            continue
        elif use_pkg_resources:
            content = pkg_resources.resource_string(source[0], full)
        else:
            f = open(full, 'rb')
            content = f.read()
            f.close()
        if sub_file:
            try:
                content = substitute_content(
                    content, vars, filename=full,
                    template_renderer=template_renderer
                    )
            except SkipTemplate:
                continue # pragma: no cover
            if content is None:
                continue  # pragma: no cover
        already_exists = os.path.exists(dest_full)
        if already_exists:
            f = open(dest_full, 'rb')
            old_content = f.read()
            f.close()
            # Identical content: nothing to do for this entry.
            if old_content == content:
                if verbosity:
                    out('%s%s already exists (same content)' %
                          (pad, dest_full))
                continue # pragma: no cover
            if interactive:
                if not query_interactive(
                    native_(full, fsenc), native_(dest_full, fsenc),
                    native_(content, fsenc), native_(old_content, fsenc),
                    simulate=simulate, out_=out_):
                    continue
            elif not overwrite:
                continue # pragma: no cover
        if verbosity and use_pkg_resources:
            out('%sCopying %s to %s' % (pad, full, dest_full))
        elif verbosity:
            out(
                '%sCopying %s to %s' % (pad, os.path.basename(full),
                                        dest_full))
        if not simulate:
            f = open(dest_full, 'wb')
            f.write(content)
            f.close()
示例#33
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

""" Test load qc configurations (utils.load_cfg)
"""

import os.path
import pkg_resources

from cotede.utils import load_cfg, cotederc


# Names of the built-in QC configurations (``.json`` files, extension
# stripped) bundled under cotede's qc_cfg resource directory.
CFG = [name[:-5]
       for name in pkg_resources.resource_listdir('cotede', 'qc_cfg')
       if name.endswith('.json')]


def test_no_local_duplicate_cfg():
    """Ensure no default QC config is shadowed by a local copy.

    A local copy of a default cfg json file could mask breakage in the
    distributed defaults, letting the tests silently fall back to a
    local, non-distributed configuration.
    """
    local_cfg_dir = os.path.join(cotederc(), "cfg")
    for name in CFG:
        candidate = os.path.join(local_cfg_dir, "%s.json" % name)
        assert not os.path.exists(candidate), \
                "Redundant local cfg file: %s" % name


def test_inout():
示例#34
0
def get_res_path(name):
    """Return the first resource in this package's parent directory whose
    filename contains *name* (None when nothing matches)."""
    for entry in pkg_resources.resource_listdir(__name__, "../"):
        if name in entry:
            return entry
示例#35
0
    def do_startup(self):
        """Application startup: acquire the single-instance emulator lock,
        register actions, build the menus (including the examples
        sub-menu), load GSettings, and start the emulator server
        components."""
        # super-call needs to be in this form?!
        Gtk.Application.do_startup(self)

        # Get the emulator lock and terminate if something already has it
        self.lock = EmulatorLock('sense_emu_gui')
        try:
            self.lock.acquire()
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit — consider narrowing to the lock's error type.
            dialog = Gtk.MessageDialog(
                message_type=Gtk.MessageType.ERROR,
                title=_('Error'),
                text=_(
                    'Another process is currently acting as the Sense HAT '
                    'emulator'),
                buttons=Gtk.ButtonsType.CLOSE)
            try:
                dialog.run()
            finally:
                dialog.destroy()
                self.quit()
                return

        # Helper: create a Gio action, wire its activate handler, and
        # register it on the application.
        def make_action(action_id, handler, param_type=None):
            action = Gio.SimpleAction.new(action_id, param_type)
            action.connect('activate', handler)
            self.add_action(action)
        make_action('example', self.on_example, GLib.VariantType.new('s'))
        make_action('play',    self.on_play)
        make_action('prefs',   self.on_prefs)
        make_action('help',    self.on_help)
        make_action('about',   self.on_about)
        make_action('quit',    self.on_quit)

        # Build the application menubar from the bundled menu.ui resource.
        builder = Gtk.Builder(translation_domain=__project__)
        builder.add_from_string(
            pkg_resources.resource_string(__name__, 'menu.ui').decode('utf-8'))
        self.props.menubar = builder.get_object('app-menu')

        # Construct the open examples sub-menu
        for directory, label in [
                # I18N: Easy examples
                ('basic',        _('Simple')),
                # I18N: Intermediate skill examples
                ('intermediate', _('Intermediate')),
                # I18N: Difficult examples
                ('advanced',     _('Advanced')),
                ]:
            examples = Gio.Menu.new()
            # NOTE: The use of literal "/" below is correct; resource paths
            # are not file-system paths and always use "/"
            for example in sorted(
                    pkg_resources.resource_listdir(__name__, 'examples/%s' % directory)):
                if example.endswith('.py'):
                    # "_" is the menu-mnemonic marker in GTK labels, so
                    # double it to display a literal underscore.
                    examples.append(
                        example.replace('_', '__'),
                        Gio.Action.print_detailed_name(
                            'app.example',
                            GLib.Variant.new_string('%s/%s' % (directory, example))
                            )
                        )
            builder.get_object('example-submenu').append_submenu(label, examples)

        # Construct the settings database and tweak initial value of
        # simulate-imu and simulate-env if we're running on a slow Pi, and the
        # user hasn't explicitly set a value yet
        if pkg_resources.resource_exists(__name__, 'gschemas.compiled'):
            source = Gio.SettingsSchemaSource.new_from_directory(
                os.path.dirname(pkg_resources.resource_filename(__name__, 'gschemas.compiled')),
                Gio.SettingsSchemaSource.get_default(), True)
        else:
            source = Gio.SettingsSchemaSource.get_default()
        schema = Gio.SettingsSchemaSource.lookup(
            source, self.props.application_id, False)
        assert schema is not None
        self.settings = Gio.Settings.new_full(schema, None, None)
        if self.settings.get_user_value('simulate-imu') is None:
            enable_simulators = not slow_pi()
            self.settings.set_boolean('simulate-imu', enable_simulators)
            self.settings.set_boolean('simulate-env', enable_simulators)

        # Construct the emulator servers
        self.imu = IMUServer(simulate_world=self.settings.get_boolean('simulate-imu'))
        self.pressure = PressureServer(simulate_noise=self.settings.get_boolean('simulate-env'))
        self.humidity = HumidityServer(simulate_noise=self.settings.get_boolean('simulate-env'))
        self.screen = ScreenClient()
        self.stick = StickServer()

        # Connect the settings to the components
        self.settings.connect('changed', self.settings_changed)
示例#36
0
def list_species():
    """Print and return the species names available under
    ``data/chromsizes/`` (filenames stripped at the first dot)."""
    names = [entry.split('.')[0]
             for entry in resource_listdir(__name__, 'data/chromsizes/')]
    print(names)
    return names
示例#37
0
def get_all_data():
    """List every resource bundled in this package's ``data`` directory."""
    entries = resource_listdir(__name__, 'data')
    return entries
示例#38
0
def listTemplates():
    """Return the notebook template files shipped with jupylecture."""
    return [name
            for name in pkg_resources.resource_listdir('jupylecture', 'templates')
            if re.match('.*ipynb', name)]
示例#39
0
    `validate`. The template can thus compare it against strings and get
    sensible version ordering.
    """
    def __init__(self, name):
        """Load template *name* and compile its version-gate validator."""
        self._template = _env.get_template(name)
        # The template exposes a mini-schema that only checks the
        # "version" field; full validation happens per-version later.
        version_schema = json.loads(self._template.module.validate_version())
        self._version_validator = _make_validator(version_schema)

    @staticmethod
    def _get_version(doc):
        """Extract ``doc["version"]``, coercing strings to StrictVersion."""
        version = doc["version"]
        if isinstance(version, str):
            return StrictVersion(version)
        return version

    def validate(self, doc):
        """Validate *doc*: first its version field, then against the full
        schema the template renders for that version."""
        self._version_validator.validate(doc)
        doc_version = self._get_version(doc)
        full_schema = json.loads(
            self._template.module.validate(version=doc_version))
        _make_validator(full_schema).validate(doc)


# Build a module-level validator for every schema resource in this package:
# plain ``*.json`` files become static validators, while ``*.json.j2``
# Jinja templates become MultiVersionValidator instances.  Each validator
# is exported under the filename (minus extension), upper-cased.
for name in pkg_resources.resource_listdir(__name__, '.'):
    if name.endswith('.json'):
        reader = codecs.getreader('utf-8')(pkg_resources.resource_stream(
            __name__, name))
        schema = json.load(reader)
        globals()[name[:-5].upper()] = _make_validator(schema)
    elif name.endswith('.json.j2'):
        globals()[name[:-8].upper()] = MultiVersionValidator(name)
示例#40
0
# TODO: this doesn't handle fragment identifiers properly
def load_meta_schema(dictionary):
    """
    Load any meta schema specified in a schema dictionary.

    If the dictionary carries a ``$ref`` pointing at a ``.json``
    resource, that resource is loaded and returned; otherwise the
    dictionary is returned unchanged.

    :param dictionary: dict
    :return: dict
    """
    # TODO: this doesn't handle fragment identifiers properly
    if u'$ref' in dictionary and '.json' in dictionary[u'$ref']:
        return load_resource_schema(dictionary[u'$ref'])
    return dictionary


# Parse each schema JSON file directly inside the schemas subdirectory
# (subdirectories are not descended into) and build a name -> schema map.

# Map each ``*.json`` resource directly under ``schema_dir`` to its parsed
# schema (keyed by filename without extension), then resolve any top-level
# ``$ref`` meta-schemas via load_meta_schema.
schemas = dictionary_map(load_meta_schema, {os.path.splitext(schema)[0]: load_resource_schema(schema)
                                            for schema in pkg_resources.resource_listdir(__name__, schema_dir)
                                            if ".json" in schema})

class SchemaBasedRESTEndpoint(object):
    """A REST endpoint whose behaviour is driven by a JSON schema.

    The schema must declare an ``id`` property and a ``type`` property
    whose ``enum`` contains exactly one value; that value becomes the
    endpoint's name.
    """
    exposed = True

    def __init__(self, schema):
        # TODO: make a schema for validating schema
        assert 'id' in schema['properties'], "Schema does not specify an 'id' property:\n\n{}".format(
            json.dumps(schema, sort_keys=True, indent=4, separators=(',', ': ')))
        # BUG FIX: ``is 1`` compared identity, not value (SyntaxWarning on
        # Python >= 3.8 and implementation-dependent); use == here.
        assert len(schema['properties']['type']['enum']) == 1, \
            "Schema must specify a unique 'type' property:\n\n{}".format(
                json.dumps(schema, sort_keys=True, indent=4, separators=(',', ': ')))
        self.schema = schema
        self.name = schema['properties']['type']['enum'][0]
示例#41
0
 def listdir(self, resource_name):
     """List the contents of *resource_name* if it exists in this
     loader's package; otherwise return None."""
     rel_path = self.get_path(resource_name)
     if not pkg_resources.resource_exists(self.pkg_name, rel_path):
         return None
     return pkg_resources.resource_listdir(self.pkg_name, rel_path)
示例#42
0
def resource_ls(path):
    """List the contents of *path* inside this package via setuptools."""
    listing = resource_listdir(MODULE, path)
    return listing
示例#43
0
文件: Build.py 项目: wyang17/SQuIRE
def main(**kwargs):
    """Install the software bundled with SQuIRE.

    Parses command-line options (unless ``args`` is supplied via
    *kwargs* by a top-level wrapper), unpacks the requested software
    archives into the build folder, builds them where needed, and
    appends their locations to the user's PATH via ~/.bashrc and
    ~/.bash_profile.
    """

    ######## ARGUMENTS ###########
    #check if already args is provided, i.e. main() is called from the top level script
    args = kwargs.get(
        'args',
        None)  # if no arguments, the below parser statements will be printed
    if args is None:  ## i.e. standalone script called from command line in normal way
        parser = argparse.ArgumentParser(
            description="Installs required software")
        parser._optionals.title = "Arguments"
        parser.add_argument(
            "-b",
            "--build_folder",
            help=
            "Destination folder for downloaded UCSC file(s) (optional; default='squire_build')",
            type=str,
            default="squire_build",
            metavar="<folder>")
        parser.add_argument(
            "-s",
            "--software",
            help=
            "Install required SQuIRE software and add to PATH - specify 'all' or provide comma-separated list (no spaces) of: STAR,bedtools,samtools,stringtie (optional; default = False)",
            type=str,
            metavar="<software>",
            default=False)
        parser.add_argument(
            "-v",
            "--verbosity",
            help=
            "Want messages and runtime printed to stderr (optional; default=False)",
            action="store_true",
            default=False)
        args, extra_args = parser.parse_known_args()

    ###### I/O ############
    outfolder = args.build_folder
    software = args.software
    verbosity = args.verbosity

    ######### START TIMING SCRIPT ############
    if verbosity:
        startTime = datetime.now()
        print("start time is:" + str(startTime) + '\n',
              file=sys.stderr)  # Prints start time
        print(os.path.basename(__file__) + '\n',
              file=sys.stderr)  #prints script name to std err
        print("Script Arguments" + '\n' + "=================",
              file=sys.stderr)  #
        args_dict = vars(args)
        # BUG FIX: dict.iteritems() is Python 2 only; this function already
        # relies on the Python 3 print(..., file=...) form, so use items().
        for option, arg in args_dict.items():
            print(str(option) + "=" + str(arg),
                  file=sys.stderr)  #prints all arguments to std err
        print("\n", file=sys.stderr)

    ######## CHECK IF FOLDER DOESN'T EXIST, OTHERWISE CREATE #########

    make_dir(outfolder)

    if software:
        avail_software = ["STAR", "samtools", "bedtools", "stringtie"]
        if software == "all":
            software = avail_software
        else:
            software = software.split(",")

        for i in software:
            if i not in avail_software:
                warnings.warn(i + " not in available software")

        # Bundled archives whose names mention any requested tool.
        software_list = [
            f for f in pkg_resources.resource_listdir('software', '')
            if any(i in f for i in software)
        ]

        software_folder = pkg_resources.resource_filename('software', '')

        homepath = os.path.expanduser('~')
        bashrcfile = homepath + "/" + ".bashrc"
        bashprofile_file = homepath + "/" + ".bash_profile"
        outfolder = os.path.abspath(outfolder)
        if verbosity:
            print("Looking for software in" + str(software_folder) + "..." +
                  "\n",
                  file=sys.stderr)
        bashrc = open(bashrcfile, 'a')
        bashprofile = open(bashprofile_file, 'a')
        for package in software_list:
            package_path = software_folder + "/" + package
            package_file = get_basename(package).replace(".tar", "")
            new_package_path = outfolder + "/" + package_file
            if verbosity:
                print("Decompressing " + package_file + "..." + "\n",
                      file=sys.stderr)
            # NOTE(review): extractall() without member sanitization can
            # write outside new_package_path for malicious archives; these
            # are bundled archives, so acceptable, but worth confirming.
            with tarfile.TarFile.open(package_path, 'r') as tarredgzippedFile:
                tarredgzippedFile.extractall(new_package_path)
            if verbosity:
                print("Adding permissions for " + package_file +
                      "and adding to PATH..." + "\n",
                      file=sys.stderr)
            # Some tools need a build step and/or ship their binaries in a
            # tool-specific subdirectory; resolve the directory to put on
            # PATH per tool.
            if "bedtools" in package:
                make_folder = new_package_path + "/" "bedtools2"
                make_command_list = ["make"]
                make_command = "".join(make_command_list)
                sp.check_call(["/bin/sh", "-c", make_command], cwd=make_folder)
                new_package_path = make_folder + "/" + "bin"
            elif "samtools" in package:
                make_folder = new_package_path + "/" "samtools-1.1"
                make_command_list = ["make"]
                make_command = "".join(make_command_list)
                sp.check_call(["/bin/sh", "-c", make_command], cwd=make_folder)
                new_package_path = make_folder
            elif "STAR" in package:
                new_package_path = new_package_path + "/STAR-2.5.3a/bin/Linux_x86_64"
            elif "stringtie" in package:
                new_package_path = new_package_path + "/stringtie-1.3.3b.Linux_x86_64"
            newline = """export PATH='""" + new_package_path + """':$PATH"""
            bashrc.writelines(newline + "\n")
            bashprofile.writelines(newline + "\n")

        bashrc.close()
        bashprofile.close()

    ####### STOP TIMING SCRIPT #######################
    if verbosity:
        endTime = datetime.now()
        print('end time is: ' + str(endTime) + "\n",
              file=sys.stderr)  # print end time
        print('it took: ' + str(endTime - startTime) + "\n",
              file=sys.stderr)  # print total time
def list_schemes(package):
    """Return the sorted example workflow (OWS) files bundled with
    *package*."""
    entries = pkg_resources.resource_listdir(package.__name__, ".")
    return sorted(entry for entry in entries if is_ows(entry))
示例#45
0
def aph_test_ann_files():
    """Locate the APh test-set annotation files.

    Returns a ``(directory, filenames)`` tuple for the bundled
    annotation resources."""
    rel_path = 'data/aph_corpus/testset/ann'
    ann_dir = pkg_resources.resource_filename('citation_extractor', rel_path)
    ann_files = pkg_resources.resource_listdir('citation_extractor', rel_path)
    return ann_dir, ann_files
示例#46
0
from pkg_resources import (resource_stream, resource_listdir)
from io import StringIO
import argparse
import datetime
import re
import os
import subprocess
import sys
import getpass


# Discover the license templates bundled alongside this module.
# BUG FIX: the dot in ".txt" is now escaped — the previous pattern
# (unescaped dot) also matched names such as "template-fooXtxt".
LICENSES = []
for fname in sorted(resource_listdir(__name__, '.')):
    match = re.match(r'template-([a-z0-9_]+)\.txt', fname)
    if match:
        LICENSES.append(match.groups()[0])

DEFAULT_LICENSE = "bsd3"


# To extend language formatting support with a new language, add an item in
# LANGS dict:
# "language_suffix":"comment_name"
# where "language_suffix" is the suffix of your language and "comment_name" is
# one of the comment types supported and listed in LANG_CMT:
# text : no comment
# c    : /* * */
# unix : #
# lua  : --- --

# If you want to add a new comment type, just add an item to LANG_CMT:
示例#47
0
 def css_files(cls):
     """Return the de-duplicated, sorted CSS resources shipped with the
     class's CSS package."""
     names = pkg_resources.resource_listdir(cls.css_package(), '.')
     css_names = sorted(name for name in names if '.css' in name)
     return cls._uniq(css_names)
示例#48
0
class CyAPI(DetectionsMixin, DevicesMixin, DeviceCommandsMixin,
            ExceptionsMixin, FocusViewMixin, GlobalListMixin,
            InstaQueriesMixin, MemoryProtectionMixin, OpticsPoliciesMixin,
            PackagesMixin, PoliciesMixin, RulesMixin, RulesetMixin,
            ThreatsMixin, UsersMixin, ZonesMixin, MTCHealthCheckMixin,
            MTCPolicyTemplatesMixin, MTCReportsMixin, MTCTenantsMixin,
            MTCUsersMixin):
    """The main class that should be used. Each of the Mixins above provides the
       functionality for that specific API. Example: DetectionsMixin implements
       all relevant functions to getting / working with detections.

       Example Usage:
         API = CyAPI(tid="your_tid", app_id="your_app_id", app_secret="your_secret")
         API.create_conn()
       At this point you're ready to begin interacting with the API.
    """
    # Region code -> display name plus the hostname fragment used to build
    # the API base URL.  'url' feeds the direct tenant API hostname,
    # 'mtc_url' the multi-tenant console hostname; some regions define
    # only one of the two.
    regions = {
        'NA': {
            'fullname': 'North America',
            'url': ''
        },
        'US': {
            'fullname': 'United States',
            'mtc_url': 'us'
        },
        'APN': {
            'fullname': 'Asia Pacific-North',
            'url': '-apne1'
        },
        'JP': {
            'fullname': 'Asia Pacific NE/Japan',
            'mtc_url': 'jp'
        },
        'APS': {
            'fullname': 'Asia Pacific-South',
            'url': '-au'
        },
        'AU': {
            'fullname': 'Asia Pacific SE/Australia',
            'mtc_url': 'au'
        },
        'EU': {
            'fullname': 'Europe',
            'url': '-euc1',
            'mtc_url': 'eu'
        },
        'GOV': {
            'fullname': 'US-Government',
            'url': '-us'
        },
        'SA': {
            'fullname': 'South America',
            'url': '-sae1'
        },
        'SP': {
            'fullname': 'South America/Sao Paulo',
            'mtc_url': 'sp'
        }
    }

    # Detection statuses the API accepts for filtering/updating detections.
    valid_detection_statuses = [
        "New", "In Progress", "Follow Up", "Reviewed", "Done", "False Positive"
    ]
    # Artifact types the API accepts for detection artifacts.
    valid_artifact_types = [
        "Protect", "Process", "File", "NetworkConnection", "RegistryKey"
    ]

    # Absolute path of the installed package directory.
    root_path = os.path.dirname(os.path.abspath(__file__))

    # Bundled exclusion JSON resources shipped with the package.
    exclusions = pkg_resources.resource_listdir(__name__, "exclusions")

    # Exclusion names: resource basenames with the ".json" suffix removed.
    exc_choices = list(
        map(lambda x: os.path.basename(x).replace('.json', ''), exclusions))

    # Default number of worker threads for concurrent API operations.
    WORKERS = 20

    def __init__(self,
                 tid=None,
                 app_id=None,
                 app_secret=None,
                 region="NA",
                 mtc=False,
                 tenant_app=False,
                 tenant_jwt=None):
        """Store the credentials and derive the base URL for *region*,
        using the multi-tenant console host when *mtc* is true and the
        direct tenant API host otherwise."""
        self.tid_val = tid
        self.app_id = app_id
        self.app_secret = app_secret
        self.jwt = None
        self.region = region
        self.mtc = mtc
        self.tenant_app = tenant_app
        self.tenant_jwt = tenant_jwt

        region_info = self.regions[region]
        if mtc:
            self.baseURL = "https://api-admin.cylance.com/public/{}/".format(
                region_info['mtc_url'])
        else:
            self.baseURL = "https://protectapi{}.cylance.com/".format(
                region_info['url'])
        # NOTE(review): debug_level is read from module scope — presumably a
        # module-global set at import time; confirm.
        self.debug_level = debug_level
        self.s = None
        self.req_cnt = 0

    def create_conn(self):
        """
        Setup and authenticate the connection to the API.

        Builds (or reuses) the retrying HTTP session, obtains the
        appropriate credential — MTC auth token, caller-supplied tenant
        JWT, or a freshly minted JWT — and installs the matching headers
        on the session.  Also records when the token should be
        considered expired so requests can refresh it proactively.
        """

        self.s = self._setup_session(session=self.s)

        if self.mtc:
            self.auth = self._get_auth_token()
            self.headers = {
                "Content-Type": "application/json; charset=utf-8",
                "Accept": "*/*",
                'Accept-Encoding': "gzip,deflate,br",
                'Authorization': "Bearer {}".format(self.auth)
            }
        else:
            if self.tenant_app:
                self.jwt = self.tenant_jwt
            else:
                self.jwt = self._get_jwt()
            self.headers = {
                'Accept': "application/json",
                'Accept-Encoding': "gzip,compress",
                'Authorization': "Bearer {}".format(self.jwt),
                'Cache-Control': "no-cache"
            }

        # Token lifetime is ~30 minutes; expire 30 seconds early so
        # _make_request refreshes before the server rejects the token.
        timeout = 1800
        now = datetime.utcnow()
        timeout_datetime = now + timedelta(seconds=timeout)
        self.access_token_expiration = timeout_datetime - timedelta(seconds=30)

        self.s.headers.update(self.headers)

    def _setup_session(self,
                       retries=250,
                       backoff_factor=0.8,
                       backoff_max=180,
                       status_forcelist=(500, 502, 503, 504),
                       session=None):
        """Creates a session with a Retry handler. This will automatically retry
           up to 250 times... which might be overkill

        The retry adapter is mounted only for this API's base URL.
        NOTE(review): the ``backoff_max`` parameter is accepted but never
        passed to Retry below — confirm whether it was meant to cap the
        backoff (urllib3's default cap applies instead).
        """

        session = session or requests.Session()
        retry = Retry(
            total=retries,
            read=retries,
            connect=retries,
            backoff_factor=backoff_factor,
            status_forcelist=status_forcelist,
            respect_retry_after_header=True,
        )
        adapter = HTTPAdapter(max_retries=retry)
        session.mount(self.baseURL, adapter)
        return session

    def _get_jwt(self):
        '''Create a JWT that expires in 30min and exchange it for an API
        access token.

        Returns the access-token string (None if the auth response
        carried no ``access_token``).  Raises RuntimeError when the auth
        endpoint does not answer 200.
        '''
        # 30 minutes from now
        timeout = 1800
        now = datetime.utcnow()
        timeout_datetime = now + timedelta(seconds=timeout)
        epoch_time = int((now - datetime(1970, 1, 1)).total_seconds())
        epoch_timeout = int(
            (timeout_datetime - datetime(1970, 1, 1)).total_seconds())
        jti_val = str(uuid.uuid4())

        AUTH_URL = self.baseURL + "auth/v2/token"
        claims = {
            "exp": epoch_timeout,
            "iat": epoch_time,
            "iss": "http://cylance.com",
            "sub": self.app_id,
            "tid": self.tid_val,
            "jti": jti_val
        }

        try:
            # This is left for compatibility with PyJWT prior to 2.0.0
            encoded = jwt.encode(claims, self.app_secret,
                                 algorithm='HS256').decode('utf-8')
        except:
            encoded = jwt.encode(claims, self.app_secret, algorithm='HS256')

        # BUG FIX: the encoded JWT was previously printed unconditionally,
        # leaking the bearer credential to stdout on every call; only emit
        # it at high debug levels.
        if debug_level > 2:
            print("auth_token:\n" + encoded + "\n")
        payload = {"auth_token": encoded}
        headers = {"Content-Type": "application/json; charset=utf-8"}
        resp = requests.post(AUTH_URL,
                             headers=headers,
                             data=json.dumps(payload))

        # Can't do anything without a successful authentication.
        # (Explicit status check instead of assert: asserts vanish under
        # ``python -O``.)
        if resp.status_code != 200:
            error_message = []
            try:
                errors = resp.json()
            except json.decoder.JSONDecodeError:
                errors = None
            error_message.append("Failed request for JWT Token.")
            error_message.append("  Response Status Code: {}".format(
                resp.status_code))
            if errors is not None:
                error_message.append("  Error(s):")
                for k in errors:
                    error_message.append("    {}: {}".format(k, errors[k]))
            raise RuntimeError('\n'.join(error_message))

        data = resp.json()
        token = data.get('access_token', None)
        if debug_level > 1:
            print("http_status_code: {}".format(resp.status_code))
            print("access_token:\n" + token + "\n")

        return token

    def _get_auth_token(self):
        """Get auth token for MTC.

        Posts client credentials to the MTC auth endpoint and returns
        the access-token string (None if the response carried no
        ``access_token``).  Raises RuntimeError on non-200 responses.
        """
        AUTH_URL = self.baseURL + "auth"
        claims = {"scope": "api", "grant_type": "client_credentials"}
        payload = claims

        headers = {"Content-Type": "application/json; charset=utf-8"}
        resp = requests.post(AUTH_URL,
                             data=payload,
                             auth=(self.app_id, self.app_secret))

        # Can't do anything without a successful authentication.
        # (Explicit status check instead of assert: asserts vanish under
        # ``python -O``.)
        if resp.status_code != 200:
            error_message = []
            try:
                errors = resp.json()
            except json.decoder.JSONDecodeError:
                errors = None
            error_message.append("  Failed request for MTC Auth Token")
            error_message.append("  Response Status Code: {}".format(
                resp.status_code))
            if errors is not None:
                error_message.append("  Error(s):")
                for k in errors:
                    error_message.append("    {}: {}".format(k, errors[k]))
            raise RuntimeError('\n'.join(error_message))

        data = resp.json()
        token = data.get('access_token', None)
        if debug_level > 1:
            print("http_status_code: {}".format(resp.status_code))
            print("access_token:\n" + token + "\n")

        return token

    def _make_request(self, method, url, data=None):
        """Dispatch an HTTP request, refreshing the session token first
        when it is near expiry or the request budget is exhausted."""
        self.req_cnt += 1

        token_stale = datetime.utcnow() > self.access_token_expiration
        if token_stale or self.req_cnt >= 9500:
            self.req_cnt = 0
            # Refresh the token if needed
            self.create_conn()

        if method == "get":
            resp = self.s.get(url)
            # loop if rate limited
            # TODO: Improve method when headers are uniformally supported
            while resp.status_code == 429:
                time.sleep(1)
                resp = self.s.get(url)
            return ApiResponse(resp)

        if method == "post":
            caller = self.s.post
        elif method == "delete":
            caller = self.s.delete
        elif method == "put":
            caller = self.s.put
        else:
            raise ValueError("Invalid Method: {}".format(method))

        if data:
            return ApiResponse(caller(url, json=data))
        return ApiResponse(caller(url))

    def _validate_parameters(self, param, valid_params):
        """Return True when *param* is one of *valid_params*; raise
        ValueError otherwise."""
        if param in valid_params:
            return True
        raise ValueError("{} not valid. Valid values are: {}".format(
            param, valid_params))

    def _is_valid_detection_status(self, status):
        """Return True when *status* is falsy (no filter) or a known
        detection status; raise ValueError otherwise."""
        if not status:
            return True
        if status in self.valid_detection_statuses:
            return True
        raise ValueError("Status not valid. Valid values: {}".format(
            self.valid_detection_statuses))

    def _is_valid_artifact_type(self, artifact_type):
        """Return True when *artifact_type* is falsy (no filter) or a
        known artifact type; raise ValueError otherwise."""
        if not artifact_type:
            return True
        if artifact_type in self.valid_artifact_types:
            return True
        raise ValueError(
            "Artifact Type not valid. Valid values: {}".format(
                self.valid_artifact_types))

    def _convert_id(self, pid):
        """Normalize an ID: upper-case it and strip the dashes."""
        return pid.upper().replace('-', '')

    def _add_url_params(self, url, params):
        """ Add GET params to provided URL being aware of existing.

        :param url: string of target URL
        :param params: dict containing requested params to be added
        :return: string with updated URL

        >> url = 'http://stackoverflow.com/test?answers=true'
        >> new_params = {'answers': False, 'data': ['some','values']}
        >> add_url_params(url, new_params)
        'http://stackoverflow.com/test?data=some&data=values&answers=false'
        """
        # Unquoting URL first so we don't loose existing args
        url = unquote(url)
        # Extracting url info
        parsed_url = urlparse(url)
        # Extracting URL arguments from parsed URL
        get_args = parsed_url.query
        # Converting URL arguments to dict
        parsed_get_args = dict(parse_qsl(get_args))
        # Merging URL arguments dict with new params
        parsed_get_args.update(params)

        # Drop params explicitly set to None — treated as "remove this
        # argument" rather than serialized as the string "None".
        parsed_get_args = {
            k: v
            for k, v in parsed_get_args.items() if v is not None
        }
        # Bool and Dict values should be converted to json-friendly values
        # (e.g. True -> "true") so the API sees lowercase booleans.
        # you may throw this part away if you don't like it :)
        parsed_get_args.update({
            k: json.dumps(v)
            for k, v in parsed_get_args.items() if isinstance(v, (bool, dict))
        })

        # Converting URL argument to proper query string
        encoded_get_args = urlencode(parsed_get_args, doseq=True)
        # Creating new parsed result object based on provided with new
        # URL arguments. Same thing happens inside of urlparse.
        new_url = ParseResult(parsed_url.scheme, parsed_url.netloc,
                              parsed_url.path, parsed_url.params,
                              encoded_get_args, parsed_url.fragment).geturl()

        return new_url

    # TODO: Remove this method
    def create_item(self, ptype, item):
        """POST *item* to the v2 endpoint for *ptype*.

        Type options: zones, rulesets, policies
        """
        url = self.baseURL + "{}/v2".format(ptype)

        # Optional debug chatter controlled by the module-level debug_level.
        if debug_level > 1:
            if debug_level > 2:
                pprint(item)
            print("Create Item URL: " + url)

        return self._make_request("post", url, data=item)

    # Method to get a page of items and return the response object
    def _get_list_page(self,
                       page_type,
                       page=1,
                       page_size=200,
                       detail="",
                       params=None):
        """Fetch a single page of *page_type* items.

        :param page_type: API object type (e.g. "zones", "rulesets")
        :param page: 1-based page number to request
        :param page_size: items per page
        :param detail: extra path suffix appended after the version segment
        :param params: optional dict of additional query parameters
            (previously a mutable ``{}`` default — fixed to None)
        :return: ApiResponse for the requested page
        """
        q_params = {"page": page, "page_size": page_size}

        if params:
            q_params.update(params)

        baseURL = self.baseURL + "{}/v2{}".format(page_type, detail)
        baseURL = self._add_url_params(baseURL, q_params)

        return self._make_request("get", baseURL)

    def _generate_urls(self,
                       page_type,
                       page=1,
                       page_size=200,
                       detail="",
                       params=None,
                       total_pages=0):
        """Build (and shuffle) the full list of page URLs for *page_type*.

        Issues one initial request to learn the total page count, then
        derives one URL per remaining page.

        :param page_type: API object type to list
        :param page: first page to fetch (1-based)
        :param page_size: items per page
        :param detail: extra path suffix appended after the version segment
        :param params: optional dict of additional query parameters
            (previously a mutable ``{}`` default — fixed to None)
        :param total_pages: cap on the number of pages; 0 means "all"
        :return: shuffled list of page URLs
        :raises RuntimeError: when the initial request does not return 200
        """
        start_page = page
        q_params = {"page": start_page, "page_size": page_size}

        if params:
            q_params.update(params)

        if self.mtc:
            baseURL = self.baseURL + "{}/{}".format(page_type, detail)
        else:
            baseURL = self.baseURL + "{}/v2{}".format(page_type, detail)
            baseURL = self._add_url_params(baseURL, q_params)

        response = self._make_request("get", baseURL)
        # Explicit status check: the previous `assert` would be silently
        # stripped under `python -O`, hiding failures entirely.
        if response.status_code != 200:
            error_message = [
                "Failed initial request for {}.".format(page_type),
                "  get URL:\n    {}".format(baseURL),
                "  Response Status Code: {}".format(response.status_code),
            ]
            if response.errors:
                error_message.append("Error(s)")
                for k in response.errors:
                    error_message.append("  {}: {}".format(
                        k, response.errors.get(k)))
            raise RuntimeError('\n'.join(error_message))
        data = response.data

        page_size = data['page_size']
        if total_pages == 0 or total_pages > data['total_pages']:
            total_pages = data['total_pages']

        all_urls = [
            baseURL,
        ]
        for page in range(start_page + 1, total_pages + 1):
            baseURL = self._add_url_params(baseURL, {"page": page})
            all_urls.append(baseURL)
        # there seems to be a bug which leads to increased 50x errors the higher the page count is (caching?)
        # we want to shuffle the urls so they're not sequentially downloaded anymore and hit the 50x early
        # this way, we can retry again and again, until scalar returns our content
        shuffle(all_urls)
        return all_urls

    def _bulk_get(self, urls, disable_progress=True, paginated=True):
        """Concurrently GET *urls* and merge their JSON payloads.

        :param urls: list of fully-formed URLs to fetch
        :param disable_progress: hide the tqdm progress bar when True
        :param paginated: collect each page's ``page_items`` when True,
            otherwise collect each page's whole payload
        :return: the last ApiResponse received, with ``.data`` replaced by
            the merged list of results
        :raises ValueError: when *urls* is empty (previously this surfaced
            as a NameError on the final ``response`` reference)
        :raises RuntimeError: when every request failed with an exception
        :raises json.decoder.JSONDecodeError: on an unsuccessful response
            (likely a server-side error)
        """
        if not urls:
            raise ValueError("no URLs to fetch")

        tqdmargs = {
            'total': len(urls),
            'unit': 'Page',
            'leave': False,
            'desc': 'Download Progress',
            'disable': disable_progress
        }

        collector = []
        response = None
        with cf.ThreadPoolExecutor(max_workers=self.WORKERS) as executor:
            res = {
                executor.submit(self._make_request, "get", url): url
                for url in urls
            }
            for future in tqdm(cf.as_completed(res), **tqdmargs):
                url = res[future]
                try:
                    response = future.result()

                except Exception as exc:
                    print('[-] {} generated an exception: {}'.format(url, exc))
                    continue
                else:
                    try:
                        if not response.is_success:
                            # BUG FIX: raising the bare JSONDecodeError class
                            # failed with TypeError (it requires msg/doc/pos
                            # arguments); construct it properly instead.
                            raise json.decoder.JSONDecodeError(
                                "unsuccessful response", "", 0)
                        data = response.data
                        if paginated:
                            collector.extend(data["page_items"])
                        else:
                            collector.append(data)
                    except json.decoder.JSONDecodeError:
                        print(
                            "Likely got a Server Error here, trying to quit. Please try again later."
                        )
                        executor.shutdown(wait=False)
                        # BUG FIX: re-raise the caught exception; raising the
                        # bare class here was itself a TypeError.
                        raise
                    except KeyError:
                        print("Error: no data returned.")
                        print(
                            "if you see this message, 250 retries per url have been exceeded"
                        )
                        print("get a new token and retry later")
                        print(
                            "this is the url {} and returned data: {}".format(
                                url, data))

        if response is None:
            # Every future raised before producing a response.
            raise RuntimeError("all {} requests failed".format(len(urls)))

        response.data = collector  # This is a hacky way of returning an APIResponse for all of the data

        return response

    # Method to retrieve list of items
    def get_list_items(self,
                       type_name,
                       detail="",
                       limit=200,
                       params=None,
                       disable_progress=True,
                       total_pages=0,
                       start_page=1):
        """Retrieve every item of *type_name* across all result pages.

        Generates one URL per page and downloads them concurrently.

        :param type_name: API object type to list
        :param detail: extra path suffix appended after the version segment
        :param limit: page size used for pagination
        :param params: optional dict of additional query parameters
            (previously a mutable ``{}`` default — fixed to None)
        :param disable_progress: hide the tqdm progress bar when True
        :param total_pages: cap on the number of pages; 0 means "all"
        :param start_page: first page to fetch (1-based)
        :return: ApiResponse whose ``.data`` holds the merged item list
        """
        urls = self._generate_urls(type_name,
                                   detail=detail,
                                   page_size=limit,
                                   params=params,
                                   total_pages=total_pages,
                                   page=start_page)

        return self._bulk_get(urls, disable_progress)

    # Method to retrieve an Item
    # This may not be anything as a result of edit error..
    # TODO: Make this return a response instead of a JSON object
    def get_item(self, ptype, item):
        """Fetch a single item. Type options: rulesets, policies."""
        # MTC endpoints omit the /v2 version segment.
        if self.mtc:
            url = "{}{}/{}".format(self.baseURL, ptype, item)
        else:
            url = "{}{}/v2/{}".format(self.baseURL, ptype, item)

        return self._make_request("get", url)
示例#49
0
def readWidgets(directory,
                cachedWidgetDescriptions,
                prototype=False,
                silent=False,
                addOn=None,
                defaultCategory=None,
                module=None):
    """Scan *directory* (or *module*'s package data) for Orange widget
    modules and collect ``(name, WidgetDescription)`` tuples.

    NOTE(review): this is Python 2 code (``print`` statements,
    ``except Exception, msg``, ``file()``); it will not run under Python 3.
    NOTE(review): ``widgets`` is built but never returned in this view —
    confirm against upstream whether a trailing ``return widgets`` was lost.

    :param directory: filesystem directory holding ``*.py`` widget modules
    :param cachedWidgetDescriptions: dict mapping filename to a cached
        description; reused when the source mtime matches
    :param prototype: force every widget into the "Prototypes" category
    :param silent: suppress import-error reporting
    :param addOn: add-on name recorded on each description (or None)
    :param defaultCategory: category used when the widget metadata has
        none; derived from the directory name when not given
    :param module: when set, widgets are read from this package's
        resources via pkg_resources instead of the filesystem
    """
    import sys
    # Shared module-level state: error flags, the Qt splash screen, and
    # the collectors of failed widget names.
    global hasErrors, splashWindow, widgetsWithError, widgetsWithErrorPrototypes

    widgets = []

    # Derive a default category from the directory name; for ".../X/widgets"
    # use the parent directory name "X" instead of "widgets".
    if not defaultCategory:
        predir, defaultCategory = os.path.split(
            directory.strip(os.path.sep).strip(os.path.altsep))
        if defaultCategory == "widgets":
            defaultCategory = os.path.split(
                predir.strip(os.path.sep).strip(os.path.altsep))[1]

    if defaultCategory.lower() == "prototypes" or prototype:
        defaultCategory = "Prototypes"

    # Candidate widget sources: package resources when a module is given,
    # otherwise *.py files from the directory.
    if module:
        files = [
            f for f in pkg_resources.resource_listdir(module.__name__, '')
            if f.endswith('.py')
        ]
    else:
        files = glob.iglob(os.path.join(directory, "*.py"))

    for filename in files:
        # Skip sub-directories in either source.
        if module:
            if pkg_resources.resource_isdir(module.__name__, filename):
                continue
        else:
            if os.path.isdir(filename):
                continue

        # Source mtime, used as the cache-freshness key.
        # NOTE: the local name `datetime` shadows the stdlib module name.
        if module:
            if getattr(module, '__loader__', None):
                datetime = str(
                    os.stat(module.__loader__.archive)[stat.ST_MTIME])
            else:
                datetime = str(
                    os.stat(
                        pkg_resources.resource_filename(
                            module.__name__, filename))[stat.ST_MTIME])
        else:
            datetime = str(os.stat(filename)[stat.ST_MTIME])
        # Reuse the cached description when the mtime matches and the cache
        # entry carries the newer "inputClasses" attribute.
        cachedDescription = cachedWidgetDescriptions.get(filename, None)
        if cachedDescription and cachedDescription.time == datetime and hasattr(
                cachedDescription, "inputClasses"):
            widgets.append((cachedDescription.name, cachedDescription))
            continue
        if module:
            data = pkg_resources.resource_string(module.__name__, filename)
        else:
            data = file(filename).read()
        try:
            meta = widgetParser.WidgetMetaData(
                data, defaultCategory, enforceDefaultCategory=prototype)
        except:  # Probably not an Orange widget module.
            continue

        # Metadata marks a prototype via "1"/"true" (case-insensitive).
        widgetPrototype = meta.prototype == "1" or meta.prototype.lower(
        ).strip() == "true" or prototype
        if widgetPrototype:
            meta.category = "Prototypes"

        dirname, fname = os.path.split(filename)
        widgname = os.path.splitext(fname)[0]
        try:
            # Lazily create the splash screen on the first widget.
            if not splashWindow:
                import orngEnviron
                logo = QPixmap(
                    os.path.join(orngEnviron.directoryNames["canvasDir"],
                                 "icons", "splash.png"))
                splashWindow = QSplashScreen(logo, Qt.WindowStaysOnTopHint)
                splashWindow.setMask(logo.mask())
                splashWindow.show()

            splashWindow.showMessage("Registering widget %s" % meta.name,
                                     Qt.AlignHCenter + Qt.AlignBottom)
            qApp.processEvents()

            # Import the widget module itself to resolve its signals.
            if module:
                import_name = "%s.%s" % (module.__name__, widgname)
            else:
                import_name = widgname
            wmod = __import__(import_name, fromlist=[""])

            wmodFilename = wmod.__file__
            if os.path.splitext(wmodFilename)[1] != "py":
                # Replace .pyc, .pyo with bare .py extension
                # (used as key in cachedWidgetDescription)
                wmodFilename = os.path.splitext(wmodFilename)[0] + ".py"

            # Evaluate the input/output list (all tuple items are strings)
            # SECURITY: eval of metadata strings — trusted widget files only.
            inputs = eval(meta.inputList)
            outputs = eval(meta.outputList)

            inputs = [InputSignal(*input) for input in inputs]
            outputs = [OutputSignal(*output) for output in outputs]

            # Resolve signal type names into concrete type instances
            inputs = [
                resolveSignal(input, globals=wmod.__dict__) for input in inputs
            ]
            outputs = [
                resolveSignal(output, globals=wmod.__dict__)
                for output in outputs
            ]

            # Input types directly; output types including their whole MRO.
            inputClasses = set([s.type.__name__ for s in inputs])
            outputClasses = set(
                [klass.__name__ for s in outputs for klass in s.type.mro()])

            # Convert all signal types back into qualified names.
            # This is to prevent any possible import problems when cached
            # descriptions are unpickled (the relevant code using this lists
            # should be able to handle missing types better).
            for s in inputs + outputs:
                s.type = "%s.%s" % (s.type.__module__, s.type.__name__)

            widgetInfo = WidgetDescription(
                name=meta.name,
                time=datetime,
                fileName=widgname,
                module=module.__name__ if module else None,
                fullName=wmodFilename,
                directory=directory,
                addOn=addOn,
                inputList=meta.inputList,
                outputList=meta.outputList,
                inputClasses=inputClasses,
                outputClasses=outputClasses,
                tags=meta.tags,
                inputs=inputs,
                outputs=outputs,
            )

            # Copy remaining simple metadata attributes across.
            for attr in [
                    "contact", "icon", "priority", "description", "category"
            ]:
                setattr(widgetInfo, attr, getattr(meta, attr))

            # build the tooltip
            if len(widgetInfo.inputs) == 0:
                formatedInList = "<b>Inputs:</b><br> &nbsp;&nbsp; None<br>"
            else:
                formatedInList = "<b>Inputs:</b><br>"
                for signal in widgetInfo.inputs:
                    formatedInList += " &nbsp;&nbsp; - " + signal.name + " (" + signal.type + ")<br>"

            if len(widgetInfo.outputs) == 0:
                formatedOutList = "<b>Outputs:</b><br> &nbsp; &nbsp; None<br>"
            else:
                formatedOutList = "<b>Outputs:</b><br>"
                for signal in widgetInfo.outputs:
                    formatedOutList += " &nbsp; &nbsp; - " + signal.name + " (" + signal.type + ")<br>"

            addOnName = "" if not widgetInfo.addOn else " (from add-on %s)" % widgetInfo.addOn

            # [:-4] strips the final "<br>" from each list.
            widgetInfo.tooltipText = "<b><b>&nbsp;%s</b></b>%s<hr><b>Description:</b><br>&nbsp;&nbsp;%s<hr>%s<hr>%s" % (
                meta.name, addOnName, widgetInfo.description,
                formatedInList[:-4], formatedOutList[:-4])
            widgets.append((meta.name, widgetInfo))
        except Exception, msg:
            # Report the failure once, then record the widget name in the
            # matching error collector.
            if not hasErrors and not silent:
                print "There were problems importing the following widgets:"
                hasErrors = True
            if not silent:
                print "   %s: %s" % (widgname, msg)

            if not widgetPrototype:
                widgetsWithError.append(widgname)
            else:
                widgetsWithErrorPrototypes.append(widgname)
示例#50
0
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA

import pkg_resources

from flask_registry import ModuleAutoDiscoveryRegistry, RegistryProxy
from invenio.utils.datastructures import LazyDict

bulletinext = RegistryProxy('bulletinext', ModuleAutoDiscoveryRegistry,
                            'bulletinext')


def _discover_journals():
    """Map each journal resource directory name to its absolute path."""
    return {
        f: pkg_resources.resource_filename(l.__name__, f)
        for l in bulletinext
        for f in pkg_resources.resource_listdir(l.__name__, '.')
        if pkg_resources.resource_isdir(l.__name__, f)
    }


# Lazily populated on first access, exactly like the previous inline lambda.
journals = LazyDict(_discover_journals)
示例#51
0
# -*- coding: utf-8 -*-
"""
This module registers *.cfg files in this as named pipelines for
transmogrifier.

"""

from configparser import RawConfigParser
from venusianconfiguration import configure
from io import StringIO
import os
import pkg_resources


# Register pipelines
for resource in pkg_resources.resource_listdir(__package__, ''):
    name, ext = os.path.splitext(resource)

    if ext == '.cfg':
        # Parse to read title and description
        data = pkg_resources.resource_string(__package__, resource)
        config = RawConfigParser()
        try:
            config.read_file(StringIO(data.decode('utf-8')))
        except AttributeError:
            # noinspection PyDeprecation
            config.readfp(StringIO(data.decode('utf-8')))

        # Register
        configure.transmogrifier.pipeline(
            name=name,
示例#52
0
文件: cli.py 项目: m1kola/molo
def get_template_dirs(package_name):
    """Return the entries of *package_name*'s bundled ``templates`` dir."""
    templates_dir = 'templates'
    return pkg_resources.resource_listdir(package_name, templates_dir)
示例#53
0
 def list(self, path: str, long: bool = False) -> Sequence[str]:
     """Return entries under *path*; with *long*, prefix each with *path*."""
     entries = pkg_resources.resource_listdir(self.package, self.path(path))
     if not long:
         return entries
     return [self.join(path, entry) for entry in entries]
示例#54
0
""" PyMiniRacer main wrappers """
# pylint: disable=bad-whitespace,too-few-public-methods

import sys
import os
import json
import ctypes
import threading
import datetime
import fnmatch

from pkg_resources import resource_listdir, resource_filename

# In python 3 the extension file name depends on the python version
# Locate the compiled V8 shared library (_v8*.so) at import time and
# expose its name/path as module constants.
try:
    # Normal case: the package is installed on disk, so pkg_resources can
    # list its files and resolve a real filesystem path.
    EXTENSION_NAME = fnmatch.filter(resource_listdir('py_mini_racer', '.'),
                                    '_v8*.so')[0]
    EXTENSION_PATH = resource_filename('py_mini_racer', EXTENSION_NAME)
except NotImplementedError:
    # NOTE(review): presumably resource_listdir raises NotImplementedError
    # when running from a frozen bundle; sys._MEIPASS is set by PyInstaller,
    # so fall back to the unpacked "_v8" directory there. Re-raise otherwise.
    if not hasattr(sys, "_MEIPASS"):
        raise
    __location__ = os.path.join(sys._MEIPASS, "_v8")  # pylint: disable=no-member, protected-access
    EXTENSION_NAME = fnmatch.filter(os.listdir(__location__), '_v8*.so')[0]
    EXTENSION_PATH = os.path.join(__location__, EXTENSION_NAME)


class MiniRacerBaseException(Exception):
    """Base class for all PyMiniRacer exceptions."""

示例#55
0
文件: sam.py 项目: 0xTCG/aldy
def load_sam_profile(
    sam_path: str,
    factor: float = 2.0,
    regions: Optional[Dict[Tuple[str, str, int], GRange]] = None,
    cn_region: Optional[GRange] = None,
) -> Dict[str, Dict[str, List[float]]]:
    """
    Load the profile information from a SAM/BAM file.

    Returns:
        list[str, str, int, float]: list of tuples
        ``(gene_name, chromosome, loci, coverage)``.

    Params:
        factor (float):
            Scaling factor. Default is 2.0 (for two copies).
        regions (list[:obj:`GRange`], optional):
            List of regions to be extracted.

    Notes:
        Profiles that were used in Aldy paper:

            1. PGRNseq-v1/v3: NA17642 was used for all genes
                (n.b. PGXT147 with rescale 2.52444127771 was used for CYP2B6 beta).
            2. PGRNseq-v2: NA19789.bam was used for all genes.
            3. Illumina: by definition contains all ones (uniform coverage profile).
    """
    # BUG FIX: the previous mutable default (``regions=dict()``) was mutated
    # below via the "neutral" sentinel entry, poisoning both the shared
    # default and any caller-supplied dict across calls. Use None and copy.
    if regions:
        gene_regions = dict(regions)
    else:
        import pkg_resources

        # No regions given: collect every region of every bundled gene.
        gene_regions = {}
        for g in sorted(
                pkg_resources.resource_listdir("aldy.resources", "genes")):
            if g[-4:] != ".yml":
                continue
            gg = Gene(script_path(f"aldy.resources.genes/{g}"))
            for gi, gr in enumerate(gg.regions):
                for r, rng in gr.items():
                    gene_regions[gg.name, r, gi] = rng
    # Sentinel region used for copy-number normalisation.
    gene_regions["neutral", "cn", 0] = (cn_region if cn_region else
                                        DEFAULT_CN_NEUTRAL_REGION["hg19"])

    # Collapse the gene regions into one (start, end) span per chromosome.
    chr_regions: Dict[str, Tuple[int, int]] = {}
    for c, s, e in gene_regions.values():
        if c not in chr_regions:
            chr_regions[c] = (s, e)
        else:
            chr_regions[c] = (min(s, chr_regions[c][0]),
                              max(e, chr_regions[c][1]))

    # Per-chromosome, per-position read-depth counts.
    cov: dict = defaultdict(lambda: defaultdict(int))
    for c, (s, e) in natsorted(chr_regions.items()):
        if sam_path == "<illumina>":
            continue
        with pysam.AlignmentFile(sam_path) as sam:
            region = GRange(c, s, e).samtools(
                pad_left=1000,
                pad_right=1000,
                prefix=_chr_prefix(c, [x["SN"] for x in sam.header["SQ"]]),
            )
            log.info("Scanning {}...", region)
            try:
                # Walk each read's CIGAR to accumulate reference coverage.
                for read in sam.fetch(region=region):
                    start, s_start = read.reference_start, 0
                    if not read.cigartuples:
                        continue

                    for op, size in read.cigartuples:
                        if op == 2:  # deletion: advances reference only
                            for i in range(size):
                                cov[c][start + i] += 1
                            start += size
                        elif op == 1:  # insertion: advances query only
                            s_start += size
                        elif op == 4:  # soft clip: advances query only
                            s_start += size
                        elif op in [0, 7, 8]:  # (mis)match: advances both
                            for i in range(size):
                                cov[c][start + i] += 1
                            start += size
                            s_start += size
            except ValueError:
                log.warn("Cannot fetch {}", region)

    # Aggregate scaled coverage per (gene, region, region-index).
    d: Any = {}
    for (g, r, ri), (c, s, e) in gene_regions.items():
        if g not in d:
            d[g] = {}
        if r not in d[g]:
            d[g][r] = [0]
        if ri >= len(d[g][r]):
            d[g][r].append(0)
        if sam_path == "<illumina>":
            # Uniform profile: one unit of coverage per base.
            d[g][r][ri] = sum(1.0 for i in range(s, e))
        else:
            d[g][r][ri] = sum(cov[c][i] * (factor / 2.0) for i in range(s, e))
    return d
示例#56
0
def test_anaconda_failure_samples():
    """Yield one check per bundled install-failure log (nose generator)."""
    sample_dir = 'install-failure-logs'
    samples = pkg_resources.resource_listdir('bkr.inttest.labcontroller',
                                             sample_dir)
    for sample in samples:
        yield check_anaconda_failure_sample, sample
示例#57
0
def load_testDigits_list():
    """Return the file names bundled under ``data/testDigits``."""
    digits_dir = 'data/testDigits'
    return pkg_resources.resource_listdir(__name__, digits_dir)
示例#58
0
def _all_kernels():
    """List beakerx kernel names, excluding the non-kernel entries."""
    excluded = ('base', 'sparkex')
    return [
        name for name in pkg_resources.resource_listdir('beakerx', 'kernel')
        if name not in excluded
    ]
示例#59
0
    def __init__(self, config_path=None):
        """
        Constructor. Tries to find the main settings file and load it.

        Config precedence (later files override earlier ones):
        packaged/in-tree config -> packaged conf.d -> system config ->
        system conf.d -> plugin-adjusted paths -> per-user config.

        :param config_path: Path to a config file. Useful for unittesting.
        :raises ConfigFileNotFound: if none of the candidate paths could
            actually be read by configparser.
        """
        self.config = configparser.ConfigParser()
        # Paths configparser actually managed to read (set at the end).
        self.config_paths = []
        # Every candidate path, in precedence order.
        self.all_config_paths = []
        _source_tree_root = os.path.dirname(os.path.dirname(os.path.dirname(
            __file__)))
        # In case "examples" file exists in root, we are running from tree
        self.intree = bool(os.path.exists(os.path.join(_source_tree_root,
                                                       'examples')))
        if config_path is None:
            # Inside a virtualenv both system-wide and user configs live
            # under the venv prefix instead of /etc and $HOME.
            if 'VIRTUAL_ENV' in os.environ:
                cfg_dir = os.path.join(os.environ['VIRTUAL_ENV'], 'etc')
                user_dir = os.environ['VIRTUAL_ENV']
            else:
                cfg_dir = '/etc'
                user_dir = os.path.expanduser("~")

            _config_dir_system = os.path.join(cfg_dir, 'avocado')
            _config_dir_system_extra = os.path.join(cfg_dir, 'avocado', 'conf.d')
            _config_dir_local = os.path.join(user_dir, '.config', 'avocado')

            config_filename = 'avocado.conf'
            config_path_system = os.path.join(_config_dir_system, config_filename)
            config_path_local = os.path.join(_config_dir_local, config_filename)

            # Config shipped inside the installed package (or source tree).
            config_pkg_base = os.path.join('etc', 'avocado', config_filename)
            config_path_pkg = resource_filename('avocado', config_pkg_base)
            _config_pkg_extra = os.path.join('etc', 'avocado', 'conf.d')
            if resource_isdir('avocado', _config_pkg_extra):
                config_pkg_extra = resource_listdir('avocado',
                                                    _config_pkg_extra)
                _config_pkg_extra = resource_filename('avocado', _config_pkg_extra)
            else:
                config_pkg_extra = []
            # First try pkg/in-tree config
            self.all_config_paths.append(config_path_pkg)
            for extra_file in (os.path.join(_config_pkg_extra, _)
                               for _ in config_pkg_extra
                               if _.endswith('.conf')):
                self.all_config_paths.append(extra_file)
            # Override with system config
            self.all_config_paths.append(config_path_system)
            for extra_file in glob.glob(os.path.join(_config_dir_system_extra,
                                                     '*.conf')):
                self.all_config_paths.append(extra_file)
            # And the local config
            # Create an empty per-user config (with a helpful header) the
            # first time around, so users have a file to edit.
            if not os.path.exists(config_path_local):
                try:
                    path.init_dir(_config_dir_local)
                    with open(config_path_local, 'w') as config_local_fileobj:
                        content = ("# You can use this file to override "
                                   "configuration values from '%s and %s\n"
                                   % (config_path_system,
                                      _config_dir_system_extra))
                        config_local_fileobj.write(content)
                except IOError:     # Some users can't write it (docker)
                    pass
            # Allow plugins to modify/extend the list of configs
            dispatcher = SettingsDispatcher()
            if dispatcher.extensions:
                dispatcher.map_method('adjust_settings_paths',
                                      self.all_config_paths)
            # Register user config as last to always take precedence
            self.all_config_paths.append(config_path_local)
        else:
            # Only used by unittests (the --config parses the file later)
            self.all_config_paths.append(config_path)
        self.config_paths = self.config.read(self.all_config_paths)
        if not self.config_paths:
            raise ConfigFileNotFound(self.all_config_paths)
示例#60
0
class VideoModule(VideoFields, XModule):
    """Video Xmodule."""
    # Playback bookkeeping and the canvas icon name for this module type.
    video_time = 0
    icon_class = 'video'

    # CoffeeScript sources: the core player files plus every
    # display/*.coffee resource, sorted for a deterministic load order.
    js = {
        'coffee': [
            resource_string(__name__, 'js/src/time.coffee'),
            resource_string(__name__, 'js/src/video/display.coffee')
        ] + [
            resource_string(__name__, 'js/src/video/display/' + filename)
            for filename in sorted(
                resource_listdir(__name__, 'js/src/video/display'))
            if filename.endswith('.coffee')
        ]
    }
    css = {'scss': [resource_string(__name__, 'css/video/display.scss')]}
    js_module_name = "Video"

    def __init__(self, *args, **kwargs):
        """Parse this module's XML ``data`` into playback attributes."""
        XModule.__init__(self, *args, **kwargs)

        xmltree = etree.fromstring(self.data)
        self.youtube = xmltree.get('youtube')
        # NOTE: stored as the string 'true'/'false', not a bool.
        self.show_captions = xmltree.get('show_captions', 'true')
        self.source = self._get_source(xmltree)
        self.track = self._get_track(xmltree)
        self.start_time, self.end_time = self.get_timeframe(xmltree)

    def _get_source(self, xmltree):
        """Find the first valid source."""
        return self._get_first_external(xmltree, 'source')

    def _get_track(self, xmltree):
        """Find the first valid track."""
        return self._get_first_external(xmltree, 'track')

    def _get_first_external(self, xmltree, tag):
        """
        Will return the first valid element
        of the given tag.
        'valid' means has a non-empty 'src' attribute
        """
        result = None
        for element in xmltree.findall(tag):
            src = element.get('src')
            if src:
                result = src
                break
        return result

    def get_timeframe(self, xmltree):
        """ Converts 'from' and 'to' parameters in video tag to seconds.
        If there are no parameters, returns empty string. """
        def parse_time(str_time):
            """Converts s in '12:34:45' format to seconds. If s is
            None, returns empty string"""
            if str_time is None:
                return ''
            else:
                obj_time = time.strptime(str_time, '%H:%M:%S')
                return datetime.timedelta(
                    hours=obj_time.tm_hour,
                    minutes=obj_time.tm_min,
                    seconds=obj_time.tm_sec).total_seconds()

        return parse_time(xmltree.get('from')), parse_time(xmltree.get('to'))

    def handle_ajax(self, dispatch, get):
        """This is not being called right now and we raise 404 error."""
        log.debug(u"GET {0}".format(get))
        log.debug(u"DISPATCH {0}".format(dispatch))
        raise Http404()

    def get_instance_state(self):
        """Return information about state (position)."""
        return json.dumps({'position': self.position})

    def video_list(self):
        """Return video list."""
        return self.youtube

    def get_html(self):
        """Render the video player template for this module."""
        # We normally let JS parse this, but in the case that we need a hacked
        # out <object> player because YouTube has broken their <iframe> API for
        # the third time in a year, we need to extract it server side.
        normal_speed_video_id = None  # The 1.0 speed video

        # video_list() example:
        #   "0.75:nugHYNiD3fI,1.0:7m8pab1MfYY,1.25:3CxdPGXShq8,1.50:F-D7bOFCnXA"
        for video_id_str in self.video_list().split(","):
            if video_id_str.startswith("1.0:"):
                normal_speed_video_id = video_id_str.split(":")[1]

        return self.system.render_template(
            'video.html', {
                'streams': self.video_list(),
                'id': self.location.html_id(),
                'position': self.position,
                'source': self.source,
                'track': self.track,
                'display_name': self.display_name_with_default,
                'caption_asset_path': "/static/subs/",
                'show_captions': self.show_captions,
                'start': self.start_time,
                'end': self.end_time,
                'normal_speed_video_id': normal_speed_video_id
            })