Code example #1
File: test_main.py Project: PyAr/fades
    def test_child_program(self):
        child_program = 'tests/test_files/req_module.py'

        d = main.consolidate_dependencies(needs_ipython=False, child_program=child_program,
                                          requirement_files=None, manual_dependencies=None)

        self.assertDictEqual(d, {'pypi': {Requirement.parse('foo'), Requirement.parse('bar')}})
Code example #2
File: base.py Project: bloomonkey/archiveshub
def listContributors(session):
    """Return a list of Contributor Identifier, Contributor Name tuples.

    Pickle file caching equivalent of::

        list(iterContributors(session))

    """
    index_filename = resource_filename(
        Requirement.parse('archiveshub'),
        'dbs/ead/indexes/idx-vdbid/idx-vdbid.index_TERMIDS'
    )
    pickle_filename = resource_filename(
        Requirement.parse('archiveshub'),
        'www/ead/html/searchContributors.pickle'
    )
    if (
        os.path.exists(pickle_filename) and
        os.stat(index_filename).st_mtime < os.stat(pickle_filename).st_mtime
    ):
        with open(pickle_filename, 'rb') as pfh:
            try:
                return pickle.load(pfh)
            except Exception:
                # Unreadable or corrupt cache: fall through and regenerate it
                pass
    # Generate and cache the list
    contributorCache = list(iterContributors(session))
    with open(pickle_filename, 'wb') as pfh:
        pickle.dump(contributorCache, pfh)
    return contributorCache
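A minimal usage sketch (obtaining a session is out of scope here): the first call generates and pickles the list, and later calls reuse the cache until the TERMIDS index file becomes newer than the pickle.

contributors = listContributors(session)  # list of (identifier, name) tuples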
Code example #3
File: build.py Project: ccstolley/snakegit
def find_required(venv, file_):
    pkgdir = os.path.join(os.path.abspath(venv), "lib/python2.7/site-packages")
    working_set = WorkingSet([pkgdir])
    # We need a version of nose & pylint, preferably our version, but if someone
    # insists on adding it to requirements.txt, we should accommodate them.
    nose_fulfilled = False
    pylint_fulfilled = False
    with open(file_, 'r') as fp:
        required = [Requirement.parse(req) for req in fp
                    if req.strip() and not req.startswith("#")]
        requested = []
        for requirement in required:
            if requirement.project_name == 'nose':
                nose_fulfilled = True
            if requirement.project_name == 'pylint':
                pylint_fulfilled = True
            if not version_in_working_set(requirement, working_set):
                requested.append(requirement)

    if not nose_fulfilled:
        requirement = Requirement.parse('nose==1.2.1')
        if not version_in_working_set(requirement, working_set):
            requested.append(requirement)
    if not pylint_fulfilled:
        requirement = Requirement.parse('pylint==0.26.0')
        if not version_in_working_set(requirement, working_set):
            requested.append(requirement)
    return requested
Code example #4
File: version.py Project: zenotech/MyCluster
def read_release_version():
    try:
        filename = os.path.join(sys.prefix,
                                "share", "MyCluster", "RELEASE-VERSION")
        if os.path.isfile(filename):
            f = open(filename, 'r')
        else:
            filename = os.path.join(sys.prefix,
                                    "local", "share", "MyCluster",
                                    "RELEASE-VERSION")
            if os.path.isfile(filename):
                f = open(filename, 'r')
            else:
                filename = resource_filename(Requirement.parse("MyCluster"),
                                             "share/MyCluster/RELEASE-VERSION")
                if os.path.isfile(filename):
                    f = open(filename, 'r')
                else:
                    filename = resource_filename(
                        Requirement.parse("MyCluster"),
                        "RELEASE-VERSION")
                    f = open(filename, 'r')
        try:
            version = f.readlines()[0]
            return version.strip()

        finally:
            f.close()

    except:
        return None
Code example #5
File: executor.py Project: rhyolight/liquipy
 def __init__(self, host, database, username, password):
   self.host = host
   self.database = database
   self.username = username
   self.password = password
   self.liquibaseJar = resource_filename(Requirement.parse('liquipy'), 'externals/liquibase.jar')
   self.mysqlJar = resource_filename(Requirement.parse('liquipy'), 'externals/mysql-connector-java-5.1.17-bin.jar')
Code example #6
File: pex_builder.py Project: BabyDuncan/commons
 def get_all_valid_reqs(cls, requirements, requirements_txt):
   from collections import namedtuple
   from pkg_resources import Requirement
   import re
   numbered_item = namedtuple("numbered_item", ["position", "data"])
   numbered_list = lambda dataset: [numbered_item(*ni) for ni in enumerate(dataset)]
   named_dataset = namedtuple("named_dataset", ["name", "dataset"])
   inputs = [
     named_dataset(name="command line", dataset=numbered_list(requirements)),
   ]
   if requirements_txt is not None:
     file_lines = re.split("[\n\r]", open(requirements_txt).read())
     inputs.append(named_dataset(
       name="file: {0}".format(requirements_txt), dataset=numbered_list(file_lines)
     ))
   valid_reqs = []
    whitespace = re.compile(r"^\s*$")
   for name, dataset in inputs:
     for position, req in dataset:
       try:
         Requirement.parse(req)
         valid_reqs.append(req)
       except ValueError:
         if whitespace.match(req) is None: # Don't warn if empty string or whitespace
           cls.logger.warn("Invalid requirement \"{0}\" at " \
                       "position {1} from {2}\n".format(req, position + 1, name))
   return valid_reqs
Code example #7
File: setup.py Project: vassilux/odin
def copyConfig():
    '''copyConfig()
Copies the sample configuration files to the /etc/odinsys directory if necessary.'''
    from pkg_resources import Requirement, resource_filename
 
    # Get our file.
    filename_odinsys = resource_filename(Requirement.parse("odinsys"),
                                            "config/odinsys.sample.conf")

    filename_odinsys_log = resource_filename(Requirement.parse("odinsys"),
                                            "config/odinsyslogger.sample.conf")
 
    try:
        import shutil
 
        # Create the directory.
        if not os.path.exists("/opt/odinsys"):
            os.mkdir("/opt/odinsys")
 
        # Copy the configuration. Don't clobber existing files.
        if not os.path.exists("/etc/odinsys/odinsys.conf"):
            shutil.copyfile(filename_odinsys, "/etc/odinsys/odinsys.conf")

        if not os.path.exists("/etc/odinsys/odinsyslogger.conf"):
            shutil.copyfile(filename_odinsys_log, "/etc/odinsys/odinsyslogger.conf")
 
    except IOError:
        print "Unable to copy configuration file to /etc/odinsys directory."
Code example #8
File: egg.py Project: dockeroo/dockeroo
 def populate_source(self, source, dependency=False):
     super(SetupEggSubRecipe, self).populate_source(
         source, load_options=not dependency)
     if 'egg' not in source:
         source['egg'] = self.name
     source['requirement'] = Requirement.parse(source['egg'])
     source['egg'] = str(source['requirement'])
     source['find-requirement'] = Requirement.parse(source['find-egg']) \
         if 'find-egg' in source else source['requirement']
     source['find-egg'] = str(source['find-requirement'])
     source.setdefault('build', True)
     egg_directories = []
     if 'develop-eggs-directory' in self.recipe.buildout['buildout']:
         egg_directories.append(self.recipe.buildout['buildout']['develop-eggs-directory'])
     if 'eggs-directory' in self.recipe.buildout['buildout']:
         egg_directories.append(self.recipe.buildout['buildout']['eggs-directory'])
     source.setdefault('egg-path',
                       [source['location']] if 'location' in source else [] +
                       source.get('extra-paths', []) + egg_directories +
                       buildout_and_setuptools_path)
     source.setdefault('location',
                       self.default_eggs_directory(develop=source.get('develop', False)))
     source['egg-environment'] = Environment(source['egg-path'])
     source['build-options'] = {}
     if not dependency:
         for src_key, dst_key in [(key, re.sub('-', '_', key)) for key in
                                  [option for option in self.options
                                   if option in BUILD_EXT_OPTIONS]]:
             source['build-options'][dst_key] = self.options[src_key]
     source.setdefault('signature', self.resolve_signature(source))
Code example #9
File: test_informer.py Project: gotcha/compoze
    def test_show_distributions_skips_multi_develop_dists(self):
        import re
        from pkg_resources import Requirement
        logged = []
        target, path, cheeseshop_path = self._makeDirs()
        compoze = Requirement.parse('compoze')
        nose = Requirement.parse('nose')
        cheeseshop = self._makeIndex(compoze, compoze, nose,
                                     target=cheeseshop_path,
                                     develop=('compoze',))
        local = self._makeIndex(compoze, target=target)
        def _factory(index_url, search_path=None):
            if index_url == 'http://pypi.python.org/simple':
                assert search_path is None
                return cheeseshop
            if index_url == target:
                assert search_path is ()
                return local
            raise ValueError(index_url)
        informer = self._makeOne('--verbose', 'compoze', 'nose',
                                 logger=logged.append)
        informer.index_factory = _factory
        informer.tmpdir = target

        informer.show_distributions()

        log = '\n'.join(logged)
        skipped = re.compile(r'Skipping.*<compoze')
        found = re.compile(r'nose: /tmp/.*/cheeseshop/nose')
        self.failUnless(skipped.search(log))
        self.failUnless(found.search(log))
Code example #10
File: test_informer.py Project: gotcha/compoze
    def test_show_distributions_multiple_w_only_best(self):
        import re
        from pkg_resources import Requirement
        logged = []
        target, path, cheeseshop_path = self._makeDirs()
        nose = Requirement.parse('nose')
        nose2 = Requirement.parse('nose')
        cheeseshop = self._makeIndex(nose, nose2,
                                     target=cheeseshop_path,
                                    )
        def _factory(index_url, search_path=None):
            assert index_url == 'http://pypi.python.org/simple'
            assert search_path is None
            return cheeseshop
        informer = self._makeOne('--verbose', '--show-only-best',
                                 'nose',
                                 logger=logged.append)
        informer.index_factory = _factory
        informer.tmpdir = target

        informer.show_distributions()

        log = '\n'.join(logged)
        found = re.findall(r'nose: /tmp/.*/cheeseshop/nose', log)
        self.assertEqual(len(found), 1)
Code example #11
File: version.py Project: acimpoeru/ParaViewConnect
def read_release_version():
    try:
        filename = os.path.join(sys.prefix,
                                "share", __application__, "RELEASE-VERSION")
        if os.path.isfile(filename):
            f = open(filename, 'r')
        else:
            filename = os.path.join(sys.prefix,
                                    "local", "share", __application__,
                                    "RELEASE-VERSION")
            if os.path.isfile(filename):
                f = open(filename, 'r')
            else:
                filename = resource_filename(
                    Requirement.parse(__application__),
                    "share/"+__application__+"/RELEASE-VERSION"
                    )
                if os.path.isfile(filename):
                    f = open(filename, 'r')
                else:
                    filename = resource_filename(
                        Requirement.parse(__application__),
                        "RELEASE-VERSION")
                    f = open(filename, 'r')
        try:
            version = f.readlines()[0]
            return version.strip()

        finally:
            f.close()

    except Exception, e:
        print 'read_release_version exception: ' + str(e)
        return None
Code example #12
File: test_informer.py Project: tseaver/compoze
    def test_show_distributions_skips_develop_dists(self):
        import re
        from pkg_resources import Requirement

        logged = []
        target, path, cheeseshop_path = self._makeDirs()
        compoze = Requirement.parse("compoze")
        nose = Requirement.parse("nose")
        cheeseshop = self._makeIndex(compoze, nose, target=cheeseshop_path, develop=("compoze",))
        local = self._makeIndex(compoze, target=target)

        def _factory(index_url, search_path=None):
            if index_url == "http://pypi.python.org/simple":
                assert search_path is None
                return cheeseshop
            if index_url == target:
                assert search_path is ()
                return local
            raise ValueError(index_url)

        informer = self._makeOne("--verbose", "compoze", "nose", logger=logged.append)
        informer.index_factory = _factory
        informer.tmpdir = target

        informer.show_distributions()

        log = "\n".join(logged)
        skipped = re.compile(r"Skipping.*<compoze")
        found = re.compile(r"nose: /tmp/.*/cheeseshop/nose")
        self.assertTrue(skipped.search(log))
        self.assertTrue(found.search(log))
Code example #13
File: helperFuncs.py Project: hjkgrp/molSimplify
def runtestMulti(tmpdir,name,threshMLBL,threshLG,threshOG):
    infile = resource_filename(Requirement.parse("molSimplify"),"tests/inputs/"+name+".in")
    newinfile = parse4test(infile,tmpdir,True)
    args =['main.py','-i', newinfile]
    #Need to make the ligand file visible to the input file
    startgen(args,False,False)
    myjobdir=jobdir(infile)+"/"
    print "Test input file: ", newinfile
    print "Test output files are generated in ",myjobdir
    refdir = resource_filename(Requirement.parse("molSimplify"),"tests/refs/"+name+"/")
    [passMultiFileCheck,myfiles]=checkMultiFileGen(myjobdir,refdir)
    pass_structures=[]
    if passMultiFileCheck==False:
        print "Test failed for checking number and names of generated files. Test ends"
    else:
        print "Checking each generated structure..."
        for f in myfiles:
            if ".xyz" in f:
                r=f.replace(".xyz",".report")
                output_xyz = myjobdir + f
                ref_xyz = refdir+f
                output_report = myjobdir+r
                ref_report = refdir+r
                print "Output xyz file: ", output_xyz
                print "Reference xyz file: ", ref_xyz
                print "Test report file: ", output_report
                print "Reference report file: ", ref_report
                pass_xyz=compareGeo(output_xyz,ref_xyz,threshMLBL,threshLG,threshOG)
                [passNumAtoms,passMLBL,passLG,passOG] = pass_xyz
                pass_report = compare_report_new(output_report,ref_report)
                pass_structures.append([f, passNumAtoms, passMLBL, passLG, passOG, pass_report])
    return [passMultiFileCheck,pass_structures]
Code example #14
File: test_resources.py Project: haloteam/halo
 def testOptionsAndHashing(self):
     r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
     r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
     assert r1 == r2
     assert r1.extras == ("foo", "bar")
     assert r2.extras == ("bar", "foo")  # extras are normalized
     assert hash(r1) == hash(r2)
     assert hash(r1) == hash(("twisted", packaging.specifiers.SpecifierSet(">=1.2"), frozenset(["foo", "bar"])))
Code example #15
File: html_logger.py Project: ScotLowery2/egat
    def copy_resources_to_log_dir(log_dir):
        """Copies the necessary static assets to the log_dir and returns the path 
        of the main css file."""
        css_path = resource_filename(Requirement.parse("egat"), "/egat/data/default.css")
        header_path = resource_filename(Requirement.parse("egat"), "/egat/data/egat_header.png")
        shutil.copyfile(css_path, log_dir + "/style.css")
        shutil.copyfile(header_path, log_dir + "/egat_header.png")

        return log_dir + os.sep + "style.css"
Code example #16
def load_plugins(build_configuration, plugins, working_set):
  """Load named plugins from the current working_set into the supplied build_configuration

  "Loading" a plugin here refers to calling registration methods -- it is assumed each plugin
  is already on the path and an error will be thrown if it is not. Plugins should define their
  entrypoints in the `pantsbuild.plugin` group when configuring their distribution.

  Like source backends, the `build_file_aliases`, `global_subsystems` and `register_goals` methods
  are called if those entry points are defined.

  * Plugins are loaded in the order they are provided. *

  This is important as loading can add, remove or replace existing tasks installed by other plugins.

  If a plugin needs to assert that another plugin is registered before it, it can define an
  entrypoint "load_after" which can return a list of plugins which must have been loaded before it
  can be loaded. This does not change the order or what plugins are loaded in any way -- it is
  purely an assertion to guard against misconfiguration.

  :param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases).
  :param list<str> plugins: A list of plugin names optionally with versions, in requirement format.
                            eg ['widgetpublish', 'widgetgen==1.2'].
  :param WorkingSet working_set: A pkg_resources.WorkingSet to load plugins from.
  """
  loaded = {}
  for plugin in plugins:
    req = Requirement.parse(plugin)
    dist = working_set.find(req)

    if not dist:
      raise PluginNotFound('Could not find plugin: {}'.format(req))

    entries = dist.get_entry_map().get('pantsbuild.plugin', {})

    if 'load_after' in entries:
      deps = entries['load_after'].load()()
      for dep_name in deps:
        dep = Requirement.parse(dep_name)
        if dep.key not in loaded:
          raise PluginLoadOrderError('Plugin {0} must be loaded after {1}'.format(plugin, dep))

    if 'build_file_aliases' in entries:
      aliases = entries['build_file_aliases'].load()()
      build_configuration.register_aliases(aliases)

    if 'register_goals' in entries:
      entries['register_goals'].load()()

    if 'global_subsystems' in entries:
      subsystems = entries['global_subsystems'].load()()
      build_configuration.register_optionables(subsystems)

    if 'rules' in entries:
      rules = entries['rules'].load()()
      build_configuration.register_rules(rules)

    loaded[dist.as_requirement().key] = dist
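A plugin satisfies the entry-point contract described in the docstring above through its packaging metadata. A minimal sketch, assuming a hypothetical plugin named widgetpublish whose registration functions live in a widgetpublish.register module:

from setuptools import setup

setup(
    name='widgetpublish',
    version='1.2',
    packages=['widgetpublish'],
    entry_points={
        # The group name comes from the docstring above; the module path
        # and function names are assumptions for illustration.
        'pantsbuild.plugin': [
            'build_file_aliases = widgetpublish.register:build_file_aliases',
            'register_goals = widgetpublish.register:register_goals',
            'load_after = widgetpublish.register:load_after',
        ],
    },
)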
Code example #17
File: test_resources.py Project: haloteam/halo
    def testSetuptoolsProjectName(self):
        """
        The setuptools project should implement the setuptools package.
        """

        assert Requirement.parse("setuptools").project_name == "setuptools"
        # setuptools 0.7 and higher means setuptools.
        assert Requirement.parse("setuptools == 0.7").project_name == "setuptools"
        assert Requirement.parse("setuptools == 0.7a1").project_name == "setuptools"
        assert Requirement.parse("setuptools >= 0.7").project_name == "setuptools"
Code example #18
 def testBasics(self):
     r = Requirement.parse("Twisted>=1.2")
     self.assertEqual(str(r),"Twisted>=1.2")
     self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')")
     self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ()))
     self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ()))
     self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ()))
     self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ()))
     self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ()))
     self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2"))
Code example #19
 def testBasics(self):
     r = Requirement.parse("Twisted>=1.2")
     assert str(r) == "Twisted>=1.2"
     assert repr(r) == "Requirement.parse('Twisted>=1.2')"
     assert r == Requirement("Twisted", [('>=','1.2')], ())
     assert r == Requirement("twisTed", [('>=','1.2')], ())
     assert r != Requirement("Twisted", [('>=','2.0')], ())
     assert r != Requirement("Zope", [('>=','1.2')], ())
     assert r != Requirement("Zope", [('>=','3.0')], ())
     assert r != Requirement.parse("Twisted[extras]>=1.2")
Code example #20
File: test_iterator.py Project: pfmoore/pex
def test_empty_iteration():
  crawler_mock = mock.create_autospec(Crawler, spec_set=True)
  crawler_mock.crawl.return_value = []
  iterator = Iterator(crawler=crawler_mock)

  assert list(iterator.iter(Requirement.parse('foo'))) == []
  assert len(crawler_mock.crawl.mock_calls) == 1
  _, args, kwargs = crawler_mock.crawl.mock_calls[0]
  assert list(args[0]) == list(PyPIFetcher().urls(Requirement.parse('foo')))
  assert kwargs == {'follow_links': False}
Code example #21
File: test_homebrew.py Project: wangbinxiang/flocker
    def test_get_requirements(self):
        """
        It is possible to get a list of requirements from a requirements.txt
        file.
        """
        requirements_path = FilePath(self.mktemp())
        requirements_path.setContent("\n".join(["eliot==0.7.0", "Twisted==15.2.0"]))

        requirements = get_requirements(requirements_path)

        self.assertEqual(requirements, [Requirement.parse("eliot==0.7.0"), Requirement.parse("Twisted==15.2.0")])
Code example #22
File: test_resources.py Project: ArchAiA/skillshare
 def testOptionsAndHashing(self):
     r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
     r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
     self.assertEqual(r1,r2)
     self.assertEqual(r1.extras, ("foo","bar"))
     self.assertEqual(r2.extras, ("bar","foo"))  # extras are normalized
     self.assertEqual(hash(r1), hash(r2))
     self.assertEqual(
         hash(r1), hash(("twisted", packaging.specifiers.SpecifierSet(">=1.2"),
                         frozenset(["foo","bar"])))
     )
Code example #23
    def _setuptools_req(self, req_rhs):
        """Counter distribute's hack that replaces setuptools requirements.

        if distribute is not around, this is simply a normal requirement
        parsing.
        """
        if not DISTRIBUTE:
            return Requirement.parse('setuptools' + req_rhs)

        req = Requirement.parse('willbesetuptools' + req_rhs)
        req.key = req.project_name = 'setuptools'
        return req
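A hedged usage sketch from inside the owning class (the version spec is illustrative): whichever branch runs, the result behaves like a plain setuptools requirement.

req = self._setuptools_req('>=0.6c11')
assert req.project_name == 'setuptools'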
Code example #24
File: test_main.py Project: PyAr/fades
    def test_two_different(self):
        requirement_files = [create_tempfile(self, ['1', '2'])]
        manual_dependencies = ['vcs::3', 'vcs::4']

        d = main.consolidate_dependencies(needs_ipython=False, child_program=None,
                                          requirement_files=requirement_files,
                                          manual_dependencies=manual_dependencies)

        self.assertEqual(d, {
            'pypi': {Requirement.parse('1'), Requirement.parse('2')},
            'vcs': {parsing.VCSDependency('3'), parsing.VCSDependency('4')}
        })
Code example #25
 def __init__(self, config, logger=None):
   self._path = config.get('python-setup', 'interpreter_cache')
   setuptools_req = 'setuptools==%s' % config.get('python-setup', 'setuptools_version')
   try:
     self._setuptools_requirement = Requirement.parse(setuptools_req, replacement=False)
   except TypeError:
     self._setuptools_requirement = Requirement.parse(setuptools_req)
   safe_mkdir(self._path)
   self._fetchers = MultiResolver.fetchers(config)
   self._crawler = MultiResolver.crawler(config)
   self._interpreters = set()
   self._logger = logger or (lambda msg: True)
Code example #26
File: test_main.py Project: PyAr/fades
    def test_two_same_repo(self):
        requirement_files = [create_tempfile(self, ['1', '2'])]
        manual_dependencies = ['3', '4']

        d = main.consolidate_dependencies(needs_ipython=False, child_program=None,
                                          requirement_files=requirement_files,
                                          manual_dependencies=manual_dependencies)

        self.assertDictEqual(d, {
            'pypi': {Requirement.parse('1'), Requirement.parse('2'), Requirement.parse('3'),
                     Requirement.parse('4')}
        })
Code example #27
def grab_distrib(req, index=None, dest='.', search_pypi=True):
    """\
Downloads a distribution from the given package index(es) based on the
given requirement string(s). Downloaded distributions are placed in the
specified destination or the current directory if no destination is
specified. If a distribution cannot be found in the given index(es), the
Python Package Index will be searched as a last resort unless
search_pypi is False. This does NOT install the distribution.
    """
    
    # allow multiple package indexes to be specified
    if index is None:
        index = []
    elif isinstance(index, basestring):
        index = [index]
    # else just assume it's some iterator of indexes
        
    # add PyPI as last place to search if it wasn't already specified
    if search_pypi and _pypi not in index and (_pypi+'/') not in index:
        index.append(_pypi)
    
    # allow specification of single or multiple requirements    
    if isinstance(req, basestring):
        reqs = [Requirement.parse(req)]
    elif isinstance(req, Requirement):
        reqs = [req]
    else:
        reqs = []
        for rr in req:
            if isinstance(rr, basestring):
                reqs.append(Requirement.parse(rr))
            elif isinstance(rr, Requirement):
                reqs.append(rr)
            else:
                raise TypeError("supplied requirement arg must be a string"+
                                " or a Requirement, but given type is %s" %
                                type(rr))
        
    index_list = [PackageIndex(idx,search_path=[]) for idx in index]
    
    for req in reqs:
        fetched = None
        for idx in index_list:
            _logger.info('Looking for %s at package index %s' % (req, idx.index_url))
            fetched = idx.download(req, dest)
            if fetched:
                _logger.info('    %s successfully downloaded' % fetched)
                break
        else:
            _logger.error("couldn't find distrib for %s" % req)
        
    return fetched
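A minimal usage sketch (the index URL and requirement are illustrative): fetch one distribution into ./dists, falling back to PyPI if the custom index does not carry it.

grab_distrib('foo>=1.0', index='http://example.com/dists', dest='dists')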
Code example #28
    def testVersionEquality(self):
        r1 = Requirement.parse("foo==0.3a2")
        r2 = Requirement.parse("foo!=0.3a4")
        d = Distribution.from_filename

        assert d("foo-0.3a4.egg") not in r1
        assert d("foo-0.3a1.egg") not in r1
        assert d("foo-0.3a4.egg") not in r2

        assert d("foo-0.3a2.egg") in r1
        assert d("foo-0.3a2.egg") in r2
        assert d("foo-0.3a3.egg") in r2
        assert d("foo-0.3a5.egg") in r2
Code example #29
 def testOptionsAndHashing(self):
     r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
     r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
     r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0")
     self.assertEqual(r1,r2)
     self.assertEqual(r1,r3)
     self.assertEqual(r1.extras, ("foo","bar"))
     self.assertEqual(r2.extras, ("bar","foo"))  # extras are normalized
     self.assertEqual(hash(r1), hash(r2))
     self.assertEqual(
         hash(r1), hash(("twisted", ((">=",parse_version("1.2")),),
                         frozenset(["foo","bar"])))
     )
Code example #30
File: lucky.py Project: kuba/SIS
    def current_week_pdf(self):
        """Lucky numbers for current or next week in pdf format."""
        change_hour = 15
        numbers = LuckyNumber.current_week(change_hour)

        if len(numbers) == 0:
            return redirect(url('lucky_week'))

        # Register fonts
        ubuntu_r = resource_filename(Requirement.parse("SIS"), "resources/Ubuntu-R.ttf")
        ubuntu_b = resource_filename(Requirement.parse("SIS"), "resources/Ubuntu-B.ttf")
        pdfmetrics.registerFont(TTFont('Ubuntu', ubuntu_r))
        pdfmetrics.registerFont(TTFont('Ubuntu Bold', ubuntu_b))

        numbers_pdf = StringIO.StringIO()
        doc = SimpleDocTemplate(numbers_pdf, pagesize=A4, topMargin=A4[1]*0.26)
        doc.author = 'SIS'
        doc.title = 'Szczęśliwy numerek'

        data = []
        for number in numbers:
            date = number.date.strftime("%d.%m.%y")
            data.append(('{0} -'.format(date), str(number.number)))

        table = Table(data)
        table.setStyle(TableStyle([
            ('FONT', (0, 0), (0, -1), 'Ubuntu', 80),
            ('FONT', (1, 0), (1, -1), 'Ubuntu Bold', 80),
        ]))

        def header_and_footer(canvas, document):
            canvas.saveState()
            size = document.pagesize
            center = size[0] / 2

            canvas.setFont('Ubuntu', 80)
            canvas.drawCentredString(center,
                size[1] - document.topMargin / 2, "SZCZĘŚLIWY")
            canvas.drawCentredString(center, size[1] - document.topMargin + 20, 'NUMEREK')

            canvas.setFont('Ubuntu', 15)
            canvas.drawRightString(size[0] - document.rightMargin,
                document.bottomMargin - 20, "Samorząd Uczniowski")

            canvas.restoreState()

        doc.build([table], onFirstPage=header_and_footer,
            onLaterPages=header_and_footer)

        response.headers['Content-type'] = 'application/pdf'
        return numbers_pdf.getvalue()
Code example #31
def bind_load(userbind, bindcores):
    globs = globalvars()
    if '~' in userbind:
        homedir = os.path.expanduser("~")
        userbind = userbind.replace('~', homedir)
    emsg = False
    bind = mol3D()  # initialize binding molecule
    bsmi = False  # flag for smiles
    ### check if binding molecule exists in dictionary
    if userbind in bindcores.keys():
        # load bind mol file (with hydrogens)
        #        fbind = installdir+'Bind/'+bindcores[userbind][0]
        if globs.custom_path:
            fbind = globs.custom_path + "/Bind/" + bindcores[userbind][0]
        else:
            fbind = resource_filename(
                Requirement.parse("molSimplify"),
                "molSimplify/Bind/" + bindcores[userbind][0])
        # check if bind xyz/mol file exists
        if not glob.glob(fbind):
            emsg = "We can't find the binding species structure file %s right now! Something is amiss. Exiting..\n" % fbind
            print emsg
            return False, False, emsg
        if ('.xyz' in fbind):
            bind.OBMol = bind.getOBMol(fbind, 'xyzf')
        elif ('.mol' in fbind):
            bind.OBMol = bind.getOBMol(fbind, 'molf')
        elif ('.smi' in fbind):
            bind.OBMol = bind.getOBMol(fbind, 'smif')
        bind.charge = bind.OBMol.GetTotalCharge()
    ### load from file
    elif ('.mol' in userbind or '.xyz' in userbind or '.smi' in userbind):
        if glob.glob(userbind):
            ftype = userbind.split('.')[-1]
            # try and catch error if conversion doesn't work
            try:
                bind.OBMol = bind.getOBMol(userbind,
                                           ftype + 'f')  # convert from file
                bind.charge = bind.OBMol.GetTotalCharge()
            except IOError:
                emsg = 'Failed converting file ' + userbind + ' to molecule..Check your file.\n'
                return False, False, emsg
            bind.ident = userbind.rsplit('/')[-1]
            bind.ident = bind.ident.split('.' + ftype)[0]
        else:
            emsg = 'Binding species file ' + userbind + ' does not exist. Exiting..\n'
            return False, False, emsg
    ### if not, try converting from SMILES
    else:
        # check for transition metals
        userbind = checkTMsmiles(userbind)
        # try and catch error if conversion doesn't work
        try:
            bind.OBMol = bind.getOBMol(userbind, 'smi')  # convert from smiles
            bind.charge = bind.OBMol.GetTotalCharge()
            bsmi = True
            bind.ident = 'smi'
        except IOError:
            emsg = "We tried converting the string '%s' to a molecule but it wasn't a valid SMILES string.\n" % userbind
            emsg += "Furthermore, we couldn't find the binding species structure: '%s' in the binding species dictionary. Try again!\n" % userbind
            print emsg
            return False, False, emsg
    return bind, bsmi, emsg
Code example #32
    def add_requirement(self, r, nodeps=False, parent=None, level=1):
        """Add a new requirement to the depgraph
        
        Requirements for this requirements are automatically added.
        
        Return True unless if this requirement (`r`) is already satisfied
        recursively.
        
        Raises PackageNotFound if not distribution found matching r.
        """
        if not isinstance(r, Requirement):
            r = Requirement.parse(r)
        if str(r) in self._added_requirements:
            return
        self._added_requirements.add(str(r))

        LOG.debug('ADDREQ:%s%s, parent=%s' % (' ' * level, r, parent))
        name = req_name(r)
        ret = False

        # Current list of requirements for this package; these requirements all
        # must be satisfied.
        to_satisfy = [r]
        node = self.nodes.get(name, None)
        if node:
            to_satisfy.extend(sum(self.edges[name].values(), []))

        releases = self.get_available_distributions(name)
        if not releases:
            raise PackageNotFound(r, parent)

        # Note: we may have installed packages that is not in the ``releases``
        # eg: numpy-2.x maybe installed, yet, the repo (and thus ``releases``)
        # may have only 1.5 and 1.4.
        satisfying_packages = [
            p for p in releases \
            if all([p.version in req for req in to_satisfy])]

        if not satisfying_packages:
            raise PackageNotFound(req2str(*to_satisfy), parent)

        p = satisfying_packages[0]
        if node:
            # This package was already marked for install(ed); should we upgrade
            # or downgrade to `p`?

            # node_pkg: package currently marked to be installed.
            node_pkg = node.pkg1 or node.pkg
            assert node_pkg
            change = True

            if p.version_key == node_pkg.version_key:
                # No change in version; skip.
                # TEST: 'pypm install --nodeps fabric' followed by
                #       'pypm install fabrics' should ideally install the deps now
                change = False
            elif p.version_key < node_pkg.version_key:
                # Downgrade, unless the (possibly latest) installed package
                # satisfies all requirements
                # TEST:
                #   numpy-2 is installed, but repo has only 1.5
                #   'pypm install matpotlib' should NOT downgrade numpy-2
                # TEST:
                #   'pypm install numpy<1.999' should downgrade it, though
                for req in to_satisfy:
                    if node_pkg.version not in req:
                        # So, it doesn't. We must downgrade.
                        break
                else:
                    # So, let's keep the installed/marked on. The user seems
                    # to have installed a latest package that is not in the
                    # configured repo.
                    change = False

            if change:
                # downgrade or upgrade
                node = self._mark_for_change(name, p, parent, r)
                if node.pkg1:
                    ret = True

                    # TODO: adjust requirements
                    rl0 = tuple(node_pkg.get_requirements(r.extras))
                    rl1 = tuple(node.pkg1.get_requirements(r.extras))
                    if not nodeps and rl0 != rl1:
                        msg = ((
                            'need to implement requirements differing across versions;'
                            '\n    %s-%s [%s]\n -> %s-%s [%s]') %
                               (name, node_pkg.printable_version,
                                req2str(*rl0), name,
                                node.pkg1.printable_version, req2str(*rl1)))
                        LOG.warn(msg)
            else:
                # Just mark anyway to process its (missing?) dependencies
                self._mark_new_requirement(name, parent, r)
        else:
            # This package is not marked for install; do it now.
            node = self._mark_for_install(name, p, parent, r)
            ret = True

        if not nodeps:
            pkg = node.pkg1 or node.pkg  # check requirements even if the package is installed
            assert pkg
            for r in pkg.get_requirements(r.extras):
                ret = any([
                    ret,
                    self.add_requirement(r,
                                         nodeps=nodeps,
                                         parent=node and node.name,
                                         level=level + 1)
                ])

        return ret
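A hedged usage sketch (the depgraph instance and requirement string are assumptions): plain strings are parsed via Requirement.parse, and the return value reports whether anything new was marked for install or change.

changed = depgraph.add_requirement('numpy>=1.4')  # may raise PackageNotFound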
Code example #33
# -*- coding: latin_1 -*-
# test_charset.py
"""unittest cases for dicom.charset module"""
# Copyright (c) 2008 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
#    See the file license.txt included with this distribution, also
#    available at http://pydicom.googlecode.com

import unittest
import dicom
import os.path

from pkg_resources import Requirement, resource_filename
testcharset_dir = resource_filename(Requirement.parse("pydicom"),
                                    "dicom/testcharsetfiles")

latin1_file = os.path.join(testcharset_dir, "chrFren.dcm")
jp_file = os.path.join(testcharset_dir, "chrH31.dcm")
multiPN_file = os.path.join(testcharset_dir, "chrFrenMulti.dcm")
sq_encoding_file = os.path.join(testcharset_dir, "chrSQEncoding.dcm")

test_dir = resource_filename(Requirement.parse("pydicom"), "dicom/testfiles")
normal_file = os.path.join(test_dir, "CT_small.dcm")


class charsetTests(unittest.TestCase):
    def testLatin1(self):
        """charset: can read and decode latin_1 file........................"""
        ds = dicom.read_file(latin1_file)
        ds.decode()
        # Make sure don't get unicode encode error on converting to string
Code example #34
File: _fetch.py Project: biocore/q2-american-gut
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------

from pkg_resources import Requirement, resource_filename

import qiime2
from q2_types.feature_data import DNAIterator
import pandas as pd
import skbio
import redbiom.search
import redbiom.summarize
import redbiom.fetch

from q2_american_gut._type import QiitaMetadata

CLASSIFIER = (Requirement.parse('q2_american_gut'),
              'q2_american_gut/assets/gg-13-8-99-515-806-nb-classifier.qza')
GG_TREE = (Requirement.parse('q2_american_gut'),
           'q2_american_gut/assets/97_otus.tree')
DEBUG_PHY = (Requirement.parse('q2_american_gut'),
             'q2_american_gut/assets/reference_phylogeny_tiny.qza')
DEBUG_ALN = (Requirement.parse('q2_american_gut'),
             'q2_american_gut/assets/reference_alignment_tiny.qza')


def _determine_context(processing_type, trim_length, instrument='illumina'):
    """Determine the processing context in redbiom to use

    Parameters
    ----------
    processing_type : str, {deblur, closed-reference}
Code example #35
 def get_module_include_path(self, name):
     from pkg_resources import Requirement, resource_filename
     return resource_filename(Requirement.parse(name), "%s/include" % name)
Code example #36
    def parse_line(cls, line: str) -> 'Requirement':
        """
        Parses a Requirement from a non-editable requirement.

        See: pip/req.py:from_line()

        :param line: a "non-editable" requirement
        :returns: a Requirement instance for the given line
        :raises: ValueError on an invalid requirement
        """

        req = cls(line)

        vcs_match: Optional[Match[str]] = VCS_REGEX.match(line)
        uri_match: Optional[Match[str]] = URI_REGEX.match(line)
        local_match: Optional[Match[str]] = LOCAL_REGEX.match(line)

        if vcs_match is not None:
            groups = vcs_match.groupdict()
            if groups.get('login'):
                req.uri = '{scheme}://{login}@{path}'.format(
                    **groups)  # type: ignore
            else:
                req.uri = '{scheme}://{path}'.format(**groups)  # type: ignore
            req.revision = groups['revision']  # type: ignore
            if groups['fragment']:
                fragment = parse_fragment(groups['fragment'])
                egg = fragment.get('egg')
                req.name, req.extras = parse_extras_require(
                    egg)  # type: ignore
                req.hash_name, req.hash = get_hash_info(
                    fragment)  # type: ignore
                req.subdirectory = fragment.get('subdirectory')  # type: ignore
            for vcs in VCS:
                if str(req.uri).startswith(vcs):
                    req.vcs = vcs  # type: ignore
        elif uri_match is not None:
            groups = uri_match.groupdict()
            req.uri = '{scheme}://{path}'.format(**groups)  # type: ignore
            if groups['fragment']:
                fragment = parse_fragment(groups['fragment'])
                egg = fragment.get('egg')
                req.name, req.extras = parse_extras_require(
                    egg)  # type: ignore
                req.hash_name, req.hash = get_hash_info(
                    fragment)  # type: ignore
                req.subdirectory = fragment.get('subdirectory')  # type: ignore
            if groups['scheme'] == 'file':
                req.local_file = True
        elif '#egg=' in line:
            # Assume a local file match
            assert local_match is not None, 'This should match everything'
            groups = local_match.groupdict()
            req.local_file = True
            if groups['fragment']:
                fragment = parse_fragment(groups['fragment'])
                egg = fragment.get('egg')
                name, extras = parse_extras_require(egg)
                req.name = fragment.get('egg')  # type: ignore
                req.hash_name, req.hash = get_hash_info(
                    fragment)  # type: ignore
                req.subdirectory = fragment.get('subdirectory')  # type: ignore
            req.path = groups['path']  # type: ignore
        else:
            # This is a requirement specifier.
            # Delegate to pkg_resources and hope for the best
            req.specifier = True
            pkg_req = Req.parse(line)
            req.name = pkg_req.unsafe_name  # type: ignore
            req.extras = list(pkg_req.extras)
            req.specs = pkg_req.specs  # type: ignore
        return req
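A minimal sketch of the three input shapes parse_line() distinguishes (the values are illustrative):

plain = Requirement.parse_line('flask>=1.0')  # bare specifier
vcs = Requirement.parse_line(
    'git+https://github.com/example/pkg.git@v1.0#egg=pkg')  # VCS URI
local = Requirement.parse_line('./downloads/pkg.tar.gz#egg=pkg')  # local file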
Code example #37
# FMSPy - Copyright (c) 2009 Andrey Smirnov.
#
# See COPYRIGHT for details.
"""
Configuration file handling.
"""

import ConfigParser

config = ConfigParser.SafeConfigParser()
config_files = []

try:
    from pkg_resources import Requirement, resource_filename, DistributionNotFound

    try:
        config_files.append(
            resource_filename(Requirement.parse("fmspy"), "etc/fmspy.cfg"))
    except DistributionNotFound:
        pass

except ImportError:
    pass

config_files.extend(
    ['/etc/fmspy.cfg', '/usr/local/etc/fmspy.cfg', 'fmspy.cfg'])

config.read(config_files)
Code example #38
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    parser = OptionParser()
    parser.add_option(
        "--dev",
        action="store_true",
        dest='dev',
        help=
        "if present, a development script will be generated instead of a release script"
    )
    parser.add_option("--dest",
                      action="store",
                      type="string",
                      dest='dest',
                      help="specify destination directory",
                      default='.')
    parser.add_option(
        "-f",
        "--findlinks",
        action="store",
        type="string",
        dest="findlinks",
        default='http://openmdao.org/dists',
        help=
        "default URL where openmdao packages and dependencies are searched for first (before PyPI)"
    )

    (options, args) = parser.parse_args(args)

    if len(args) > 0:
        print 'unrecognized args: %s' % args
        parser.print_help()
        sys.exit(-1)

    if options.dev:
        openmdao_packages.extend(openmdao_dev_packages)
        sout = StringIO.StringIO()
        pprint.pprint(openmdao_packages, sout)
        pkgstr = sout.getvalue()
        make_dev_eggs = """
        # now install dev eggs for all of the openmdao packages
        topdir = os.path.abspath(os.path.dirname(__file__))
        startdir = os.getcwd()
        absbin = os.path.abspath(bin_dir)
        openmdao_packages = %s
        try:
            for pkg, pdir, _ in openmdao_packages:
                if options.nogui and pkg == 'openmdao.gui':
                    continue
                os.chdir(join(topdir, pdir, pkg))
                cmdline = [join(absbin, 'python'), 'setup.py', 
                           'develop', '-N'] + cmds
                try:
                    call_subprocess(cmdline, show_stdout=True, raise_on_returncode=True)
                except OSError:
                    failures.append(pkg)
        finally:
            os.chdir(startdir)
        """ % pkgstr
    else:  # making a release installer
        make_dev_eggs = ''

    script_str = """

openmdao_prereqs = %(openmdao_prereqs)s

def extend_parser(parser):
    parser.add_option("-r","--req", action="append", type="string", dest='reqs', 
                      help="specify additional required distributions", default=[])
    parser.add_option("--noprereqs", action="store_true", dest='noprereqs', 
                      help="don't check for any prerequisites, e.g., numpy or scipy")
    parser.add_option("--nogui", action="store_true", dest='nogui', 
                      help="don't install the openmdao graphical user interface or its dependencies")
    parser.add_option("-f", "--findlinks", action="store", type="string", 
                      dest="findlinks",
                      help="default URL where openmdao packages and dependencies are searched for first (before PyPI)")
    parser.add_option("--testurl", action="store", type="string", dest='testurl', 
                      help="specify url where openmdao.* distribs are located (used for release testing only)")
                      
    # hack to force use of setuptools for now because using 'distribute' causes issues
    os.environ['VIRTUALENV_USE_SETUPTOOLS'] = '1'

%(adjust_options)s

def _single_install(cmds, req, bin_dir, failures, dodeps=False):
    global logger
    if dodeps:
        extarg = '-Z'
    else:
        extarg = '-NZ'
    cmdline = [join(bin_dir, 'easy_install'), extarg] + cmds + [req]
        # pip seems more robust than easy_install, but won't install binary distribs :(
        #cmdline = [join(bin_dir, 'pip'), 'install'] + cmds + [req]
    #logger.debug("running command: %%s" %% ' '.join(cmdline))
    try:
        call_subprocess(cmdline, show_stdout=True, raise_on_returncode=True)
    except OSError:
        failures.append(req)

def after_install(options, home_dir):
    global logger, openmdao_prereqs
    
    reqs = %(reqs)s
    guireqs = %(guireqs)s
    
    if options.findlinks is None:
        url = '%(url)s'
    else:
        url = options.findlinks
    # for testing we allow one to specify a url where the openmdao
    # package dists are located that may be different from the main
    # url where the dependencies are located. We do this because
    # setuptools only allows us to specify a single -f parameter,
    # which would force us to mirror the entire openmdao distribution
    # directory in order to test our releases because setuptools will
    # barf if it can't find everything in the same location (or on PyPI).
    # TODO: get rid of this after we quit using setuptools.
    if options.testurl:
        openmdao_url = options.testurl
    else:
        openmdao_url = url
    etc = join(home_dir, 'etc')
    if sys.platform == 'win32':
        lib_dir = join(home_dir, 'Lib')
        bin_dir = join(home_dir, 'Scripts')
    else:
        lib_dir = join(home_dir, 'lib', py_version)
        bin_dir = join(home_dir, 'bin')

    if not os.path.exists(etc):
        os.makedirs(etc)
        
    failed_imports = []
    for pkg in openmdao_prereqs:
        try:
            __import__(pkg)
        except ImportError:
            failed_imports.append(pkg)
    if failed_imports:
        if options.noprereqs:
            logger.warn("\\n**** The following prerequisites could not be imported: %%s." %% failed_imports)
            logger.warn("**** As a result, some OpenMDAO components will not work.")
        else:
            logger.error("ERROR: the following prerequisites could not be imported: %%s." %% failed_imports)
            logger.error("These must be installed in the system level python before installing OpenMDAO.")
            logger.error("To run a limited version of OpenMDAO without the prerequisites, try 'python %%s --noprereqs'" %% __file__)
            sys.exit(-1)
    
    cmds = ['-f', url]
    openmdao_cmds = ['-f', openmdao_url]
    try:
        allreqs = reqs[:]
        failures = []
        if not options.nogui:
            allreqs = allreqs + guireqs
            
        for req in allreqs:
            if req.startswith('openmdao.'):
                _single_install(openmdao_cmds, req, bin_dir, failures)
            else:
                _single_install(cmds, req, bin_dir, failures)
        
%(make_dev_eggs)s

        # add any additional packages specified on the command line
        for req in options.reqs:
            _single_install(cmds, req, bin_dir, failures, dodeps=True)

    except Exception as err:
        logger.error("ERROR: build failed: %%s" %% str(err))
        sys.exit(-1)

    abshome = os.path.abspath(home_dir)
    
    if failures:
        failmsg = ' (with failures).'
        failures.sort()
        print '\\n\\n***** The following packages failed to install: %%s.' %% failures
    else:
        failmsg = '.'
    print '\\n\\nThe OpenMDAO virtual environment has been installed in\\n %%s%%s' %% (abshome, failmsg)
    print '\\nFrom %%s, type:\\n' %% abshome
    if sys.platform == 'win32':
        print r'Scripts\\activate'
    else:
        print '. bin/activate'
    print "\\nto activate your environment and start using OpenMDAO."
    
    sys.exit(1 if failures else 0)
    """

    reqs = set()
    guireqs = set()

    version = '?.?.?'
    excludes = set(['setuptools', 'distribute', 'SetupDocs'] +
                   openmdao_prereqs)
    dists = working_set.resolve([
        Requirement.parse(r[0]) for r in openmdao_packages
        if r[0] != 'openmdao.gui'
    ])
    distnames = set([d.project_name for d in dists]) - excludes
    gui_dists = working_set.resolve([Requirement.parse('openmdao.gui')])
    guinames = set([d.project_name for d in gui_dists]) - distnames - excludes

    try:
        setupdoc_dist = working_set.resolve([Requirement.parse('setupdocs')
                                             ])[0]
    except:
        setupdoc_dist = None

    for dist in dists:
        if dist.project_name not in distnames:
            continue
        if dist.project_name == 'openmdao.main':
            version = dist.version
        if options.dev:  # in a dev build, exclude openmdao stuff because we'll make them 'develop' eggs
            if not dist.project_name.startswith('openmdao.'):
                reqs.add('%s' % dist.as_requirement())
        else:
            reqs.add('%s' % dist.as_requirement())

    for dist in gui_dists:
        if dist.project_name not in guinames:
            continue
        if options.dev:  # in a dev build, exclude openmdao stuff because we'll make them 'develop' eggs
            if not dist.project_name.startswith('openmdao.'):
                guireqs.add('%s' % dist.as_requirement())
        else:
            guireqs.add('%s' % dist.as_requirement())

    # adding setupdocs req is a workaround to prevent Traits from looking elsewhere for it
    if setupdoc_dist:
        _reqs = [str(setupdoc_dist.as_requirement())]
    else:
        _reqs = ['setupdocs>=1.0']
    reqs = _reqs + list(reqs)
    guireqs = list(guireqs)

    optdict = {
        'reqs': reqs,
        'guireqs': guireqs,
        'version': version,
        'url': options.findlinks,
        'make_dev_eggs': make_dev_eggs,
        'adjust_options': _get_adjust_options(options, version),
        'openmdao_prereqs': openmdao_prereqs,
    }

    dest = os.path.abspath(options.dest)
    if options.dev:
        scriptname = os.path.join(dest, 'go-openmdao-dev.py')
    else:
        scriptname = os.path.join(dest, 'go-openmdao-%s.py' % version)
    with open(scriptname, 'wb') as f:
        f.write(virtualenv.create_bootstrap_script(script_str % optdict))
    os.chmod(scriptname, 0755)
Code example #39
File: configuration.py Project: Fy-/netwrok-server
from pkg_resources import Requirement, resource_filename
import sys
import json
import logging

config_file = resource_filename(Requirement.parse("NetWrok-Server"),
                                "netwrok/data/netwrok_default.ini")
config = {}

with open(config_file, "r") as f:
    config.update(json.load(f))

if len(sys.argv) > 1:
    config_file = sys.argv[1]
    with open(config_file, "r") as f:
        config.update(json.load(f))

loglevel = getattr(logging, config["SERVER"]["LOG_LEVEL"].upper())
logging.basicConfig(filename=config["SERVER"]["LOG_PATH"], level=loglevel)
logging.info("NetWrok Started")
Code example #40
File: runtest.py Project: kjappelbaum/molSimplify
import pytest
import argparse
import os
import openbabel as ob
import numpy as np
from molSimplify.Scripts.inparse import *
from molSimplify.Scripts.generator import *
from molSimplify.Classes.globalvars import *
from molSimplify.Classes.mol3D import mol3D
from molSimplify.Classes.mol3D import distance
from molSimplify.Classes.atom3D import atom3D
from pkg_resources import resource_filename, Requirement

infile = resource_filename(Requirement.parse("molSimplify"),
                           "tests/inputs/example_1_noff.in")
args = ['main.py', '-i', infile]
startgen(args, False, False)
infile = resource_filename(Requirement.parse("molSimplify"),
                           "tests/inputs/example_1.in")
args = ['main.py', '-i', infile]
startgen(args, False, False)
# ts test
infile = resource_filename(Requirement.parse("molSimplify"),
                           "tests/inputs/example_9.in")
args = ['main.py', '-i', infile]
startgen(args, False, False)
Code example #41
def find_resource(project, resource_path, alt_path=None, return_path=False):
    """ Returns a file object or file path pointing to the desired resource.

    Parameters
    ----------
    project : str
        The name of the project to look for the resource in. Can be the name or
        a requirement string. Ex: 'MyProject', 'MyProject>1.0', 'MyProject==1.1'
    resource_path : str
        The path to the file from inside the package. If the file desired is
        MyProject/data/image.jpg, resource_path would be 'data/image.jpg'.
    alt_path : str
        The path to the resource relative to the location of the application's
        top-level script (the one with __main__). If this function is called in
        code/scripts/myscript.py and the resource is code/data/image.jpg, the
        alt_path would be '../data/image.jpg'. This path is only used if the
        resource cannot be found using setuptools.
    return_path : bool
        Determines whether the function should return a file object or a full
        path to the resource.

    Returns
    -------
    file : file object or file path
        A file object containing the resource. If return_path is True, 'file'
        will be the full path to the resource. If the file is not found or
        cannot be opened, None is returned.

    Description
    -----------
    This function will find a desired resource file and return an opened file
    object. The main method of finding the resource uses the pkg_resources
    resource_stream method, which searches your working set for the installed
    project specified and appends the resource_path given to the project
    path, leading it to the file. If setuptools is not installed or it cannot
    find/open the resource, find_resource will use the sys.path[0] to find the
    resource if alt_path is defined.
    """

    try:
        # Get the image using the pkg_resources resource_stream module, which
        # will find the file by getting the Chaco install path and appending the
        # image path. This method works in all cases as long as setuptools is
        # installed. If setuptools isn't installed, the backup sys.path[0]
        # method is used.
        from pkg_resources import resource_stream, working_set, Requirement

        # Get a requirement for the project
        requirement = Requirement.parse(project)

        if return_path:
            dist = working_set.find(requirement)
            full_path = os.path.join(dist.location, resource_path)

            # If the path exists, return it
            if os.path.exists(full_path):
                return full_path
            else:
                # No file at the install location; fall through to the except
                # clause below and try the other search strategies.
                raise IOError('Resource %r not found' % full_path)
        else:
            return resource_stream(requirement, resource_path)

    except:
        # Setuptools was either not installed, or it failed to find the file.
        # First check to see if the package was installed using egginst by
        # looking for the file at: site-packages\\resouce_path
        full_path = os.path.join(get_python_lib(), resource_path)
        if os.path.exists(full_path):
            if return_path:
                return full_path
            else:
                return open(full_path, 'rb')

        # Get the image using sys.path[0], which is the directory that the
        # running script lives in. The path to the file is then constructed by
        # navigating from the script's location. This method only works if this
        # script is called directly from the command line using
        # 'python %SOMEPATH%/<script>'
        if alt_path is None:
            return
        if return_path:
            return os.path.join(sys.path[0], alt_path)

        # Try to open the file, return None on exception
        try:
            return open(os.path.join(sys.path[0], alt_path), 'rb')
        except:
            return
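
# Usage sketch (hypothetical names, not from the original source): fetch a
# bundled data file from an installed distribution called "MyProject", falling
# back to a path relative to the running script if the setuptools lookup fails.
image_path = find_resource('MyProject>=1.0', 'data/image.jpg',
                           alt_path='../data/image.jpg', return_path=True)
if image_path is not None:
    with open(image_path, 'rb') as fh:
        image_bytes = fh.read()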
Code example #42
File: interpreter.py Project: alexstaytuned/commons
import os
import re
import subprocess
import sys
from collections import defaultdict

from pkg_resources import Requirement, find_distributions

# PythonIdentity, PythonCapability, maybe_requirement, ID_PY and TRACER are
# defined elsewhere in the original module and are assumed importable here.


class PythonInterpreter(object):
    REGEXEN = (
        re.compile(r'jython$'),
        re.compile(r'python$'),
        re.compile(r'python[23]\.[0-9]$'),
        re.compile(r'pypy$'),
        re.compile(r'pypy-1\.[0-9]$'),
    )

    CACHE = {}  # memoize executable => PythonInterpreter

    try:
        # Versions of distribute prior to the setuptools merge would automatically replace
        # 'setuptools' requirements with 'distribute'.  It provided the 'replacement' kwarg
        # to toggle this, but it was removed post-merge.
        COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0',
                                                  replacement=False)
    except TypeError:
        COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0')

    class Error(Exception):
        pass

    class IdentificationError(Error):
        pass

    class InterpreterNotFound(Error):
        pass

    @classmethod
    def get(cls):
        return cls.from_binary(sys.executable)

    @classmethod
    def all(cls, paths=None):
        if paths is None:
            paths = os.getenv('PATH', '').split(':')
        return cls.filter(cls.find(paths))

    @classmethod
    def _parse_extras(cls, output_lines):
        def iter_lines():
            for line in output_lines:
                try:
                    dist_name, dist_version, location = line.split()
                except ValueError:
                    raise cls.IdentificationError(
                        'Could not identify requirement: %s' % line)
                yield ((dist_name, dist_version), location)

        return dict(iter_lines())

    @classmethod
    def _from_binary_internal(cls, path_extras):
        def iter_extras():
            for item in sys.path + list(path_extras):
                for dist in find_distributions(item):
                    if dist.version:
                        yield ((dist.key, dist.version), dist.location)

        return cls(sys.executable, PythonIdentity.get(), dict(iter_extras()))

    @classmethod
    def _from_binary_external(cls, binary, path_extras):
        environ = cls.sanitized_environment()
        environ['PYTHONPATH'] = ':'.join(path_extras)
        po = subprocess.Popen([binary],
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              env=environ)
        so, _ = po.communicate(ID_PY)
        output = so.decode('utf8').splitlines()
        if len(output) == 0:
            raise cls.IdentificationError(
                'Could not establish identity of %s' % binary)
        identity, extras = output[0], output[1:]
        return cls(binary,
                   PythonIdentity.from_id_string(identity),
                   extras=cls._parse_extras(extras))

    @classmethod
    def expand_path(cls, path):
        if os.path.isfile(path):
            return [path]
        elif os.path.isdir(path):
            return [os.path.join(path, fn) for fn in os.listdir(path)]
        return []

    @classmethod
    def from_env(cls, hashbang):
        """Resolve a PythonInterpreter as /usr/bin/env would.

       :param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH.
    """
        paths = os.getenv('PATH', '').split(':')
        for path in paths:
            for fn in cls.expand_path(path):
                basefile = os.path.basename(fn)
                if hashbang == basefile:
                    try:
                        return cls.from_binary(fn)
                    except Exception as e:
                        TRACER.log('Could not identify %s: %s' % (fn, e))

    @classmethod
    def from_binary(cls, binary, path_extras=None):
        path_extras = path_extras or ()
        if binary not in cls.CACHE:
            if binary == sys.executable:
                cls.CACHE[binary] = cls._from_binary_internal(path_extras)
            else:
                cls.CACHE[binary] = cls._from_binary_external(
                    binary, path_extras)
        return cls.CACHE[binary]

    @classmethod
    def find(cls, paths):
        """
      Given a list of files or directories, try to detect python interpreters amongst them.
      Returns a list of PythonInterpreter objects.
    """
        pythons = []
        for path in paths:
            for fn in cls.expand_path(path):
                basefile = os.path.basename(fn)
                if any(
                        matcher.match(basefile) is not None
                        for matcher in cls.REGEXEN):
                    try:
                        pythons.append(cls.from_binary(fn))
                    except Exception as e:
                        TRACER.log('Could not identify %s: %s' % (fn, e))
                        continue
        return pythons

    @classmethod
    def filter(cls, pythons):
        """
      Given a map of python interpreters in the format provided by PythonInterpreter.find(),
      filter out duplicate versions and versions we would prefer not to use.

      Returns a map in the same format as find.
    """
        good = []

        MAJOR, MINOR, SUBMINOR = range(3)

        def version_filter(version):
            return (version[MAJOR] == 2 and version[MINOR] >= 6
                    or version[MAJOR] == 3 and version[MINOR] >= 2)

        all_versions = set(interpreter.identity.version
                           for interpreter in pythons)
        good_versions = filter(version_filter, all_versions)

        for version in good_versions:
            # For each candidate, use the latest version we find on the filesystem.
            candidates = defaultdict(list)
            for interp in pythons:
                if interp.identity.version == version:
                    candidates[interp.identity.interpreter].append(interp)
            for interp_class in candidates:
                candidates[interp_class].sort(
                    key=lambda interp: os.path.getmtime(interp.binary),
                    reverse=True)
                good.append(candidates[interp_class].pop(0))

        return good

    @classmethod
    def sanitized_environment(cls):
        # N.B. This is merely a hack because sysconfig.py on the default OS X
        # installation of 2.6/2.7 breaks.
        env_copy = os.environ.copy()
        env_copy.pop('MACOSX_DEPLOYMENT_TARGET', None)
        return env_copy

    @classmethod
    def replace(cls, requirement):
        self = cls.get()
        if self.identity.matches(requirement):
            return False
        for pi in cls.all():
            if pi.identity.matches(requirement):
                break
        else:
            raise cls.InterpreterNotFound(
                'Could not find interpreter matching filter!')
        os.execve(pi.binary, [pi.binary] + sys.argv,
                  cls.sanitized_environment())

    def __init__(self, binary, identity, extras=None):
        """Construct a PythonInterpreter.

       You should probably PythonInterpreter.from_binary instead.

       :param binary: The full path of the python binary.
       :param identity: The :class:`PythonIdentity` of the PythonInterpreter.
       :param extras: A mapping from (dist.key, dist.version) to dist.location
                      of the extras associated with this interpreter.
    """
        self._binary = os.path.realpath(binary)
        self._binary_stat = os.stat(self._binary)
        self._extras = extras or {}
        self._identity = identity

    def with_extra(self, key, version, location):
        extras = self._extras.copy()
        extras[(key, version)] = location
        return self.__class__(self._binary, self._identity, extras)

    @property
    def extras(self):
        return self._extras.copy()

    @property
    def binary(self):
        return self._binary

    @property
    def identity(self):
        return self._identity

    @property
    def python(self):
        return self._identity.python

    @property
    def version(self):
        return self._identity.version

    @property
    def version_string(self):
        return str(self._identity)

    def satisfies(self, capability):
        if not isinstance(capability, PythonCapability):
            raise TypeError('Capability must be a PythonCapability, got %s' %
                            type(capability))
        return not any(self.get_location(req) is None for req in capability)

    def get_location(self, req):
        req = maybe_requirement(req)
        for dist, location in self.extras.items():
            dist_name, dist_version = dist
            if req.key == dist_name and dist_version in req:
                return location

    def __hash__(self):
        return hash(self._binary_stat)

    def __eq__(self, other):
        if not isinstance(other, PythonInterpreter):
            return False
        return self._binary_stat == other._binary_stat

    def __lt__(self, other):
        if not isinstance(other, PythonInterpreter):
            return False
        return self.version < other.version

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__, self._binary,
                                   self._identity, self._extras)
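
# Usage sketch (assumes the module's own PythonIdentity/TRACER machinery is
# available, as in the original file): inspect the interpreter running this
# code and enumerate the deduplicated interpreters found on $PATH.
current = PythonInterpreter.get()
print('%s %s' % (current.binary, current.version))
for candidate in PythonInterpreter.all():
    print(candidate.version_string)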
Code example #43
from distutils.errors import DistutilsError

from pkg_resources import Requirement


def parse_requirement_arg(spec):
    try:
        return Requirement.parse(spec)
    except ValueError:
        raise DistutilsError(
            "Not a URL, existing file, or requirement spec: %r" % (spec, ))
Code example #44
# variable SAGE_DOC_MATHJAX is set to "no" or "False".  (Note that if
# the user does not set this variable, then the script sage-env sets
# it to "True".)

if (os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'no'
        and os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'False'):

    extensions.append('sphinx.ext.mathjax')
    mathjax_path = 'MathJax.js?config=TeX-AMS_HTML-full,../mathjax_sage.js'

    from sage.misc.latex_macros import sage_mathjax_macros
    # this is broken for now
    # html_theme_options['mathjax_macros'] = sage_mathjax_macros()

    from pkg_resources import Requirement, working_set
    sagenb_path = working_set.find(Requirement.parse('sagenb')).location
    mathjax_relative = os.path.join('sagenb', 'data', 'mathjax')

    # It would be really nice if sphinx would copy the entire mathjax directory,
    # (so we could have a _static/mathjax directory), rather than the contents of the directory

    mathjax_static = os.path.join(sagenb_path, mathjax_relative)
    html_static_path.append(mathjax_static)
    exclude_patterns = [
        '**/' + os.path.join(mathjax_relative, i)
        for i in ('docs', 'README*', 'test', 'unpacked', 'LICENSE')
    ]
else:
    extensions.append('sphinx.ext.pngmath')

# This is to make the verbatim font smaller;
Code example #45
def _pkg_sphinx_info(startdir, pkg, outfile, show_undoc=False, underline='-'):
    """Generate Sphinx autodoc directives for all of the modules in 
    the given package.
    
    """
    # locate the package directory
    topdir = pkg
    pkgdir = pkg

    dist = working_set.find(Requirement.parse(pkg))
    if dist is None:
        logging.error('no dist found for Requirement(%s)' % pkg)
    print >> outfile, 'Package %s' % pkg
    print >> outfile, underline * (len('Package ') + len(pkg))
    print >> outfile, '\n\n'

    # this behaves strangely, maybe because we use namespace pkgs?
    # mod points to module 'openmdao', not 'openmdao.whatever', so we
    # have to access 'whatever' through the 'openmdao' module
    mod = __import__(pkg)
    docs = getattr(mod, pkg.split('.')[1]).__doc__
    if docs:
        print >> outfile, docs, '\n'

    #excluding traits now since they need to be sorted separately
    _names = list(
        _get_resource_files(dist, [
            '*__init__.py', '*setup.py', '*datatypes*.py', '*/gui/*/views.py',
            '*/gui/*/models.py', '*/gui/manage.py', '*/gui/urls.py',
            '*/gui/*/urls.py', '*/gui/projdb/admin.py'
        ], ['*.py']))
    names = []
    for n in _names:
        parts = n.split('/')
        if parts[0] == 'openmdao' and parts[1] == 'test':
            if len(parts) > 2 and parts[2] != 'plugins':
                names.append(n)
        elif 'test' not in parts:
            names.append(n)

    names.sort()

    #wanted to sort traits separately based only on filenames despite differing paths
    traitz = list(
        _get_resource_files(dist, ['*__init__.py', '*setup.py', '*/test/*.py'],
                            ['*datatypes*.py']))
    sorted_traitz = sorted(traitz, cmp=_compare_traits_path)

    names.extend(sorted_traitz)

    exdirs = ['build', 'examples']

    oldheader = None
    newheader = None

    for name in names:
        if os.path.basename(name) == 'releaseinfo.py':
            continue

        for ex in exdirs:
            if name.startswith('%s/' % ex) or '/%s/' % ex in name:
                break
            else:
                x = name.split('/')
                #kind of dirty, but the other sections don't need api header.
                if os.path.basename(name) == 'api.py' and x[1] == 'lib':
                    newheader = 'api'
                if len(x) >= 4:
                    newheader = x[2]
            if (oldheader != newheader):
                print >> outfile, '**%s**' % newheader.upper()
                print >> outfile, '_' * (4 + len(newheader)) + '\n'
                oldheader = newheader

        _mod_sphinx_info(name, outfile, show_undoc=show_undoc)
Code example #46
def _find_pycuda_include_path():
    from pkg_resources import Requirement, resource_filename
    return resource_filename(Requirement.parse("pycuda"), "pycuda/cuda")
Code example #47
import os

from pkg_resources import Requirement, resource_filename


def get_examples_paths():
    """Generator of possible paths for examples."""
    yield os.path.join(os.path.dirname(os.path.dirname(__file__)),
                       "examples")
    yield resource_filename(Requirement.parse("weblate"), "examples")
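
# Possible use of the generator above (illustrative only): take the first
# candidate directory that actually exists on disk.
examples_dir = next((path for path in get_examples_paths()
                     if os.path.exists(path)), None)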
Code example #48
def mkpseudo(argv=None):
    """A command line script (mkpseudo) points to this.  It generates a
    source distribution package that's empty aside from
    having a number of dependencies on other packages.

    usage: ``make_pseudopkg <pkg_name> <version> [-d <dest_dir>] [-l <links_url>] [-r req1] ... [-r req_n]``

    If ``pkg_name`` contains dots, a namespace package will be built.

    Required dependencies are specified using the same notation used by
    ``setuptools/easy_install/distribute/pip``.

    .. note:: If your required dependencies use the "<" or ">" characters, you must put the
              entire requirement in quotes to avoid misinterpretation by the shell.

    """

    if argv is None:
        argv = sys.argv[1:]

    parser = OptionParser()
    parser.usage = "mkpseudo <name> <version> [options]"
    parser.add_option("-d", "--dest", action="store", type="string", dest='dest', default='.',
                      help="directory where distribution will be created (optional)")
    parser.add_option("-l", "--link", action="append", type="string", dest='deplinks', default=[],
                      help="URLs to search for dependencies (optional)")
    parser.add_option("-r", "--req", action="append", type="string", dest='reqs', default=[],
                      help="requirement strings for dependent distributions (one or more)")
    parser.add_option("", "--dist", action="store_true", dest="dist",
                      help="make a source distribution after creating the directory structure")

    (options, args) = parser.parse_args(argv)

    if len(args) != 2:
        parser.print_help()
        sys.exit(-1)

    name = args[0]
    names = name.split('.')
    version = args[1]

    for i, url in enumerate(options.deplinks):
        if not url.startswith('http:') and not url.startswith('https:'):
            options.deplinks[i] = "http://%s" % url

    dest = os.path.abspath(os.path.expandvars(os.path.expanduser(options.dest)))

    if len(options.reqs) == 0 and options.dist:
        print "No dependencies have been specified, so the distribution will not be built"
        options.dist = False

    nspkgs = []
    for i, nm in enumerate(names[:-1]):
        nspkgs.append('.'.join(names[:i + 1]))

    dists = working_set.resolve([Requirement.parse(r) for r in options.reqs])
    dset = set([("%s" % d).replace(' ', '==') for d in dists])

    setup_options = {
        'requires': list(dset),
        'name': name,
        'version': version,
        'deplinks': options.deplinks,
        'nspkgs': nspkgs,
    }

    startdir = os.getcwd()
    if options.dist:
        tdir = tempfile.mkdtemp()
    else:
        tdir = dest

    try:
        os.chdir(tdir)

        rnames = names[::-1]
        for i, ns in enumerate(rnames):
            if i == 0:
                dct = {'__init__.py': ''}
            else:
                dct = {
                    '__init__.py': _ns_template,
                    rnames[i - 1]: dct,
                }

        dct = {names[0]: dct}

        dirstruct = {
            name: {
                'setup.py': _setup_py_template % setup_options,
                'src': dct,
            },
        }

        if not options.dist:
            if os.path.exists(name):
                print "'%s' already exists.  aborting..." % name
                sys.exit(-1)

        build_directory(dirstruct)

        os.chdir(name)

        if options.dist:
            tarname = os.path.join(dest, "%s-%s.tar.gz" % (name, version))
            zipname = os.path.join(dest, "%s-%s.zip" % (name, version))
            for fname in [tarname, zipname]:
                if os.path.exists(fname):
                    print "%s already exists" % fname
                    sys.exit(-1)

            cmd = [sys.executable, 'setup.py', 'sdist', '-d', dest]
            subprocess.check_call(cmd)

    finally:
        os.chdir(startdir)
        if options.dist:
            shutil.rmtree(tdir, onerror=onerror)
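
# Illustrative invocation (hypothetical names, not from the original source):
# build a pseudo-package "mytools.meta" that only pins two dependencies and
# produce an sdist, equivalent to the command line:
#   mkpseudo mytools.meta 0.1 -d ./dist -r "numpy>=1.0" -r scipy --dist
if __name__ == '__main__':
    mkpseudo(['mytools.meta', '0.1', '-d', './dist',
              '-r', 'numpy>=1.0', '-r', 'scipy', '--dist'])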
Code example #49
def get_resource(path):
    return resource_filename(Requirement.parse('bbctrl'), 'bbctrl/' + path)
Code example #50
File: util.py Project: framecurrent/bibtools
def datastream(name):
    from pkg_resources import Requirement, resource_stream
    return resource_stream(Requirement.parse('bibtools'), 'bibtools/' + name)
Code example #51
    def validate(self):
        """Check for required packages are installed."""
        # RPM dependency
        rpm_deps = {"cortx-sspl-test": None}
        # python 3rd party package dependency
        pip3_packages_dep = {"Flask": "1.1.1", "coverage": "5.5"}
        if not self.coverage_enabled:
            pip3_packages_dep.pop("coverage")

        # Validate pip3 python pkg with required version.
        for pkg, version in pip3_packages_dep.items():
            installed_pkg = None
            uninstalled_pkg = False
            try:
                pkg_req = Requirement.parse(f"{pkg}=={version}")
                installed_pkg = working_set.find(pkg_req)
            except VersionConflict:
                cmd = f'pip3 uninstall -y {pkg}'
                _, err, ret = SimpleProcess(cmd).run()
                if ret:
                    raise TestException(
                        "Failed to uninstall the pip3 pkg: %s(v%s), "
                        "due to an Error: %s" % (pkg, version, err))
                uninstalled_pkg = True
            except Exception as err:
                raise TestException("Failed at verification of pip3 pkg: %s, "
                                    "due to an Error: %s" % (pkg, err))

            if not installed_pkg or uninstalled_pkg:
                cmd = f'pip3 install {pkg}=={version}'
                _, err, ret = SimpleProcess(cmd).run()
                if ret:
                    raise TestException(
                        "Failed to install the pip3 pkg: %s(v%s), "
                        "due to an Error: %s" % (pkg, version, err))
            logger.info(f"Ensured Package Dependency: {pkg}(v{version}).")

        # Validate rpm dependencies
        pkg_validator = PkgV()
        pkg_validator.validate_rpm_pkgs(host=socket.getfqdn(),
                                        pkgs=rpm_deps,
                                        skip_version_check=True)
        # Load global, sspl and test configs
        Conf.load(SSPL_CONFIG_INDEX, sspl_config_path)
        Conf.load(SSPL_TEST_CONFIG_INDEX, sspl_test_config_path)
        # Take copy of supplied config passed to sspl_test and load it
        with open(self.sspl_test_gc_copy_file, "w") as f:
            f.write("")
        self.sspl_test_gc_copy_url = "yaml://%s" % self.sspl_test_gc_copy_file
        Conf.load(SSPL_TEST_GLOBAL_CONFIG, self.sspl_test_gc_copy_url)
        Conf.load("global_config", self.sspl_test_gc_url)
        Conf.copy("global_config", SSPL_TEST_GLOBAL_CONFIG)
        # Validate input configs
        machine_id = Utility.get_machine_id()
        self.node_type = Conf.get(SSPL_TEST_GLOBAL_CONFIG,
                                  "server_node>%s>type" % machine_id)
        enclosure_id = Conf.get(
            SSPL_TEST_GLOBAL_CONFIG,
            "server_node>%s>storage>enclosure_id" % machine_id)
        self.enclosure_type = Conf.get(
            SSPL_TEST_GLOBAL_CONFIG,
            "storage_enclosure>%s>type" % enclosure_id)
Code example #52
def build_docs_and_install(owner, name, version, findlinks):  # pragma no cover
    tdir = tempfile.mkdtemp()
    startdir = os.getcwd()
    os.chdir(tdir)
    try:
        tarpath = download_github_tar(owner, name, version)

        # extract the repo tar file
        tar = tarfile.open(tarpath)
        tar.extractall()
        tar.close()

        files = os.listdir('.')
        files.remove(os.path.basename(tarpath))
        if len(files) != 1:
            raise RuntimeError("after untarring, found multiple directories: %s"
                               % files)

        os.chdir(files[0])  # should be in distrib directory now

        cfg = SafeConfigParser(dict_type=OrderedDict)

        try:
            cfg.readfp(open('setup.cfg', 'r'), 'setup.cfg')

        except IOError as io_error:
            raise IOError, \
                "OpenMDAO plugins must have a setup.cfg: {}".format(io_error), \
                sys.exc_info()[2]

        try:
            reqs = cfg.get('metadata', 'requires-dist').strip()
            reqs = reqs.replace(',', ' ')
            reqs = [n.strip() for n in reqs.split()]

            try:
                flinks = cfg.get('easy_install', 'find_links').strip()
                flinks = flinks.split('\n')
                flinks = [n.strip() for n in flinks]

                flinks.append(findlinks)

                findlinks = ' '.join(flinks)

            except (NoSectionError, NoOptionError):
                pass

        except NoOptionError:
            # couldn't find requires-dist in setup.cfg, so
            # create an sdist so we can query metadata for distrib dependencies
            tarname = _bld_sdist_and_install(deps=False)

            # now find any dependencies
            metadict = get_metadata(tarname)
            reqs = metadict.get('requires', [])

        # install dependencies (some may be needed by sphinx)
        ws = WorkingSet()
        for r in reqs:
            print "Installing dependency '%s'" % r
            req = Requirement.parse(r)
            dist = ws.find(req)
            if dist is None:
                try:
                    check_call(['easy_install', '-Z', '-f', findlinks, r])
                except Exception:
                    traceback.print_exc()

        # build sphinx docs
        check_call(['plugin', 'build_docs', files[0]])

        # make a new sdist with docs in it and install it
        tarname = _bld_sdist_and_install()
    finally:
        os.chdir(startdir)
        shutil.rmtree(tdir, ignore_errors=True)
Code example #53
def parse_extras(extras_str):
    """Turn a string of extras into a parsed extras list"""
    extras = Requirement.parse("fakepkg{0}".format(
        extras_to_string(extras_str))).extras
    return sorted(dedup([extra.lower() for extra in extras]))
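
# The same idea without the project-internal helpers (extras_to_string, dedup):
# pkg_resources exposes extras directly on a parsed requirement.
from pkg_resources import Requirement
print(sorted(Requirement.parse("fakepkg[security,socks]").extras))
# -> ['security', 'socks']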
Code example #54
File: internal.py Project: tanmoydeb07/cheshire3
           'transformer', 'workflow', 'xpathProcessor',
           'textmining.tmNormalizer', 'textmining.tmDocumentFactory',
           'textmining.tmPreParser', 'textmining.tmTransformer',
           'datamining.dmPreParser', 'datamining.dmTransformer',
           'grid.srbIndex', 'grid.srbStore']

_major_version = 1
_minor_version = 1
_patch_version = 8

cheshire3Version = (_major_version, _minor_version, _patch_version)
cheshireVersion = cheshire3Version   # Included for backward compatibility

# Find Cheshire3 environment
try:
    cheshire3Home = resource_filename(Requirement.parse('cheshire3'), '')
except:
    # Cheshire3 not yet installed; maybe in a source distro/repo checkout
    # Assume local directory
    cheshire3Home = '.'

# Allow cheshire3Home to be over-ridden by environmental variable
# e.g. for source code distro/repo checkout
cheshire3Home = os.environ.get('C3HOME', cheshire3Home)

cheshire3Root = os.path.join(cheshire3Home, "cheshire3")
cheshire3Code = os.path.join(cheshire3Root)
cheshire3Dbs = os.path.join(cheshire3Home, "dbs")
cheshire3Www = os.path.join(cheshire3Home, "www")

CONFIG_NS = "http://www.cheshire3.org/schemas/config/"
Code example #55
def plugin_defs():
    globs = globalvars()
    plugin_path = resource_filename(Requirement.parse("molSimplify"),
                                    "molSimplify/plugindefines_reference.txt")
    return plugin_path
Code example #56
File: python_setup.py Project: spring-team/pants
    def _failsafe_parse(self, requirement):
        try:
            return Requirement.parse(requirement, replacement=False)
        except TypeError:
            return Requirement.parse(requirement)
Code example #57
def lig_load(userligand, licores=None):
    if licores is None:
        licores = getlicores()
        # licores.pop("x", None)
    globs = globalvars()
    ### get groups ###
    groups = []
    for entry in licores:
        groups += licores[entry][3]
    groups = sorted(list(set(groups)))
    # check if user requested group
    if userligand.lower() in groups:
        subligs = [
            key for key in licores if userligand.lower() in licores[key][3]
        ]
        # randomly select ligand
        userligand = random.choice(subligs)
    if '~' in userligand:
        homedir = os.path.expanduser("~")
        userligand = userligand.replace('~', homedir)
    emsg = False
    lig = mol3D()  # initialize ligand molecule
    ### check if ligand exists in dictionary
    if userligand in licores.keys():
        print('loading ligand from dictionary')
        dbentry = licores[userligand]
        # load lig mol file (with hydrogens)
        if globs.custom_path:
            flig = globs.custom_path + "/Ligands/" + dbentry[0]
        else:
            flig = resource_filename(Requirement.parse("molSimplify"),
                                     "molSimplify/Ligands/" + dbentry[0])
        # check if ligand xyz/mol file exists
        if not glob.glob(flig):
            emsg = "We can't find the ligand structure file %s right now! Something is amiss. Exiting..\n" % flig
            print emsg
            return False, emsg
        if ('.xyz' in flig):
            lig.OBMol = lig.getOBMol(flig, 'xyzf')
        elif ('.mol' in flig):
            lig.OBMol = lig.getOBMol(flig, 'molf')
        elif ('.smi' in flig):
            lig.OBMol = lig.getOBMol(flig, 'smif')
        ### modified the check for length,
        ### as it parsing string length instead of
        ### list length!
        if isinstance(dbentry[2], (str, unicode)):
            lig.denticity = 1
        else:
            lig.denticity = len(dbentry[2])
        lig.ident = dbentry[1]
        lig.charge = lig.OBMol.GetTotalCharge()
        if 'pi' in dbentry[2]:
            lig.cat = [int(l) for l in dbentry[2][:-1]]
            lig.cat.append('pi')
        else:
            if lig.denticity == 1:
                lig.cat = [int(dbentry[2])]
            else:
                lig.cat = [int(l) for l in dbentry[2]]
        if lig.denticity > 1:
            lig.grps = dbentry[3]
        else:
            lig.grps = []
        if len(dbentry) > 3:
            lig.ffopt = dbentry[4][0]
    ### load from file
    elif ('.mol' in userligand or '.xyz' in userligand or '.smi' in userligand
          or '.sdf' in userligand):
        #flig = resource_filename(Requirement.parse("molSimplify"),"molSimplify/" +userligand)
        if glob.glob(userligand):
            ftype = userligand.split('.')[-1]
            # try and catch error if conversion doesn't work
            try:
                print('ligand is an ' + ftype + ' file')
                lig.OBMol = lig.getOBMol(userligand,
                                         ftype + 'f')  # convert from file
                # generate coordinates if not existing
                lig.charge = lig.OBMol.GetTotalCharge()
                print('Ligand successfully converted to OBMol')
            except IOError:
                emsg = 'Failed converting file ' + userligand + ' to molecule..Check your file.\n'
                return False, emsg
            lig.ident = userligand.rsplit('/')[-1]
            lig.ident = lig.ident.split('.' + ftype)[0]
        else:
            emsg = 'Ligand file ' + userligand + ' does not exist. Exiting..\n'
            print emsg
            return False, emsg
    ### if not, try interpreting as SMILES string
    else:
        try:
            lig.getOBMol(userligand, 'smistring', True)  # convert from smiles
            lig.convert2mol3D()
            assert lig.natoms
            lig.charge = lig.OBMol.GetTotalCharge()
            print('Ligand successfully interpreted as SMILES')
        except:
            emsg = "We tried converting the string '%s' to a molecule but it wasn't a valid SMILES string.\n" % userligand
            emsg += "Furthermore, we couldn't find the ligand structure: '%s' in the ligands dictionary. Try again!\n" % userligand
            emsg += "\nAvailable ligands are: %s\n" % getligs()
            emsg += "\nAnd available groups are: %s\n" % getligroups(licores)
            print emsg
            return False, emsg
        lig.ident = 'smi'
    lig.name = userligand
    return lig, emsg
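
# Usage sketch (illustrative): load a ligand by dictionary name, file path or
# SMILES string; on failure, lig is False and emsg holds the error text.
lig, emsg = lig_load('water')   # 'water' assumed present in the dictionary
if emsg:
    print(emsg)
else:
    print("denticity: %s, charge: %s" % (lig.denticity, lig.charge))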
Code example #58
def _make_license_table(docdir, reqs=None):
    """
    Generates a file in docs/licenses/licenses_table.txt that
    contains a restructured text table with the name, license, and home-page of
    all distributions that openmdao depends on.
    """
    meta_names = ['name', 'version', 'license', 'home-page']
    headers = [
        '**Distribs Used by OpenMDAO**', '**Version**', '**License**',
        '**Link**'
    ]
    numcols = len(meta_names)
    data_templates = ["%s", "%s", "%s", "%s"]
    col_spacer = ' '
    max_col_width = 80

    license_fname = os.path.join(docdir, 'licenses', 'licenses_table.txt')

    if reqs is None:
        reqs = [Requirement.parse(p) for p in get_openmdao_packages()]

    reqset = set(reqs)
    dists = set()
    done = set()
    while reqset:
        req = reqset.pop()
        if req.project_name not in done:
            done.add(req.project_name)
            dist = working_set.find(req)
            if dist is not None:
                dists.add(dist)
                reqset.update(dist.requires())

    metadict = {}
    for dist in dists:
        metadict[dist.project_name] = get_dist_metadata(dist)
    for projname, meta in metadict.items():
        for i, name in enumerate(meta_names):
            try:
                meta[name] = data_templates[i] % str(meta[name])
            except KeyError:
                meta[name] = 'UNKNOWN'
        if meta['name'] == 'UNKNOWN':
            meta['name'] = projname
    # figure out sizes of table columns
    colwidths = [len(s) + 1 for s in headers]
    for i, name in enumerate(meta_names):
        sz = max([len(m[name]) for m in metadict.values()]) + 1
        sz = min(sz, max_col_width)
        colwidths[i] = max(colwidths[i], sz)

    with open(license_fname, 'wb') as outfile:
        # write header
        outfile.write(_get_border_line(numcols, colwidths, char='='))
        for i, header in enumerate(headers):
            outfile.write(header + ' ' * (colwidths[i] - len(header)))
            outfile.write(col_spacer)
        outfile.write('\n')
        outfile.write(_get_border_line(numcols, colwidths, char='='))

        # write table data
        tups = [(k, v) for k, v in metadict.items()]
        tups = sorted(tups, lambda x, y: cmp(x[0].lower(), y[0].lower()))
        for j, tup in enumerate(tups):
            for i, name in enumerate(meta_names):
                outfile.write(_get_table_cell(tup[1][name], colwidths[i]))
                outfile.write(col_spacer)
            outfile.write('\n')
            if j < len(tups) - 1:
                outfile.write(_get_border_line(numcols, colwidths, char='-'))

        # bottom border
        outfile.write(_get_border_line(numcols, colwidths, char='='))
        outfile.write('\n')
Code example #59
    def __init__(self, input_model, output_model, **extra_args):
        """
        Prepare remote execution of geosphere wrapper script.
        The resulting Application will associate a remote execution like:

        geosphere_wrapper.sh [options] <input model> <output model>

        Options:
        -w <working dir>         name of the working dir extracted from 
                                 .tgz file
        -g <grok binary file>    path to 'grok' binary. Default in PATH
        -h <hgs binary file>     path to 'hgs' binary. Default in PATH
        -d                       enable debug
        """


        self.input_model = input_model
        self.output_model = output_model

        inputs = []
        outputs = []

        geosphere_wrapper_sh = resource_filename(
            Requirement.parse("gc3pie"), "gc3libs/etc/geosphere_wrapper.sh"
        )

        inputs.append((geosphere_wrapper_sh,
                       os.path.basename(geosphere_wrapper_sh)))

        cmd = "./geosphere_wrapper.sh -d "

        if 's3cfg' in extra_args:
            inputs.append((extra_args['s3cfg'],
                           "etc/s3cfg"))

        if 'grok_bin' in extra_args:
            cmd += "-g %s " % extra_args['grok_bin']

            inputs.append((extra_args['grok_bin'],
                          os.path.join("./bin",
                                       os.path.basename(extra_args['grok_bin']))
                       ))

        if 'hgs_bin' in extra_args:
            cmd += "-h %s " % extra_args['hgs_bin']

            inputs.append((extra_args['hgs_bin'],
                          os.path.join("./bin",
                                       os.path.basename(extra_args['hgs_bin']))
                       ))

        # Pass input model location
        if self.input_model.scheme == 'file':
            # Include local input files as part of Application
            inputs.append((str(self.input_model),os.path.join(".",
                                                              os.path.basename(
                                                                  str(self.input_model)))))
            cmd += " ./%s " % os.path.basename(str(self.input_model))
        else:
            # just pass remote input model URI
            cmd += "%s" % str(input_model)

        # Pass working dir name argument
        cmd += " %s " % extra_args['jobname']

        # Pass output location argument
        if self.output_model.scheme == 'file':
            # Include output as part of data to be retrieved locally
            outputs.append((os.path.basename(str(self.output_model)),
                            str(self.output_model)))
            cmd += "./%s " % os.path.basename(str(self.output_model))
        else:
            # just pass remote output model URI
            cmd += "%s " % str(self.output_model)

        Application.__init__(
             self,
             # arguments should mimic the command line interface of the command to be
             # executed on the remote end
             arguments = cmd,
             inputs = inputs,
             outputs = outputs,
             stdout = 'geosphere.log',
             join=True,
             **extra_args)
Code example #60
File: main.py Project: avallbona/naniano
def main():
    """ Opciones de configuracion para apps django """

    parser = OptionParser(usage="usage: %prog [options] [VALUE]")
    #parser.add_option("-n", "--no-rotate",
    #action="store_true", dest="no_rotate", default=False,
    #help="rotate ftp files on upload cmd")
    parser.add_option("--version", "-v", action="store_true")
    parser.add_option("--list", "-l", action="store_true")
    parser.add_option("--get", "-g", action="store", dest="get_option")
    parser.add_option("--set",
                      "-s",
                      action="store",
                      dest="set_option",
                      help="set SET_OPTION parameter to VALUE")

    options, args = parser.parse_args()

    if options.version:
        print("%s v. %s") % (NAME, __VERSION__)
        return 0

    if os.path.isfile(DEFAULT_CONFIG_FILE):
        cfgfile = DEFAULT_CONFIG_FILE
    else:
        try:
            cfgfile = resource_filename(Requirement.parse("apconf"),
                                        "apconf/conf/apconf-sample.ini")
            # log.warn("Using sample config file: %s" % cfgfile)
        except:
            log.error("Configuration file not found")
            parser.print_help()
            return 1

    opts = Options()

    if options.list:
        from clint.textui import puts, colored
        pformat = "%30s: %s"
        puts('')
        for section in opts.sections:
            puts(pformat % (colored.green("[%s]" % section), ''))
            #puts(pformat % (colored.white('Option'), colored.cyan('Value')))
            for key, value in opts.items(section):
                puts(pformat % (colored.blue(key), colored.white(value)))

            puts('')
    elif options.get_option:
        try:
            print(opts.get(options.get_option))
        except KeyError:
            parser.print_help()
            return 1
    elif options.set_option:
        try:
            opts.set(options.set_option, args[0])
            opts.write()
        except IndexError:
            parser.print_help()
            return 1
    else:
        parser.print_help()
        return 1
    return 0