def __init__(self, **kwargs):
        Script.__init__(self, **kwargs)
        self.title = "Gulf of Maine intertidal density/cover (Petraitis et al. 2008)"
        self.citation = "Peter S. Petraitis, Harrison Liu, and " \
                        "Erika C. Rhile. 2008. Densities and cover " \
                        "data for intertidal organisms in the Gulf of " \
                        "Maine, USA, from 2003 to 2007. Ecology 89:588."
        self.name = "intertidal-abund-me"
        self.ref = "https://figshare.com/collections/DENSITIES_AND_COVER_DATA_FOR_INTERTIDAL_ORGANISMS_IN_THE_GULF_OF_MAINE_USA_FROM_2003_TO_2007/3300200"
        self.description = "The data on densities and percent cover in the " \
                           "60 experimental plots from 2003 to 2007 and to " \
                           "update data from 1996 to 2002 that are already " \
                           "published in Ecological Archives." \
                           "Includes densities of mussels, " \
                           "herbivorous limpet, herbivorous snails, " \
                           "predatory snail, barnacle , fucoid algae and " \
                           "percent cover by mussels, barnacles, fucoids, " \
                           "and other sessile organisms."
        self.retriever_minimum_version = '2.0.dev'
        self.version = '1.5.3'
        self.urls = {"main": "https://ndownloader.figshare.com/files/5600831"}
        self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=[-999.9])

        if parse_version(VERSION) <= parse_version("2.0.0"):
            self.shortname = self.name
            self.name = self.title
            self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=[-999.9])
        self.tables = {"main": Table("main", cleanup=self.cleanup_func_table)}
Example #2
    def check_api_version(self):
        """Checks the API version on the server to determine which to use."""
        try:
            root_resource = self.api_get('api/')
            rsp = self.api_get(root_resource['links']['info']['href'])

            self.rb_version = rsp['info']['product']['package_version']

            if parse_version(self.rb_version) >= parse_version('1.5.2'):
                self.deprecated_api = False
                self.root_resource = root_resource
                debug('Using the new web API')
                return True
        except APIError as e:
            if e.http_status not in (401, 404):
                # We shouldn't reach this. If there's a permission denied
                # from lack of logging in, then the basic auth handler
                # should have hit it.
                #
                # However in some versions it wants you to be logged in
                # and returns a 401 from the application after you've
                # done your http basic auth
                die("Unable to access the root /api/ URL on the server.")

        return False
Example #3
def check_for_updates(request):
    """Check if a new version of Modoboa is available."""
    local_config = models.LocalConfig.objects.first()
    client = ModoAPIClient()
    extensions = exts_pool.list_all()
    extensions = [{
        "label": "Modoboa",
        "name": "modoboa",
        "description": _("The core part of Modoboa"),
        "version": client.local_core_version
    }] + extensions
    update_avail = False
    for extension in extensions:
        pkgname = extension["name"].replace("_", "-")
        for api_extension in local_config.api_versions:
            if api_extension["name"] != pkgname:
                continue
            extension["last_version"] = api_extension["version"]
            if (
                parse_version(api_extension["version"]) >
                parse_version(extension["version"])
            ):
                extension["update"] = True
                extension["changelog_url"] = api_extension["url"]
                update_avail = True
                break
    return update_avail, extensions
Example #4
def from_pandas(X):
    """A simple wrapper for H2OFrame.from_python. This takes
    a pandas dataframe and returns an H2OFrame with all the 
    default args (generally enough) plus named columns.

    Parameters
    ----------

    X : pd.DataFrame
        The dataframe to convert.

    Returns
    -------

    H2OFrame
    """
    pd, _ = validate_is_pd(X, None)

    # older versions of h2o are super funky with this
    if parse_version(h2o.__version__) < parse_version('3.10.0.7'):
        h = 1
    else:
        h = 0

    # if h2o hasn't started, we'll let this fail through
    return H2OFrame.from_python(X, header=h, column_names=X.columns.tolist())
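
A hedged usage sketch (column names illustrative; assumes a local H2O cluster can be started):

import h2o
import pandas as pd

h2o.init()  # assumption: a local H2O cluster is available
df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
hf = from_pandas(df)  # H2OFrame with named columns "a" and "b"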
Example #5
    def do_reset():
        """ Reset and wait. Returns True on failure. """
        if not args.quiet:
            print("Checking ECME versions...")

        results, errors = run_command(args, nodes, "get_versions")
        if errors:
            print("ERROR: MC reset aborted. Backup partitions not updated.")
            return True

        for result in results.values():
            version = result.ecme_version.lstrip("v")
            if parse_version(version) < parse_version("1.2.0"):
                print("ERROR: MC reset is unsafe on ECME version v%s" % version)
                print("Please power cycle the system and start a new fwupdate.")
                return True

        if not args.quiet:
            print("Resetting nodes...")

        results, errors = run_command(args, nodes, "mc_reset", True)
        if errors:
            print("ERROR: MC reset failed. Backup partitions not updated.")
            return True

        return False
def version_compare(a, b):
    # Strip only a leading "v" (the original split('v', 1) removed the
    # first "v" anywhere in the string).
    a_stripped = a[1:] if a.startswith('v') else a
    b_stripped = b[1:] if b.startswith('v') else b
    parsed_a = parse_version(a_stripped)
    parsed_b = parse_version(b_stripped)
    if parsed_a == parsed_b:
        return 0
    # Newer versions sort first: return -1 when `a` is the newer version.
    return 1 if parsed_a < parsed_b else -1
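
Because version_compare() returns 1 when its first argument is the older version, wrapping it with functools.cmp_to_key yields a newest-first ordering. A minimal sketch with illustrative tags:

from functools import cmp_to_key

tags = ["v1.2.0", "v1.10.0", "v1.9.3"]
print(sorted(tags, key=cmp_to_key(version_compare)))
# ['v1.10.0', 'v1.9.3', 'v1.2.0']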
def exists(env):

    icecc = env.get('ICECC', False)
    if not icecc:
        return False
    icecc = env.WhereIs(icecc)

    pipe = SCons.Action._subproc(env, SCons.Util.CLVar(icecc) + ['--version'],
                                 stdin='devnull',
                                 stderr='devnull',
                                 stdout=subprocess.PIPE)

    if pipe.wait() != 0:
        return False

    validated = False
    for line in pipe.stdout:
        # subprocess pipes yield bytes on Python 3; decode before matching
        line = line.decode('utf-8', errors='replace')
        if validated:
            continue  # consume all data
        version_banner = re.search(r'^ICECC ', line)
        if not version_banner:
            continue
        icecc_version = re.split('ICECC (.+)', line)
        if len(icecc_version) < 2:
            continue
        icecc_version = parse_version(icecc_version[1])
        needed_version = parse_version(icecream_version_min)
        if icecc_version >= needed_version:
            validated = True

    return validated
Example #8
    def assertInterfacesImplemented(self, cls):
        "Given a class, assert that the zope.interface.Interfaces are implemented to specification."

        # see if this version of zope.interface is too old to run these tests
        zi_vers = pkg_resources.working_set.find(
            pkg_resources.Requirement.parse('zope.interface')).version
        if pkg_resources.parse_version(zi_vers) < pkg_resources.parse_version('4.1.1'):
            raise unittest.SkipTest("zope.interface is too old to run this test")

        import zope.interface.interface
        for interface in zope.interface.implementedBy(cls):
            for attr, template_argspec in interface.namesAndDescriptions():
                if not hasattr(cls, attr):
                    msg = "Expected: %r; to implement: %s as specified in %r" % (
                        cls, attr,
                        interface)
                    self.fail(msg)
                actual_argspec = getattr(cls, attr)
                while hasattr(actual_argspec, 'func_original'):
                    actual_argspec = actual_argspec.func_original
                actual_argspec = zope.interface.interface.fromMethod(actual_argspec)

                if actual_argspec.getSignatureInfo() != template_argspec.getSignatureInfo():
                    msg = "%s: expected: %s; got: %s" % (
                        attr,
                        template_argspec.getSignatureString(),
                        actual_argspec.getSignatureString())
                    self.fail(msg)
Example #9
def has_environment_marker_range_operators_support():
    """Code extracted from 'pytest/setup.py'
    https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
    The first known setuptools release to support environment markers with
    range operators is 17.1; see:
    https://setuptools.readthedocs.io/en/latest/history.html#id113
    """
    return parse_version(setuptools_version) >= parse_version('17.1')
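
A check like this usually decides how conditional dependencies get declared: with setuptools >= 17.1 the condition can live in the environment marker itself. A hedged sketch of the pattern (package names illustrative):

import sys

install_requires = ["requests"]
extras_require = {}
if has_environment_marker_range_operators_support():
    # setuptools/pip evaluate the marker at install time
    extras_require[':python_version<"3.0"'] = ["funcsigs"]
elif sys.version_info < (3, 0):
    # otherwise resolve the condition when setup.py runs
    install_requires.append("funcsigs")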
Example #10
    def add_version_section(self, version, date, contents):
        '''
        Adds a version section

        :param version: ``str`` version as a string
        :param date: ``datetime.datetime`` version date
        :param contents: ``list(list([str|Reference]))`` contents as a list
          of lists which contain a combination of ``str`` and
          ``Reference`` objects
        :returns: None
        '''
        if version in self.__versions:
            raise DuplicateVersionsException(version)
        self.__parsed_versions.append(pkg_resources.parse_version(version))
        self.__parsed_versions = sorted(self.__parsed_versions)
        # Cannot go parsed -> str, so sorting must be done by comparison
        new_versions = [None] * len(self.__parsed_versions)
        for v in self.__versions + [version]:
            parsed_v = pkg_resources.parse_version(v)
            # list.index() raises ValueError rather than returning -1
            try:
                index = self.__parsed_versions.index(parsed_v)
            except ValueError:
                raise RuntimeError("Inconsistent internal version storage state")
            new_versions[index] = v
        self.__versions = new_versions
        self.__dates[version] = date
        self.__content[version] = contents
Example #11
def process(cmd, opts):
  """
  Creates a process.
  Adds options (provided in the opts dict) to the given cmd.

  Parameters
  ----------
  cmd: str
    the command to add options
  opts: dict
    the options to add
  """
  if opts is None:
    opts = {}

  cmd = 'dat ' + cmd

  for key, val in opts.items():
    if (len(key) == 1):
      cmd += " -{0} {1}".format(key, val)
    else:
      cmd += " --{0}={1}".format(key, val)

  current_version = subprocess.check_output('dat -v', shell=True).decode().strip()
  if parse_version(current_version) < parse_version(COMPATIBLE_DAT_VERSION):
    raise DatException("Please update the dat version with npm install -g dat.",
                       "Your version is {0}, this datpy requires {1}".format(current_version, COMPATIBLE_DAT_VERSION))
  return subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
                          stderr=subprocess.PIPE, shell=True)
Example #12
def compare_pypi_repos(reference_repo, other_repo):
    upgrade_table = PrettyTable(["Package", reference_repo.server, other_repo.server])
    downgrade_table = PrettyTable(["Package", reference_repo.server, other_repo.server])
    skipped_table = PrettyTable(["Package", reference_repo.server, other_repo.server])
    skipped_packages = get_skipped_packages()
    reference_repo_versions = get_versions_from_reference(reference_repo)
    packages_to_check = list(reference_repo_versions.keys())
    for name in sorted(packages_to_check):
        try:
            reference_repo_version = reference_repo_versions[name]
            other_repo_version = other_repo.get_latest_version(name)
        except PackageNotFound:
            continue
        if other_repo_version != reference_repo_version:
            if name in skipped_packages or any(x in other_repo_version for x in ['a', 'b', 'dev', 'post', 'rc']):
                skipped_table.add_row([name, reference_repo_version, other_repo_version])
            elif parse_version(reference_repo_version) < parse_version(other_repo_version):
                upgrade_table.add_row([name, reference_repo_version, other_repo_version])
            else:
                downgrade_table.add_row([name, reference_repo_version, other_repo_version])

    print("Upgradable Packages:")
    print(upgrade_table)
    print()
    print("Downgradable Packages:")
    print(downgrade_table)
    print()
    print("Skipped Packages:")
    print(skipped_table)
Example #13
 def manage_nap_firmware_update(self, check_version=False):
   # Flash NAP if out of date.
   try:
     local_nap_version = parse_version(
         self.settings['system_info']['nap_version'].value)
     remote_nap_version = parse_version(self.newest_nap_vers)
     nap_out_of_date = local_nap_version != remote_nap_version
   except KeyError:
     nap_out_of_date = True
   if nap_out_of_date or not check_version:
     text = "Updating NAP"
     self._write(text)
     self.create_flash("M25")
     nap_n_ops = self.pk_flash.ihx_n_ops(self.nap_fw.ihx)
     progress_dialog = PulsableProgressDialog(nap_n_ops, True)
     progress_dialog.title = text
     GUI.invoke_later(progress_dialog.open)
     self.pk_flash.write_ihx(self.nap_fw.ihx, self.stream, mod_print=0x40, \
                             elapsed_ops_cb = progress_dialog.progress)
     self.stop_flash()
     self._write("")
     progress_dialog.close()
     return True
   else:
     text = "NAP is already to latest version, not updating!"
     self._write(text)
     self._write("")
     return False
Example #14
    def __lt__(self, other):
        if self.context != other.context:
            raise TypeError("{0}.context != {1}.context".format(self, other))

        return self._sort_key < other._sort_key

Example #15
	def _has_updated_kernel(self):
		if os.path.isdir('/lib/modules/'):
			for k_version in next(os.walk('/lib/modules/'))[1]:
				if parse_version(os.uname()[2]) < parse_version(k_version):
					return True
			return False
		return False
Example #16
    def test_simple(self):
        r"""simple test of version (to confirm we got right)"""

        import signet
        self.assertGreaterEqual(
            pkg_resources.parse_version(signet.__version__),
            pkg_resources.parse_version("1.0.2"))
Example #17
def list_available_extensions(index_url=None, show_details=False):
    index_data = get_index_extensions(index_url=index_url)
    if show_details:
        return index_data
    installed_extensions = get_extensions()
    installed_extension_names = [e.name for e in installed_extensions]
    results = []
    for name, items in OrderedDict(sorted(index_data.items())).items():
        # exclude extensions/versions incompatible with current CLI version
        items = [item for item in items if ext_compat_with_cli(item['metadata'])[0]]
        if not items:
            continue

        latest = max(items, key=lambda c: parse_version(c['metadata']['version']))
        installed = False
        if name in installed_extension_names:
            installed = True
            ext_version = get_extension(name).version
            if ext_version and parse_version(latest['metadata']['version']) > parse_version(ext_version):
                installed = str(True) + ' (upgrade available)'
        results.append({
            'name': name,
            'version': latest['metadata']['version'],
            'summary': latest['metadata']['summary'],
            'preview': latest['metadata'].get(EXT_METADATA_ISPREVIEW, False),
            'installed': installed
        })
    return results
Example #18
 def _modify_qchem_according_to_version(cls, qchem_cmd):
     cmd2 = copy.deepcopy(qchem_cmd)
     try:
         from rubicon.utils.qchem_info import get_qchem_version
         cur_version = get_qchem_version()
     except Exception:
         cur_version = parse_version("4.3.0")
     if cmd2 is not None:
         if cur_version >= parse_version("4.3.0"):
             if cmd2[0] == "qchem":
                 if "-seq" in cmd2:
                     cmd2.remove("-seq")
                 if "NERSC_HOST" in os.environ and \
                         os.environ["NERSC_HOST"] in ["cori", "edison"]:
                     if "-dbg" not in cmd2:
                         cmd2.insert(1, "-dbg")
                     if "-seq" in cmd2:
                         cmd2.remove("-seq")
                 elif "NERSC_HOST" in os.environ and \
                         os.environ["NERSC_HOST"] == "matgen":
                     if "-dbg" not in cmd2:
                         cmd2.insert(1, "-dbg")
                     if "-seq" in cmd2:
                         cmd2.remove("-seq")
         else:
             if "-dbg" in cmd2:
                 cmd2.remove("-dbg")
             if "-pbs" in cmd2:
                 cmd2.remove("-pbs")
     return cmd2
Example #19
def fix_code(code, directory):
    """Formats Python code to conform to the PEP 8 style guide.

    """
    if not black:
        raise Fault("black not installed", code=400)
    # Get black config from pyproject.toml
    line_length = black.DEFAULT_LINE_LENGTH
    string_normalization = True
    parser = configparser.ConfigParser()
    pyproject_path = os.path.join(directory, "pyproject.toml")
    if parser.read(pyproject_path):
        if parser.has_option("tool.black", "line-length"):
            line_length = parser.getint("tool.black", "line-length")
        if parser.has_option("tool.black", "skip-string-normalization"):
            string_normalization = not parser.getboolean(
                "tool.black", "skip-string-normalization"
            )
    try:
        if parse_version(black.__version__) < parse_version("19.0"):
            reformatted_source = black.format_file_contents(
                src_contents=code, line_length=line_length, fast=False
            )
        else:
            fm = black.FileMode(
                line_length=line_length, string_normalization=string_normalization
            )
            reformatted_source = black.format_file_contents(
                src_contents=code, fast=False, mode=fm
            )
        return reformatted_source
    except black.NothingChanged:
        return code
    except Exception as e:
        raise Fault("Error during formatting: {}".format(e), code=400)
Example #20
 def _scan_dirs(self):
     """
     Build up a dictionary (self.packages) of the latest versions of each 
     RPM found in the local directories.  The keys are the package names,
     and the values are dictionaries containing the version and the path.
     """
     for dir_name in self.dir_list:
         for file_name in os.listdir(dir_name):
             m = self._re_match(file_name)
             if not m:
                 continue
             name = m.group(1)
             version = m.group(2)
             release = m.group(3)
             # arch = m.group(4)
             parsed_version = parse_version(version)
             parsed_release = parse_version(release)
             if self.debug:
                 print "considering name=%s version=%s release=%s:" % (name, version, release)
             if (name not in self.packages
                 or ((parsed_version, parsed_release) > 
                     (self.packages[name]["parsed_version"], self.packages[name]["parsed_release"]))):
                 if self.debug:
                     print "  best so far"
                 path = os.path.join(dir_name, file_name)
                 self.packages[name] = {"version": version,
                                        "parsed_version": parsed_version, 
                                        "release": release,
                                        "parsed_release": parsed_release,
                                        "path": path}
             elif self.debug:
                 print ("  ignoring - already have version %s release %s" % 
                        (self.packages[name]["version"], self.packages[name]["release"]))
Example #21
    def _check_placement(self):
        """Checks to see if the placement API is ready for scheduling.

        Checks to see that the placement API service is registered in the
        service catalog and that we can make requests against it.
        """
        try:
            # TODO(efried): Use ksa's version filtering in _placement_get
            versions = self._placement_get("/")
            max_version = pkg_resources.parse_version(
                versions["versions"][0]["max_version"])
            needs_version = pkg_resources.parse_version(
                MIN_PLACEMENT_MICROVERSION)
            if max_version < needs_version:
                msg = (_('Placement API version %(needed)s needed, '
                         'you have %(current)s.') %
                       {'needed': needs_version, 'current': max_version})
                return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
        except ks_exc.MissingAuthPlugin:
            msg = _('No credentials specified for placement API in nova.conf.')
            return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
        except ks_exc.Unauthorized:
            msg = _('Placement service credentials do not work.')
            return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
        except ks_exc.EndpointNotFound:
            msg = _('Placement API endpoint not found.')
            return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
        except ks_exc.DiscoveryFailure:
            msg = _('Discovery for placement API URI failed.')
            return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
        except ks_exc.NotFound:
            msg = _('Placement API does not seem to be running.')
            return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)

        return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)
def legendBoxProperties():
    """Legend properties dictionary with keys consistent with MPL version.

    The argument names have changed in matplotlib 0.98.5.
    Old arguments do not work with later versions of matplotlib.

    Return dictionary of legend properties.
    """
    global _lbp
    # return immediately if properties have already been cached
    if len(_lbp) > 0:
        return _lbp
    #  figure out matplotlib version and appropriate names
    from pkg_resources import parse_version
    from matplotlib import __version__ as mplver
    if parse_version(mplver) >= parse_version('0.98.5'):
        _lbp = {
            'loc' : 'upper right',
            'numpoints' : 3,        # number of points in the legend line
            'borderpad' : 0.25,     # whitespace in the legend border
            'labelspacing' : 0,     # space between legend entries
            'handlelength' : 1.5,   # the length of the legend lines
            'handletextpad' : 0.5,  # separation between line and text
            'prop' : FontProperties(size='medium'),
        }
    else:
        _lbp = {
            'loc' : 'upper right',
            'numpoints' : 3,        # number of points in the legend line
            'pad' : 0.20,           # whitespace in the legend border
            'labelsep' : 0.005,     # space between legend entries
            'handlelen' : 0.03,     # the length of the legend lines
            'handletextsep' : 0.02, # separation between line and text
            'prop' : FontProperties(size='medium'),
        }
    return _lbp
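
The returned dictionary is meant to be splatted straight into matplotlib's legend call, keeping the keyword-name differences hidden from the caller. A minimal sketch, assuming the usual pyplot import:

import matplotlib.pyplot as plt

plt.plot([0, 1], [0, 1], label="identity")
plt.legend(**legendBoxProperties())
plt.show()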
Example #23
 def _checkparams(clevel, shuffle, cname, quantize):
     if clevel is not None:
         if not isinstance(clevel, int):
             raise ValueError("`clevel` must be an int.")
         if clevel < 0:
             raise ValueError("`clevel` must be 0 or a positive integer.")
     if shuffle is not None:
         if not isinstance(shuffle, (bool, int)):
             raise ValueError("`shuffle` must be an int.")
         if shuffle not in [bcolz.NOSHUFFLE, bcolz.SHUFFLE, bcolz.BITSHUFFLE]:
             raise ValueError("`shuffle` value not allowed.")
         if (shuffle == bcolz.BITSHUFFLE and
             parse_version(bcolz.blosc_version()[0]) < parse_version("1.8.0")):
             raise ValueError("You need C-Blosc 1.8.0 or higher for using "
                              "BITSHUFFLE.")
     # Store the cname as bytes object internally
     if cname is not None:
         list_cnames = bcolz.blosc_compressor_list()
         if cname not in list_cnames:
             raise ValueError(
                 "Compressor '%s' is not available in this build" % cname)
     if quantize is not None:
         if not isinstance(quantize, int):
             raise ValueError("`quantize` must be an int.")
         if quantize < 0:
             raise ValueError("`quantize` must be 0 or a positive integer.")
     return clevel, shuffle, cname, quantize
Example #24
def version_check(version: str,
                  exact: bool = False,
                  compiled: bool = True) -> bool:
    """Check if the Qt runtime version is the version supplied or newer.

    Args:
        version: The version to check against.
        exact: if given, check with == instead of >=
        compiled: Set to False to not check the compiled version.
    """
    # Catch code using the old API for this
    assert exact not in [operator.gt, operator.lt, operator.ge, operator.le,
                         operator.eq], exact
    if compiled and exact:
        raise ValueError("Can't use compiled=True with exact=True!")

    parsed = pkg_resources.parse_version(version)
    op = operator.eq if exact else operator.ge
    result = op(pkg_resources.parse_version(qVersion()), parsed)
    if compiled and result:
        # qVersion() ==/>= parsed, now check if QT_VERSION_STR ==/>= parsed.
        result = op(pkg_resources.parse_version(QT_VERSION_STR), parsed)
    if compiled and result:
        # Finally, check PYQT_VERSION_STR as well.
        result = op(pkg_resources.parse_version(PYQT_VERSION_STR), parsed)
    return result
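
For example, to guard behaviour that is only safe on newer Qt (version strings illustrative):

if version_check('5.9'):
    pass  # qVersion(), QT_VERSION_STR and PYQT_VERSION_STR are all >= 5.9
if version_check('5.11.2', exact=True, compiled=False):
    pass  # the runtime alone is exactly 5.11.2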
Example #25
    def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            layers = self.model.layers
            for layer in layers:
                if hasattr(layer, 'W'):
                    tf.histogram_summary('{}_W'.format(layer), layer.W)
                if hasattr(layer, 'b'):
                    tf.histogram_summary('{}_b'.format(layer), layer.b)
                if hasattr(layer, 'output'):
                    tf.histogram_summary('{}_out'.format(layer),
                                         layer.output)
        self.merged = tf.merge_all_summaries()
        if self.write_graph:
            if parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            self.writer = tf.train.SummaryWriter(self.log_dir)
    def equals(first, other):
        """
        Compares two params.Version objects

        @param first: first version to compare
        @param other: second version to compare

        @return: 0 if equal
                -1 if first > other
                 1 if first < other
        """

        if VersionUtils.__isVersionObject(first) and \
            VersionUtils.__isVersionObject(other):

            left = pkg_resources.parse_version(
                             VersionUtils.__toString(first)
                 )
            right = pkg_resources.parse_version(
                             VersionUtils.__toString(other)
                 )

            if left == right: return 0
            if left > right: return -1
            return 1
    def _parse_old_style_extra_args(self, *args):
        """Returns the post-0.3.0 style args if the pre-0.3.0 style args are
        passed in. The pre-0.3.0 style args always have three args: (x, t,
        d) where d is a dictionary which should always at least contain
        the key 'constants'. It may also contain a key 'specified'."""

        # DEPRECATED : Remove before 0.4.0 release.
        if parse_version(pydy.__version__) > parse_version('0.4.0'):
            msg = ("The old style args, i.e. {'constants': , 'specified'}, "
                   "for the generated function are no longer supported as "
                   "of PyDy 0.4.0. Please remove this function.")
            # msg was previously built but never used; raising it makes the
            # deprecation deadline enforceable.
            raise NotImplementedError(msg)

        last_arg = args[-1]
        try:
            constants = last_arg['constants']
        # ValueError is needed for older NumPy versions.
        except (KeyError, IndexError, ValueError):
            return args
        else:
            warnings.warn("The old style args, i.e. {'constants': , "
                          "'specified'}, for the generated function will be "
                          "removed in PyDy 0.4.0.",
                          PyDyDeprecationWarning)

            new_args = list(args[:-1])  # gets x and t

            if self.specifieds is not None:
                new_args.append(last_arg['specified'])

            new_args.append(constants)

            return tuple(new_args)
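
A hedged sketch of the two call signatures the shim reconciles (names illustrative):

# old style (pre-0.3.0): generated_rhs(x, t, {'constants': c, 'specified': f})
# new style:             generated_rhs(x, t, f, c)
# _parse_old_style_extra_args maps the former onto the latter, emitting a
# PyDyDeprecationWarning along the way.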
Example #28
    def check_for_update(self):
        """ Check if a newer version is available online and offer to update """
        # TODO: just return True or False, and let the caller call show_update_dialog()
        logging.info("Retrieving version info...")

        web_version = self.get_remote_version()

        if web_version is None:
            logging.info("Could not retrieve remote version, will not update")
            return

        logging.info("Found remote version %s", web_version)

        lv = pkg_resources.parse_version(odemis.get_version_simplified())
        rv = pkg_resources.parse_version(web_version)
        if rv <= lv:
            wx.MessageBox(
                u"You are already using the most recent version of Odemis.",
                u"Odemis Updater",
                style=wx.OK | wx.CENTER | wx.ICON_ASTERISK
            )
            return

        logging.info("Newer version found, suggesting update...")

        self.show_update_dialog(web_version)
Example #29
    def match_api_version(self, request_version, allowed_version):
        """

        :param request_version:
        :param allowed_version: may be represented in following forms:
            1. ``VERSION``
            2. ``==VERSION`` (the same as above)
            3. ``>VERSION``
            4. ``<VERSION``
            5. ``>=VERSION``
            6. ``<=Version``
            7. Comma-separated list of 1-7 evaluated as AND
        :return: :raise ValueError:
        """
        distinct_versions = {version.strip() for version in allowed_version.split(',')}
        for distinct_version in distinct_versions:
            operation = self.OPERATORS.get(distinct_version[:2])
            if operation:
                # prepare cases #2, #5, #6
                compare_with = distinct_version[2:]
            else:
                operation = self.OPERATORS.get(distinct_version[0])
                if operation:
                    # prepare cases #3, #4
                    compare_with = distinct_version[1:]
                else:
                    # prepare case #1
                    compare_with = distinct_version
                    operation = self.OPERATORS['==']

            # evaluate the case
            matched = operation(parse_version(request_version), parse_version(compare_with))
            if not matched:
                return False
        return True
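
A hedged sketch of how the allowed-version grammar evaluates, assuming OPERATORS maps '==', '>', '<', '>=' and '<=' to the corresponding operator-module functions:

# Each comma-separated clause must hold (AND semantics):
# self.match_api_version('1.5', '>=1.0,<2.0')  -> True  (both clauses hold)
# self.match_api_version('2.1', '>=1.0,<2.0')  -> False (second clause fails)
# self.match_api_version('1.0', '1.0')         -> True  (bare version means ==)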
def get_version_info(url):
    out_stream = StringIO()
    pkg_maxlen = 0
    version_maxlen = 0

    version_sections = extract_versions_section(url)
    for pkg_name, version_infos in version_sections.items():
        pkg_maxlen = max(len(pkg_name), pkg_maxlen)
        for version_info in version_infos:
            version_maxlen = max(len(version_info.version), version_maxlen)

    outfmt = ('{{pkg_name:{pkg_maxlen}}} = {{color}}{{version_info.version:{version_maxlen}}}{reset} {{index}} {{version_info.origin}}\n'  # NOQA
              .format(pkg_maxlen=pkg_maxlen, version_maxlen=version_maxlen,
                      reset=colorama.Fore.RESET))

    for pkg_name, version_infos in sorted(version_sections.items()):
        if (max((parse_version(version_info.version)
                 for version_info in version_infos)) !=
                parse_version(version_infos[0].version)):
            color = colorama.Fore.RED
        else:
            color = colorama.Fore.RESET
        for index, version_info in enumerate(version_infos):
            pkg_name_to_show = pkg_name if not index else ''
            out_stream.write(outfmt.format(pkg_name=pkg_name_to_show,
                                           version_info=version_info,
                                           color=color,
                                           index=index))
    out_stream.seek(0)
    return out_stream
Example #31
# Contains all the support functions/modules required by Vidgear

# import the necessary packages
import os, sys, requests, platform, errno
import numpy as np
from pkg_resources import parse_version
from colorlog import ColoredFormatter
from tqdm import tqdm
import logging as log

try:
    # import OpenCV Binaries
    import cv2

    # check whether OpenCV Binaries are 3.x+
    if parse_version(cv2.__version__) < parse_version("3"):
        raise ImportError(
            "[Vidgear:ERROR] :: Installed OpenCV API version(< 3.0) is not supported!"
        )
except ImportError:
    raise ImportError(
        "[Vidgear:ERROR] :: Failed to detect correct OpenCV executables, install it with `pip3 install opencv-python` command."
    )


def logger_handler():
    """
    returns logger handler
    """
    # logging formatter
    formatter = ColoredFormatter(
Example #32
import numpy as np

# pyembree exposes the wrapper version that is checked below
from pyembree import __version__ as _ver

from pkg_resources import parse_version

from .ray_util import contains_points

from .. import util
from .. import caching
from .. import intersections

# the factor of geometry.scale to offset a ray from a triangle
# to reliably not hit its origin triangle
_ray_offset_factor = 1e-4
# we want to clip our offset to a sane distance
_ray_offset_floor = 1e-8

# see if we're using a newer version of the pyembree wrapper
_embree_new = parse_version(_ver) >= parse_version('0.1.4')
# both old and new versions require exact but different type
_embree_dtype = [np.float64, np.float32][int(_embree_new)]


class RayMeshIntersector(object):

    def __init__(self,
                 geometry,
                 scale_to_box=True):
        """
        Do ray-mesh queries.

        Parameters
        -------------
        geometry     : Trimesh object
Example #33
        # Initialize options
        self.without_leveldb = None

    def run(self):
        # Use options
        if self.without_leveldb:
            with suppress(ValueError):
                idx = list(map(lambda i: "plyvel" in i, self.distribution.install_requires)).index(True)
                self.distribution.install_requires.pop(idx)

        super().run()

try:
    from pip._internal.req import parse_requirements
    from pip import __version__ as __pip_version
    pip_version = parse_version(__pip_version)
    if (pip_version >= parse_version("20")):
        from pip._internal.network.session import PipSession
    elif (pip_version >= parse_version("10")):
        from pip._internal.download import PipSession
except ImportError:  # pip version < 10.0
    from pip.req import parse_requirements
    from pip.download import PipSession

with open('README.rst') as readme_file:
    readme = readme_file.read()


leveldb_requirements = ["plyvel==1.3.0" if sys.platform in ["darwin", "linux"] else "plyvel-win32"]

# get the requirements from requirements.txt
Example #34
def astra_cuda_bp_scaling_factor(proj_space, reco_space, geometry):
    """Volume scaling accounting for differing adjoint definitions.

    ASTRA defines the adjoint operator in terms of a fully discrete
    setting (transposed "projection matrix") without any relation to
    physical dimensions, which makes a re-scaling necessary to
    translate it to spaces with physical dimensions.

    Behavior of ASTRA changes slightly between versions, so we keep
    track of it and adapt the scaling accordingly.
    """
    # Angular integration weighting factor
    # angle interval weight by approximate cell volume
    angle_extent = float(geometry.motion_partition.extent)
    num_angles = float(geometry.motion_partition.size)
    scaling_factor = angle_extent / num_angles

    # Correct in case of non-weighted spaces
    proj_extent = float(proj_space.partition.extent.prod())
    proj_size = float(proj_space.partition.size)
    proj_weighting = proj_extent / proj_size

    scaling_factor *= (proj_space.weighting.const / proj_weighting)
    scaling_factor /= (reco_space.weighting.const / reco_space.cell_volume)

    if parse_version(ASTRA_VERSION) < parse_version('1.8rc1'):
        if isinstance(geometry, Parallel2dGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
        elif isinstance(geometry, FanFlatGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
            # Additional magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)
        elif isinstance(geometry, Parallel3dAxisGeometry):
            # Scales with voxel stride
            # In 1.7, only cubic voxels are supported
            voxel_stride = reco_space.cell_sides[0]
            scaling_factor /= float(voxel_stride)
        elif isinstance(geometry, HelicalConeFlatGeometry):
            # Scales with 1 / cell_volume
            # In 1.7, only cubic voxels are supported
            voxel_stride = reco_space.cell_sides[0]
            scaling_factor /= float(voxel_stride)
            # Magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)**2

    else:
        if isinstance(geometry, Parallel2dGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
        elif isinstance(geometry, FanFlatGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
            # Magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)
        elif isinstance(geometry, Parallel3dAxisGeometry):
            # Scales with cell volume
            # currently only square voxels are supported
            scaling_factor /= reco_space.cell_volume
        elif isinstance(geometry, HelicalConeFlatGeometry):
            # Scales with cell volume
            scaling_factor /= reco_space.cell_volume
            # Magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)**2

            # Correction for scaled 1/r^2 factor in ASTRA's density weighting
            det_px_area = geometry.det_partition.cell_volume
            scaling_factor *= (src_radius**2 * det_px_area**2 /
                               reco_space.cell_volume**2)

        # TODO: add case with new ASTRA release

    return scaling_factor
Example #35
RETURN = '''
obj:
    description: IpamDnsProviderProfile (api/ipamdnsproviderprofile) object
    returned: success, changed
    type: dict
'''

from ansible.module_utils.basic import AnsibleModule
try:
    from avi.sdk.utils.ansible_utils import avi_common_argument_spec
    from pkg_resources import parse_version
    import avi.sdk
    sdk_version = getattr(avi.sdk, '__version__', None)
    if ((sdk_version is None)
            or (sdk_version and
                (parse_version(sdk_version) < parse_version('17.1')))):
        # It allows the __version__ to be '' as that value is used in development builds
        raise ImportError
    from avi.sdk.utils.ansible_utils import avi_ansible_api
    HAS_AVI = True
except ImportError:
    HAS_AVI = False


def main():
    argument_specs = dict(
        state=dict(default='present', choices=['absent', 'present']),
        avi_api_update_method=dict(default='put', choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        allocate_ip_in_vrf=dict(type='bool', ),
        aws_profile=dict(type='dict', ),
Example #36
def atleast_ncver(ver):
    from pkg_resources import parse_version
    import netCDF4
    return parse_version(netCDF4.__version__) >= parse_version(ver)
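
A hedged sketch of gating a test on the installed netCDF4 version, mirroring the scipy skipif pattern that appears further down:

import pytest

@pytest.mark.skipif(not atleast_ncver("1.4.0"),
                    reason="requires netCDF4 >= 1.4.0")
def test_needs_modern_netcdf():
    ...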
Example #37
CFLAGS = tuple(EXTRA_COMPILE_ARGS)
if "linux" in sys.platform or "darwin" in sys.platform:
    pymodinit_type = 'PyObject*' if PY3 else 'void'
    pymodinit = 'extern "C" __attribute__((visibility ("default"))) {}'.format(
        pymodinit_type)
    DEFINE_MACROS += (('PyMODINIT_FUNC', pymodinit),)
    DEFINE_MACROS += (('GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK', 1),)

# By default, Python3 distutils enforces compatibility of
# c plugins (.so files) with the OSX version Python was built with.
# We need OSX 10.10, the oldest which supports C++ thread_local.
# Python 3.9: Mac OS Big Sur sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') returns int (11)
if 'darwin' in sys.platform:
    mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
    if mac_target:
        mac_target = pkg_resources.parse_version(str(mac_target))
        if mac_target < pkg_resources.parse_version('10.10.0'):
            os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.10'
            os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
                r'macosx-[0-9]+\.[0-9]+-(.+)', r'macosx-10.10-\1',
                util.get_platform())


def cython_extensions_and_necessity():
    cython_module_files = [
        os.path.join(PYTHON_STEM,
                     name.replace('.', '/') + '.pyx')
        for name in CYTHON_EXTENSION_MODULE_NAMES
    ]
    config = os.environ.get('CONFIG', 'opt')
    prefix = 'libs/' + config + '/'
Example #38
from pathlib import Path
from pkg_resources import parse_version

version_file = Path(__file__).parent / "support/VERSION"

__version__ = version_file.read_text().strip()
__parsed_version__ = parse_version(__version__)
Example #39
        assert_almost_equal(dm, np.hypot(1, 2), 2)

        # Variance of the candidate sample does not affect answer.
        x = randn(0, 1, (n, d))
        y = randn([1, 2], 2, (m, d))
        dm = xca.seuclidean(x, y)
        assert_almost_equal(dm, np.hypot(1, 2), 2)

    def test_compare_with_matlab(self):
        x, y = matlab_sample()
        dm = xca.seuclidean(x, y)
        assert_almost_equal(dm, 2.8463, 4)


@pytest.mark.skipif(
    parse_version(__scipy_version__) < parse_version("1.6.0"),
    reason="Not supported in scipy<1.6.0",
)
class TestNN:
    def test_simple(self):
        d = 2
        n, m = 200, 200
        np.random.seed(1)
        x = np.random.randn(n, d)
        y = np.random.randn(m, d)

        # Almost identical samples
        dm = xca.nearest_neighbor(x + 0.001, x)
        assert_almost_equal(dm, 0, 2)

        # Same distribution but mixed
Example #40
    async def query(self, filters, options):
        """
        Query available chart releases.

        `query-options.extra.retrieve_resources` is a boolean which, when set, will retrieve
        existing kubernetes resources in the chart namespace.

        `query-options.extra.history` is a boolean which, when set, will retrieve all chart
        version upgrades for a chart release.

        `query-options.extra.include_chart_schema` is a boolean which, when set, will retrieve
        the schema being used by the chart release in question.
        """
        if not await self.middleware.call('service.started', 'kubernetes'):
            # We use filter_list here to ensure that `options` are respected, options like get: true
            return filter_list([], filters, options)

        update_catalog_config = {}
        catalogs = await self.middleware.call(
            'catalog.query', [], {'extra': {
                'item_details': True
            }})
        container_images = {}
        for image in await self.middleware.call('container.image.query'):
            for tag in image['repo_tags']:
                if not container_images.get(tag):
                    container_images[tag] = image

        for catalog in catalogs:
            update_catalog_config[catalog['label']] = {}
            for train in catalog['trains']:
                train_data = {}
                for catalog_item in catalog['trains'][train]:
                    versions = {
                        k: v
                        for k, v in catalog['trains'][train][catalog_item]
                        ['versions'].items() if v['healthy']
                    }
                    max_version = max([parse_version(v) for v in versions],
                                      default=parse_version('0.0.0'))
                    app_version = None
                    if str(max_version) in versions:
                        app_version = versions[str(
                            max_version)]['chart_metadata'].get('appVersion')

                    train_data[catalog_item] = {
                        'chart_version': max_version,
                        'app_version': app_version,
                    }

                update_catalog_config[catalog['label']][train] = train_data

        k8s_config = await self.middleware.call('kubernetes.config')
        k8s_node_ip = await self.middleware.call('kubernetes.node_ip')
        options = options or {}
        extra = copy.deepcopy(options.get('extra', {}))
        retrieve_schema = extra.get('include_chart_schema')
        get_resources = extra.get('retrieve_resources')
        get_history = extra.get('history')
        if retrieve_schema:
            questions_context = await self.middleware.call(
                'catalog.get_normalised_questions_context')
        else:
            questions_context = None

        if filters and len(filters) == 1 and filters[0][:2] == ['id', '=']:
            extra['namespace_filter'] = [
                'metadata.namespace', '=',
                f'{CHART_NAMESPACE_PREFIX}{filters[0][-1]}'
            ]
            resources_filters = [extra['namespace_filter']]
        else:
            resources_filters = [[
                'metadata.namespace', '^', CHART_NAMESPACE_PREFIX
            ]]

        ports_used = collections.defaultdict(list)
        for node_port_svc in await self.middleware.call(
                'k8s.service.query',
            [['spec.type', '=', 'NodePort']] + resources_filters):
            release_name = node_port_svc['metadata']['namespace'][
                len(CHART_NAMESPACE_PREFIX):]
            ports_used[release_name].extend([{
                'port': p['node_port'],
                'protocol': p['protocol']
            } for p in node_port_svc['spec']['ports']])

        storage_classes = collections.defaultdict(lambda: None)
        for storage_class in await self.middleware.call(
                'k8s.storage_class.query'):
            storage_classes[storage_class['metadata']['name']] = storage_class

        persistent_volumes = collections.defaultdict(list)

        # If the chart release was consuming any PV's, they would have to be manually removed from k8s database
        # because of chart release reclaim policy being retain
        for pv in await self.middleware.call('k8s.pv.query', [[
                'spec.csi.volume_attributes.openebs\\.io/poolname', '^',
                f'{os.path.join(k8s_config["dataset"], "releases")}/'
        ]]):
            dataset = pv['spec']['csi']['volume_attributes'][
                'openebs.io/poolname']
            rl = dataset.split('/', 4)
            if len(rl) > 4:
                persistent_volumes[rl[3]].append(pv)

        resources = {r.value: collections.defaultdict(list) for r in Resources}
        workload_status = collections.defaultdict(lambda: {
            'desired': 0,
            'available': 0
        })

        for resource in Resources:
            for r_data in await self.middleware.call(
                    f'k8s.{resource.name.lower()}.query', resources_filters):
                release_name = r_data['metadata']['namespace'][
                    len(CHART_NAMESPACE_PREFIX):]
                resources[resource.value][release_name].append(r_data)
                if resource in (Resources.DEPLOYMENT, Resources.STATEFULSET):
                    workload_status[release_name]['desired'] += (
                        r_data['status']['replicas'] or 0)
                    workload_status[release_name]['available'] += (
                        r_data['status']['ready_replicas'] or 0)

        release_secrets = await self.middleware.call(
            'chart.release.releases_secrets', extra)
        releases = []
        for name, release in release_secrets.items():
            config = {}
            release_data = release['releases'].pop(0)
            cur_version = release_data['chart_metadata']['version']

            for rel_data in filter(
                    lambda r: r['chart_metadata']['version'] == cur_version,
                    itertools.chain(reversed(release['releases']),
                                    [release_data])):
                config.update(rel_data['config'])

            pods_status = workload_status[name]
            pod_diff = pods_status['available'] - pods_status['desired']
            status = 'ACTIVE'
            if pod_diff == 0 and pods_status['desired'] == 0:
                status = 'STOPPED'
            elif pod_diff < 0:
                status = 'DEPLOYING'

            # We will retrieve all host ports being used
            for pod in filter(lambda p: p['status']['phase'] == 'Running',
                              resources[Resources.POD.value][name]):
                for container in pod['spec']['containers']:
                    ports_used[name].extend([{
                        'port': p['host_port'],
                        'protocol': p['protocol']
                    } for p in (container['ports'] or []) if p['host_port']])

            release_data.update({
                'path':
                os.path.join('/mnt', k8s_config['dataset'], 'releases', name),
                'dataset':
                os.path.join(k8s_config['dataset'], 'releases', name),
                'config':
                config,
                'status':
                status,
                'used_ports':
                ports_used[name],
                'pod_status':
                pods_status,
            })

            release_resources = {
                'storage_class':
                storage_classes[get_storage_class_name(name)],
                'persistent_volumes':
                persistent_volumes[name],
                'host_path_volumes':
                await
                self.host_path_volumes(resources[Resources.POD.value][name]),
                **{r.value: resources[r.value][name]
                   for r in Resources},
            }
            release_resources = {
                **release_resources,
                'container_images': {
                    i_name: {
                        'id':
                        image_details.get('id'),
                        'update_available':
                        image_details.get('update_available', False)
                    }
                    for i_name, image_details in map(
                        lambda i: (i, container_images.get(i, {})),
                        list(
                            set(c['image']
                                for workload_type in ('deployments',
                                                      'statefulsets') for
                                workload in release_resources[workload_type]
                                for c in workload['spec']['template']['spec']
                                ['containers'])))
                },
                'truenas_certificates': [
                    v['id'] for v in release_data['config'].get(
                        'ixCertificates', {}).values()
                ],
                'truenas_certificate_authorities': [
                    v['id'] for v in release_data['config'].get(
                        'ixCertificateAuthorities', {}).values()
                ],
            }
            if get_resources:
                release_data['resources'] = release_resources

            if get_history:
                release_data['history'] = release['history']

            current_version = parse_version(
                release_data['chart_metadata']['version'])
            catalog_version_dict = update_catalog_config.get(
                release_data['catalog'],
                {}).get(release_data['catalog_train'],
                        {}).get(release_data['chart_metadata']['name'], {})
            latest_version = catalog_version_dict.get('chart_version',
                                                      current_version)
            latest_app_version = catalog_version_dict.get('app_version')
            release_data['update_available'] = latest_version > current_version

            app_version = None
            if release_data['chart_metadata']['name'] == 'ix-chart':
                image_config = release_data['config'].get('image') or {}
                if all(k in image_config for k in ('tag', 'repository')):
                    # TODO: Let's see if we can find sane versioning for `latest` from upstream
                    if image_config['tag'] == 'latest':
                        app_version = f'{image_config["repository"]}:{image_config["tag"]}'
                    else:
                        app_version = image_config['tag']
                # Latest app version for ix-chart remains same
                latest_app_version = app_version
            else:
                app_version = release_data['chart_metadata'].get('appVersion')

            for key, app_v, c_v in (
                ('human_version', app_version, current_version),
                ('human_latest_version', latest_app_version, latest_version),
            ):
                if app_v:
                    release_data[key] = f'{app_v}_{c_v}'
                else:
                    release_data[key] = str(c_v)

            if retrieve_schema:
                chart_path = os.path.join(
                    release_data['path'], 'charts',
                    release_data['chart_metadata']['version'])
                if os.path.exists(chart_path):
                    release_data['chart_schema'] = await self.middleware.call(
                        'catalog.item_version_details', chart_path,
                        questions_context)
                else:
                    release_data['chart_schema'] = None

            release_data['container_images_update_available'] = any(
                details['update_available']
                for details in release_resources['container_images'].values())
            release_data['chart_metadata']['latest_chart_version'] = str(
                latest_version)
            release_data['portals'] = await self.middleware.call(
                'chart.release.retrieve_portals_for_chart_release',
                release_data, k8s_node_ip)

            if 'icon' not in release_data['chart_metadata']:
                release_data['chart_metadata']['icon'] = None

            releases.append(release_data)

        return filter_list(releases, filters, options)
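
# NOTE: a minimal standalone sketch of the update check above, using
# hypothetical chart versions; parse_version gives PEP 440-aware ordering,
# so '1.10.0' correctly sorts after '1.9.2' (plain string comparison would not).
from pkg_resources import parse_version

current_version = parse_version('1.9.2')
latest_version = parse_version('1.10.0')
update_available = latest_version > current_version  # True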
Example #41
    ('index', 'buildbot', u'BuildBot Documentation',
     [u'Brian Warner'], 1)
]


# Monkey-patch Sphinx to treat unhighlighted code as an error.
from sphinx.errors import SphinxWarning
import sphinx
import sphinx.highlighting
from pkg_resources import parse_version

# Versions of Sphinx below changeset 1860:19b394207746 (before the v0.6.6
# release) won't work due to a different PygmentsBridge interface.
required_sphinx_version = '0.6.6'
sphinx_version_supported = \
    parse_version(sphinx.__version__) >= parse_version(required_sphinx_version)

# This simple monkey-patch allows either failing on the first unhighlighted
# block, or printing all unhighlighted blocks without failing at all.
# The first behaviour is useful for testing that all code is highlighted;
# the second is useful for fixing lots of unhighlighted code.
fail_on_first_unhighlighted = True

class UnhighlightedError(SphinxWarning):
    pass

# PygmentsBridge.unhighlighted() added in Sphinx in changeset 574:f1c885fdd6ad
# (0.5 release).
def patched_unhighlighted(self, source):
    indented_source = '    ' + '\n    '.join(source.split('\n'))
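# NOTE: the excerpt stops here; a hedged sketch of how the patched method
# could honour `fail_on_first_unhighlighted` (the message text and the saved
# original method `orig_unhighlighted` are assumptions, not the excerpt's code):
#
#     message = 'Unhighlighted code found:\n' + indented_source
#     if fail_on_first_unhighlighted:
#         raise UnhighlightedError(message)
#     print(message)
#     return orig_unhighlighted(self, source)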
Example #42
def _get_version_class():
    modern_version = parse_version("1.0")
    if isinstance(modern_version, tuple):
        return None
    else:
        return type(modern_version)
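
# NOTE: a hedged usage sketch (the import mirrors what the snippet assumes):
# on legacy pkg_resources, parsed versions are plain tuples and there is no
# class to return; otherwise the concrete Version type supports isinstance().
from pkg_resources import parse_version

SetuptoolsVersion = _get_version_class()
if SetuptoolsVersion is not None:
    assert isinstance(parse_version('1.2.3'), SetuptoolsVersion)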
Example #43
from pkg_resources import parse_version

from snapcraft.internal import (
    common,
    errors,
    os_release,
    repo,
)


logger = logging.getLogger(__name__)


# Old pyelftools uses byte strings for section names.  Some data is
# also returned as bytes, which is handled below.
if parse_version(elftools.__version__) >= parse_version('0.24'):
    _DYNAMIC = '.dynamic'              # type: Union[str, bytes]
    _GNU_VERSION_R = '.gnu.version_r'  # type: Union[str, bytes]
    _INTERP = '.interp'                # type: Union[str, bytes]
else:
    _DYNAMIC = b'.dynamic'
    _GNU_VERSION_R = b'.gnu.version_r'
    _INTERP = b'.interp'
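
# NOTE: a hedged sketch of why the constants carry both types: the same
# lookup works on old and new pyelftools because the section name already
# has the right str/bytes type for the installed version ('/bin/ls' is a
# hypothetical ELF binary used only for illustration).
from elftools.elf.elffile import ELFFile

with open('/bin/ls', 'rb') as f:
    interp_section = ELFFile(f).get_section_by_name(_INTERP)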


class SonameCache:
    """A cache for sonames."""
    def __getitem__(self, key):
        return self._soname_paths[key]

    def __setitem__(self, key, item):
Example #44
import os
here = os.path.abspath(os.path.dirname(__file__))

import sys
if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2):
    raise RuntimeError("Python version 2.6, 2.7, or 3.2+ required.")

# Get the long description from the relevant file
with open(os.path.join(here, 'README.rst')) as f:
    long_description = f.read()

# Get the version from the relevant file
with open(os.path.join(here, 'modred/_version.py')) as f:
    exec(f.read())
# Get the development status from the version string
parsed_version = parse_version(__version__)
if any(w in ['*a', '*alpha'] for w in parsed_version):
    devstatus = 'Development Status :: 3 - Alpha'
elif any(w in ['*b', '*beta'] for w in parsed_version):
    devstatus = 'Development Status :: 4 - Beta'
else:
    devstatus = 'Development Status :: 5 - Production/Stable'
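
# NOTE: a hedged sketch of why the membership tests above work: under the
# legacy tuple API (and the setuptools iteration shim), pre-release markers
# surface as '*a'/'*b' components of the parsed version (exact layout is an
# assumption about old pkg_resources behaviour).
assert '*a' in tuple(parse_version('1.0a1'))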

# OKID_test_data_files = list()
# for c in ['SISO', 'SIMO', 'MISO', 'MIMO']:
#     OKID_test_data_files.append((os.path.join('modred', 'tests', 'OKID_files' , c),
#         glob.glob(os.path.join('modred', 'tests', 'OKID_files', c, '*'))))
setup(
    name='modred',
    version=__version__,
    description=('Compute modal decompositions and reduced-order models, '
Example #45
def v(versiontext):
    import pkg_resources
    return pkg_resources.parse_version(versiontext)
Example #46
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild

from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
import collections


if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
    raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))

class MicrosoftPe(KaitaiStruct):
    """
    .. seealso::
       Source - https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
    """

    class PeFormat(Enum):
        rom_image = 263
        pe32 = 267
        pe32_plus = 523
    SEQ_FIELDS = ["mz"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)

    def _read(self):
        self._debug['mz']['start'] = self._io.pos()
Example #47
import pkg_resources
import pytest
from _pytest.runner import runtestprotocol

HAS_RESULTLOG = False

try:
    from _pytest.resultlog import ResultLog

    HAS_RESULTLOG = True
except ImportError:
    # resultlog was removed in pytest 6.1, so we are running pytest >= 6.1
    pass

PYTEST_GTE_54 = pkg_resources.parse_version(
    pytest.__version__) >= pkg_resources.parse_version("5.4")


def works_with_current_xdist():
    """Returns compatibility with installed pytest-xdist version.

    When running tests in parallel using pytest-xdist < 1.20.0, the first
    report that is logged will finish and terminate the current node rather
    than rerunning the test. Thus we must skip logging of intermediate
    results under these circumstances, otherwise no test is rerun.

    """
    try:
        d = pkg_resources.get_distribution("pytest-xdist")
        return d.parsed_version >= pkg_resources.parse_version("1.20")
    except pkg_resources.DistributionNotFound:
Example #48
from __future__ import absolute_import, print_function

from builtins import str
from builtins import super
from builtins import range
from psychopy import logging
import wx
import numpy
import os
from psychopy.localization import _translate
from pkg_resources import parse_version

OK = wx.ID_OK

thisVer = parse_version(wx.__version__)


def ensureWxApp():
    # make sure there's a wxApp prior to showing a gui, e.g., for expInfo
    # dialog
    try:
        wx.Dialog(None, -1)  # not shown; FileDialog gives same exception
        return True
    except wx._core.PyNoAppError:
        if thisVer < parse_version('2.9'):
            return wx.PySimpleApp()
        elif thisVer >= parse_version('4.0') and thisVer < parse_version(
                '4.1'):
            raise Exception(
                "wx>=4.0 clashes with pyglet and making it unsafe "
Example #49
    ) as mock_version:
        mock_version.side_effect = [min_bq_version, pandas_version]
        try:
            gbq.to_gbq(
                DataFrame([[1]]),
                "dataset.tablename",
                project_id="my-project",
                verbose=True,
            )
        except gbq.TableCreationError:
            pass
        assert len(recwarn) == 0


@pytest.mark.skipif(
    pandas_installed_version < pkg_resources.parse_version("0.24.0"),
    reason="Requires pandas 0.24+",
)
def test_to_gbq_with_private_key_new_pandas_warns_deprecation(
        min_bq_version, monkeypatch):
    import pkg_resources
    from pandas_gbq import auth

    monkeypatch.setattr(auth, "get_credentials", mock_get_credentials)

    pandas_version = pkg_resources.parse_version("0.24.0")
    with pytest.warns(FutureWarning), mock.patch(
            "pkg_resources.Distribution.parsed_version",
            new_callable=mock.PropertyMock,
    ) as mock_version:
        mock_version.side_effect = [min_bq_version, pandas_version]
Example #50
# pip<1.4 doesn't have the --pre flag, and will thus attempt to install alpha
# and beta versions of Buildbot.  Prevent that from happening.
VERSION_MSG = """
This is a pre-release version of Buildbot, which can only be installed with
pip-1.4 or later. Try installing the latest stable version of Buildbot instead:
    pip install buildbot==0.8.12
See https://pypi.python.org/pypi/buildbot to verify the current stable version.
"""
if 'a' in version or 'b' in version:
    try:
        pip_dist = pkg_resources.get_distribution('pip')
    except pkg_resources.DistributionNotFound:
        pip_dist = None

    if pip_dist:
        if parse_version(pip_dist.version) < parse_version('1.4'):
            raise RuntimeError(VERSION_MSG)
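
# NOTE: a quick sketch of the ordering that makes this guard necessary:
# PEP 440 pre-releases sort before the final release, so a pip without
# --pre support could otherwise pick up alphas/betas.
assert parse_version('0.9.0b1') < parse_version('0.9.0')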

twisted_ver = ">= 17.9.0"
autobahn_ver = ">= 0.16.0"
txaio_ver = ">= 2.2.2"

bundle_version = version.split("-")[0]

# dependencies
setup_args['install_requires'] = [
    'setuptools >= 8.0',
    'Twisted ' + twisted_ver,
    'Jinja2 >= 2.1',
    # required for tests, but Twisted requires this anyway
    'zope.interface >= 4.1.1',
Example #51
# for complete details.

from __future__ import absolute_import, division, print_function

import os
import platform
import sys
from distutils.command.build import build

import pkg_resources

import setuptools
from setuptools import find_packages, setup
from setuptools.command.install import install

if (pkg_resources.parse_version(setuptools.__version__) <
        pkg_resources.parse_version("18.5")):
    raise RuntimeError(
        "cryptography requires setuptools 18.5 or newer, please upgrade to a "
        "newer version of setuptools")

base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "src")

# When executing the setup.py, we need to be able to import ourselves; this
# means that we need to add the src/ directory to the sys.path.
sys.path.insert(0, src_dir)

about = {}
with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f:
    exec(f.read(), about)
Example #52
def min_bq_version():
    import pkg_resources

    return pkg_resources.parse_version("1.9.0")
Example #53
 def testVersionHashable(self):
     """
     Ensure that our versions stay hashable even though we've subclassed
     them and added some shim code to them.
     """
     assert (hash(parse_version("1.0")) == hash(parse_version("1.0")))
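
# NOTE: a small sketch of what hash-consistency buys: equal parsed versions
# can key a dict or cache interchangeably.
cache = {parse_version('1.0'): 'payload'}
assert cache[parse_version('1.0')] == 'payload'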
Example #54
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/geolomas")
try:
    shutil.rmtree(output_dir)
except FileNotFoundError:
    pass

try:
    import sphinx
    from pkg_resources import parse_version

    cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
    cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)

    args = cmd_line.split(" ")
    if parse_version(sphinx.__version__) >= parse_version('1.7'):
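        # Sphinx >= 1.7 expects apidoc.main() to receive the argument list
        # without the program name, hence dropping the first element here.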
        args = args[1:]

    apidoc.main(args)
except Exception as e:
    print("Running `sphinx-apidoc` failed!\n{}".format(e))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
              'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
Example #55
 def testVersionIndexable(self):
     """
     Some projects were doing things like parse_version("v")[0], so we'll
     support indexing the same as we support iterating.
     """
     assert parse_version("1.0")[0] == "00000001"
Example #56
    DEFAULT_CALLBACKS,
    Linker,
)
from bleach.sanitizer import (
    ALLOWED_ATTRIBUTES,
    ALLOWED_PROTOCOLS,
    ALLOWED_STYLES,
    ALLOWED_TAGS,
    Cleaner,
)

# yyyymmdd
__releasedate__ = '20200311'
# x.y.z or x.y.z.dev0 -- semver
__version__ = '3.1.2'
VERSION = parse_version(__version__)

__all__ = ['clean', 'linkify']


def clean(text,
          tags=ALLOWED_TAGS,
          attributes=ALLOWED_ATTRIBUTES,
          styles=ALLOWED_STYLES,
          protocols=ALLOWED_PROTOCOLS,
          strip=False,
          strip_comments=True):
    """Clean an HTML fragment of malicious content and return it

    This function is a security-focused function whose sole purpose is to
    remove malicious content from a string such that it can be displayed as
Example #57
    def init(self, inventory, config, initial_limit=None):
        # Config validation
        #

        # If no config, create one using the defaults
        if config is None:
            config = Config()

        # Error if our min version is not met
        if config.MIN_PYINFRA_VERSION is not None:
            if config.REQUIRE_PYINFRA_VERSION is None:
                config.REQUIRE_PYINFRA_VERSION = '>={0}'.format(
                    config.MIN_PYINFRA_VERSION)
                logger.warning(
                    '`MIN_PYINFRA_VERSION` is deprecated, please use `REQUIRE_PYINFRA_VERSION`.',
                )
            else:
                logger.warning(
                    'Ignoring legacy `MIN_PYINFRA_VERSION` because '
                    '`REQUIRE_PYINFRA_VERSION` also exists.', )

        if config.REQUIRE_PYINFRA_VERSION is not None:
            running_version = parse_version(__version__)
            required_versions = Requirement.parse(
                'pyinfra{0}'.format(config.REQUIRE_PYINFRA_VERSION), )

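            # Parsed versions support `in` against a pkg_resources
            # Requirement, e.g. parse_version('1.3') is contained in
            # Requirement.parse('pyinfra>=1.0,<2.0') while parse_version('2.1')
            # is not.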
            if running_version not in required_versions:
                raise PyinfraError(('pyinfra version requirement not met '
                                    '(requires {0}, running {1})').format(
                                        config.REQUIRE_PYINFRA_VERSION,
                                        __version__,
                                    ))

        if not config.PARALLEL:
            # TODO: benchmark this
            # In my own tests the optimum number of parallel SSH processes is
            # ~20 per CPU core - no science here yet, needs benchmarking!
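            # e.g. 8 CPUs -> ideal_parallel = 160; with 50 hosts and
            # MAX_PARALLEL = 100 this resolves to min(160, 50, 100) = 50.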
            cpus = cpu_count()
            ideal_parallel = cpus * 20

            config.PARALLEL = (min(ideal_parallel, len(inventory),
                                   MAX_PARALLEL) if MAX_PARALLEL is not None
                               else min(ideal_parallel, len(inventory)))

        # If explicitly set, just issue a warning
        elif MAX_PARALLEL is not None and config.PARALLEL > MAX_PARALLEL:
            logger.warning((
                'Parallel set to {0}, but this may hit the open files limit of {1}.\n'
                '    Max recommended value: {2}').format(
                    config.PARALLEL, nofile_limit, MAX_PARALLEL))

        # Actually initialise the state object
        #

        self.callback_handlers = []

        # Setup greenlet pools
        self.pool = Pool(config.PARALLEL)
        self.fact_pool = Pool(config.PARALLEL)

        # Connection storage
        self.ssh_connections = {}
        self.sftp_connections = {}

        # Private keys
        self.private_keys = {}

        # Facts storage
        self.facts = {}
        self.fact_locks = {}

        # Assign inventory/config
        self.inventory = inventory
        self.config = config

        # Hosts we've activated at any time
        self.activated_hosts = set()
        # Active hosts that *haven't* failed yet
        self.active_hosts = set()
        # Hosts that are ready to be deployed to
        self.ready_hosts = set()
        # Hosts that have failed
        self.failed_hosts = set()

        # Limit hosts changes dynamically to limit operations to a subset of hosts
        self.limit_hosts = initial_limit

        # Op basics
        self.op_line_numbers_to_hash = {}
        self.op_meta = {}  # maps operation hash -> names/etc
        self.ops_run = set()  # set of ops which have been started/run

        # Op dict for each host
        self.ops = {host: {} for host in inventory}

        # Facts dict for each host
        self.facts = {host: {} for host in inventory}

        # Meta dict for each host
        self.meta = {
            host: {
                'ops': 0,  # one function call in a deploy file
                'commands': 0,  # actual # of commands to run
                'op_hashes': set(),
            }
            for host in inventory
        }

        # Results dict for each host
        self.results = {
            host: {
                'ops': 0,  # success_ops + failed ops w/ignore_errors
                'success_ops': 0,
                'error_ops': 0,
                'commands': 0,
            }
            for host in inventory
        }

        # Assign state back references to inventory & config
        inventory.state = config.state = self
        for host in inventory:
            host.state = self

        self.initialised = True

        # Flag to track added users (via `server.user` operation calls). This is
        # specifically to address users not existing during fact gathering phase
        # causing failures with su_user/sudo_user. If we expect to add the user
        # those facts should not fail but default.
        self.will_add_users = []
Example #58
 def testVersionTupleSort(self):
     """
     Some projects expected to be able to sort tuples against the return
     value of parse_version. So again we'll add a warning enabled shim to
     make this possible.
     """
     assert parse_version("1.0") < tuple(parse_version("2.0"))
     assert parse_version("1.0") <= tuple(parse_version("2.0"))
     assert parse_version("1.0") == tuple(parse_version("1.0"))
     assert parse_version("3.0") > tuple(parse_version("2.0"))
     assert parse_version("3.0") >= tuple(parse_version("2.0"))
     assert parse_version("3.0") != tuple(parse_version("2.0"))
     assert not (parse_version("3.0") != tuple(parse_version("3.0")))
Example #59
# Translation of filters to strings
filters = {
    NOSHUFFLE: "noshuffle",
    SHUFFLE: "shuffle",
    BITSHUFFLE: "bitshuffle"
}

min_numexpr_version = '2.5.2'  # the minimum version of Numexpr needed
numexpr_here = False
try:
    import numexpr
except ImportError:
    pass
else:
    if parse_version(
            numexpr.__version__) >= parse_version(min_numexpr_version):
        numexpr_here = True

# Check for dask (as another virtual machine for chunked eval)
min_dask_version = '0.9.0'  # the minimum version of dask needed
dask_here = False
try:
    import dask
except ImportError:
    pass
else:
    if parse_version(dask.__version__) >= parse_version(min_dask_version):
        dask_here = True

# Check for pandas (for data container conversion purposes)
pandas_here = False
Example #60
 def c(s1, s2):
     p1, p2 = parse_version(s1), parse_version(s2)
     assert p1 < p2, (s1, s2, p1, p2)