Exemple #1
0
# Read the release version string from the build metadata file.
with open("version.yml", 'r') as file:
    data = yaml.safe_load(file)
    version = data['version']

os.environ["PYFA_DIST_DIR"] = os.path.join(os.getcwd(), 'dist')

os.environ["PYFA_VERSION"] = version
# Raw string: a plain literal would treat "\P", "\I", ... as (invalid)
# escape sequences -- a DeprecationWarning today and a latent path bug.
iscc = r"C:\Program Files (x86)\Inno Setup 6\ISCC.exe"

source = os.path.join(os.environ["PYFA_DIST_DIR"], "pyfa")

fileName = "pyfa-{}-win".format(os.environ["PYFA_VERSION"])

print("Compiling EXE")

v = Version(version)

print(v)

# Invoke the Inno Setup command-line compiler with the installer script,
# passing version/paths as preprocessor defines.
call([
    iscc,
    os.path.join(os.getcwd(), "dist_assets", "win", "pyfa-setup.iss"),
    "/dMyAppVersion=%s" % v,
    "/dMyAppDir=%s" % source,
    "/dMyOutputDir=%s" % os.path.join(os.getcwd()),
    "/dMyOutputFile=%s" % fileName
])  # stdout=devnull, stderr=devnull

print("Done")
def _extract_version(raw):
    """Search *raw* for a release tag and return it as a Version, else None."""
    found = re.search(RELEASE_REGEX, raw)
    if found is None:
        return None
    return Version(found.group())
def mxnet_eia_py_version(mxnet_eia_version, request):
    """Pick the Python version for an MXNet EIA test.

    MXNet EIA releases from 1.7.0 onward only ship py3; older releases use
    the parametrized fixture value.
    """
    if Version(mxnet_eia_version) >= Version("1.7.0"):
        return "py3"
    return request.param
Exemple #4
0
"""
Unit tests for the Drycc scheduler module.

Run the tests with './manage.py test scheduler'
"""
from packaging.version import Version
from unittest import mock

from scheduler import KubeHTTPException, KubeException
from scheduler.tests import TestCase
from scheduler.utils import generate_random_name


@mock.patch('scheduler.KubeHTTPClient.version', lambda *args: Version('1.2'))
class HorizontalPodAutoscalersTest(TestCase):
    """Tests scheduler horizontalpodautoscaler calls"""
    def create(self, namespace=None, name=generate_random_name(), **kwargs):
        """
        Helper function to create and verify a horizontalpodautoscaler on the namespace

        Creates a Deployment so that HPA can work off an object
        """
        namespace = self.namespace if namespace is None else namespace
        # these are all required even if it is kwargs...
        d_kwargs = {
            'app_type': kwargs.get('app_type', 'web'),
            'version': kwargs.get('version', 'v99'),
            'replicas': kwargs.get('replicas', 1),
            'pod_termination_grace_period_seconds': 2,
            'image': 'quay.io/fake/image',
            'entrypoint': 'sh',
def pw_input_helper(input_params,
                    structure,
                    stop_at_first_error=False,
                    flat_mode=False,
                    version='6.2'):
    """Validate if the input dictionary for Quantum ESPRESSO is valid.

    Return the dictionary (possibly with small variations: e.g. convert integer to float where necessary, recreate the
    proper structure if flat_mode is True, ...) to use as input parameters (use_parameters) for the pw.x calculation.

    :param input_params: If flat_mode is True, pass a dictionary
        with 'key' = value; use the correct type
        (int, bool, ...) for value. If an array is required:

           * if its length is fixed: pass a list of the required length

           * if its length is 'ntyp': pass a dictionary, associating each
             specie to its value.

           * (other lengths are not supported)

       Example::

             {
             'calculation': 'vc-relax',
             'ecutwfc': 30.,
             'hubbard_u': {'O': 1},
             }

       If instead flat_mode is False, pass a dictionary in the format
       expected by AiiDA (keys are namelists, values are in the format
       specified above, i.e. key/value pairs for all keywords in the
       given namelist).
       Example::

             {
                 'CONTROL': {
                     'calculation': 'vc-relax'
                     },
                 'SYSTEM': {
                     'hubbard_u': {'O': 1.0},
                     'ecutwfc': 30.,
                     },
             },


    :param structure: the StructureData object used as input for QE pw.x
    :param stop_at_first_error: if True, stops at the first error.
        Otherwise, when, possible, continue and give a global error for all
        the issues encountered.
    :param flat_mode: if True, instead of passing the dictionary of namelists,
        and inside the keywords, pass directly the keywords - this function
        will return the correct dictionary to pass to the PwCalculation,
        with the keywords arranged in the correct namelist.
    :param version: string with version number, used to find the correct XML
        file descriptor. If not specified, uses the most recent version
        available in the validator. It reads the definitions from the XML files
        in the same folder as this python module. If the version is not
        recognised, the Exception message will also suggest a close-by version.

    :return: dict of namelists (``{'CONTROL': {...}, 'SYSTEM': {...}, ...}``)
        with validated, type-converted keyword values.

    :raise QEInputValidationError:
        if the input is not considered valid.
    """
    # pylint: disable=too-many-branches,too-many-statements
    # Collected error strings; only raised at the end (unless
    # stop_at_first_error is True, in which case each error raises directly).
    errors_list = []

    # =========== LIST OF KNOWN NAMELISTS, CARDS, VARIABLES, ... ===============
    compulsory_namelists = ['CONTROL', 'SYSTEM', 'ELECTRONS']

    valid_calculations_and_opt_namelists = {  # pylint: disable=invalid-name
        'scf': [],
        'nscf': [],
        'bands': [],
        'relax': ['IONS'],
        'md': ['IONS'],
        'vc-relax': ['IONS', 'CELL'],
        'vc-md': ['IONS', 'CELL'],
    }

    if not isinstance(input_params, dict):
        raise QEInputValidationError('input_params must be a dictionary')
    # So that if I modify input_params, nothing happens outside
    if flat_mode:
        input_params_internal = copy.deepcopy(input_params)
    else:
        # Nested mode: flatten all namelists into a single dict, remembering
        # which namelist each keyword came from so misplacements and
        # duplicates can be reported.
        input_params_internal = {}
        input_original_namelists = {}
        all_input_namelists = set()
        for namelist, content in input_params.items():
            if not isinstance(content, dict):
                raise QEInputValidationError(
                    "The content associated to the namelist '{}' must be a dictionary"
                    .format(namelist))
            all_input_namelists.add(namelist)
            for key, value in content.items():
                input_params_internal[key] = copy.deepcopy(value)
                if key in input_original_namelists:
                    err_str = "The keyword '{}' was specified both in the namelist {} and {}.".format(
                        key, input_original_namelists[key], namelist)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
                input_original_namelists[key] = namelist

    # List of the keywords that must not appear in the input
    # (e.g. because they are automatically filled in by the plugin)
    blocked_kws = [
        i.lower() for i in [
            'pseudo_dir',
            'outdir',
            'ibrav',
            'celldm',
            'nat',
            'ntyp',
            'prefix',
            'a',
            'b',
            'c',
            'cosab',
            'cosac',
            'cosbc',
        ]
    ]

    # List of the keywords that must ALWAYS appear in the input
    compulsory_kws = {i.lower()
                      for i in [
                          'calculation',
                          'ecutwfc',
                      ]}

    # ===================== PARSING OF THE XML DEFINITION FILE ===============
    module_dir = os.path.dirname(__file__)
    if module_dir == '':
        module_dir = os.curdir
    xml_path = os.path.join(module_dir, 'INPUT_PW-{}.xml'.format(version))
    try:
        with open(xml_path, 'r') as handle:
            dom = xml.dom.minidom.parse(handle)
    except IOError:
        # No descriptor for the requested version: list the available ones
        # and suggest the closest older version in the error message.
        prefix = 'INPUT_PW-'
        suffix = '.xml'
        versions = [
            fname[len(prefix):-len(suffix)] for fname in os.listdir(module_dir)
            if fname.startswith(prefix) and fname.endswith(suffix)
        ]
        versions = sorted(versions, key=Version)
        # Sort the requested version in among the available ones to locate
        # its closest older neighbour.
        strictversions = versions + [version]
        strictversions = sorted(strictversions, key=Version)
        pos = strictversions.index(version)
        if pos == 0:
            add_str = ' (the version you specified is too old)'
        else:
            add_str = ' (the older, closest version you can use is {})'.format(
                strictversions[pos - 1])
        raise QEInputValidationError('Unknown Quantum Espresso version: {}. '
                                     'Available versions: {};{}'.format(
                                         version, ', '.join(versions),
                                         add_str))

    # ========== List of known PW variables (from XML file) ===============
    known_kws = dom.getElementsByTagName('var')
    valid_kws = {}
    for keyword in known_kws:
        if keyword in valid_kws:
            raise InternalError(
                'Something strange, I found more than one '
                "keyword '{}' in the XML description...".format(keyword))

        valid_kws[keyword.getAttribute('name').lower()] = {}
        parent = keyword
        try:
            # Walk up the DOM tree until the enclosing <namelist> is found.
            while True:
                parent = parent.parentNode
                if parent.tagName == 'namelist':
                    valid_kws[keyword.getAttribute('name').lower(
                    )]['namelist'] = parent.getAttribute('name').upper()
                    break
        except AttributeError:
            # There are also variables in cards instead of namelists:
            # I ignore them
            pass
            # raise QEInputValidationError("Unable to find namelist for "
            #     "keyword %s." % kw.getAttribute('name'))
        expected_type = keyword.getAttribute('type')
        # Fix for groups of variables
        if expected_type == '':
            if keyword.parentNode.tagName == 'vargroup':
                expected_type = keyword.parentNode.getAttribute('type')
        valid_kws[keyword.getAttribute(
            'name').lower()]['expected_type'] = expected_type.upper()

    # ====== List of known PW 'dimensions' (arrays) (from XML file) ===========
    known_dims = dom.getElementsByTagName('dimension')
    valid_dims = {}
    for dim in known_dims:
        if dim in valid_dims:
            raise InternalError(
                'Something strange, I found more than one '
                "keyword '{}' in the XML description...".format(dim))

        valid_dims[dim.getAttribute('name').lower()] = {}
        parent = dim
        try:
            while True:
                parent = parent.parentNode
                if parent.tagName == 'namelist':
                    valid_dims[dim.getAttribute('name').lower(
                    )]['namelist'] = parent.getAttribute('name').upper()
                    break
        except AttributeError:
            # There are also variables in cards instead of namelists:
            # I ignore them
            pass
            # raise QEInputValidationError("Unable to find namelist "
            #     "for keyword %s." % dim.getAttribute('name'))
        expected_type = dim.getAttribute('type')
        # Fix for groups of variables
        if expected_type == '':
            if dim.parentNode.tagName == 'vargroup':
                expected_type = dim.parentNode.getAttribute('type')
        valid_dims[dim.getAttribute(
            'name').lower()]['expected_type'] = expected_type.upper()
        # I assume start_val is always 1
        start_val = dim.getAttribute('start')
        if start_val != '1':
            raise InternalError(
                "Wrong start value '{}' in input array (dimension) {}".format(
                    start_val, dim.getAttribute('name')))
        # I save the string as it is; somewhere else I will check for its value
        valid_dims[dim.getAttribute(
            'name').lower()]['end_val'] = dim.getAttribute('end')

    # ====== List of known PW 'multidimensions' (arrays) (from XML file) ===========
    known_multidims = dom.getElementsByTagName('multidimension')
    valid_multidims = {}
    for dim in known_multidims:
        if dim in valid_multidims:
            raise InternalError(
                'Something strange, I found more than one '
                "multidimensional keyword '{}' in the XML description...".
                format(dim))

        valid_multidims[dim.getAttribute('name').lower()] = {}
        parent = dim
        try:
            while True:
                parent = parent.parentNode
                if parent.tagName == 'namelist':
                    valid_multidims[dim.getAttribute('name').lower(
                    )]['namelist'] = parent.getAttribute('name').upper()
                    break
        except AttributeError:
            # There are also variables in cards instead of namelists: ignore them
            pass

        expected_type = dim.getAttribute('type').upper()
        start_values = dim.getAttribute('start').split(',')
        end_values = dim.getAttribute('end').split(',')
        indexes = dim.getAttribute('indexes').split(',')

        valid_multidims[dim.getAttribute(
            'name').lower()]['expected_type'] = expected_type
        valid_multidims[dim.getAttribute(
            'name').lower()]['start'] = start_values
        valid_multidims[dim.getAttribute('name').lower()]['end'] = end_values
        valid_multidims[dim.getAttribute('name').lower()]['indexes'] = indexes

        # Each index needs matching start/end bounds; unequal lengths mean a
        # malformed XML schema.
        if len(set([len(start_values), len(end_values), len(indexes)])) != 1:
            raise InternalError(
                'XML schema defines a multidimension keyword with start, end and indexes values of unequal length'
            )

    # Used to suggest valid keywords if an unknown one is found
    valid_invars_list = list(
        set([i.lower()
             for i in valid_dims] + [i.lower() for i in valid_multidims] +
            [i.lower() for i in valid_kws]) - set(blocked_kws))

    # =================== Check for blocked keywords ===========================
    for keyword in input_params_internal:
        if keyword in blocked_kws:
            err_str = "You should not provide explicitly keyword '{}'.".format(
                keyword)
            if stop_at_first_error:
                raise QEInputValidationError(err_str)
            else:
                errors_list.append(err_str)

    # from 5.0.2, this CANNOT be specified anymore!
    if Version(version) < Version('5.0.2'):
        # To be sure that things are read in angstrom - not possible in recent
        # versions
        input_params_internal['a'] = 1.

    # Get info on atomic species from the StructureData object
    atomic_species_list = [k.name for k in structure.kinds]

    try:
        calculation_type = input_params_internal['calculation']
    except KeyError:
        raise QEInputValidationError(
            'Error, you need to specify at least the '
            'calculation type (among {})'.format(', '.join(
                list(valid_calculations_and_opt_namelists.keys()))))

    try:
        opt_namelists = valid_calculations_and_opt_namelists[calculation_type]
    except KeyError:
        raise QEInputValidationError(
            'Error, {} is not a valid value for '
            'the calculation type (valid values: {})'.format(
                calculation_type,
                ', '.join(list(valid_calculations_and_opt_namelists.keys()))))

    internal_dict = {i: {} for i in compulsory_namelists + opt_namelists}
    all_namelists = set(compulsory_namelists)
    for namelists in valid_calculations_and_opt_namelists.values():
        all_namelists.update(namelists)

    if not flat_mode:
        # Unexpected namelists specified by the user
        additional_namelists = sorted(all_input_namelists - set(all_namelists))
        if additional_namelists:
            err_str = 'Error, the following namelists were specified but are not expected: {}'.format(
                ', '.join(additional_namelists))
            if stop_at_first_error:
                raise QEInputValidationError(err_str)
            else:
                errors_list.append(err_str)

    # Empty list that contains the list of provided kws to check for
    # the compulsory ones at the end
    inserted_kws = []
    # I parse each element of the input dictionary
    for keyword, value in input_params_internal.items():
        keyword = keyword.lower()

        if keyword in valid_kws:
            # It is a variable
            found_var = valid_kws[keyword]
            namelist_name = found_var['namelist']
            if not flat_mode:
                input_namelist_name = input_original_namelists[keyword]
                if namelist_name != input_namelist_name:
                    err_str = "Error, keyword '{}' specified in namelist '{}', but it should be instead in '{}'".format(
                        keyword, input_namelist_name, namelist_name)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
            try:
                internal_dict[namelist_name][keyword] = _check_and_convert(
                    keyword, value, found_var['expected_type'])
            except KeyError:
                if namelist_name in all_namelists:
                    err_str = 'Error, namelist {} not valid for calculation type {}'.format(
                        namelist_name, calculation_type)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
                else:
                    err_str = 'Error, unknown namelist {}'.format(
                        namelist_name)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
            except TypeError as exception:
                if stop_at_first_error:
                    raise
                else:
                    errors_list.append(str(exception))

        elif keyword in valid_dims:
            # It is an array
            found_var = valid_dims[keyword]
            namelist_name = found_var['namelist']
            if not flat_mode:
                input_namelist_name = input_original_namelists[keyword]
                if namelist_name != input_namelist_name:
                    err_str = "Error, keyword '{}' specified in namelist '{}', but it should be instead in '{}'".format(
                        keyword, input_namelist_name, namelist_name)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
            # I accept only ntyp or an integer as end_val
            if found_var['end_val'] == 'ntyp':
                if not isinstance(value, dict):
                    err_str = "Error, expected dictionary to associate each specie to a value for keyword '{}'.".format(
                        keyword)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
                        continue

                outdict = {}
                for kindname, found_item in value.items():
                    if kindname not in atomic_species_list:
                        err_str = "Error, '{}' is not a valid kind name.".format(
                            kindname)
                        if stop_at_first_error:
                            raise QEInputValidationError(err_str)
                        else:
                            errors_list.append(err_str)
                            continue
                    try:
                        outdict[kindname] = _check_and_convert(
                            keyword, found_item, found_var['expected_type'])
                    except TypeError as exception:
                        if stop_at_first_error:
                            raise
                        else:
                            errors_list.append(str(exception))

                try:
                    internal_dict[namelist_name][keyword] = outdict
                except KeyError:
                    err_str = 'Error, unknown namelist {}'.format(
                        namelist_name)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
                        continue
            else:
                try:
                    end_value = int(found_var['end_val'])
                except ValueError:
                    err_str = "Error, invalid end value '{}' for keyword '{}'.".format(
                        found_var['end_val'], keyword)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
                        continue
                if not isinstance(value, list) or len(value) != end_value:
                    err_str = "Error, expecting a list of length {} for keyword ' {}'.".format(
                        end_value, keyword)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
                        continue

                outlist = []
                for found_item in value:
                    if found_item is None:
                        # skip if the value is None (i.e., not provided)
                        outlist.append(None)
                    else:
                        try:
                            outlist.append(
                                _check_and_convert(keyword, found_item,
                                                   found_var['expected_type']))
                        except TypeError as exception:
                            if stop_at_first_error:
                                raise
                            else:
                                errors_list.append(str(exception))
                                outlist.append(None)

                try:
                    internal_dict[namelist_name][keyword] = outlist
                except KeyError:
                    err_str = 'Error, unknown namelist {}'.format(
                        namelist_name)
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
                        continue

        elif keyword in valid_multidims:
            # It is a multidimensional array
            variable = valid_multidims[keyword]
            indexes = variable['indexes']
            namelist_name = variable['namelist']

            # Create empty list for this keyword in the correct namelist
            try:
                internal_dict[namelist_name][keyword] = []
            except KeyError:
                err_str = 'Error, unknown namelist {}'.format(namelist_name)
                if stop_at_first_error:
                    raise QEInputValidationError(err_str)
                else:
                    errors_list.append(err_str)
                    continue

            # Each entry of `value` is an array [index_1, ..., index_N, actual_value].
            for array in value:

                # Append empty list for this array of values
                internal_dict[namelist_name][keyword].append([])

                # Each array should contain N + 1 values, where N is the number of indexes for this multidimensional
                if len(array) != len(indexes) + 1:
                    err_str = 'Error, expected {} values per array for kw {}, got only {}.'.format(
                        len(indexes) + 1, keyword, len(array))
                    if stop_at_first_error:
                        raise QEInputValidationError(err_str)
                    else:
                        errors_list.append(err_str)
                        continue

                actual_value = array[-1]

                for i, index in enumerate(indexes):

                    index_value = array[i]

                    try:
                        int(variable['start'][i])
                    except ValueError:
                        err_str = "Error, invalid start value '{}' for keyword '{}'.".format(
                            variable['start'][i], keyword)
                        if stop_at_first_error:
                            raise QEInputValidationError(err_str)
                        else:
                            errors_list.append(err_str)
                            continue

                    end_value = variable['end'][i]

                    if end_value == 'ntyp':

                        kindname = index_value

                        if kindname not in atomic_species_list:
                            err_str = "Error, '{}' is not a valid kind name.".format(
                                kindname)
                            if stop_at_first_error:
                                raise QEInputValidationError(err_str)
                            else:
                                errors_list.append(err_str)
                                continue

                        internal_dict[namelist_name][keyword][-1].append(
                            kindname)
                    else:
                        # Other types are assumed to be an integer
                        try:
                            index_value = int(index_value)
                        except ValueError:
                            err_str = 'Error, only integer types are supported for index {}, got {}'.format(
                                index, index_value)
                            if stop_at_first_error:
                                raise QEInputValidationError(err_str)
                            else:
                                errors_list.append(err_str)
                                continue

                        internal_dict[namelist_name][keyword][-1].append(
                            index_value)

                # Validate the actual value, convert it and append to the current array
                converted = _check_and_convert(keyword, actual_value,
                                               variable['expected_type'])
                internal_dict[namelist_name][keyword][-1].append(converted)

        else:
            # Neither a variable nor an array
            err_str = 'Problem parsing keyword {}. '.format(keyword)
            similar_kws = difflib.get_close_matches(keyword, valid_invars_list)
            if len(similar_kws) == 1:
                err_str += 'Maybe you wanted to specify {}?'.format(
                    similar_kws[0])
            elif len(similar_kws) > 1:
                err_str += 'Maybe you wanted to specify one of these: {}?'.format(
                    ', '.join(similar_kws))
            else:
                err_str += '(No similar keywords found...)'
            if stop_at_first_error:
                raise QEInputValidationError(err_str)
            else:
                errors_list.append(err_str)

        # Used to check if all compulsory variables are set
        inserted_kws += [keyword]

    # ============== I check here compulsory variables ===========
    missing_kws = compulsory_kws - set(inserted_kws)
    if missing_kws:
        err_str = 'Missing compulsory variables: {}.'.format(
            ', '.join(missing_kws))
        if stop_at_first_error:
            raise QEInputValidationError(err_str)
        else:
            errors_list.append(err_str)

    if errors_list:
        raise QEInputValidationError(
            'Errors! {} issues found:\n* '.format(len(errors_list)) +
            '\n* '.join(errors_list))

    return internal_dict
Exemple #6
0
    def __call__(self):  # noqa: C901
        """Steps executed to bump.

        Reads the current version from configuration, determines the
        increment from commit history, updates version files, then creates
        the bump commit and tag.

        Raises SystemExit with a specific exit code on failure (missing
        version, no commits, commit/tag errors) or with the default code
        when stopping early (dry run / files-only mode).
        """
        try:
            current_version_instance: Version = Version(
                self.bump_settings["version"])
        except TypeError:
            out.error(
                "[NO_VERSION_SPECIFIED]\n"
                "Check if current version is specified in config file, like:\n"
                "version = 0.4.3\n")
            raise SystemExit(NO_VERSION_SPECIFIED)

        # Initialize values from sources (conf)
        current_version: str = self.config.settings["version"]

        tag_format: str = self.bump_settings["tag_format"]
        bump_commit_message: str = self.bump_settings["bump_message"]
        version_files: list = self.bump_settings["version_files"]

        dry_run: bool = self.arguments["dry_run"]
        is_yes: bool = self.arguments["yes"]
        increment: Optional[str] = self.arguments["increment"]
        prerelease: str = self.arguments["prerelease"]
        is_files_only: Optional[bool] = self.arguments["files_only"]

        current_tag_version: str = bump.create_tag(current_version,
                                                   tag_format=tag_format)

        is_initial = self.is_initial_tag(current_tag_version, is_yes)
        if is_initial:
            commits = git.get_commits()
        else:
            commits = git.get_commits(current_tag_version)

        # No commits, there is no need to create an empty tag.
        # Unless we previously had a prerelease.
        if not commits and not current_version_instance.is_prerelease:
            out.error("[NO_COMMITS_FOUND]\n" "No new commits found.")
            raise SystemExit(NO_COMMITS_FOUND)

        if increment is None:
            increment = self.find_increment(commits)

        # Increment is removed when current and next version
        # are expected to be prereleases.
        if prerelease and current_version_instance.is_prerelease:
            increment = None

        new_version = bump.generate_version(current_version,
                                            increment,
                                            prerelease=prerelease)
        new_tag_version = bump.create_tag(new_version, tag_format=tag_format)
        message = bump.create_commit_message(current_version, new_version,
                                             bump_commit_message)

        # Report found information
        # BUGFIX: the original used f"message\n" (no braces), printing the
        # literal word "message" instead of the generated commit message.
        out.write(f"{message}\n"
                  f"tag to create: {new_tag_version}\n"
                  f"increment detected: {increment}\n")

        # Do not perform operations over files or git.
        if dry_run:
            raise SystemExit()

        bump.update_version_in_files(current_version, new_version.public,
                                     version_files)
        if is_files_only:
            raise SystemExit()

        self.config.set_key("version", new_version.public)
        c = git.commit(message, args="-a")
        if c.err:
            # BUGFIX: "errror" -> "error" in the user-facing message.
            out.error('git.commit error: "{}"'.format(c.err.strip()))
            raise SystemExit(COMMIT_FAILED)
        c = git.tag(new_tag_version)
        if c.err:
            out.error(c.err)
            raise SystemExit(TAG_FAILED)
        out.success("Done!")
Exemple #7
0
from path import Path
import tox.config
from packaging.requirements import Requirement, InvalidRequirement
from packaging.specifiers import SpecifierSet, Specifier
from packaging.version import Version
import six

# Django release lines this library must be tested against.
LIBRARY_REQUIRED_DJANGO_VERSIONS = {'1.8', '1.11'}
# Version ranges an application may pin Django to.
APPLICATION_ALLOWED_DJANGO_VERSIONS = {
    SpecifierSet('>=1.8,<1.9'),
    SpecifierSet('>=1.11,<2.0'),
}

# Every patch release for each known (major, minor) Django line.
DJANGO_VERSIONS = {
    Version('{}.{}.{}'.format(major_ver, minor_ver, patch_ver))
    for major_ver, minor_map in {
        1: {
            8: range(19),
            9: range(14),
            10: range(8),
            11: range(1),
        }
    }.items()
    for minor_ver, patch_range in minor_map.items()
    for patch_ver in patch_range
}

LOG = logging.getLogger(__name__)


def setup_call(parsed_setup_py):
    for statement in parsed_setup_py.body:
def xgboost_gpu_framework_version(xgboost_version):
    """Fixture value for GPU XGBoost runs; skips algorithm-mode and CPU-only versions."""
    if xgboost_version == "1" or xgboost_version == "latest":
        pytest.skip("Skipping XGBoost algorithm version.")
    if Version(xgboost_version) >= Version("1.2"):
        return xgboost_version
    pytest.skip("Skipping XGBoost cpu-only version.")
def tensorflow_inference_py_version(tensorflow_inference_version, request):
    """Pick the python tag for a TF inference image version."""
    v = Version(tensorflow_inference_version)
    # 1.15.0 and the 1.15.4+ patch line are py36-only; everything else
    # falls through to the shared TF mapping helper.
    if v == Version("1.15") or Version("1.15.4") <= v < Version("1.16"):
        return "py36"
    return _tf_py_version(tensorflow_inference_version, request)
Exemple #10
0
    def __new__(
        self,
        *args,
        min_gpus: int = 0,
        min_torch: Optional[str] = None,
        max_torch: Optional[str] = None,
        min_python: Optional[str] = None,
        quantization: bool = False,
        amp_apex: bool = False,
        amp_native: bool = False,
        tpu: bool = False,
        horovod: bool = False,
        horovod_nccl: bool = False,
        skip_windows: bool = False,
        special: bool = False,
        rpc: bool = False,
        fairscale: bool = False,
        fairscale_pipe: bool = False,
        deepspeed: bool = False,
        **kwargs
    ):
        """
        Args:
            args: native pytest.mark.skipif arguments
            min_gpus: min number of gpus required to run test
            min_torch: minimum pytorch version to run test
            max_torch: maximum pytorch version to run test
            min_python: minimum python version required to run test
            quantization: if `torch.quantization` package is required to run test
            amp_apex: NVIDIA Apex is installed
            amp_native: if native PyTorch native AMP is supported
            tpu: if TPU is available
            horovod: if Horovod is installed
            horovod_nccl: if Horovod is installed with NCCL support
            skip_windows: skip test for Windows platform (typically fo some limited torch functionality)
            special: running in special mode, outside pytest suit
            rpc: requires Remote Procedure Call (RPC)
            fairscale: if `fairscale` module is required to run the test
            fairscale_pipe: if `fairscale` pipe support is required to run the test
            deepspeed: if `deepspeed` module is required to run the test
            kwargs: native pytest.mark.skipif keyword arguments

        Returns a ``pytest.mark.skipif`` mark whose condition is True when any
        requested requirement is unmet; the reason string lists the unmet ones.
        """
        # Each requested gate appends one boolean "skip" condition and the
        # human-readable reason shown when that condition triggers. Probes
        # (CUDA, installed torch version, env vars) only run when requested.
        conditions = []
        reasons = []

        if min_gpus:
            conditions.append(torch.cuda.device_count() < min_gpus)
            reasons.append(f"GPUs>={min_gpus}")

        if min_torch:
            torch_version = get_distribution("torch").version
            conditions.append(Version(torch_version) < Version(min_torch))
            reasons.append(f"torch>={min_torch}")

        if max_torch:
            torch_version = get_distribution("torch").version
            conditions.append(Version(torch_version) >= Version(max_torch))
            reasons.append(f"torch<{max_torch}")

        if min_python:
            py_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            conditions.append(Version(py_version) < Version(min_python))
            reasons.append(f"python>={min_python}")

        if quantization:
            # quantization also requires the default 'fbgemm' backend engine
            _miss_default = 'fbgemm' not in torch.backends.quantized.supported_engines
            conditions.append(not _TORCH_QUANTIZE_AVAILABLE or _miss_default)
            reasons.append("PyTorch quantization")

        if amp_native:
            conditions.append(not _NATIVE_AMP_AVAILABLE)
            reasons.append("native AMP")

        if amp_apex:
            conditions.append(not _APEX_AVAILABLE)
            reasons.append("NVIDIA Apex")

        if skip_windows:
            conditions.append(sys.platform == "win32")
            reasons.append("unimplemented on Windows")

        if tpu:
            conditions.append(not _TPU_AVAILABLE)
            reasons.append("TPU")

        if horovod:
            conditions.append(not _HOROVOD_AVAILABLE)
            reasons.append("Horovod")

        if horovod_nccl:
            conditions.append(not _HOROVOD_NCCL_AVAILABLE)
            reasons.append("Horovod with NCCL")

        if special:
            # special tests only run when explicitly enabled via env var
            env_flag = os.getenv("PL_RUNNING_SPECIAL_TESTS", '0')
            conditions.append(env_flag != '1')
            reasons.append("Special execution")

        if rpc:
            conditions.append(not _RPC_AVAILABLE)
            reasons.append("RPC")

        if fairscale:
            conditions.append(not _FAIRSCALE_AVAILABLE)
            reasons.append("Fairscale")

        if fairscale_pipe:
            conditions.append(not _FAIRSCALE_PIPE_AVAILABLE)
            reasons.append("Fairscale Pipe")

        if deepspeed:
            conditions.append(not _DEEPSPEED_AVAILABLE)
            reasons.append("Deepspeed")

        # Keep only the reasons whose condition actually fired, so the skip
        # message lists just the unmet requirements.
        reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
        return pytest.mark.skipif(
            *args,
            condition=any(conditions),
            reason=f"Requires: [{' + '.join(reasons)}]",
            **kwargs,
        )
def huggingface_pytorch_latest_inference_py_version(
        huggingface_inference_pytorch_latest_version):
    """Return py38 for PyTorch 1.9 and newer, py36 otherwise."""
    if Version(huggingface_inference_pytorch_latest_version) >= Version("1.9"):
        return "py38"
    return "py36"
Exemple #12
0
def _is_plotting_supported():
    """Return True when the installed scikit-learn is at least 0.22.0."""
    import sklearn

    installed = Version(sklearn.__version__)
    return installed >= Version("0.22.0")
Exemple #13
0
def _is_supported_version():
    """Return True when the installed scikit-learn meets the minimum version."""
    import sklearn

    return not (Version(sklearn.__version__) < Version(_MIN_SKLEARN_VERSION))
Exemple #14
0
    def fixed_in(self,
                 version: Optional[str] = None,
                 pattern: str = "(?P<version>.*)") -> None:
        """Checks if there is a release with higher or equal version number in the watched repository.

        Useful when issue is fixed (closed), not yet released but the maintainer
        has stated in which version it will be released. This assertion will fail when the
        expected version or higher is available.

        Git tags are used as version numbers and are interpreted according to PEP 440
        (https://www.python.org/dev/peps/pep-0440/). Anything that does not resemble a
        valid version number will be ignored. To parse git tags that contain version
        numbers, use the ``pattern`` argument.

        :param version: The lowest version number to trigger an ``AssertionError``.
        :param pattern: Use for parsing version number out of a git tag. The value is a
            regular expression acceptable by :py:func:`re.compile`. It is expected that the
            version is wrapped in a group named ``version``.

            Example: To match version number ``2.3.4`` in ``releases/2.3.4``, use
            ``pattern="releases/(?P<version>.*)"``. Note that :py:func:`re.match` is used
            internally so the string must be matched from the beginning.

            If you would like to test the version parsing, leave the ``version`` parameter
            unset. The test will fail and show the latest version as part of the error
            message.

        :raises requests.HTTPError: When response status code from GitHub is not 200.
        :raises AssertionError: When test fails.
        :raises ValueError: When ``pattern`` does not contain correct group.
        """
        releases_url = f"{self._URL_API}/repos/{self._repository_id}/git/refs/tags"

        # Validate the pattern up front so a bad pattern is a ValueError,
        # not a silent "no versions found".
        if "(?P<version>" not in pattern:
            raise ValueError(
                "The 'pattern' parameter must contain a group '(?P<version>…)'."
            )

        # Prefer the cached latest version; query GitHub only when the cache
        # is empty (KeyError) or holds a value Version() rejects (ValueError).
        try:
            latest_version = Version(self._cache["latest_version"])
            pass  # pylint: disable=unnecessary-pass; this line should be covered
        except (KeyError, ValueError):
            response: Response = requests.get(releases_url, auth=self._auth)
            self._handle_connection_error(response)

            versions = self._ordered_version_numbers(response.json(), pattern)
            assert versions, "No tags with a valid semantic versions were found in the repository."
            latest_version = versions[0]

            self._cache["latest_version"] = str(latest_version)

        # Calling without an expected version always fails, surfacing the
        # latest tag so the user can verify pattern parsing.
        assert (
            version is not None
        ), f"This test does not have expected version number set. Latest version is '{latest_version}'."

        awaiting_version = Version(version)

        # Fail once the awaited release (or anything newer) has been tagged.
        assert latest_version < awaiting_version, (
            f"Release '{version}' of '{self._repository_id}' is available. Latest version "
            f"is '{latest_version}'. Visit {self._URL_WEB}/{self._repository_id}/releases."
        )
Exemple #15
0
 def version(self):
     """ Version of Neo4j used to build the Docker image used by
     this service.
     """
     # Tarball names carry exactly four dash-separated fields, the third
     # being the version; tuple unpacking enforces that shape.
     _product, _edition, version_string, _suffix = self.tarball.split("-")
     return Version(version_string)
def _tf_py_version(tf_version, request):
    """Map a TensorFlow version string to the python tag its images use."""
    v = Version(tf_version)
    # Patch lines 1.15.0/1.15.4+, 2.0.0/2.0.3+ and 2.1.0/2.1.2+ are py3-only.
    if v == Version("1.15") or Version("1.15.4") <= v < Version("1.16"):
        return "py3"
    if v < Version("1.11"):
        return "py2"
    if v == Version("2.0") or Version("2.0.3") <= v < Version("2.1"):
        return "py3"
    if v == Version("2.1") or Version("2.1.2") <= v < Version("2.2"):
        return "py3"
    # Remaining pre-2.2 versions are parametrized by the test fixture.
    if v < Version("2.2"):
        return request.param
    return "py37" if v < Version("2.6") else "py38"
Exemple #17
0
        return torch.sinh(x)


@basic_unit
class UnaryCosh(nn.Module):
    """Elementwise hyperbolic cosine unit."""

    def forward(self, x):
        return x.cosh()


@basic_unit
class UnaryTanh(nn.Module):
    """Elementwise hyperbolic tangent unit."""

    def forward(self, x):
        return x.tanh()


# Register the custom asinh unit only on torch versions older than
# TorchVersion (newer torch presumably covers it elsewhere — confirm).
if Version(torch.__version__) < Version(TorchVersion):

    @basic_unit
    class UnaryAsinh(nn.Module):
        """Elementwise inverse hyperbolic sine unit."""

        def forward(self, x):
            return x.asinh()


@basic_unit
class UnaryAtan(nn.Module):
    """Elementwise arctangent unit."""

    def forward(self, x):
        return x.atan()


if not Version(torch.__version__) >= Version(TorchVersion):
Exemple #18
0
def run_sm_profiler_tests(image, profiler_tests_dir, test_file, processor):
    """
    Testrunner to execute SM profiler tests from DLC repo

    :param image: image URI under test; framework/version are derived from its tag
    :param profiler_tests_dir: local checkout containing the sagemaker-tests suite
    :param test_file: test module under tests/ to run
    :param processor: device type (e.g. "cpu"/"gpu"); upper-cased into the
        exported ENV_<PROCESSOR>_TRAIN_IMAGE variable and the test id
    :raises SMProfilerRCTestFailure: when the pytest run fails and a results
        file was produced
    """
    ctx = Context()

    # Install profiler requirements only once - pytest-rerunfailures has a known issue
    # with the latest pytest https://github.com/pytest-dev/pytest-rerunfailures/issues/128
    try:
        ctx.run(
            "pip install -r "
            "https://raw.githubusercontent.com/awslabs/sagemaker-debugger/master/config/profiler/requirements.txt && "
            "pip install smdebug && "
            "pip uninstall -y pytest-rerunfailures",
            hide=True,
        )
    except UnexpectedExit:
        # Wait a minute and a half if we get an invoke failure - since smprofiler test requirements can be flaky
        # NOTE(review): no retry follows this sleep — the install is simply
        # skipped on failure; confirm whether a retry was intended here.
        time.sleep(90)

    framework, version = get_framework_and_version_from_tag(image)

    # Conditionally set sm data parallel tests, based on config file rules from link below:
    # https://github.com/awslabs/sagemaker-debugger/tree/master/config/profiler
    enable_sm_data_parallel_tests = "true"
    if framework == "pytorch" and Version(version) < Version("1.6"):
        enable_sm_data_parallel_tests = "false"
    if framework == "tensorflow" and Version(version) < Version("2.3"):
        enable_sm_data_parallel_tests = "false"

    # Set SMProfiler specific environment variables
    smprof_configs = {
        "use_current_branch": "false",
        "enable_smdataparallel_tests": enable_sm_data_parallel_tests,
        "force_run_tests": "false",
        "framework": framework,
        "build_type": "release"
    }

    # Command to set all necessary environment variables
    export_cmd = " && ".join(f"export {key}={val}" for key, val in smprof_configs.items())
    export_cmd = f"{export_cmd} && export ENV_CPU_TRAIN_IMAGE=test && export ENV_GPU_TRAIN_IMAGE=test && " \
                 f"export ENV_{processor.upper()}_TRAIN_IMAGE={image}"

    # pytest writes a JSON report here; it is parsed below for logging/errors.
    test_results_outfile = os.path.join(os.getcwd(), f"{get_container_name('smprof', image)}.txt")
    with ctx.prefix(f"cd {profiler_tests_dir}"):
        with ctx.prefix(f"cd sagemaker-tests && {export_cmd}"):
            try:
                ctx.run(
                    f"pytest --json-report --json-report-file={test_results_outfile} -n=auto "
                    f"-v -s -W=ignore tests/{test_file}::test_{processor}_jobs",
                    hide=True,
                )
                with open(test_results_outfile) as outfile:
                    result_data = json.load(outfile)
                    LOGGER.info(f"Tests passed on {image}; Results:\n{json.dumps(result_data, indent=4)}")
            except Exception as e:
                # Attach the JSON report to the failure when available;
                # otherwise re-raise the original error untouched.
                if os.path.exists(test_results_outfile):
                    with open(test_results_outfile) as outfile:
                        result_data = json.load(outfile)
                    raise SMProfilerRCTestFailure(
                        f"Failed SM Profiler tests. Results:\n{json.dumps(result_data, indent=4)}"
                    ) from e
                raise
from packaging.version import Version
from git import Repo

# First CLI argument selects the release branch to start from (optional).
try:
    initial_branch = sys.argv[1]
except IndexError:
    initial_branch = None

repo = Repo(os.getcwd())
heads = repo.git.ls_remote(
    "--heads", "https://github.com/pulp/pulp_ansible.git").split("\n")
# Keep only remote heads named like "<major>.<minor>" release branches.
branches = [
    h.split("/")[-1] for h in heads if re.search(r"^([0-9]+)\.([0-9]+)$",
                                                 h.split("/")[-1])
]
# Sort numerically (PEP 440), not lexicographically ("1.10" > "1.9").
branches.sort(key=lambda ver: Version(ver))

headers = {
    "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
    "Accept": "application/vnd.github.v3+json",
}

# Bail out when no branch was given or it is not a known release branch.
if not initial_branch or initial_branch not in branches:
    exit("Initial branch not found")
else:
    starting = branches.index(initial_branch)

# Endpoint used to trigger the update_ci workflow via workflow_dispatch.
github_api = "https://api.github.com"
workflow_path = "/actions/workflows/update_ci.yml/dispatches"
url = f"{github_api}/repos/pulp/pulp_ansible{workflow_path}"
Exemple #20
0
        """
        Connect to a database.

        :param host: The database host to connect to.
        :param port: The database port to connect to.
        :param username: The database username used for the authentication.
        :param schema: The database schema to connect to.
        :return: the authorized connection object.
        """


# On Airflow 2.4+ the plain BaseHook is the right base class. On 2.3 and
# below, DbApiHook must derive from airflow's original DbApiHook, because
# SqlSensor and BaseSqlOperator from "airflow.operators"/"airflow.sensors"
# reject hooks that are not subclasses of the original DbApiHook.
if Version(version) >= Version('2.4'):
    BaseForDbApiHook: Type[BaseHook] = BaseHook  # type: ignore[no-redef]
else:
    try:
        from airflow.hooks.dbapi import DbApiHook as BaseForDbApiHook
    except ImportError:
        # just in case we have a problem with circular import
        BaseForDbApiHook: Type[BaseHook] = BaseHook  # type: ignore[no-redef]


class DbApiHook(BaseForDbApiHook):
    """
    Abstract base class for sql hooks.

    :param schema: Optional DB schema that overrides the schema specified in the connection. Make sure that
        if you change the schema parameter value in the constructor of the derived Hook, such change
Exemple #21
0
from multiprocessing import cpu_count
import pkg_resources as pkgr
from packaging.version import Version
import numpy as np

from nipype.interfaces.ants.registration import RegistrationOutputSpec
from nipype.interfaces.ants import AffineInitializer
from nipype.interfaces.base import (traits, isdefined, BaseInterface,
                                    BaseInterfaceInputSpec, File)

from templateflow.api import get as get_template
from .. import NIWORKFLOWS_LOG, __version__
from .fixes import (FixHeaderRegistration as Registration)

# Parsed (PEP 440) package version; reused below as `package_version` to
# enable trait deprecation handling.
niworkflows_version = Version(__version__)


class _RobustMNINormalizationInputSpec(BaseInterfaceInputSpec):
    # Enable deprecation
    package_version = niworkflows_version

    # Moving image.
    moving_image = File(exists=True,
                        mandatory=True,
                        desc='image to apply transformation to')
    # Reference image (optional).
    reference_image = File(exists=True, desc='override the reference image')
    # Moving mask (optional).
    moving_mask = File(exists=True, desc='moving image mask')
    # Reference mask (optional).
Exemple #22
0
from packaging.version import Version
from path import Path
from six import with_metaclass
import requests
import zetup

__all__ = ('Source', )

# Pointer width of the running interpreter: 32 or 64.
BITS = int(platform.architecture()[0].split('bit')[0])
assert BITS in (32, 64)

# True when this CPython was built with the Microsoft Visual C compiler.
MSVC = platform.python_compiler().startswith('MSC')

BOOST_URL = URL('http://www.boost.org')

# Lowest Boost release version this module deals with.
MIN_BOOST_VERSION = Version('1.42.0')


class Meta(zetup.meta):
    """
    Metaclass for :class:`Boost.Source`.

    Provides class properties for retrieving info about available Boost
    releases
    """
    @property
    def RELEASE_URLS(cls):
        """
        ``dict`` of available Boost release versions and release page URLs.

        All (old) releases without download link are excluded
Exemple #23
0
    def __bool__(self) -> bool:
        """Truthiness reflects availability, after (re)checking the requirement."""
        self._check_requirement()
        is_available = self.available
        return is_available

    def __str__(self) -> str:
        """Return the requirement message, after (re)checking the requirement."""
        self._check_requirement()
        text = self.message
        return text

    def __repr__(self) -> str:
        """Mirror ``__str__`` so debug output shows the requirement message."""
        return str(self)


# Platform / interpreter capability flags, evaluated once at import time.
_IS_WINDOWS = platform.system() == "Windows"
_IS_INTERACTIVE = hasattr(sys, "ps1")  # https://stackoverflow.com/a/64523765
_PYTHON_GREATER_EQUAL_3_8_0 = Version(
    platform.python_version()) >= Version("3.8.0")
# Installed-torch version gates.
_TORCH_GREATER_EQUAL_1_8_1 = _compare_version("torch", operator.ge, "1.8.1")
_TORCH_GREATER_EQUAL_1_9 = _compare_version("torch", operator.ge, "1.9.0")
_TORCH_GREATER_EQUAL_1_9_1 = _compare_version("torch", operator.ge, "1.9.1")
_TORCH_GREATER_EQUAL_1_10 = _compare_version("torch", operator.ge, "1.10.0")
_TORCH_LESSER_EQUAL_1_10_2 = _compare_version("torch", operator.le, "1.10.2")
_TORCH_GREATER_EQUAL_1_11 = _compare_version("torch", operator.ge, "1.11.0")
# use_base_version presumably strips pre-release/local suffixes before
# comparing (e.g. nightlies) — confirm against _compare_version's contract.
_TORCH_GREATER_EQUAL_1_12 = _compare_version("torch",
                                             operator.ge,
                                             "1.12.0",
                                             use_base_version=True)

# Optional-dependency availability flags.
_APEX_AVAILABLE = _module_available("apex.amp")
_BAGUA_AVAILABLE = _package_available("bagua")
_DEEPSPEED_AVAILABLE = _package_available("deepspeed")
_DEEPSPEED_GREATER_EQUAL_0_5_9 = _DEEPSPEED_AVAILABLE and _compare_version(
Exemple #24
0
 def supports_multi(self):
     """Return True when the server's Neo4j version is at least 4.0."""
     minimum_version = Version("4.0")
     return self._neo4j_version >= minimum_version
Exemple #25
0
    def loadFromXML(self, filename):
        """Loads an xml file and parses the builder Experiment from it

        Rebuilds ``self.flow``, ``self.routines``, ``self.namespace`` and the
        experiment settings from the document, renaming invalid or duplicate
        routine/component names (renames are reported via log warnings).
        On success, stores *filename* in ``self.filename``.
        """
        self._doc.parse(filename)
        root = self._doc.getroot()


        # some error checking on the version (and report that this isn't valid
        # .psyexp)?
        filenameBase = os.path.basename(filename)

        if root.tag != "PsychoPy2experiment":
            logging.error('%s is not a valid .psyexp file, "%s"' %
                          (filenameBase, root.tag))
            # the current exp is already vaporized at this point, oops
            return
        self.psychopyVersion = root.get('version')
        # If running an experiment from a future version, send alert to change "Use Version"
        if Version(psychopy.__version__) < Version(self.psychopyVersion):
            alert(code=4051, strFields={'version': self.psychopyVersion})
        # If versions are either side of 2021, send alert
        if Version(psychopy.__version__) >= Version("2021.1.0") > Version(self.psychopyVersion):
            alert(code=4052, strFields={'version': self.psychopyVersion})

        # Parse document nodes
        # first make sure we're empty
        self.flow = Flow(exp=self)  # every exp has exactly one flow
        self.routines = {}
        self.namespace = NameSpace(self)  # start fresh
        modifiedNames = []
        duplicateNames = []

        # fetch exp settings
        settingsNode = root.find('Settings')
        for child in settingsNode:
            self._getXMLparam(params=self.settings.params, paramNode=child,
                              componentNode=settingsNode)
        # name should be saved as a settings parameter (only from 1.74.00)
        if self.settings.params['expName'].val in ['', None, 'None']:
            shortName = os.path.splitext(filenameBase)[0]
            self.setExpName(shortName)
        # fetch routines
        routinesNode = root.find('Routines')
        allCompons = getAllComponents(
            self.prefsBuilder['componentsFolders'], fetchIcons=False)
        # get each routine node from the list of routines
        for routineNode in routinesNode:
            routineGoodName = self.namespace.makeValid(
                routineNode.get('name'))
            if routineGoodName != routineNode.get('name'):
                modifiedNames.append(routineNode.get('name'))
            self.namespace.user.append(routineGoodName)
            routine = Routine(name=routineGoodName, exp=self)
            # self._getXMLparam(params=routine.params, paramNode=routineNode)
            self.routines[routineNode.get('name')] = routine
            for componentNode in routineNode:

                componentType = componentNode.tag
                if componentType in allCompons:
                    # create an actual component of that type
                    component = allCompons[componentType](
                        name=componentNode.get('name'),
                        parentName=routineNode.get('name'), exp=self)
                else:
                    # create UnknownComponent instead
                    component = allCompons['UnknownComponent'](
                        name=componentNode.get('name'),
                        parentName=routineNode.get('name'), exp=self)
                # check for components that were absent in older versions of
                # the builder and change the default behavior
                # (currently only the new behavior of choices for RatingScale,
                # HS, November 2012)
                # HS's modification superceded Jan 2014, removing several
                # RatingScale options
                if componentType == 'RatingScaleComponent':
                    if (componentNode.get('choiceLabelsAboveLine') or
                            componentNode.get('lowAnchorText') or
                            componentNode.get('highAnchorText')):
                        pass
                    # if not componentNode.get('choiceLabelsAboveLine'):
                    #    # this rating scale was created using older version
                    #    component.params['choiceLabelsAboveLine'].val=True
                # populate the component with its various params
                for paramNode in componentNode:
                    self._getXMLparam(params=component.params,
                                      paramNode=paramNode,
                                      componentNode=componentNode)
                compGoodName = self.namespace.makeValid(
                    componentNode.get('name'))
                if compGoodName != componentNode.get('name'):
                    modifiedNames.append(componentNode.get('name'))
                self.namespace.add(compGoodName)
                component.params['name'].val = compGoodName
                routine.append(component)
        # for each component that uses a Static for updates, we need to set
        # that
        for thisRoutine in list(self.routines.values()):
            for thisComp in thisRoutine:
                for thisParamName in thisComp.params:
                    thisParam = thisComp.params[thisParamName]
                    if thisParamName == 'advancedParams':
                        continue  # advanced isn't a normal param
                    elif thisParam.updates and "during:" in thisParam.updates:
                        # remove the part that says 'during'
                        updates = thisParam.updates.split(': ')[1]
                        routine, static = updates.split('.')
                        if routine not in self.routines:
                            msg = ("%s was set to update during %s Static "
                                   "Component, but that component no longer "
                                   "exists")
                            logging.warning(msg % (thisParamName, static))
                        else:
                            self.routines[routine].getComponentFromName(
                                static).addComponentUpdate(
                                thisRoutine.params['name'],
                                thisComp.params['name'], thisParamName)
        # fetch flow settings
        flowNode = root.find('Flow')
        loops = {}
        for elementNode in flowNode:
            if elementNode.tag == "LoopInitiator":
                loopType = elementNode.get('loopType')
                loopName = self.namespace.makeValid(elementNode.get('name'))
                if loopName != elementNode.get('name'):
                    modifiedNames.append(elementNode.get('name'))
                self.namespace.add(loopName)
                # NOTE(review): eval() on loopType (and on the 'conditions'
                # param below) trusts the .psyexp document — do not load
                # experiment files from untrusted sources.
                loop = eval('%s(exp=self,name="%s")' % (loopType, loopName))
                loops[loopName] = loop
                for paramNode in elementNode:
                    self._getXMLparam(paramNode=paramNode, params=loop.params)
                    # for conditions convert string rep to list of dicts
                    if paramNode.get('name') == 'conditions':
                        param = loop.params['conditions']
                        # e.g. param.val=[{'ori':0},{'ori':3}]
                        try:
                            param.val = eval('%s' % (param.val))
                        except SyntaxError:
                            # This can occur if Python2.7 conditions string
                            # contained long ints (e.g. 8L) and these can't be
                            # parsed by Py3. But allow the file to carry on
                            # loading and the conditions will still be loaded
                            # from the xlsx file
                            pass
                # get condition names from within conditionsFile, if any:
                try:
                    # psychophysicsstaircase demo has no such param
                    conditionsFile = loop.params['conditionsFile'].val
                except Exception:
                    conditionsFile = None
                if conditionsFile in ['None', '']:
                    conditionsFile = None
                if conditionsFile:
                    try:
                        trialList, fieldNames = data.importConditions(
                            conditionsFile, returnFieldNames=True)
                        for fname in fieldNames:
                            if fname != self.namespace.makeValid(fname):
                                duplicateNames.append(fname)
                            else:
                                self.namespace.add(fname)
                    except Exception:
                        pass  # couldn't load the conditions file for now
                self.flow.append(LoopInitiator(loop=loops[loopName]))
            elif elementNode.tag == "LoopTerminator":
                self.flow.append(LoopTerminator(
                    loop=loops[elementNode.get('name')]))
            elif elementNode.tag == "Routine":
                if elementNode.get('name') in self.routines:
                    self.flow.append(self.routines[elementNode.get('name')])
                else:
                    logging.error("A Routine called '{}' was on the Flow but "
                                  "could not be found (failed rename?). You "
                                  "may need to re-insert it".format(
                        elementNode.get('name')))
                    logging.flush()

        if modifiedNames:
            msg = 'duplicate variable name(s) changed in loadFromXML: %s\n'
            logging.warning(msg % ', '.join(list(set(modifiedNames))))
        if duplicateNames:
            msg = 'duplicate variable names: %s'
            logging.warning(msg % ', '.join(list(set(duplicateNames))))
        # if we succeeded then save current filename to self
        self.filename = filename
Exemple #26
0
    def __call__(self):
        """Steps executed to bump.

        Reads the configured version, computes the increment from commits
        since the last tag, then updates version files, commits and tags.
        Exits via SystemExit on dry runs and on every error condition.
        """
        try:
            current_version_instance: Version = Version(
                self.parameters["version"])
        except TypeError:
            # Version(None) raises TypeError when no version is configured.
            out.error("[NO_VERSION_SPECIFIED]")
            out.error(
                "Check if current version is specified in config file, like:")
            out.error("version = 0.4.3")
            raise SystemExit(NO_VERSION_SPECIFIED)

        # Initialize values from sources (conf)
        current_version: str = self.config["version"]
        tag_format: str = self.parameters["tag_format"]
        bump_commit_message: str = self.parameters["bump_message"]
        current_tag_version: str = bump.create_tag(current_version,
                                                   tag_format=tag_format)
        files: list = self.parameters["files"]
        dry_run: bool = self.parameters["dry_run"]

        is_yes: bool = self.arguments["yes"]
        prerelease: str = self.arguments["prerelease"]
        increment: Optional[str] = self.arguments["increment"]

        is_initial = self.is_initial_tag(current_tag_version, is_yes)
        commits = git.get_commits(current_tag_version,
                                  from_beginning=is_initial)

        # No commits, there is no need to create an empty tag.
        # Unless we previously had a prerelease.
        if not commits and not current_version_instance.is_prerelease:
            out.error("[NO_COMMITS_FOUND]")
            out.error("No new commits found.")
            raise SystemExit(NO_COMMITS_FOUND)

        # Derive the increment from commit messages when not given explicitly.
        if increment is None:
            bump_pattern = self.cz.bump_pattern
            bump_map = self.cz.bump_map
            if not bump_map or not bump_pattern:
                out.error(
                    f"'{self.config['name']}' rule does not support bump")
                raise SystemExit(NO_PATTERN_MAP)
            increment = bump.find_increment(commits,
                                            regex=bump_pattern,
                                            increments_map=bump_map)

        # Increment is removed when current and next version
        # are expected to be prereleases.
        if prerelease and current_version_instance.is_prerelease:
            increment = None

        new_version = bump.generate_version(current_version,
                                            increment,
                                            prerelease=prerelease)
        new_tag_version = bump.create_tag(new_version, tag_format=tag_format)
        message = bump.create_commit_message(current_version, new_version,
                                             bump_commit_message)

        # Report found information
        out.write(message)
        out.write(f"tag to create: {new_tag_version}")
        out.write(f"increment detected: {increment}")

        # Do not perform operations over files or git.
        if dry_run:
            raise SystemExit()

        # Persist the new version, then commit and tag; abort on git errors.
        config.set_key("version", new_version.public)
        bump.update_version_in_files(current_version, new_version.public,
                                     files)
        c = git.commit(message, args="-a")
        if c.err:
            out.error(c.err)
            raise SystemExit(COMMIT_FAILED)
        c = git.tag(new_tag_version)
        if c.err:
            out.error(c.err)
            raise SystemExit(TAG_FAILED)
        out.success("Done!")
Exemple #27
0
        ("Foo.Bar", "foo-bar"),
        ("Foo.....Bar", "foo-bar"),
        ("foo_bar", "foo-bar"),
        ("foo___bar", "foo-bar"),
        ("foo-bar", "foo-bar"),
        ("foo----bar", "foo-bar"),
    ],
)
def test_canonicalize_name(name, expected):
    assert canonicalize_name(name) == expected


@pytest.mark.parametrize(
    ("version", "expected"),
    [
        # Inputs may be pre-parsed Version objects or raw strings.
        (Version("1.4.0"), "1.4"),
        ("1.4.0", "1.4"),
        ("1.40.0", "1.40"),
        ("1.4.0.0.00.000.0000", "1.4"),
        ("1.0", "1"),
        ("1.0+abc", "1+abc"),
        ("1.0.dev0", "1.dev0"),
        ("1.0.post0", "1.post0"),
        ("1.0a0", "1a0"),
        ("1.0rc0", "1rc0"),
        ("100!0.0", "100!0"),
        ("1.0.1-test7", "1.0.1-test7"),  # LegacyVersion is unchanged
    ],
)
def test_canonicalize_version(version, expected):
    # Canonicalization drops trailing zero release segments while preserving
    # epoch, pre/post/dev and local parts (per the cases above).
    assert canonicalize_version(version) == expected
Exemple #28
0
def parse_version_failsafe(version_string):
    """Parse *version_string* into a Version, or None when it is not PEP 440."""
    try:
        parsed = Version(version_string)
    except InvalidVersion:
        return None
    return parsed
def huggingface_pytorch_latest_training_py_version(
        huggingface_training_pytorch_latest_version):
    """Return py38 for PyTorch 1.9 and newer, py36 otherwise."""
    if Version(huggingface_training_pytorch_latest_version) >= Version("1.9"):
        return "py38"
    return "py36"
Exemple #30
0
#
#      The author may be contacted through the project's GitHub, at:
#      https://github.com/Hari-Nagarajan/fairgame

import requests
from packaging.version import Version, parse, InvalidVersion

_LATEST_URL = "https://api.github.com/repos/Hari-Nagarajan/fairgame/releases/latest"

# Use a Version object to gain additional version identification capabilities
# See https://github.com/pypa/packaging for details
# See https://www.python.org/dev/peps/pep-0440/ for specification
# See https://www.python.org/dev/peps/pep-0440/#examples-of-compliant-version-schemes for examples

# Local release number as a PEP 440 string; parsed once so comparisons and
# prerelease checks below can use Version semantics.
__VERSION = "0.6.2"
version = Version(__VERSION)


def is_latest():
    """Return True when the local version is current and not a prerelease."""
    remote_version = get_latest_version()
    # Being behind the remote release, or running a prerelease build,
    # both count as "not latest".
    return not (version < remote_version or version.is_prerelease)


def get_latest_version():
    try: