Example #1
import re
from subprocess import call

from packaging.version import Version, _Version

# INIT_PATH and check_staged() are defined elsewhere in the source module.
def bump(dev=False, patch=False, minor=False, major=False, nocommit=False):
    """Bump version number and commit change."""
    if sum([int(x) for x in (patch, minor, major)]) > 1:
        raise ValueError('Only one of patch, minor, major can be incremented.')

    if check_staged():
        raise EnvironmentError('There are staged changes, abort.')

    with open(str(INIT_PATH)) as f:
        lines = f.readlines()

    for i, line in enumerate(lines):
        varmatch = re.match("__([a-z]+)__ = '([^']+)'", line)
        if varmatch:
            if varmatch.group(1) == 'version':
                version = Version(varmatch.group(2))
                vdict = version._version._asdict()
                print('Current version:', version)
                increment_release = True
                if dev:
                    if vdict['dev']:
                        vdict['dev'] = (vdict['dev'][0], vdict['dev'][1] + 1)
                        increment_release = False
                        if sum([int(x) for x in (patch, minor, major)]) > 0:
                            raise ValueError('Cannot increment patch, minor, or major between dev versions.')
                    else:
                        vdict['dev'] = ('dev', 0)
                else:
                    if vdict['dev']:
                        vdict['dev'] = None
                        increment_release = False

                if increment_release:
                    rel = vdict['release']
                    if major:
                        vdict['release'] = (rel[0] + 1, 0, 0)
                    elif patch:
                        vdict['release'] = (rel[0], rel[1], rel[2] + 1)
                    else:  # minor is default
                        vdict['release'] = (rel[0], rel[1] + 1, 0)

                version._version = _Version(**vdict)
                print('Version bumped to:', version)
                lines[i] = "__version__ = '{!s}'\n".format(version)
                break

    with open(str(INIT_PATH), 'w') as f:
        f.writelines(lines)

    if not nocommit:
        call(['git', 'add', 'bucketcache/__init__.py'])
        call(['git', 'commit', '-m', 'Bumped version number to {!s}'.format(version)])
    return version
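A minimal sketch of the private _version round-trip that bump() relies on (these are packaging internals, so they may change between releases):

from packaging.version import Version

v = Version('1.2.3')
d = v._version._asdict()            # private namedtuple -> mutable dict
d['release'] = (1, 3, 0)            # bump the minor component
v._version = type(v._version)(**d)  # rebuild the namedtuple
print(Version(str(v)))              # 1.3.0 (re-parse so comparison keys are fresh)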
Example #2
    def test_versions_condaforge(self):
        self.assertSetEqual({Version('1.0'), Version('1.2'), Version('2.0')}, self.pkg_a.versions_condaforge)
        self.assertSetEqual(set(), self.pkg_bad.versions_condaforge)
Example #3
import datetime as dt
import os

from collections.abc import MutableSequence, MutableMapping
from collections import defaultdict, OrderedDict
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from html import escape  # noqa
from importlib import import_module
from packaging.version import Version

import bokeh
import param
import numpy as np

datetime_types = (np.datetime64, dt.datetime, dt.date)

bokeh_version = Version(bokeh.__version__)


def isfile(path):
    """Safe version of os.path.isfile robust to path length issues on Windows"""
    try:
        return os.path.isfile(path)
    except ValueError:  # path too long for Windows
        return False


def isurl(obj, formats=None):
    if not isinstance(obj, str):
        return False
    lower_string = obj.lower().split('?')[0].split('#')[0]
    return ((lower_string.startswith('http://') or
             lower_string.startswith('https://')) and
            (formats is None or
             any(lower_string.endswith('.' + fmt) for fmt in formats)))
Example #4
def test_postgis_version(ngw_txn):
    """ Useless PostgreGIS version check """

    version = Version(
        DBSession.execute(text('SELECT PostGIS_Lib_Version()')).scalar())
    assert version >= Version('2.5.0')
Example #5
    def __new__(
        self,
        *args,
        min_gpus: int = 0,
        min_torch: Optional[str] = None,
        max_torch: Optional[str] = None,
        min_python: Optional[str] = None,
        quantization: bool = False,
        amp_apex: bool = False,
        amp_native: bool = False,
        tpu: bool = False,
        ipu: bool = False,
        horovod: bool = False,
        horovod_nccl: bool = False,
        skip_windows: bool = False,
        special: bool = False,
        fairscale: bool = False,
        fairscale_fully_sharded: bool = False,
        deepspeed: bool = False,
        **kwargs,
    ):
        """
        Args:
            args: native pytest.mark.skipif arguments
            min_gpus: min number of gpus required to run test
            min_torch: minimum pytorch version to run test
            max_torch: maximum pytorch version to run test
            min_python: minimum python version required to run test
            quantization: if `torch.quantization` package is required to run test
            amp_apex: NVIDIA Apex is installed
            amp_native: if native PyTorch AMP is supported
            tpu: if TPU is available
            ipu: if IPU is available
            horovod: if Horovod is installed
            horovod_nccl: if Horovod is installed with NCCL support
            skip_windows: skip test for Windows platform (typically for some limited torch functionality)
            special: running in special mode, outside the pytest suite
            fairscale: if `fairscale` module is required to run the test
            fairscale_fully_sharded: if `fairscale` fully sharded module is required to run the test
            deepspeed: if `deepspeed` module is required to run the test
            kwargs: native pytest.mark.skipif keyword arguments
        """
        conditions = []
        reasons = []

        if min_gpus:
            conditions.append(torch.cuda.device_count() < min_gpus)
            reasons.append(f"GPUs>={min_gpus}")

        if min_torch:
            torch_version = get_distribution("torch").version
            conditions.append(Version(torch_version) < Version(min_torch))
            reasons.append(f"torch>={min_torch}")

        if max_torch:
            torch_version = get_distribution("torch").version
            conditions.append(Version(torch_version) >= Version(max_torch))
            reasons.append(f"torch<{max_torch}")

        if min_python:
            py_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            conditions.append(Version(py_version) < Version(min_python))
            reasons.append(f"python>={min_python}")

        if quantization:
            _miss_default = "fbgemm" not in torch.backends.quantized.supported_engines
            conditions.append(not _TORCH_QUANTIZE_AVAILABLE or _miss_default)
            reasons.append("PyTorch quantization")

        if amp_native:
            conditions.append(not _NATIVE_AMP_AVAILABLE)
            reasons.append("native AMP")

        if amp_apex:
            conditions.append(not _APEX_AVAILABLE)
            reasons.append("NVIDIA Apex")

        if skip_windows:
            conditions.append(sys.platform == "win32")
            reasons.append("unimplemented on Windows")

        if tpu:
            conditions.append(not _TPU_AVAILABLE)
            reasons.append("TPU")

        if ipu:
            conditions.append(not _IPU_AVAILABLE)
            reasons.append("IPU")

        if horovod:
            conditions.append(not _HOROVOD_AVAILABLE)
            reasons.append("Horovod")

        if horovod_nccl:
            conditions.append(not _HOROVOD_NCCL_AVAILABLE)
            reasons.append("Horovod with NCCL")

        if special:
            env_flag = os.getenv("PL_RUNNING_SPECIAL_TESTS", "0")
            conditions.append(env_flag != "1")
            reasons.append("Special execution")

        if fairscale:
            conditions.append(not _FAIRSCALE_AVAILABLE)
            reasons.append("Fairscale")

        if fairscale_fully_sharded:
            conditions.append(not _FAIRSCALE_FULLY_SHARDED_AVAILABLE)
            reasons.append("Fairscale Fully Sharded")

        if deepspeed:
            conditions.append(not _DEEPSPEED_AVAILABLE)
            reasons.append("Deepspeed")

        reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
        return pytest.mark.skipif(*args,
                                  condition=any(conditions),
                                  reason=f"Requires: [{' + '.join(reasons)}]",
                                  **kwargs)
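A __new__ that returns a pytest mark lets the class be applied directly as a decorator; a hedged usage sketch (the class name RunIf is an assumption, since the snippet only shows __new__):

# Hypothetical test using the marker class above (here assumed to be RunIf).
@RunIf(min_gpus=1, min_torch="1.8.0", skip_windows=True)
def test_amp_training():
    ...  # runs only when all conditions are met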
Example #6
from packaging.version import Version
import importlib
import os
import warnings

import numpy as np
import pandas as pd
import pyproj
import shapely
import shapely.geos

# -----------------------------------------------------------------------------
# pandas compat
# -----------------------------------------------------------------------------

PANDAS_GE_11 = Version(pd.__version__) >= Version("1.1.0")
PANDAS_GE_115 = Version(pd.__version__) >= Version("1.1.5")
PANDAS_GE_12 = Version(pd.__version__) >= Version("1.2.0")
PANDAS_GE_13 = Version(pd.__version__) >= Version("1.3.0")
PANDAS_GE_14 = Version(pd.__version__) >= Version("1.4.0rc0")

# -----------------------------------------------------------------------------
# Shapely / PyGEOS compat
# -----------------------------------------------------------------------------

SHAPELY_GE_18 = Version(shapely.__version__) >= Version("1.8")
SHAPELY_GE_20 = Version(shapely.__version__) >= Version("2.0")

GEOS_GE_390 = shapely.geos.geos_version >= (3, 9, 0)

HAS_PYGEOS = None
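HAS_PYGEOS is only initialized to None in this excerpt; a minimal sketch of how such an optional-dependency flag is commonly resolved (a generic pattern, not necessarily this library's exact logic):

try:
    import pygeos  # noqa: F401
    HAS_PYGEOS = True
except ImportError:
    HAS_PYGEOS = False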
Example #7
import os

import pkg_resources
import pytest
from packaging.version import Version

from polite.paths import Directory
from dtocean_core.core import Core
from dtocean_core.menu import DataMenu, ProjectMenu, ThemeMenu
from dtocean_core.pipeline import Tree, _get_connector

# Check for module and version
pkg_title = "dtocean-environment"
pkg_import = "dtocean_environment"
min_version = "1.0"

pytest.importorskip(pkg_import)
version = pkg_resources.get_distribution(pkg_title).version
pytestmark = pytest.mark.skipif(Version(version) < Version(min_version),
                                reason="module version too old")

dir_path = os.path.dirname(__file__)


@pytest.fixture(scope="module")
def core():
    '''Share a Core object'''

    new_core = Core()

    return new_core


@pytest.fixture(scope="module")
Example #8
def setUpModule():  # pylint:disable=invalid-name
    """Skip tests on Pulp versions lower than 2.12."""
    set_up_module()
    if config.get_config().pulp_version < Version('2.12'):
        raise unittest.SkipTest('These tests require at least Pulp 2.12.')
Example #9
    def test_comparison_false(self, left, right, op):
        assert not op(Version(left), Version(right))
Example #10
    def test_comparison_true(self, left, right, op):
        assert op(Version(left), Version(right))
Example #11
    def test_version_is_postrelease(self, version, expected):
        assert Version(version).is_postrelease is expected
Example #12
    def test_version_post(self, version, post):
        assert Version(version).post == post
Example #13
    def test_version_dev(self, version, dev):
        assert Version(version).dev == dev
Example #14
    def test_version_pre(self, version, pre):
        assert Version(version).pre == pre
Example #15
    def test_version_local(self, version, local):
        assert Version(version).local == local
Example #16
    def test_dunder_op_returns_notimplemented(self, op):
        method = getattr(Version, "__{0}__".format(op))
        assert method(Version("1"), 1) is NotImplemented
Example #17
    def train(_log_models, original, *args, **kwargs):
        def record_eval_results(eval_results, metrics_logger):
            """
            Create a callback function that records evaluation results.
            """
            return picklable_exception_safe_function(
                functools.partial(_autolog_callback,
                                  metrics_logger=metrics_logger,
                                  eval_results=eval_results))

        def log_feature_importance_plot(features, importance, importance_type):
            """
            Log feature importance plot.
            """
            import matplotlib.pyplot as plt

            indices = np.argsort(importance)
            features = np.array(features)[indices]
            importance = importance[indices]
            num_features = len(features)

            # If num_features > 10, increase the figure height to prevent the plot
            # from being too dense.
            w, h = [6.4, 4.8]  # matplotlib's default figure size
            h = h + 0.1 * num_features if num_features > 10 else h
            fig, ax = plt.subplots(figsize=(w, h))

            yloc = np.arange(num_features)
            ax.barh(yloc, importance, align="center", height=0.5)
            ax.set_yticks(yloc)
            ax.set_yticklabels(features)
            ax.set_xlabel("Importance")
            ax.set_title("Feature Importance ({})".format(importance_type))
            fig.tight_layout()

            tmpdir = tempfile.mkdtemp()
            try:
                filepath = os.path.join(
                    tmpdir, "feature_importance_{}.png".format(importance_type))
                fig.savefig(filepath)
                mlflow.log_artifact(filepath)
            finally:
                plt.close(fig)
                shutil.rmtree(tmpdir)

        autologging_client = MlflowAutologgingQueueingClient()

        # logging booster params separately via mlflow.log_params to extract key/value pairs
        # and make it easier to compare them across runs.
        booster_params = args[0] if len(args) > 0 else kwargs["params"]
        autologging_client.log_params(run_id=mlflow.active_run().info.run_id,
                                      params=booster_params)

        unlogged_params = [
            "params",
            "train_set",
            "valid_sets",
            "valid_names",
            "fobj",
            "feval",
            "init_model",
            "learning_rates",
            "callbacks",
        ]
        if Version(lightgbm.__version__) <= Version("3.3.1"):
            # The parameter `evals_result` in `lightgbm.train` is removed in this PR:
            # https://github.com/microsoft/LightGBM/pull/4882
            unlogged_params.append("evals_result")

        params_to_log_for_fn = get_mlflow_run_params_for_fn_args(
            original, args, kwargs, unlogged_params)
        autologging_client.log_params(run_id=mlflow.active_run().info.run_id,
                                      params=params_to_log_for_fn)

        param_logging_operations = autologging_client.flush(synchronous=False)

        all_arg_names = _get_arg_names(original)
        num_pos_args = len(args)

        # adding a callback that records evaluation results.
        eval_results = []
        callbacks_index = all_arg_names.index("callbacks")
        run_id = mlflow.active_run().info.run_id
        with batch_metrics_logger(run_id) as metrics_logger:
            callback = record_eval_results(eval_results, metrics_logger)
            if num_pos_args >= callbacks_index + 1:
                tmp_list = list(args)
                tmp_list[callbacks_index] += [callback]
                args = tuple(tmp_list)
            elif "callbacks" in kwargs and kwargs["callbacks"] is not None:
                kwargs["callbacks"] += [callback]
            else:
                kwargs["callbacks"] = [callback]

            # training model
            model = original(*args, **kwargs)

            # If early stopping is activated, logging metrics at the best iteration
            # as extra metrics with the max step + 1.
            early_stopping = model.best_iteration > 0
            if early_stopping:
                extra_step = len(eval_results)
                autologging_client.log_metrics(
                    run_id=mlflow.active_run().info.run_id,
                    metrics={
                        "stopped_iteration": extra_step,
                        # best_iteration is set even if training does not stop early.
                        "best_iteration": model.best_iteration,
                    },
                )
                # iteration starts from 1 in LightGBM.
                last_iter_results = eval_results[model.best_iteration - 1]
                autologging_client.log_metrics(
                    run_id=mlflow.active_run().info.run_id,
                    metrics=last_iter_results,
                    step=extra_step,
                )
                early_stopping_logging_operations = autologging_client.flush(
                    synchronous=False)

        # logging feature importance as artifacts.
        for imp_type in ["split", "gain"]:
            features = model.feature_name()
            importance = model.feature_importance(importance_type=imp_type)
            try:
                log_feature_importance_plot(features, importance, imp_type)
            except Exception:
                _logger.exception(
                    "Failed to log feature importance plot. LightGBM autologging "
                    "will ignore the failure and continue. Exception: ")

            imp = {ft: imp for ft, imp in zip(features, importance.tolist())}
            tmpdir = tempfile.mkdtemp()
            try:
                filepath = os.path.join(
                    tmpdir, "feature_importance_{}.json".format(imp_type))
                with open(filepath, "w") as f:
                    json.dump(imp, f, indent=2)
                mlflow.log_artifact(filepath)
            finally:
                shutil.rmtree(tmpdir)

        # train_set must exist as the original train function already ran successfully
        train_set = args[1] if len(args) > 1 else kwargs.get("train_set")

        # it is possible that the dataset was constructed before the patched
        #   constructor was applied, so we cannot assume the input_example_info exists
        input_example_info = getattr(train_set, "input_example_info", None)

        def get_input_example():
            if input_example_info is None:
                raise Exception(ENSURE_AUTOLOGGING_ENABLED_TEXT)
            if input_example_info.error_msg is not None:
                raise Exception(input_example_info.error_msg)
            return input_example_info.input_example

        def infer_model_signature(input_example):
            model_output = model.predict(input_example)
            model_signature = infer_signature(input_example, model_output)
            return model_signature

        # Whether to automatically log the trained model based on boolean flag.
        if _log_models:
            # Will only resolve `input_example` and `signature` if `log_models` is `True`.
            input_example, signature = resolve_input_example_and_signature(
                get_input_example,
                infer_model_signature,
                log_input_examples,
                log_model_signatures,
                _logger,
            )

            log_model(
                model,
                artifact_path="model",
                signature=signature,
                input_example=input_example,
                registered_model_name=registered_model_name,
            )

        param_logging_operations.await_completion()
        if early_stopping:
            early_stopping_logging_operations.await_completion()

        return model
Example #18
    def test_compare_other(self, op, expected):
        other = pretend.stub(
            **{"__{0}__".format(op): lambda other: NotImplemented})

        assert getattr(operator, op)(Version("1"), other) is expected
Example #19
def _get_versions():
    return {
        'postgres_version': Version(get_postgres_version()),
        'python_version': Version(platform.python_version()),
        'indico_version': Version(indico.__version__),
    }
Example #20
    def test_compare_legacyversion_version(self):
        result = sorted([Version("0"), LegacyVersion("1")])
        assert result == [LegacyVersion("1"), Version("0")]
Example #21
    def check_package_sanity(self):
        # if self.pkg_scm.is_dirty():
        # raise Error("Package is dirty. Quiting")

        if not os.path.exists(self.package_path):
            raise Error("Path %s is invalid, quitting." % self.package_path)

        # check if we have hardcoded version in setup.py
        # this is a dumb but hopefully effective method: we look for a line
        # starting with version= and fail if there's a number on it
        setup_py = find_file(self.package_path, 'setup.py')
        with open(setup_py) as f:
            version = [l for l in f.readlines()
                       if l.strip().startswith('version')]
        for l in version:
            for c in l:
                if c.isdigit():
                    raise Error("There's a hardcoded version in the "
                                "setup.py file. Quitting.")

        vv = get_version(self.package_path)
        vh = FileHistoryParser(self.package_path).get_current_version()

        if self.resume_from > 1:
            return  # Bypass sanity checks when resuming

        vv_version = Version(vv)
        if not vv_version.is_prerelease:
            raise Error("Version.txt file does not contain a dev version. "
                        "Quitting.")

        vh_version = Version(vh)
        if not vh_version.is_prerelease:
            raise Error("HISTORY.txt file does not contain a dev version. "
                        "Quitting.")

        if vh != vv:
            raise Error("Latest version in HISTORY.txt is not the "
                        "same as in version.txt. Quitting.")

        # We depend on collective.dist installed in the python.
        # Installing eggmonkey under buildout with a different python doesn't
        # install properly the collective.dist
        print_msg("Installing collective.dist in ", self.python)
        cmd = self.python + " setup.py easy_install -q -U collective.dist"
        try:
            subprocess.check_call(cmd, cwd=self.package_path, shell=True)
        except subprocess.CalledProcessError:
            # raise Error("Failed to install collective.dist in", self.python)
            pass  # easier not to fail here

        # check if package metadata is properly filled
        try:
            cmd = self.python + " setup.py check --strict"
            subprocess.check_call(cmd, cwd=self.package_path, shell=True)
        except subprocess.CalledProcessError:
            print("Make sure that the package has the following metadata "
                  "filled in:")
            print(" - name")
            print(" - version")
            print(" - url")
            print(" - author and author_email")
            print(" - maintainer and maintainer_email")
            raise Error("Package has improperly filled metadata. Quitting")
Example #22
    def test_major_version(self):
        assert Version("2.1.0").major == 2
Example #23
def _parse_metadata(metadata):
    if Version(_mrcz.__version__) < Version("0.5"):
        return metadata[0]
    else:
        return metadata
Example #24
    def test_minor_version(self):
        assert Version("2.1.0").minor == 1
        assert Version("2").minor == 0
Example #25
import re

from packaging.version import Version
from sqlalchemy import text

from nextgisweb.models import DBSession


def test_postgres_version(ngw_txn):
    """ Useless PostgreSQL version check """

    raw = DBSession.execute(text('SHOW server_version')).scalar()
    m = re.search(r'\d+(?:\.\d+)+', raw)
    assert m is not None
    version = Version(m.group(0))
    assert version >= Version('10.0')


def test_postgis_version(ngw_txn):
    """ Useless PostgreGIS version check """

    version = Version(
        DBSession.execute(text('SELECT PostGIS_Lib_Version()')).scalar())
    assert version >= Version('2.5.0')
Example #26
    def test_micro_version(self):
        assert Version("2.1.3").micro == 3
        assert Version("2.1").micro == 0
        assert Version("2").micro == 0
Example #27
def test_postgres_version(ngw_txn):
    """ Useless PostgreSQL version check """

    raw = DBSession.execute(text('SHOW server_version')).scalar()
    if m := re.search(r'\d+(?:\.\d+)+', raw):
        version = Version(m.group(0))
Example #28
    def test_valid_versions(self, version):
        Version(version)
Example #29
    model = tf.keras.Sequential(
        [
            vectorizer_layer,
            tf.keras.layers.Embedding(
                VOCAB_SIZE, EMBEDDING_DIM, name="embedding", mask_zero=True, input_shape=(1,),
            ),
            tf.keras.layers.GlobalAveragePooling1D(),
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dense(1, activation="tanh"),
        ]
    )
    model.compile(optimizer="adam", loss="mse", metrics="mae")
    return model


@pytest.mark.skipif(
    Version(tf.__version__) < Version("2.3.0"),
    reason=(
        "Deserializing a model with `TextVectorization` and `Embedding`"
        "fails in tensorflow < 2.3.0. See this issue:"
        "https://github.com/tensorflow/tensorflow/issues/38250"
    ),
)
def test_autolog_text_vec_model(tmpdir):
    """
    Verifies autolog successfully saves a model that can't be saved in the H5 format
    """
    mlflow.tensorflow.autolog()

    train_samples = np.array(["this is an example", "another example"])
    train_labels = np.array([0.4, 0.2])
    model = get_text_vec_model(train_samples)
Example #30
import logging

from path import Path
import tox.config
from packaging.requirements import Requirement, InvalidRequirement
from packaging.specifiers import SpecifierSet, Specifier
from packaging.version import Version

LIBRARY_REQUIRED_DJANGO_VERSIONS = {'1.8', '1.11'}
APPLICATION_ALLOWED_DJANGO_VERSIONS = {
    SpecifierSet('>=1.8,<1.9'),
    SpecifierSet('>=1.11,<2.0'),
}

DJANGO_VERSIONS = {
    Version('{}.{}.{}'.format(major, minor, patch))
    for major, minors in {
        1: {
            8: range(19),
            9: range(14),
            10: range(8),
            11: range(1)
        }
    }.items() for minor, patches in minors.items() for patch in patches
}

LOG = logging.getLogger(__name__)


def setup_call(parsed_setup_py):
    for statement in parsed_setup_py.body:
Example #31
    def test__external_versions(self):
        self.assertSetEqual({Version('1.0'), Version('1.2'), Version('2.0')}, set(self.pkg_a._external_versions))
        self.assertSetEqual(set(), set(self.pkg_bad._external_versions))
Example #32
def bump_version(version, bump_type):
    """Return a new version given a current version and action."""
    to_change = {}

    if bump_type == 'minor':
        # Convert 0.67.3 to 0.68.0
        # Convert 0.67.3.b5 to 0.68.0
        # Convert 0.67.3.dev0 to 0.68.0
        # Convert 0.67.0.b5 to 0.67.0
        # Convert 0.67.0.dev0 to 0.67.0
        to_change['dev'] = None
        to_change['pre'] = None

        if not version.is_prerelease or version.release[2] != 0:
            to_change['release'] = _bump_release(version.release, 'minor')

    elif bump_type == 'patch':
        # Convert 0.67.3 to 0.67.4
        # Convert 0.67.3.b5 to 0.67.3
        # Convert 0.67.3.dev0 to 0.67.3
        to_change['dev'] = None
        to_change['pre'] = None

        if not version.is_prerelease:
            to_change['release'] = _bump_release(version.release, 'patch')

    elif bump_type == 'dev':
        # Convert 0.67.3 to 0.67.4.dev0
        # Convert 0.67.3.b5 to 0.67.4.dev0
        # Convert 0.67.3.dev0 to 0.67.3.dev1
        if version.is_devrelease:
            to_change['dev'] = ('dev', version.dev + 1)
        else:
            to_change['pre'] = ('dev', 0)
            to_change['release'] = _bump_release(version.release, 'minor')

    elif bump_type == 'beta':
        # Convert 0.67.5 to 0.67.6b0
        # Convert 0.67.0.dev0 to 0.67.0b0
        # Convert 0.67.5.b4 to 0.67.5b5

        if version.is_devrelease:
            to_change['dev'] = None
            to_change['pre'] = ('b', 0)

        elif version.is_prerelease:
            if version.pre[0] == 'a':
                to_change['pre'] = ('b', 0)
            elif version.pre[0] == 'b':
                to_change['pre'] = ('b', version.pre[1] + 1)
            else:
                to_change['pre'] = ('b', 0)
                to_change['release'] = _bump_release(version.release, 'patch')
                to_change['release'] = _bump_release(version.release, 'patch')

        else:
            to_change['release'] = _bump_release(version.release, 'patch')
            to_change['pre'] = ('b', 0)

    else:
        assert False, 'Unsupported type: {}'.format(bump_type)

    temp = Version('0')
    temp._version = version._version._replace(**to_change)
    return Version(str(temp))
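The _bump_release helper is referenced above but not shown; a plausible sketch consistent with the inline comments (assumes a three-part release tuple):

def _bump_release(release, bump_type):
    """Return a bumped (major, minor, patch) release tuple."""
    major, minor, patch = release
    if bump_type == 'patch':
        patch += 1
    elif bump_type == 'minor':
        minor += 1
        patch = 0
    return major, minor, patch

# e.g. bump_version(Version('0.67.3'), 'minor') -> Version('0.68.0')
#      bump_version(Version('0.67.3'), 'dev')   -> Version('0.68.0.dev0')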