class TestCase(unittest.TestCase):
    """CLI tests for the version-bump flags with the reported version mocked."""

    def setUp(self):  # type: ignore
        # A fresh Click runner per test keeps invocations isolated.
        self.runner = CliRunner()

    def _bump(self, flag: str) -> str:
        """Invoke the CLI with *flag* and return the captured output."""
        return self.runner.invoke(cli, ['dummy', flag]).output

    @unittest.mock.patch.object(VersionInfo, 'semantic_version',
                                return_value=SemanticVersion(0, 0, 0))
    def test_patch_arg(self, _mock: Any) -> None:
        """--patch bumps 0.0.0 to 0.0.1."""
        self.assertEqual('0.0.1', self._bump('--patch'))

    @unittest.mock.patch.object(VersionInfo, 'semantic_version',
                                return_value=SemanticVersion(0, 0, 0))
    def test_minor_arg(self, _mock: Any) -> None:
        """--minor bumps 0.0.0 to 0.1.0."""
        self.assertEqual('0.1.0', self._bump('--minor'))

    @unittest.mock.patch.object(VersionInfo, 'semantic_version',
                                return_value=SemanticVersion(0, 0, 0))
    def test_major_arg(self, _mock: Any) -> None:
        """--major bumps 0.0.0 to 1.0.0."""
        self.assertEqual('1.0.0', self._bump('--major'))

    @unittest.mock.patch.object(VersionInfo, 'semantic_version',
                                return_value=SemanticVersion(
                                    0, 0, 1, None, None, 1))
    def test_patch_arg_on_dev_ver(self, _mock: Any) -> None:
        """--patch on a dev pre-release of 0.0.1 yields 0.0.1 itself."""
        self.assertEqual('0.0.1', self._bump('--patch'))
def upgrade_cluster(self, context, cluster, cluster_template,
                    max_batch_size, nodegroup, scale_manager=None,
                    rollback=False):
    """Upgrade *cluster* to *cluster_template* via a Heat stack update.

    Raises exception.InvalidVersion when any ``*_tag``/``*_version``
    label requested by the new template is older than the version the
    current stack reports.
    """
    os_clients = clients.OpenStackClients(context)
    _, heat_params, _ = self._extract_template_definition(
        context, cluster, scale_manager=scale_manager)

    # Compare version-ish labels against what the running stack reports.
    # Some version tags are not declared in labels explicitly, so they
    # are read from heat_params, keyed by the labels of the new template.
    deployed_versions = {}
    requested_labels = {}
    for key, value in cluster_template.labels.items():
        # This is the upgrade API: no new features are introduced here,
        # only version changes.
        requested_labels[key] = value
        is_versionish = key.endswith('_tag') or key.endswith('_version')
        if is_versionish and key in heat_params:
            deployed_versions[key] = heat_params[key]
            # Rule 1: never downgrade.
            if (SV.from_pip_string(value) <
                    SV.from_pip_string(deployed_versions[key])):
                raise exception.InvalidVersion(tag=key)

    for image_param in ("server_image", "master_image", "minion_image"):
        heat_params[image_param] = cluster_template.image_id
    # NOTE: overwrite kube_tag as well, to avoid a server rebuild followed
    # by a second k8s upgrade when both the image id and kube_tag changed.
    kube_tag = cluster_template.labels["kube_tag"]
    for tag_param in ("kube_tag", "kube_version",
                      "master_kube_tag", "minion_kube_tag"):
        heat_params[tag_param] = kube_tag
    heat_params["update_max_batch_size"] = max_batch_size

    # Rules: 1. No downgrade 2. Explicitly override 3. Merging based on set
    # Drop the service-account keys before pushing the collected labels
    # into heat_params.
    del heat_params['kube_service_account_private_key']
    del heat_params['kube_service_account_key']
    for key in requested_labels:
        heat_params[key] = cluster_template.labels[key]

    cluster['cluster_template_id'] = cluster_template.uuid
    merged_labels = cluster.labels.copy()
    merged_labels.update(cluster_template.labels)
    cluster['labels'] = merged_labels

    stack_args = {
        'existing': True,
        'parameters': heat_params,
        'disable_rollback': not rollback,
    }
    os_clients.heat().stacks.update(cluster.stack_id, **stack_args)
def run(program, commit, env_vars, dev_mode, use_public, bootstrap,
        do_build=True):
    """Assemble the build command for *commit* and execute it through sh.

    When *do_build* is true, the commit's yum repo directory is recreated
    from scratch and the work tree is hard-reset to the commit first.
    """
    datadir = os.path.realpath(config_options.datadir)
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    yumrepodir_abs = os.path.join(datadir, yumrepodir)
    commit_hash = commit.commit_hash
    project_name = commit.project_name
    repo_dir = commit.repo_dir

    if do_build:
        # If the yum repo already exists, remove it and start fresh.
        if os.path.exists(yumrepodir_abs):
            shutil.rmtree(yumrepodir_abs)
        os.makedirs(yumrepodir_abs)
        sh.git("--git-dir", "%s/.git" % repo_dir,
               "--work-tree=%s" % repo_dir,
               "reset", "--hard", commit_hash)

    command = []
    if env_vars:
        command.extend(env_vars)
    if dev_mode or use_public:
        command.append("DLRN_DEV=1")
    if bootstrap is True:
        command.append("REPO_BOOTSTRAP=1")
    command += [
        program,
        config_options.target,
        project_name,
        os.path.join(datadir, yumrepodir),
        datadir,
        config_options.baseurl,
    ]

    if not do_build:
        logger.info('Running %s' % ' '.join(command))

    try:
        sh_version = SemanticVersion.from_pip_string(sh.__version__)
        min_sh_version = SemanticVersion.from_pip_string('1.09')
        # NOTE(review): entry point differs by sh version; mapping kept
        # exactly as the original had it — confirm env vs env_ is right.
        if sh_version > min_sh_version:
            sh.env(command)
        else:
            sh.env_(command)
    except Exception as e:
        logger.error('cmd failed. See logs at: %s/%s/' % (datadir, yumrepodir))
        raise e
def run(program, cp, commit, env_vars, dev_mode, use_public, bootstrap,
        do_build=True):
    """Build and execute the command for *commit*, configured via *cp*.

    Configuration (datadir, target, baseurl) is read from the DEFAULT
    section of the supplied ConfigParser. When *do_build* is true, the
    commit's yum repo directory is recreated and the work tree is
    hard-reset to the commit before running.
    """
    datadir = os.path.realpath(cp.get("DEFAULT", "datadir"))
    target = cp.get("DEFAULT", "target")
    baseurl = cp.get("DEFAULT", "baseurl")
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    yumrepodir_abs = os.path.join(datadir, yumrepodir)

    if do_build:
        # A pre-existing yum repo is discarded: assume a fresh start.
        if os.path.exists(yumrepodir_abs):
            shutil.rmtree(yumrepodir_abs)
        os.makedirs(yumrepodir_abs)
        sh.git("--git-dir", "%s/.git" % commit.repo_dir,
               "--work-tree=%s" % commit.repo_dir,
               "reset", "--hard", commit.commit_hash)

    # Environment-variable assignments precede the program invocation.
    prefix = list(env_vars) if env_vars else []
    if dev_mode or use_public:
        prefix.append("DELOREAN_DEV=1")
    if bootstrap is True:
        prefix.append("REPO_BOOTSTRAP=1")
    run_cmd = prefix + [program, target, commit.project_name,
                        os.path.join(datadir, yumrepodir), datadir, baseurl]

    if not do_build:
        logger.info('Running %s' % ' '.join(run_cmd))

    try:
        sh_version = SemanticVersion.from_pip_string(sh.__version__)
        min_sh_version = SemanticVersion.from_pip_string('1.09')
        # NOTE(review): entry point differs by sh version; mapping kept
        # exactly as the original had it — confirm env vs env_ is right.
        if sh_version > min_sh_version:
            sh.env(run_cmd)
        else:
            sh.env_(run_cmd)
    except Exception as e:
        logger.error('cmd failed. See logs at: %s/%s/' % (datadir, yumrepodir))
        raise e
def __init__(self, log_level=logging.INFO, **kwargs):
    """Build client configuration, falling back to production defaults.

    Each key below may be overridden via **kwargs; the chosen value is
    stored on an attribute of the same name with a leading underscore.
    """
    defaults = {
        "gateway_protocol": "https",
        "gateway_host": "events.strmprivacy.io",
        "gateway_endpoint": "/event",
        "egress_protocol": "https",
        "egress_host": "websocket.strmprivacy.io",
        "egress_endpoint": "/ws",
        "egress_health_endpoint": "/is-alive",
        "sts_protocol": "https",
        "sts_host": "sts.strmprivacy.io",
        "sts_auth_endpoint": "/auth",
        "sts_refresh_endpoint": "/refresh",
        "sts_refresh_interval": 3300,  # presumably seconds — TODO confirm
    }
    self._log_level = log_level
    for name, fallback in defaults.items():
        setattr(self, "_" + name, kwargs.get(name, fallback))
    # Parse the package version string once via pbr.
    self._version = SemanticVersion.from_pip_string(__version__)
def test():
    """Exercise volume listing and creation against cinder and nova.

    Optional argv[1]/argv[2] override project_id/user_id on the volume.
    """
    extras = {}
    for position, key in enumerate(['project_id', 'user_id'], 1):
        try:
            extras[key] = sys.argv[position]
        except IndexError:
            pass

    vol = {'name': 'test-volume', 'size': 1}
    vol.update(extras)

    max_semver = SemanticVersion(major=3, minor=4, patch=0)
    semver = VersionInfo('python-novaclient').semantic_version()

    def list_nova_volumes():
        """Conditional on python-novaclient <= 3.3.0"""
        return nova.volumes.list() if semver <= max_semver else []

    def get_nova_volume(volume_id):
        pass

    def snapshot_volumes():
        # Collect the current volume listing from both clients.
        return {
            'cinderclient': cinder.volumes.list(),
            'novaclient': list_nova_volumes(),
        }

    print('Listing volumes')
    pprint(snapshot_volumes())

    print('Creating volume')
    print(' with: %s' % vol)
    # NB(kamidzi): os-vol-* attrs appear later
    vol = cinder.volumes.create(**vol)
    for state in poll_volume(vol):
        # wait for tenant_id attribute
        if str(state.status).lower() == 'available' \
                and hasattr(vol, 'os-vol-tenant-attr:tenant_id'):
            break
    pprint(render_volume(vol))

    print('Listing volumes')
    pprint(snapshot_volumes())
def upgrade_cluster(self, context, cluster, cluster_template,  # noqa: C901
                    max_batch_size, nodegroup, scale_manager=None,
                    rollback=False):
    """Upgrade a cluster/nodegroup to *cluster_template* via Heat.

    Validates that no ``*_tag``/``*_version`` label is being downgraded,
    then issues an in-place stack update on the nodegroup's stack.
    Raises exception.InvalidVersion on an attempted downgrade.
    """
    osc = clients.OpenStackClients(context)
    # Use this just to check that we are not downgrading.
    heat_params = {
        "update_max_batch_size": max_batch_size,
    }
    if 'kube_tag' in nodegroup.labels:
        heat_params['kube_tag'] = nodegroup.labels['kube_tag']

    current_addons = {}
    new_addons = {}
    for label in cluster_template.labels:
        # This is upgrade API, so we don't introduce new stuff by this API,
        # but just focus on the version change.
        new_addons[label] = cluster_template.labels[label]
        if ((label.endswith('_tag') or
                label.endswith('_version')) and label in heat_params):
            current_addons[label] = heat_params[label]
            try:
                # Reject any label whose new version parses lower than the
                # currently deployed one.
                if (SV.from_pip_string(new_addons[label]) <
                        SV.from_pip_string(current_addons[label])):
                    raise exception.InvalidVersion(tag=label)
            except exception.InvalidVersion:
                raise
            except Exception as e:
                # NOTE(flwang): Different cloud providers may use different
                # tag/version format which maybe not able to parse by
                # SemanticVersion. For this case, let's just skip it.
                LOG.debug("Failed to parse tag/version %s", str(e))

    # Since the above check passed just hardcode what we want to send to
    # heat. Rules: 1. No downgrade 2. Explicitly override 3. Merging based
    # on set. Update heat_params based on the data generated above.
    heat_params.update(self.get_heat_params(cluster_template))

    stack_id = nodegroup.stack_id
    # NOTE(review): nodegroup attributes are already dereferenced above, so
    # the "is not None" guard here can never be the first access — it looks
    # redundant; confirm whether a None nodegroup is actually possible.
    if nodegroup is not None and not nodegroup.is_default:
        heat_params['is_cluster_stack'] = False
        # For now set the worker_role explicitly in order to
        # make sure that the is_master condition fails.
        heat_params['worker_role'] = nodegroup.role

    # we need to set the whole dict to the object
    # and not just update the existing labels. This
    # is how obj_what_changed works.
    nodegroup.labels = new_labels = self.get_new_labels(nodegroup,
                                                        cluster_template)

    if nodegroup.is_default:
        cluster.cluster_template_id = cluster_template.uuid
        cluster.labels = new_labels
        # The sibling default nodegroup (master<->worker) gets the same
        # labels and is persisted before the stack update below.
        if nodegroup.role == 'master':
            other_default_ng = cluster.default_ng_worker
        else:
            other_default_ng = cluster.default_ng_master
        other_default_ng.labels = new_labels
        other_default_ng.save()

    fields = {
        'existing': True,
        'parameters': heat_params,
        'disable_rollback': not rollback
    }
    osc.heat().stacks.update(stack_id, **fields)
# General information about the project.
project = u'pebbles'
copyright = u'2016, CSC - Center for Scientific Computing Ltd.'
author = u'CSC - Center for Scientific Computing Ltd.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# HackHack: read version from setup.cfg to have it in one place
cfparser = ConfigParser()
cfparser.read("../../setup.cfg")
release = cfparser.get("metadata", "version")
# BUG FIX: pbr's SemanticVersion() constructor takes numeric components
# (major, minor, patch), not a version string; the string form must be
# parsed with from_pip_string(), as done elsewhere in this codebase.
version_info = SemanticVersion.from_pip_string(release)
# a short version
version = version_info.brief_string()
# The full version, including alpha/beta/rc tags.

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
    # NOTE(review): this fragment continues a function whose definition
    # starts before this view; tokens kept exactly as found.
    if isinstance(local_prop, RelProperty):
        return local_prop.uselist
    return False


def apply_patches():
    """Replace matching helpers on every loaded flask_restless module.

    For each local function in *needs_patching*, any already-imported
    ``flask_restless*`` module exposing an attribute of the same name has
    that attribute overwritten with the local implementation.
    """
    needs_patching = (primary_key_names, get_related_model, get_relations,
                      is_like_list)
    for func in needs_patching:
        funcname = func.__name__
        restless_mods = [
            m for m in sys.modules if m.startswith('flask_restless')
        ]
        for mod in restless_mods:
            if funcname in dir(sys.modules[mod]):
                setattr(sys.modules[mod], funcname, func)


# NOTE(review): nesting of the guarded imports and the final call is
# ambiguous in the collapsed original — reconstructed as: imports guarded
# by the SQLAlchemy >= 1.3 check, apply_patches() always run. Confirm.
sqla_version = VersionInfo('sqlalchemy').semantic_version()
if sqla_version >= SemanticVersion(1, 3, 0):
    from sqlalchemy.ext.associationproxy import (
        AssociationProxy, ObjectAssociationProxyInstance)
    from sqlalchemy.orm import (RelationshipProperty as RelProperty,
                                ColumnProperty)
    from sqlalchemy.orm.attributes import QueryableAttribute
    ASSOCIATION_PROXIES_KLASSES = (AssociationProxy,
                                   ObjectAssociationProxyInstance)

apply_patches()
#!/usr/bin/env python from ka_auth import sess from novaclient import client from pbr.version import SemanticVersion from pbr.version import VersionInfo import keystoneclient.exceptions import os import sys try: import simplejson as json except ImportError: import json MAX_SEMVER = SemanticVersion(major=7, minor=1, patch=2) semver = VersionInfo('python-novaclient').semantic_version() version = 2 nova = client.Client(version, session=sess) print('Nova API: %s' % nova.api_version) # floating_ips module deprecated as of python-novaclient >= 8.0.0 if semver <= MAX_SEMVER: ip_list = nova.floating_ips.list() print(ip_list) else: endpoint_filter = { 'service_type': 'compute', 'interface': 'public', # TODO(kamidzi): may break?? 'region_name': os.environ.get('OS_REGION'), }