def test_make_topology_unspecified_service_replicas(self):
    replicas = _yaml.AttrDict({"foobar": 42})
    nodes = _yaml.AttrDict()
    self.assertRaises(RuntimeError, deploy._make_topology,
                      nodes, self._roles, replicas)
def load_component_defaults():
    from fuel_ccp.common import utils

    sections = [
        'versions', 'sources', 'configs', 'nodes', 'roles', 'replicas',
        'url', 'files'
    ]
    new_config = _yaml.AttrDict((k, _yaml.AttrDict()) for k in sections)
    for path in utils.get_config_paths():
        if not os.path.exists(path):
            LOG.debug("\"%s\" not found, skipping", path)
            continue
        LOG.debug("Adding parameters from \"%s\"", path)
        with open(path) as f:
            data = _yaml.load(f)
        for section in sections:
            if section in data:
                new_config[section]._merge(data[section])

    global _REAL_CONF
    new_config['configs']['namespace'] = _REAL_CONF.kubernetes.namespace
    new_config['configs'][
        'cluster_domain'] = _REAL_CONF.kubernetes.cluster_domain
    new_config._merge(_REAL_CONF)
    # FIXME: workaround to avoid deep-merging the 'sources' config
    for k, v in _REAL_CONF.sources._items():
        new_config['sources'][k] = v
    _REAL_CONF = new_config
def test_make_topology_replicas_bigger_than_nodes(self):
    replicas = _yaml.AttrDict({"keystone": 2})
    nodes = _yaml.AttrDict({"node1": {"roles": ["controller"]}})
    self.assertRaises(RuntimeError, deploy._make_topology,
                      nodes, self._roles, replicas)
def get_config_defaults():
    defaults = _yaml.AttrDict({
        'debug': False,
        'verbose_level': 1,
        'log_file': None,
    })
    for name in ['configs', 'nodes', 'roles', 'versions']:
        defaults[name] = _yaml.AttrDict()
    for module in CONFIG_MODULES:
        defaults._merge(module.DEFAULTS)
    return defaults
def test_make_topology_without_replicas_unused_role(self):
    nodes = _yaml.AttrDict({
        "node1": {
            "roles": ["controller"]
        },
    })
    expected_topology = {
        "_ccp_jobs": ["node1"],
        "mysql": ["node1"],
        "keystone": ["node1"]
    }
    topology = deploy._make_topology(nodes, self._roles, _yaml.AttrDict())
    self.assertDictEqual(expected_topology, topology)
def test_make_topology_failed(self):
    self.assertRaises(RuntimeError, deploy._make_topology,
                      _yaml.AttrDict(), _yaml.AttrDict(), _yaml.AttrDict())
    self.assertRaises(RuntimeError, deploy._make_topology,
                      _yaml.AttrDict(), _yaml.AttrDict({"spam": "eggs"}),
                      _yaml.AttrDict())
    self.assertRaises(RuntimeError, deploy._make_topology,
                      _yaml.AttrDict({"spam": "eggs"}), _yaml.AttrDict(),
                      _yaml.AttrDict())
    self.assertRaises(
        RuntimeError, deploy._make_topology,
        self.nested_dict_to_attrdict({"node1": {"configs": "because-cows"}}),
        _yaml.AttrDict({"spam": "eggs"}), None)
def nested_dict_to_attrdict(d):
    if isinstance(d, dict):
        return _yaml.AttrDict({k: nested_dict_to_attrdict(v)
                               for k, v in six.iteritems(d)})
    elif isinstance(d, list):
        return list(map(nested_dict_to_attrdict, d))
    else:
        return d
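# Hedged usage sketch (illustration, not part of the module): the helper
# recursively wraps plain dicts, including dicts nested inside lists, in
# _yaml.AttrDict while leaving scalars untouched:
#
#     d = nested_dict_to_attrdict({"node1": {"roles": ["controller"]}})
#     isinstance(d, _yaml.AttrDict)            # True
#     isinstance(d["node1"], _yaml.AttrDict)   # True
#     d["node1"]["roles"]                      # ["controller"]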
def test_dump(self):
    obj = _yaml.AttrDict()
    obj._merge(self.parsed)
    # On Python 2 (where str is bytes) YAML is dumped to a byte stream,
    # on Python 3 to a text stream.
    if str is bytes:
        stream = io.BytesIO()
    else:
        stream = io.StringIO()
    _yaml.dump(obj, stream)
    self.assertEqual(self.yaml, stream.getvalue())
def get_config_defaults():
    defaults = _yaml.AttrDict({
        'debug': False,
        'verbose_level': 1,
        'log_file': None,
        'default_log_levels': [
            'glanceclient=INFO',
            'keystoneauth=INFO',
            'neutronclient=INFO',
            'novaclient=INFO',
            'requests=WARN',
            'stevedore=INFO',
            'urllib3=WARN'
        ]
    })
    for name in ['configs', 'nodes', 'roles', 'versions']:
        defaults[name] = _yaml.AttrDict()
    for module in CONFIG_MODULES:
        defaults._merge(module.DEFAULTS)
    return defaults
def test_make_topology_without_replicas_twice_used_role(self):
    nodes = _yaml.AttrDict({
        "node1": {
            "roles": ["controller", "compute"]
        },
        "node[2-3]": {
            "roles": ["compute"]
        }
    })
    expected_topology = {
        "_ccp_jobs": ["node1", "node2", "node3"],
        "mysql": ["node1"],
        "keystone": ["node1"],
        "nova-compute": ["node1", "node2", "node3"],
        "libvirtd": ["node1", "node2", "node3"]
    }
    topology = deploy._make_topology(nodes, self._roles, _yaml.AttrDict())
    self.assertDictEqual(expected_topology, topology)
def setUp(self):
    super(TestDeployMakeTopology, self).setUp()
    self.useFixture(
        fixtures.MockPatch("fuel_ccp.kubernetes.list_k8s_nodes"))
    node_list = ["node1", "node2", "node3"]
    self.useFixture(
        fixtures.MockPatch("fuel_ccp.kubernetes.get_object_names",
                           return_value=node_list))
    self._roles = _yaml.AttrDict({
        "controller": ["mysql", "keystone"],
        "compute": ["nova-compute", "libvirtd"]
    })
def create_upgrade_jobs(component_name, upgrade_data, configmaps,
                        topology, exports_ctx):
    from_version = upgrade_data['_meta']['from']
    to_version = upgrade_data['_meta']['to']
    component = upgrade_data['_meta']['component']
    upgrade_def = component['upgrades']['default']['upgrade']
    files = component['upgrades']['default'].get('files')
    prefix = '{}-{}-{}'.format(upgrade_def['name'], from_version, to_version)
    LOG.info("Scheduling component %s upgrade", component_name)

    for step in upgrade_def['steps']:
        if step.get('files'):
            step['files'] = {f: files[f] for f in step['files']}
    process_files(files, component['service_dir'])
    _create_files_configmap(prefix, files, exports_ctx['files_header'])

    container = {
        "name": prefix,
        "pre": [],
        "daemon": {},
        "image": upgrade_def['image'],
    }
    service = {
        "name": prefix,
        "containers": [container],
        "exports_ctx": exports_ctx,
    }
    _create_meta_configmap(service)
    _create_service_configmap(prefix, _yaml.AttrDict())

    workflows = {prefix: ""}
    jobs = container["pre"]
    last_deps = []
    for step in upgrade_def['steps']:
        step_type = step.get('type', 'single')
        job_name = "{}-{}".format(prefix, step['name'])
        job = {"name": step['name'], "type": "single"}
        for key in ['files', 'volumes', 'topology_key']:
            if step.get(key):
                job[key] = step[key]
        jobs.append(job)
        workflow = {
            'name': job_name,
            'dependencies': last_deps,
        }
        last_deps = [job_name]
        if step_type == 'single':
            workflow['job'] = job = {}
            _fill_cmd(job, step)
            _push_files_to_workflow(workflow, step.get('files'))
        elif step_type == 'rolling-upgrade':
            services = step.get('services')
            if services is None:
                services = [s for s in upgrade_data if s != '_meta']
            workflow['roll'] = roll = []
            for service_name in services:
                roll.extend(upgrade_data[service_name])
        elif step_type == 'kill-services':
            services = step.get('services')
            if services is None:
                services = [s for s in upgrade_data if s != '_meta']
            workflow['kill'] = kill = []
            for service_name in services:
                for object_dict in upgrade_data[service_name]:
                    if object_dict['kind'] == 'Deployment':
                        kill.append(object_dict)
        else:
            raise RuntimeError(
                "Unsupported upgrade step type: %s" % step_type)
        workflows[step['name']] = \
            json.dumps({'workflow': workflow}, sort_keys=True)

    _create_workflow(workflows, prefix)
    job_specs = _create_pre_jobs(service, container, component_name, topology)
    for job_spec in job_specs:
        kubernetes.process_object(job_spec)
    LOG.info("Upgrade of component %s successfully scheduled",
             component_name)
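# Hedged sketch of the upgrade_data structure this function consumes (field
# names inferred from the code above; the concrete values are assumptions):
#
#     upgrade_data = {
#         '_meta': {
#             'from': '1.0',
#             'to': '2.0',
#             'component': {
#                 'service_dir': '<service dir>',
#                 'upgrades': {'default': {
#                     'upgrade': {
#                         'name': 'keystone',
#                         'image': 'keystone-upgrade',
#                         'steps': [
#                             {'name': 'db-sync', 'type': 'single'},
#                             {'name': 'roll', 'type': 'rolling-upgrade'},
#                         ],
#                     },
#                     'files': None,
#                 }},
#             },
#         },
#         # Every other key maps a service name to its k8s object dicts,
#         # consumed by the 'rolling-upgrade' and 'kill-services' steps.
#     }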
def _make_topology(nodes, roles, replicas):
    failed = False
    if not deploy_validation.validate_nodes_section(nodes, CONF.configs):
        failed = True
    # TODO(sreshetniak): move it to validation
    if not roles:
        LOG.error("Roles section is not specified in configs")
        failed = True
    if failed:
        raise RuntimeError("Failed to create topology for services")

    # Replicas are optional; one replica is deployed by default
    replicas = replicas or _yaml.AttrDict()

    # TODO(sreshetniak): add validation
    k8s_nodes = kubernetes.list_k8s_nodes()
    k8s_node_names = kubernetes.get_object_names(k8s_nodes)

    def find_match(glob):
        matcher = re.compile(glob)
        nodes = []
        for node in k8s_node_names:
            match = matcher.match(node)
            if match:
                nodes.append(node)
        return nodes

    roles_to_node = {}
    for node in sorted(nodes):
        matched_nodes = find_match(node)
        for role in nodes[node]["roles"] + [JOBS_ROLE]:
            roles_to_node.setdefault(role, [])
            roles_to_node[role].extend(matched_nodes)

    service_to_node = {}
    for role in sorted(roles):
        if role in roles_to_node:
            for svc in roles[role]:
                service_to_node.setdefault(svc, [])
                service_to_node[svc].extend(roles_to_node[role])
        else:
            LOG.warning("Role '%s' defined, but unused", role)

    replicas = replicas._dict.copy()
    for svc, svc_hosts in six.iteritems(service_to_node):
        svc_replicas = replicas.pop(svc, None)
        if svc_replicas is None:
            continue
        svc_hosts_count = len(svc_hosts)
        if svc_replicas > svc_hosts_count:
            LOG.error(
                "Requested %s replicas for %s while only %s hosts are able "
                "to run that service (%s)", svc_replicas, svc,
                svc_hosts_count, ", ".join(svc_hosts))
            raise RuntimeError("Replicas don't match available hosts.")
    if replicas:
        LOG.error("Replicas defined for unspecified service(s): %s",
                  ", ".join(replicas.keys()))
        raise RuntimeError("Replicas defined for unspecified service(s)")

    service_to_node[JOBS_ROLE] = roles_to_node[JOBS_ROLE]

    return {k: sorted(set(v)) for k, v in service_to_node.items()}
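# Illustrative sketch (assumed values, mirroring the mocked node list in the
# tests above): keys in the nodes section are treated as regexes matched
# against the Kubernetes node names, and the result maps each service to a
# sorted, de-duplicated node list plus the special "_ccp_jobs" entry:
#
#     nodes = {"node[1-2]": {"roles": ["controller"]}}
#     roles = {"controller": ["mysql", "keystone"]}
#     # With k8s nodes node1..node3, _make_topology yields:
#     # {"_ccp_jobs": ["node1", "node2"],
#     #  "keystone": ["node1", "node2"],
#     #  "mysql": ["node1", "node2"]}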
class TestAttrDict(testscenarios.WithScenarios, base.TestCase):
    scenarios = (
        ('empty_dict', {
            'init_dict': {},
            'value': {},
            'res': _yaml.AttrDict()
        }),
        ('one_value_dict', {
            'init_dict': {},
            'value': {'foo': True},
            'res': _yaml.AttrDict({'foo': True})
        }),
        ('nested_dict', {
            'init_dict': {},
            'value': {'bar': {'foo': True}},
            'res': _yaml.AttrDict({'bar': _yaml.AttrDict({'foo': True})})
        }),
        ('nested_nested_dict', {
            'init_dict': {},
            'value': {'baz': {'bar': {'foo': True}}},
            'res': _yaml.AttrDict({
                'baz': _yaml.AttrDict({'bar': _yaml.AttrDict({'foo': True})})
            })
        }),
        ('merge_class', {
            'init_dict': {},
            'value': _yaml.AttrDict({'foo': 'bar'}),
            'res': _yaml.AttrDict({'foo': 'bar'})
        }),
        ('merge_class_same_val', {
            'init_dict': {'foo': True},
            'value': _yaml.AttrDict({'foo': 'bar'}),
            'res': _yaml.AttrDict({'foo': 'bar'})
        }),
        ('merge_dict_same_val', {
            'init_dict': {'foo': True},
            'value': {'foo': 'bar'},
            'res': _yaml.AttrDict({'foo': 'bar'})
        }),
        ('merge_nested_multi', {
            'init_dict': {},
            'value': {
                'baz': {'bar': {'foo': True}},
                'boom': {'cat': 'no'},
                'end': 'yes'
            },
            'res': _yaml.AttrDict({
                'baz': {'bar': {'foo': True}},
                'boom': {'cat': 'no'},
                'end': 'yes'
            })
        }),
        ('merge_dict_diff_val', {
            'init_dict': {'baz': True},
            'value': {'foo': 'bar'},
            'res': _yaml.AttrDict({'baz': True, 'foo': 'bar'})
        }),
        ('merge_dict_mixed_val', {
            'init_dict': {'baz': True, 'foo': False},
            'value': {'foo': 'bar', 'cat': 'dog'},
            'res': _yaml.AttrDict({'baz': True, 'cat': 'dog', 'foo': 'bar'})
        }),
    )

    def test_merge(self):
        cls = _yaml.AttrDict(self.init_dict)
        cls._merge(self.value)
        self.assertEqual(self.res, cls)
def test_json(self):
    source = _yaml.AttrDict({'a': 1, 'b': _yaml.AttrDict({'c': 2})})
    res = source._json(sort_keys=True)
    self.assertEqual(res, '{"a": 1, "b": {"c": 2}}')