def test_extract_template_definition_with_lb_neutron(
        self, mock_kc, mock_driver, mock_objects_nodegroup_list,
        mock_objects_cluster_template_get_by_uuid):
    """Check the mesos template params when the master LB is enabled.

    Builds a cluster template with master_lb_enabled=True, runs the
    driver's _extract_template_definition and asserts both the full
    parameter dict and the selected environment files (which must
    include with_master_lb.yaml).
    """
    self.cluster_template_dict['master_lb_enabled'] = True
    cluster_template = objects.ClusterTemplate(
        self.context, **self.cluster_template_dict)
    mock_objects_cluster_template_get_by_uuid.return_value = \
        cluster_template
    cluster = objects.Cluster(self.context, **self.cluster_dict)
    worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict)
    master_ng = objects.NodeGroup(self.context, **self.master_ng_dict)
    mock_objects_nodegroup_list.return_value = [master_ng, worker_ng]
    mock_driver.return_value = mesos_dr.Driver()
    # No pre-existing octavia/neutron LB services for this cluster.
    mock_kc.return_value.client.services.list.return_value = []

    (template_path,
     definition,
     env_files) = mock_driver()._extract_template_definition(self.context,
                                                             cluster)

    # Every parameter the mesos heat template should receive.
    expected = {
        'ssh_key_name': 'keypair_id',
        'external_network': 'external_network_id',
        'fixed_network': 'fixed_network',
        'fixed_subnet': 'fixed_subnet',
        'dns_nameserver': 'dns_nameserver',
        'server_image': 'image_id',
        'master_flavor': 'master_flavor_id',
        'slave_flavor': 'flavor_id',
        'number_of_slaves': 1,
        'number_of_masters': 1,
        'http_proxy': 'http_proxy',
        'https_proxy': 'https_proxy',
        'no_proxy': 'no_proxy',
        'cluster_name': 'cluster1',
        'trustee_domain_id': self.mock_keystone.trustee_domain_id,
        'trustee_username': '******',
        'trustee_password': '******',
        'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656',
        'trust_id': '',
        'volume_driver': 'volume_driver',
        'auth_url': 'http://192.168.10.10:5000/v3',
        'region_name': self.mock_osc.cinder_region_name.return_value,
        'username': '******',
        'tenant_name': 'admin',
        'domain_name': 'domainname',
        'rexray_preempt': 'False',
        'mesos_slave_executor_env_variables': '{}',
        'mesos_slave_isolation': 'docker/runtime,filesystem/linux',
        'mesos_slave_work_dir': '/tmp/mesos/slave',
        'mesos_slave_image_providers': 'docker',
        'verify_ca': True,
        'openstack_ca': '',
        'nodes_affinity_policy': 'soft-anti-affinity'
    }
    self.assertEqual(expected, definition)
    # master_lb_enabled=True must pull in the with_master_lb env file.
    self.assertEqual(
        ['../../common/templates/environments/no_private_network.yaml',
         '../../common/templates/environments/with_master_lb.yaml'],
        env_files)
def setUp(self):
    """Build a test cluster and one monitor per supported COE driver."""
    super(MonitorsTestCase, self).setUp()
    cluster_attrs = utils.get_test_cluster(
        node_addresses=['1.2.3.4'],
        api_address='https://5.6.7.8:2376',
        master_addresses=['10.0.0.6'])
    self.cluster = objects.Cluster(self.context, **cluster_attrs)
    ng_attrs = utils.get_nodegroups_for_cluster(
        node_addresses=['1.2.3.4'], master_addresses=['10.0.0.6'])
    self.nodegroups = [
        objects.NodeGroup(self.context, **ng_attrs[role])
        for role in ('master', 'worker')
    ]
    # One monitor per driver, all pointed at the same cluster fixture.
    self.monitor = swarm_monitor.SwarmMonitor(self.context, self.cluster)
    self.v2_monitor = swarm_v2_monitor.SwarmMonitor(self.context,
                                                    self.cluster)
    self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.cluster)
    self.mesos_monitor = mesos_monitor.MesosMonitor(self.context,
                                                    self.cluster)
    # Patch both swarm metrics_spec properties to serve the canned
    # spec; cleanups restore them after each test.
    swarm_patcher = mock.patch(
        'magnum.drivers.swarm_fedora_atomic_v1.monitor.'
        'SwarmMonitor.metrics_spec',
        new_callable=mock.PropertyMock)
    self.mock_metrics_spec = swarm_patcher.start()
    self.mock_metrics_spec.return_value = self.test_metrics_spec
    self.addCleanup(swarm_patcher.stop)
    swarm_v2_patcher = mock.patch(
        'magnum.drivers.swarm_fedora_atomic_v2.monitor.'
        'SwarmMonitor.metrics_spec',
        new_callable=mock.PropertyMock)
    self.mock_metrics_spec_v2 = swarm_v2_patcher.start()
    self.mock_metrics_spec_v2.return_value = self.test_metrics_spec
    self.addCleanup(swarm_v2_patcher.stop)
def setUp(self):
    """Create a one-node cluster fixture with its two nodegroups."""
    super(NeutronTest, self).setUp()
    cluster_attrs = utils.get_test_cluster(node_count=1)
    ng_attrs = utils.get_nodegroups_for_cluster(node_count=1)
    self.cluster = objects.Cluster(self.context, **cluster_attrs)
    self.nodegroups = [
        objects.NodeGroup(self.context, **ng_attrs[role])
        for role in ('master', 'worker')
    ]
def post(self, cluster_id, nodegroup):
    """Create NodeGroup.

    :param cluster_id: the cluster the new NodeGroup belongs to.
    :param nodegroup: a json document to create this NodeGroup.
    :raises NodeGroupAlreadyExists: if the name is already used in
        this cluster.
    :raises CreateMasterNodeGroup: if role is "master".
    """
    context = pecan.request.context
    policy.enforce(context, 'nodegroup:create', action='nodegroup:create')
    cluster = api_utils.get_resource('Cluster', cluster_id)
    # Nodegroup names must be unique within a cluster.
    cluster_ngs = [ng.name for ng in cluster.nodegroups]
    if nodegroup.name in cluster_ngs:
        raise exception.NodeGroupAlreadyExists(name=nodegroup.name,
                                               cluster_id=cluster.name)
    _validate_node_count(nodegroup)
    if nodegroup.role == "master":
        # Currently we don't support adding master nodegroups.
        # Keep this until we start supporting it.
        raise exception.CreateMasterNodeGroup()
    # Fall back to cluster-level values for any attribute the request
    # left unset (wtypes.Unset) or null.
    if nodegroup.image_id is None or nodegroup.image_id == wtypes.Unset:
        nodegroup.image_id = cluster.cluster_template.image_id
    if nodegroup.flavor_id is None or nodegroup.flavor_id == wtypes.Unset:
        nodegroup.flavor_id = cluster.flavor_id
    if nodegroup.labels is None or nodegroup.labels == wtypes.Unset:
        nodegroup.labels = cluster.labels
    nodegroup_dict = nodegroup.as_dict()
    nodegroup_dict['cluster_id'] = cluster.uuid
    nodegroup_dict['project_id'] = context.project_id
    new_obj = objects.NodeGroup(context, **nodegroup_dict)
    new_obj.uuid = uuid.uuid4()
    # Creation happens asynchronously through the conductor; the API
    # returns the (not yet persisted) representation immediately.
    pecan.request.rpcapi.nodegroup_create_async(cluster, new_obj)
    return NodeGroup.convert(new_obj)
def test_create(self):
    """create() should go through dbapi and keep the object's context."""
    patcher = mock.patch.object(self.dbapi, 'create_nodegroup',
                                autospec=True)
    with patcher as create_mock:
        create_mock.return_value = self.fake_nodegroup
        nodegroup = objects.NodeGroup(self.context, **self.fake_nodegroup)
        nodegroup.create()
        create_mock.assert_called_once_with(self.fake_nodegroup)
        self.assertEqual(self.context, nodegroup._context)
def setUp(self):
    """Persist a cluster template and cluster, then build nodegroups."""
    super(TestHandler, self).setUp()
    self.handler = cluster_conductor.Handler()
    template_attrs = utils.get_test_cluster_template()
    self.cluster_template = objects.ClusterTemplate(
        self.context, **template_attrs)
    self.cluster_template.create()
    self.cluster_dict = utils.get_test_cluster(node_count=1)
    self.nodegroups_dict = utils.get_nodegroups_for_cluster(node_count=1)
    # Strip the preset ids from the nodegroup fixtures.
    for role in ('master', 'worker'):
        del self.nodegroups_dict[role]['id']
    self.cluster = objects.Cluster(self.context, **self.cluster_dict)
    self.master_count = self.cluster.master_count
    self.node_count = self.cluster.node_count
    self.cluster.create()
    self.master = objects.NodeGroup(self.context,
                                    **self.nodegroups_dict['master'])
    self.worker = objects.NodeGroup(self.context,
                                    **self.nodegroups_dict['worker'])
def post(self, cluster_id, nodegroup):
    """Create NodeGroup.

    :param cluster_id: the cluster the new NodeGroup belongs to.
    :param nodegroup: a json document to create this NodeGroup.
    :raises ClusterAPIAddressUnavailable: if the cluster has no
        api_address yet.
    :raises NodeGroupAlreadyExists: if the name is already used in
        this cluster.
    :raises CreateMasterNodeGroup: if role is "master".
    """
    context = pecan.request.context
    policy.enforce(context, 'nodegroup:create', action='nodegroup:create')
    cluster = api_utils.get_resource('Cluster', cluster_id)
    # Before we start, we need to check that the cluster has an
    # api_address. If not, just fail.
    if 'api_address' not in cluster or not cluster.api_address:
        raise exception.ClusterAPIAddressUnavailable()
    # Nodegroup names must be unique within a cluster.
    cluster_ngs = [ng.name for ng in cluster.nodegroups]
    if nodegroup.name in cluster_ngs:
        raise exception.NodeGroupAlreadyExists(name=nodegroup.name,
                                               cluster_id=cluster.name)
    _validate_node_count(nodegroup)
    if nodegroup.role == "master":
        # Currently we don't support adding master nodegroups.
        # Keep this until we start supporting it.
        raise exception.CreateMasterNodeGroup()
    # Fall back to cluster-level values for any attribute the request
    # left unset (wtypes.Unset) or null.
    if nodegroup.image_id is None or nodegroup.image_id == wtypes.Unset:
        nodegroup.image_id = cluster.cluster_template.image_id
    if nodegroup.flavor_id is None or nodegroup.flavor_id == wtypes.Unset:
        nodegroup.flavor_id = cluster.flavor_id
    if nodegroup.labels is None or nodegroup.labels == wtypes.Unset:
        nodegroup.labels = cluster.labels
    else:
        # If labels are provided check if the user wishes to merge
        # them with the values from the cluster.
        if nodegroup.merge_labels:
            labels = cluster.labels
            labels.update(nodegroup.labels)
            nodegroup.labels = labels
    nodegroup_dict = nodegroup.as_dict()
    nodegroup_dict['cluster_id'] = cluster.uuid
    nodegroup_dict['project_id'] = context.project_id
    new_obj = objects.NodeGroup(context, **nodegroup_dict)
    new_obj.uuid = uuid.uuid4()
    # Creation happens asynchronously through the conductor; the API
    # returns the (not yet persisted) representation immediately.
    pecan.request.rpcapi.nodegroup_create_async(cluster, new_obj)
    return NodeGroup.convert(new_obj)
def get_test_nodegroup(context, **kw):
    """Return a NodeGroup object populated from a db test fixture.

    :param context: request context the object is bound to.
    :param kw: overrides forwarded to the db-layer fixture helper.
    """
    db_record = db_utils.get_test_nodegroup(**kw)
    ng = objects.NodeGroup(context)
    for field, value in db_record.items():
        setattr(ng, field, value)
    return ng
def test_extract_template_definition_multi_master(
        self, mock_kc, mock_driver, mock_objects_nodegroup_list,
        mock_objects_cluster_template_get_by_uuid, mock_get):
    """Check the swarm template params for a 2-master LB cluster.

    Enables master_lb_enabled, sets the master nodegroup to 2 nodes,
    stubs the etcd discovery endpoint, then asserts the full parameter
    dict and the environment files produced by
    _extract_template_definition.
    """
    self.cluster_template_dict['master_lb_enabled'] = True
    self.master_ng_dict['node_count'] = 2
    cluster_template = objects.ClusterTemplate(
        self.context, **self.cluster_template_dict)
    mock_objects_cluster_template_get_by_uuid.return_value = \
        cluster_template
    # Canned etcd discovery response reporting size 2.
    expected_result = str('{"action":"get","node":{"key":"test","value":'
                          '"2","modifiedIndex":10,"createdIndex":10}}')
    mock_resp = mock.MagicMock()
    mock_resp.text = expected_result
    mock_get.return_value = mock_resp
    mock_driver.return_value = swarm_dr.Driver()
    cluster = objects.Cluster(self.context, **self.cluster_dict)
    worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict)
    master_ng = objects.NodeGroup(self.context, **self.master_ng_dict)
    mock_objects_nodegroup_list.return_value = [master_ng, worker_ng]
    mock_kc.return_value.client.services.list.return_value = []

    (template_path,
     definition,
     env_files) = mock_driver()._extract_template_definition(self.context,
                                                             cluster)

    # Every parameter the swarm heat template should receive.
    expected = {
        'ssh_key_name': 'keypair_id',
        'external_network': 'external_network_id',
        'fixed_network': 'fixed_network',
        'fixed_subnet': 'fixed_subnet',
        'dns_nameserver': 'dns_nameserver',
        'server_image': 'image_id',
        'master_flavor': 'master_flavor_id',
        'node_flavor': 'flavor_id',
        'number_of_masters': 2,
        'number_of_nodes': 1,
        'docker_volume_size': 20,
        'docker_storage_driver': 'devicemapper',
        'discovery_url': 'https://discovery.test.io/123456789',
        'http_proxy': 'http_proxy',
        'https_proxy': 'https_proxy',
        'no_proxy': 'no_proxy',
        'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
        'magnum_url': self.mock_osc.magnum_url.return_value,
        'tls_disabled': False,
        'registry_enabled': False,
        'network_driver': 'network_driver',
        'flannel_network_cidr': '10.101.0.0/16',
        'flannel_network_subnetlen': '26',
        'flannel_backend': 'vxlan',
        'trustee_domain_id': self.mock_keystone.trustee_domain_id,
        'trustee_username': '******',
        'trustee_password': '******',
        'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656',
        'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de',
        'auth_url': 'http://192.168.10.10:5000/v3',
        'swarm_version': 'fake-version',
        'swarm_strategy': u'spread',
        'volume_driver': 'rexray',
        'rexray_preempt': 'False',
        'docker_volume_type': 'lvmdriver-1',
        'verify_ca': True,
        'openstack_ca': '',
        'nodes_affinity_policy': 'soft-anti-affinity'
    }
    self.assertEqual(expected, definition)
    # volume_driver=rexray adds with_volume; master LB adds
    # with_master_lb.
    self.assertEqual(
        ['../../common/templates/environments/no_private_network.yaml',
         '../../common/templates/environments/with_volume.yaml',
         '../../common/templates/environments/with_master_lb.yaml'],
        env_files)
def setUp(self):
    """Build five clusters in different transitional states plus a
    fake heat client and a mock driver for periodic-sync tests.

    Clusters 1-5 are respectively CREATE/DELETE/UPDATE/DELETE/ROLLBACK
    _IN_PROGRESS; heat stacks exist for ids 11/22/33/55 while stack 44
    is deliberately missing so the not-found path gets exercised.
    """
    super(PeriodicTestCase, self).setUp()
    self.context = context.make_admin_context()

    # Can be identical for all clusters.
    trust_attrs = {
        'trustee_username': '******',
        'trustee_password': '******',
        'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
    }

    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 1, 'stack_id': '11', 'uuid': uuid,
                        'status': cluster_status.CREATE_IN_PROGRESS,
                        'status_reason': 'no change',
                        'keypair': 'keipair1', 'health_status': None})
    cluster1 = utils.get_test_cluster(**trust_attrs)
    ngs1 = utils.get_nodegroups_for_cluster()
    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 2, 'stack_id': '22', 'uuid': uuid,
                        'status': cluster_status.DELETE_IN_PROGRESS,
                        'status_reason': 'no change',
                        'keypair': 'keipair1', 'health_status': None})
    cluster2 = utils.get_test_cluster(**trust_attrs)
    ngs2 = utils.get_nodegroups_for_cluster()
    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 3, 'stack_id': '33', 'uuid': uuid,
                        'status': cluster_status.UPDATE_IN_PROGRESS,
                        'status_reason': 'no change',
                        'keypair': 'keipair1', 'health_status': None})
    cluster3 = utils.get_test_cluster(**trust_attrs)
    ngs3 = utils.get_nodegroups_for_cluster()
    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 4, 'stack_id': '44', 'uuid': uuid,
                        'status': cluster_status.DELETE_IN_PROGRESS,
                        'status_reason': 'no change',
                        'keypair': 'keipair1', 'health_status': None})
    cluster4 = utils.get_test_cluster(**trust_attrs)
    ngs4 = utils.get_nodegroups_for_cluster()
    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 5, 'stack_id': '55', 'uuid': uuid,
                        'status': cluster_status.ROLLBACK_IN_PROGRESS,
                        'status_reason': 'no change',
                        'keypair': 'keipair1', 'health_status': None})
    cluster5 = utils.get_test_cluster(**trust_attrs)
    ngs5 = utils.get_nodegroups_for_cluster()

    self.nodegroups1 = [
        objects.NodeGroup(self.context, **ngs1['master']),
        objects.NodeGroup(self.context, **ngs1['worker'])
    ]
    self.nodegroups2 = [
        objects.NodeGroup(self.context, **ngs2['master']),
        objects.NodeGroup(self.context, **ngs2['worker'])
    ]
    self.nodegroups3 = [
        objects.NodeGroup(self.context, **ngs3['master']),
        objects.NodeGroup(self.context, **ngs3['worker'])
    ]
    self.nodegroups4 = [
        objects.NodeGroup(self.context, **ngs4['master']),
        objects.NodeGroup(self.context, **ngs4['worker'])
    ]
    self.nodegroups5 = [
        objects.NodeGroup(self.context, **ngs5['master']),
        objects.NodeGroup(self.context, **ngs5['worker'])
    ]

    self.cluster1 = objects.Cluster(self.context, **cluster1)
    self.cluster2 = objects.Cluster(self.context, **cluster2)
    self.cluster3 = objects.Cluster(self.context, **cluster3)
    self.cluster4 = objects.Cluster(self.context, **cluster4)
    self.cluster5 = objects.Cluster(self.context, **cluster5)

    # This is used to mock the get_cluster_nodegroups from magnum.db.api.
    # It's not the greatest way to do it, But we have to populate the
    # dictionary in the runtime (or have statically defined uuids per NG).
    global cluster_ngs
    cluster_ngs = {
        self.cluster1.uuid: self.nodegroups1,
        self.cluster2.uuid: self.nodegroups2,
        self.cluster3.uuid: self.nodegroups3,
        self.cluster4.uuid: self.nodegroups4,
        self.cluster5.uuid: self.nodegroups5
    }

    # these tests are based on the basic behavior of our standard
    # Heat-based drivers, but drivers based on other orchestration
    # methods should generally behave in a similar fashion as far
    # as the actual calls go. It is up to the driver implementor
    # to ensure their implementation of update_cluster_status behaves
    # as expected regardless of how the periodic updater task works
    self.mock_heat_client = mock.MagicMock()
    self.stack1 = fake_stack(
        id='11', stack_status=cluster_status.CREATE_COMPLETE,
        stack_status_reason='fake_reason_11')
    self.stack2 = fake_stack(
        id='22', stack_status=cluster_status.DELETE_IN_PROGRESS,
        stack_status_reason='fake_reason_11')
    self.stack3 = fake_stack(
        id='33', stack_status=cluster_status.UPDATE_COMPLETE,
        stack_status_reason='fake_reason_33')
    self.stack5 = fake_stack(
        id='55', stack_status=cluster_status.ROLLBACK_COMPLETE,
        stack_status_reason='fake_reason_55')
    self.mock_heat_client.stacks.list.return_value = [
        self.stack1, self.stack2, self.stack3, self.stack5
    ]
    self.get_stacks = {
        '11': self.stack1,
        '22': self.stack2,
        '33': self.stack3,
        '55': self.stack5
    }
    self.mock_driver = mock.MagicMock(spec=driver.Driver)

    def _mock_update_status(context, cluster):
        # Mirror the driver contract: sync cluster status from the
        # heat stack; a missing stack completes deletes and fails
        # every other in-progress state.
        try:
            stack = self.get_stacks[cluster.stack_id]
        except KeyError:
            cluster.status_reason = "Stack %s not found" % cluster.stack_id
            if cluster.status == "DELETE_IN_PROGRESS":
                cluster.status = cluster_status.DELETE_COMPLETE
            else:
                cluster.status = cluster.status.replace(
                    "IN_PROGRESS", "FAILED")
                cluster.status = cluster.status.replace(
                    "COMPLETE", "FAILED")
        else:
            if cluster.status != stack.stack_status:
                cluster.status = stack.stack_status
                cluster.status_reason = stack.stack_status_reason

    self.mock_driver.update_cluster_status.side_effect = (
        _mock_update_status)