def setUp(self):
    """Build five test clusters in various transitional statuses."""
    super(PeriodicTestCase, self).setUp()
    ctx = context.make_admin_context()
    # Trust credentials can be identical for all clusters.
    attrs = {
        'trustee_username': '******',
        'trustee_password': '******',
        'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
    }
    specs = [
        (1, '11', cluster_status.CREATE_IN_PROGRESS),
        (2, '22', cluster_status.DELETE_IN_PROGRESS),
        (3, '33', cluster_status.UPDATE_IN_PROGRESS),
        (4, '44', cluster_status.CREATE_COMPLETE),
        (5, '55', cluster_status.ROLLBACK_IN_PROGRESS),
    ]
    for num, stack_id, status in specs:
        # Reuse the shared attrs dict, overriding the per-cluster fields.
        attrs.update({'id': num, 'stack_id': stack_id, 'status': status})
        cluster_dict = utils.get_test_cluster(**attrs)
        setattr(self, 'cluster%d' % num,
                objects.Cluster(ctx, **cluster_dict))
def setUp(self):
    """Build five test clusters covering the in-progress status range."""
    super(PeriodicTestCase, self).setUp()
    ctx = context.make_admin_context()
    # Trust credentials can be identical for all clusters.
    base = {
        'trustee_username': '******',
        'trustee_password': '******',
        'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
    }

    def make(num, stack_id, status):
        # Mutates the shared base dict, mirroring the original flow.
        base.update({'id': num, 'stack_id': stack_id, 'status': status})
        return utils.get_test_cluster(**base)

    self.cluster1 = objects.Cluster(
        ctx, **make(1, '11', cluster_status.CREATE_IN_PROGRESS))
    self.cluster2 = objects.Cluster(
        ctx, **make(2, '22', cluster_status.DELETE_IN_PROGRESS))
    self.cluster3 = objects.Cluster(
        ctx, **make(3, '33', cluster_status.UPDATE_IN_PROGRESS))
    self.cluster4 = objects.Cluster(
        ctx, **make(4, '44', cluster_status.CREATE_COMPLETE))
    self.cluster5 = objects.Cluster(
        ctx, **make(5, '55', cluster_status.ROLLBACK_IN_PROGRESS))
def cluster_post_data(**kw):
    """Return cluster attributes suitable for an API POST body.

    Internal-only attributes are stripped, and the create_timeout /
    merge_labels fields get defaults unless overridden via kwargs.
    """
    kw['for_api_use'] = True
    attrs = utils.get_test_cluster(**kw)
    attrs['create_timeout'] = kw.get('create_timeout', 15)
    attrs['merge_labels'] = kw.get('merge_labels', False)
    hidden = cluster_controller.ClusterPatchType.internal_attrs()
    return remove_internal(attrs, hidden)
def setUp(self):
    """Create a test cluster plus one monitor per supported COE.

    The swarm drivers' metrics_spec properties are patched so the
    monitors report self.test_metrics_spec.
    """
    super(MonitorsTestCase, self).setUp()
    cluster_attrs = utils.get_test_cluster(
        node_addresses=['1.2.3.4'],
        api_address='https://5.6.7.8:2376',
        master_addresses=['10.0.0.6'])
    self.cluster = objects.Cluster(self.context, **cluster_attrs)
    self.monitor = swarm_monitor.SwarmMonitor(self.context, self.cluster)
    self.v2_monitor = swarm_v2_monitor.SwarmMonitor(
        self.context, self.cluster)
    self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.cluster)
    self.mesos_monitor = mesos_monitor.MesosMonitor(
        self.context, self.cluster)
    patcher_v1 = mock.patch(
        'magnum.drivers.swarm_fedora_atomic_v1.monitor.'
        'SwarmMonitor.metrics_spec',
        new_callable=mock.PropertyMock)
    self.mock_metrics_spec = patcher_v1.start()
    self.mock_metrics_spec.return_value = self.test_metrics_spec
    self.addCleanup(patcher_v1.stop)
    patcher_v2 = mock.patch(
        'magnum.drivers.swarm_fedora_atomic_v2.monitor.'
        'SwarmMonitor.metrics_spec',
        new_callable=mock.PropertyMock)
    self.mock_metrics_spec_v2 = patcher_v2.start()
    self.mock_metrics_spec_v2.return_value = self.test_metrics_spec
    self.addCleanup(patcher_v2.stop)
def setUp(self):
    """Prepare a fake cluster, its nodegroups and a fake certificate."""
    super(RPCAPITestCase, self).setUp()
    self.fake_cluster = dbutils.get_test_cluster(driver='fake-driver')
    self.fake_nodegroups = dbutils.get_nodegroups_for_cluster()
    cert = objects.Certificate.from_db_cluster(self.fake_cluster)
    cert.csr = 'fake-csr'
    self.fake_certificate = cert
def bay_post_data(**kw):
    """Return bay attributes for a POST body.

    Translates the cluster-style fields produced by get_test_cluster
    into the legacy bay API naming.
    """
    bay = utils.get_test_cluster(**kw)
    bay['baymodel_id'] = kw.get('baymodel_id', bay['cluster_template_id'])
    bay['bay_create_timeout'] = kw.get('bay_create_timeout', 15)
    # The cluster-style names must not leak into the bay payload.
    for cluster_key in ('cluster_template_id', 'create_timeout'):
        del bay[cluster_key]
    internal = bay_controller.BayPatchType.internal_attrs()
    return remove_internal(bay, internal)
def setUp(self):
    """Create a conductor handler plus a persisted template and cluster."""
    super(TestHandler, self).setUp()
    self.handler = cluster_conductor.Handler()
    template_attrs = utils.get_test_cluster_template()
    self.cluster_template = objects.ClusterTemplate(
        self.context, **template_attrs)
    self.cluster_template.create()
    cluster_attrs = utils.get_test_cluster(node_count=1)
    self.cluster = objects.Cluster(self.context, **cluster_attrs)
    self.cluster.create()
def setUp(self):
    """Create a test cluster and its master/worker nodegroup objects."""
    super(NeutronTest, self).setUp()
    cluster_attrs = utils.get_test_cluster(node_count=1)
    ng_attrs = utils.get_nodegroups_for_cluster(node_count=1)
    self.cluster = objects.Cluster(self.context, **cluster_attrs)
    self.nodegroups = [
        objects.NodeGroup(self.context, **ng_attrs[role])
        for role in ('master', 'worker')
    ]
def test_create(self, mock_openstack_client_class, mock_driver, mock_cm,
                mock_trust_manager, mock_heat_poller_class):
    """Verify cluster_create drives the driver, trust and cert managers.

    Exercises the happy path: the heat poller finishes immediately, the
    driver's create_stack succeeds, and the created cluster ends up in
    CREATE_IN_PROGRESS with one notification emitted.
    """
    timeout = 15
    # Make the poller report completion on the first check so the
    # looping call exits immediately.
    mock_poller = mock.MagicMock()
    mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone()
    mock_heat_poller_class.return_value = mock_poller
    osc = mock.sentinel.osc

    def return_keystone():
        return self.keystone_client

    osc.keystone = return_keystone
    mock_openstack_client_class.return_value = osc
    mock_dr = mock.MagicMock()
    mock_driver.return_value = mock_dr

    def create_stack_side_effect(context, osc, cluster, timeout):
        return {'stack': {'id': 'stack-id'}}

    mock_dr.create_stack.side_effect = create_stack_side_effect
    # Just create a new cluster, since the one in setUp is already
    # created and the previous solution seems kind of hacky.
    cluster_dict = utils.get_test_cluster(node_count=1)
    # NOTE(review): this object is immediately shadowed below and never
    # used — presumably left over from an earlier revision; confirm.
    cluster = objects.Cluster(self.context, **cluster_dict)
    node_count = 1
    master_count = 1
    # Let the DB assign id/uuid on create().
    del cluster_dict['id']
    del cluster_dict['uuid']
    cluster_obj = objects.Cluster(self.context, **cluster_dict)
    cluster = self.handler.cluster_create(self.context, cluster_obj,
                                          master_count, node_count,
                                          timeout)
    notifications = fake_notifier.NOTIFICATIONS
    self.assertEqual(1, len(notifications))
    self.assertEqual(
        'magnum.cluster.create', notifications[0].event_type)
    self.assertEqual(
        taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
    mock_dr.create_cluster.assert_called_once_with(self.context, cluster,
                                                   timeout)
    mock_cm.generate_certificates_to_cluster.assert_called_once_with(
        cluster, context=self.context)
    self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status)
    mock_trust_manager.create_trustee_and_trust.assert_called_once_with(
        osc, cluster)
    # The default master and worker nodegroups must exist and reflect
    # the requested counts.
    self.assertEqual(2, len(cluster.nodegroups))
    self.assertEqual(node_count, cluster.node_count)
    self.assertEqual(master_count, cluster.master_count)
    self.assertEqual(node_count, cluster.default_ng_worker.node_count)
    self.assertEqual(master_count, cluster.default_ng_master.node_count)
def setUp(self):
    """Build a fake cluster dict and its matching template object."""
    super(TestClusterObject, self).setUp()
    self.fake_cluster = utils.get_test_cluster()
    self.fake_cluster.update({
        'trust_id': 'trust_id',
        'trustee_username': '******',
        'trustee_user_id': 'trustee_user_id',
        'trustee_password': '******',
        'coe_version': 'fake-coe-version',
        'container_version': 'fake-container-version',
    })
    template_uuid = self.fake_cluster['cluster_template_id']
    self.fake_cluster_template = objects.ClusterTemplate(
        uuid=template_uuid)
def test_create(self, mock_openstack_client_class, mock_driver, mock_cm,
                mock_trust_manager, mock_heat_poller_class):
    """Verify cluster_create drives the driver, trust and cert managers.

    Happy-path test: the heat poller completes immediately, create_stack
    succeeds, and the new cluster lands in CREATE_IN_PROGRESS with a
    single pending notification.
    """
    timeout = 15
    # Make the poller report completion on the first check so the
    # looping call exits immediately.
    mock_poller = mock.MagicMock()
    mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone()
    mock_heat_poller_class.return_value = mock_poller
    osc = mock.sentinel.osc

    def return_keystone():
        return self.keystone_client

    osc.keystone = return_keystone
    mock_openstack_client_class.return_value = osc
    mock_dr = mock.MagicMock()
    mock_driver.return_value = mock_dr

    def create_stack_side_effect(context, osc, cluster, timeout):
        return {'stack': {'id': 'stack-id'}}

    mock_dr.create_stack.side_effect = create_stack_side_effect
    # Just create a new cluster, since the one in setUp is already
    # created and the previous solution seems kind of hacky.
    cluster_dict = utils.get_test_cluster(node_count=1)
    # NOTE(review): this object is immediately shadowed below and never
    # used — presumably left over from an earlier revision; confirm.
    cluster = objects.Cluster(self.context, **cluster_dict)
    node_count = 1
    master_count = 1
    # Let the DB assign id/uuid on create().
    del cluster_dict['id']
    del cluster_dict['uuid']
    cluster_obj = objects.Cluster(self.context, **cluster_dict)
    cluster = self.handler.cluster_create(self.context, cluster_obj,
                                          master_count, node_count,
                                          timeout)
    notifications = fake_notifier.NOTIFICATIONS
    self.assertEqual(1, len(notifications))
    self.assertEqual('magnum.cluster.create', notifications[0].event_type)
    self.assertEqual(taxonomy.OUTCOME_PENDING,
                     notifications[0].payload['outcome'])
    mock_dr.create_cluster.assert_called_once_with(self.context, cluster,
                                                   timeout)
    mock_cm.generate_certificates_to_cluster.assert_called_once_with(
        cluster, context=self.context)
    self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status)
    mock_trust_manager.create_trustee_and_trust.assert_called_once_with(
        osc, cluster)
    # The default master and worker nodegroups must exist and reflect
    # the requested counts.
    self.assertEqual(2, len(cluster.nodegroups))
    self.assertEqual(node_count, cluster.node_count)
    self.assertEqual(master_count, cluster.master_count)
    self.assertEqual(node_count, cluster.default_ng_worker.node_count)
    self.assertEqual(master_count, cluster.default_ng_master.node_count)
def setUp(self):
    """Build a fake cluster dict (with keypair) and its template object."""
    super(TestClusterObject, self).setUp()
    self.fake_cluster = utils.get_test_cluster()
    self.fake_cluster.update({
        'trust_id': 'trust_id',
        'trustee_username': '******',
        'trustee_user_id': 'trustee_user_id',
        'trustee_password': '******',
        'coe_version': 'fake-coe-version',
        'container_version': 'fake-container-version',
    })
    template_uuid = self.fake_cluster['cluster_template_id']
    self.fake_cluster_template = objects.ClusterTemplate(
        uuid=template_uuid)
    self.fake_cluster['keypair'] = 'keypair1'
def get_test_cluster(context, **kw):
    """Return a Cluster object with appropriate attributes.

    NOTE: The object leaves the attributes marked as changed, such
    that a create() could be used to commit it to the DB.
    """
    db_cluster = db_utils.get_test_cluster(**kw)
    # Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
        db_cluster.pop('id')
    cluster = objects.Cluster(context)
    for field, value in db_cluster.items():
        setattr(cluster, field, value)
    return cluster
def setUp(self):
    """Create a test cluster and one monitor per COE; patch metrics_spec."""
    super(MonitorsTestCase, self).setUp()
    cluster_attrs = utils.get_test_cluster(
        node_addresses=['1.2.3.4'],
        api_address='https://5.6.7.8:2376',
        master_addresses=['10.0.0.6'])
    self.cluster = objects.Cluster(self.context, **cluster_attrs)
    self.monitor = swarm_monitor.SwarmMonitor(self.context, self.cluster)
    self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.cluster)
    self.mesos_monitor = mesos_monitor.MesosMonitor(self.context,
                                                    self.cluster)
    # Stub the swarm monitor's metrics_spec property for the whole test.
    patcher = mock.patch(
        'magnum.conductor.swarm_monitor.SwarmMonitor.metrics_spec',
        new_callable=mock.PropertyMock)
    self.mock_metrics_spec = patcher.start()
    self.mock_metrics_spec.return_value = self.test_metrics_spec
    self.addCleanup(patcher.stop)
def setUp(self):
    """Build a fully populated fake cluster dict plus its template."""
    super(TestClusterObject, self).setUp()
    self.fake_cluster = utils.get_test_cluster()
    self.fake_cluster.update({
        'trust_id': 'trust_id',
        'trustee_username': '******',
        'trustee_user_id': 'trustee_user_id',
        'trustee_password': '******',
        'coe_version': 'fake-coe-version',
        'container_version': 'fake-container-version',
    })
    template_uuid = self.fake_cluster['cluster_template_id']
    self.fake_cluster_template = objects.ClusterTemplate(
        uuid=template_uuid)
    self.fake_cluster.update({
        'keypair': 'keypair1',
        'docker_volume_size': 3,
        'labels': {},
        'health_status': 'HEALTHY',
        'health_status_reason': {},
    })
def setUp(self):
    """Build a fake cluster dict, its nodegroups, and its template."""
    super(TestClusterObject, self).setUp()
    self.fake_cluster = utils.get_test_cluster()
    self.fake_nodegroups = utils.get_nodegroups_for_cluster()
    self.fake_cluster.update({
        'trust_id': 'trust_id',
        'trustee_username': '******',
        'trustee_user_id': 'trustee_user_id',
        'trustee_password': '******',
        'coe_version': 'fake-coe-version',
        'container_version': 'fake-container-version',
    })
    template_uuid = self.fake_cluster['cluster_template_id']
    self.fake_cluster_template = objects.ClusterTemplate(
        uuid=template_uuid)
    self.fake_cluster.update({
        'keypair': 'keypair1',
        'docker_volume_size': 3,
        'labels': {},
        'health_status': 'HEALTHY',
        'health_status_reason': {},
    })
def test_get_nodegroup_list_with_filters(self):
    """Listing nodegroups honors name, node_count and flavor filters."""
    cluster_attrs = utils.get_test_cluster(
        id=1, uuid=uuidutils.generate_uuid())
    cluster = self.dbapi.create_cluster(cluster_attrs)
    group1 = utils.create_test_nodegroup(
        name='group-one', cluster_id=cluster.uuid, flavor_id=1,
        uuid=uuidutils.generate_uuid(), node_count=1)
    group2 = utils.create_test_nodegroup(
        name='group-two', cluster_id=cluster.uuid, flavor_id=1,
        uuid=uuidutils.generate_uuid(), node_count=1)
    group3 = utils.create_test_nodegroup(
        name='group-four', cluster_id=cluster.uuid, flavor_id=2,
        uuid=uuidutils.generate_uuid(), node_count=3)
    # (filters, expected nodegroup ids) — last case matches nothing.
    cases = [
        ({'name': 'group-one'}, [group1.id]),
        ({'node_count': 1}, [group1.id, group2.id]),
        ({'flavor_id': 2, 'node_count': 3}, [group3.id]),
        ({'name': 'group-five'}, []),
    ]
    for filters, expected_ids in cases:
        res = self.dbapi.list_cluster_nodegroups(
            self.context, cluster.uuid, filters=filters)
        self.assertEqual(expected_ids, [r.id for r in res])
def setUp(self):
    """Create handler, persisted template/cluster, and nodegroup objects."""
    super(TestHandler, self).setUp()
    self.handler = cluster_conductor.Handler()
    template_attrs = utils.get_test_cluster_template()
    self.cluster_template = objects.ClusterTemplate(
        self.context, **template_attrs)
    self.cluster_template.create()
    self.cluster_dict = utils.get_test_cluster(node_count=1)
    self.nodegroups_dict = utils.get_nodegroups_for_cluster(node_count=1)
    # Drop the ids so the DB assigns them on create().
    for role in ('master', 'worker'):
        del self.nodegroups_dict[role]['id']
    self.cluster = objects.Cluster(self.context, **self.cluster_dict)
    self.master_count = self.cluster.master_count
    self.node_count = self.cluster.node_count
    self.cluster.create()
    self.master = objects.NodeGroup(
        self.context, **self.nodegroups_dict['master'])
    self.worker = objects.NodeGroup(
        self.context, **self.nodegroups_dict['worker'])
def test_get_nodegroup_list_with_filters(self):
    """Listing nodegroups honors name, node_count and flavor filters."""
    cluster_attrs = utils.get_test_cluster(
        id=1, uuid=uuidutils.generate_uuid())
    cluster = self.dbapi.create_cluster(cluster_attrs)
    group1 = utils.create_test_nodegroup(
        name='group-one', cluster_id=cluster.uuid, flavor_id=1,
        uuid=uuidutils.generate_uuid(), node_count=1)
    group2 = utils.create_test_nodegroup(
        name='group-two', cluster_id=cluster.uuid, flavor_id=1,
        uuid=uuidutils.generate_uuid(), node_count=1)
    group3 = utils.create_test_nodegroup(
        name='group-four', cluster_id=cluster.uuid, flavor_id=2,
        uuid=uuidutils.generate_uuid(), node_count=3)

    def listed_ids(filters):
        # Helper: ids returned for a given filter dict.
        res = self.dbapi.list_cluster_nodegroups(
            self.context, cluster.uuid, filters=filters)
        return [r.id for r in res]

    self.assertEqual([group1.id], listed_ids({'name': 'group-one'}))
    self.assertEqual([group1.id, group2.id], listed_ids({'node_count': 1}))
    self.assertEqual([group3.id],
                     listed_ids({'flavor_id': 2, 'node_count': 3}))
    # A name that matches nothing yields an empty result.
    self.assertEqual([], listed_ids({'name': 'group-five'}))
def setUp(self):
    """Create handler, persisted template/cluster, and nodegroup objects."""
    super(TestHandler, self).setUp()
    self.handler = cluster_conductor.Handler()
    template_attrs = utils.get_test_cluster_template()
    self.cluster_template = objects.ClusterTemplate(
        self.context, **template_attrs)
    self.cluster_template.create()
    self.cluster_dict = utils.get_test_cluster(node_count=1)
    self.nodegroups_dict = utils.get_nodegroups_for_cluster(node_count=1)
    # Drop the ids so the DB assigns them on create().
    del self.nodegroups_dict['master']['id']
    del self.nodegroups_dict['worker']['id']
    self.cluster = objects.Cluster(self.context, **self.cluster_dict)
    self.master_count = self.cluster.master_count
    self.node_count = self.cluster.node_count
    self.cluster.create()
    master_attrs = self.nodegroups_dict['master']
    worker_attrs = self.nodegroups_dict['worker']
    self.master = objects.NodeGroup(self.context, **master_attrs)
    self.worker = objects.NodeGroup(self.context, **worker_attrs)
def setUp(self):
    """Create a cluster with template and nodegroups plus all monitors.

    Also installs a requests-mock fixture and patches the swarm v1/v2
    drivers' metrics_spec properties to self.test_metrics_spec.
    """
    super(MonitorsTestCase, self).setUp()
    self.requests_mock = self.useFixture(fixture.Fixture())
    cluster_attrs = utils.get_test_cluster(
        node_addresses=['1.2.3.4'],
        api_address='https://5.6.7.8:2376',
        master_addresses=['10.0.0.6'],
        labels={})
    self.cluster = objects.Cluster(self.context, **cluster_attrs)
    template_attrs = utils.get_test_cluster_template(
        master_lb_enabled=False)
    self.cluster.cluster_template = objects.ClusterTemplate(
        self.context, **template_attrs)
    ng_attrs = utils.get_nodegroups_for_cluster(
        node_addresses=['1.2.3.4'], master_addresses=['10.0.0.6'])
    self.nodegroups = [
        objects.NodeGroup(self.context, **ng_attrs['master']),
        objects.NodeGroup(self.context, **ng_attrs['worker']),
    ]
    self.monitor = swarm_monitor.SwarmMonitor(self.context, self.cluster)
    self.v2_monitor = swarm_v2_monitor.SwarmMonitor(
        self.context, self.cluster)
    self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.cluster)
    self.mesos_monitor = mesos_monitor.MesosMonitor(
        self.context, self.cluster)
    patcher_v1 = mock.patch(
        'magnum.drivers.swarm_fedora_atomic_v1.monitor.'
        'SwarmMonitor.metrics_spec',
        new_callable=mock.PropertyMock)
    self.mock_metrics_spec = patcher_v1.start()
    self.mock_metrics_spec.return_value = self.test_metrics_spec
    self.addCleanup(patcher_v1.stop)
    patcher_v2 = mock.patch(
        'magnum.drivers.swarm_fedora_atomic_v2.monitor.'
        'SwarmMonitor.metrics_spec',
        new_callable=mock.PropertyMock)
    self.mock_metrics_spec_v2 = patcher_v2.start()
    self.mock_metrics_spec_v2.return_value = self.test_metrics_spec
    self.addCleanup(patcher_v2.stop)
def test_create_with_environment(
        self, mock_short_id, mock_openstack_client_class, mock_driver,
        mock_extract_tmpl_def, mock_cert_manager, mock_trust_manager,
        mock_get_template_contents, mock_process_mult,
        mock_heat_poller_class):
    """Verify heat stack creation passes environment files and params.

    Mocks the template-definition extraction and environment-file
    processing, then asserts the exact keyword arguments handed to
    heatclient's stacks.create.
    """
    timeout = 15
    # The poller completes on the first check so the looping call exits.
    mock_poller = mock.MagicMock()
    mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone()
    mock_heat_poller_class.return_value = mock_poller
    mock_driver.return_value = k8s_atomic_dr.Driver()
    mock_short_id.return_value = 'short_id'
    mock_extract_tmpl_def.return_value = ('the/template/path.yaml', {
        'heat_param_1': 'foo',
        'heat_param_2': 'bar'
    }, ['env_file_1', 'env_file_2'])
    mock_get_template_contents.return_value = ({
        'tmpl_file_1': 'some content',
        'tmpl_file_2': 'some more content'
    }, 'some template yaml')

    def do_mock_process_mult(env_paths=None, env_list_tracker=None):
        # Mimic heatclient's multi-environment processing: record each
        # env path with a file:/// prefix and fabricate its contents.
        self.assertEqual(env_list_tracker, [])
        for f in env_paths:
            env_list_tracker.append('file:///' + f)
        env_map = {path: 'content of ' + path
                   for path in env_list_tracker}
        return (env_map, None)

    mock_process_mult.side_effect = do_mock_process_mult
    mock_hc = mock.Mock()
    mock_hc.stacks.create.return_value = {'stack': {'id': 'stack-id'}}
    osc = mock.Mock()
    osc.heat.return_value = mock_hc
    mock_openstack_client_class.return_value = osc
    # NOTE(ttsiouts): self.cluster is already created so it's
    # a bad idea to use it and try to create it again... Instead
    # get a new object and use it.
    cluster_dict = utils.get_test_cluster(
        node_count=1, uuid='f6a99187-6f42-4fbb-aa6f-18407c0ee50e')
    del cluster_dict['id']
    cluster = objects.Cluster(self.context, **cluster_dict)
    node_count = cluster.node_count
    master_count = cluster.master_count
    self.handler.cluster_create(self.context, cluster, master_count,
                                node_count, timeout)
    mock_extract_tmpl_def.assert_called_once_with(self.context, cluster,
                                                  nodegroups=None)
    mock_get_template_contents.assert_called_once_with(
        'the/template/path.yaml')
    mock_process_mult.assert_called_once_with(
        env_paths=['the/template/env_file_1', 'the/template/env_file_2'],
        env_list_tracker=mock.ANY)
    # The stack must receive both env files (listed and inlined),
    # the template files, and the merged parameters.
    mock_hc.stacks.create.assert_called_once_with(
        environment_files=[
            'file:///the/template/env_file_1',
            'file:///the/template/env_file_2'
        ],
        files={
            'tmpl_file_1': 'some content',
            'tmpl_file_2': 'some more content',
            'file:///the/template/env_file_1':
                'content of file:///the/template/env_file_1',
            'file:///the/template/env_file_2':
                'content of file:///the/template/env_file_2'
        },
        parameters={
            'is_cluster_stack': True,
            'heat_param_1': 'foo',
            'heat_param_2': 'bar'
        },
        stack_name=('%s-short_id' % cluster.name),
        template='some template yaml',
        timeout_mins=timeout)
    self.assertEqual(node_count, cluster.node_count)
    self.assertEqual(node_count, cluster.default_ng_worker.node_count)
    self.assertEqual(master_count, cluster.master_count)
    self.assertEqual(master_count, cluster.default_ng_master.node_count)
def cluster_post_data(**kw):
    """Return cluster attributes suitable for an API POST body."""
    attrs = utils.get_test_cluster(**kw)
    attrs['create_timeout'] = kw.get('create_timeout', 15)
    hidden = cluster_controller.ClusterPatchType.internal_attrs()
    return remove_internal(attrs, hidden)
def setUp(self):
    """Build five clusters (with nodegroups) and a mocked heat driver.

    Clusters 1-5 cover CREATE/DELETE/UPDATE/DELETE/ROLLBACK in-progress
    statuses; fake heat stacks provide the target statuses the periodic
    task should sync to. Cluster 4's stack ('44') is deliberately absent
    from the stack list to exercise the not-found path.
    """
    super(PeriodicTestCase, self).setUp()
    self.context = context.make_admin_context()
    # Can be identical for all clusters.
    trust_attrs = {
        'trustee_username': '******',
        'trustee_password': '******',
        'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
    }
    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 1, 'stack_id': '11', 'uuid': uuid,
                        'status': cluster_status.CREATE_IN_PROGRESS,
                        'status_reason': 'no change'})
    cluster1 = utils.get_test_cluster(**trust_attrs)
    ngs1 = utils.get_nodegroups_for_cluster()
    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 2, 'stack_id': '22', 'uuid': uuid,
                        'status': cluster_status.DELETE_IN_PROGRESS,
                        'status_reason': 'no change'})
    cluster2 = utils.get_test_cluster(**trust_attrs)
    ngs2 = utils.get_nodegroups_for_cluster()
    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 3, 'stack_id': '33', 'uuid': uuid,
                        'status': cluster_status.UPDATE_IN_PROGRESS,
                        'status_reason': 'no change'})
    cluster3 = utils.get_test_cluster(**trust_attrs)
    ngs3 = utils.get_nodegroups_for_cluster()
    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 4, 'stack_id': '44', 'uuid': uuid,
                        'status': cluster_status.DELETE_IN_PROGRESS,
                        'status_reason': 'no change'})
    cluster4 = utils.get_test_cluster(**trust_attrs)
    ngs4 = utils.get_nodegroups_for_cluster()
    uuid = uuidutils.generate_uuid()
    trust_attrs.update({'id': 5, 'stack_id': '55', 'uuid': uuid,
                        'status': cluster_status.ROLLBACK_IN_PROGRESS,
                        'status_reason': 'no change'})
    cluster5 = utils.get_test_cluster(**trust_attrs)
    ngs5 = utils.get_nodegroups_for_cluster()
    self.nodegroups1 = [
        objects.NodeGroup(self.context, **ngs1['master']),
        objects.NodeGroup(self.context, **ngs1['worker'])
    ]
    self.nodegroups2 = [
        objects.NodeGroup(self.context, **ngs2['master']),
        objects.NodeGroup(self.context, **ngs2['worker'])
    ]
    self.nodegroups3 = [
        objects.NodeGroup(self.context, **ngs3['master']),
        objects.NodeGroup(self.context, **ngs3['worker'])
    ]
    self.nodegroups4 = [
        objects.NodeGroup(self.context, **ngs4['master']),
        objects.NodeGroup(self.context, **ngs4['worker'])
    ]
    self.nodegroups5 = [
        objects.NodeGroup(self.context, **ngs5['master']),
        objects.NodeGroup(self.context, **ngs5['worker'])
    ]
    self.cluster1 = objects.Cluster(self.context, **cluster1)
    self.cluster2 = objects.Cluster(self.context, **cluster2)
    self.cluster3 = objects.Cluster(self.context, **cluster3)
    self.cluster4 = objects.Cluster(self.context, **cluster4)
    self.cluster5 = objects.Cluster(self.context, **cluster5)
    # This is used to mock the get_cluster_nodegroups from magnum.db.api.
    # It's not the greatest way to do it, But we have to populate the
    # dictionary in the runtime (or have statically defined uuids per NG).
    global cluster_ngs
    cluster_ngs = {
        self.cluster1.uuid: self.nodegroups1,
        self.cluster2.uuid: self.nodegroups2,
        self.cluster3.uuid: self.nodegroups3,
        self.cluster4.uuid: self.nodegroups4,
        self.cluster5.uuid: self.nodegroups5
    }
    # these tests are based on the basic behavior of our standard
    # Heat-based drivers, but drivers based on other orchestration
    # methods should generally behave in a similar fashion as far
    # as the actual calls go. It is up to the driver implementor
    # to ensure their implementation of update_cluster_status behaves
    # as expected regardless of how the periodic updater task works
    self.mock_heat_client = mock.MagicMock()
    self.stack1 = fake_stack(
        id='11', stack_status=cluster_status.CREATE_COMPLETE,
        stack_status_reason='fake_reason_11')
    self.stack2 = fake_stack(
        id='22', stack_status=cluster_status.DELETE_IN_PROGRESS,
        stack_status_reason='fake_reason_11')
    self.stack3 = fake_stack(
        id='33', stack_status=cluster_status.UPDATE_COMPLETE,
        stack_status_reason='fake_reason_33')
    self.stack5 = fake_stack(
        id='55', stack_status=cluster_status.ROLLBACK_COMPLETE,
        stack_status_reason='fake_reason_55')
    self.mock_heat_client.stacks.list.return_value = [
        self.stack1, self.stack2, self.stack3, self.stack5]
    self.get_stacks = {
        '11': self.stack1,
        '22': self.stack2,
        '33': self.stack3,
        '55': self.stack5
    }
    self.mock_driver = mock.MagicMock(spec=driver.Driver)

    def _mock_update_status(context, cluster):
        # Mimic the heat driver's status sync: copy the stack status
        # over, or mark the cluster FAILED/DELETE_COMPLETE when the
        # stack no longer exists.
        try:
            stack = self.get_stacks[cluster.stack_id]
        except KeyError:
            cluster.status_reason = "Stack %s not found" % cluster.stack_id
            if cluster.status == "DELETE_IN_PROGRESS":
                cluster.status = cluster_status.DELETE_COMPLETE
            else:
                cluster.status = cluster.status.replace("IN_PROGRESS",
                                                        "FAILED")
                cluster.status = cluster.status.replace("COMPLETE",
                                                        "FAILED")
        else:
            if cluster.status != stack.stack_status:
                cluster.status = stack.stack_status
                cluster.status_reason = stack.stack_status_reason

    self.mock_driver.update_cluster_status.side_effect = (
        _mock_update_status)
def setUp(self):
    """Build a single test cluster object for the Octavia tests."""
    super(OctaviaTest, self).setUp()
    attrs = utils.get_test_cluster(node_count=1)
    self.cluster = objects.Cluster(self.context, **attrs)
def test_create_with_environment(self, mock_short_id,
                                 mock_openstack_client_class, mock_driver,
                                 mock_extract_tmpl_def, mock_cert_manager,
                                 mock_trust_manager,
                                 mock_get_template_contents,
                                 mock_process_mult,
                                 mock_heat_poller_class):
    """Verify heat stack creation passes environment files and params.

    Mocks the template-definition extraction and environment-file
    processing, then asserts the exact keyword arguments handed to
    heatclient's stacks.create.
    """
    timeout = 15
    # The poller completes on the first check so the looping call exits.
    mock_poller = mock.MagicMock()
    mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone()
    mock_heat_poller_class.return_value = mock_poller
    mock_driver.return_value = k8s_atomic_dr.Driver()
    mock_short_id.return_value = 'short_id'
    mock_extract_tmpl_def.return_value = (
        'the/template/path.yaml',
        {'heat_param_1': 'foo', 'heat_param_2': 'bar'},
        ['env_file_1', 'env_file_2'])
    mock_get_template_contents.return_value = (
        {'tmpl_file_1': 'some content',
         'tmpl_file_2': 'some more content'},
        'some template yaml')

    def do_mock_process_mult(env_paths=None, env_list_tracker=None):
        # Mimic heatclient's multi-environment processing: record each
        # env path with a file:/// prefix and fabricate its contents.
        self.assertEqual(env_list_tracker, [])
        for f in env_paths:
            env_list_tracker.append('file:///' + f)
        env_map = {path: 'content of ' + path
                   for path in env_list_tracker}
        return (env_map, None)

    mock_process_mult.side_effect = do_mock_process_mult
    mock_hc = mock.Mock()
    mock_hc.stacks.create.return_value = {'stack': {'id': 'stack-id'}}
    osc = mock.Mock()
    osc.heat.return_value = mock_hc
    mock_openstack_client_class.return_value = osc
    # NOTE(ttsiouts): self.cluster is already created so it's
    # a bad idea to use it and try to create it again... Instead
    # get a new object and use it.
    cluster_dict = utils.get_test_cluster(
        node_count=1, uuid='f6a99187-6f42-4fbb-aa6f-18407c0ee50e')
    del cluster_dict['id']
    cluster = objects.Cluster(self.context, **cluster_dict)
    node_count = cluster.node_count
    master_count = cluster.master_count
    self.handler.cluster_create(self.context, cluster, master_count,
                                node_count, timeout)
    mock_extract_tmpl_def.assert_called_once_with(self.context, cluster)
    mock_get_template_contents.assert_called_once_with(
        'the/template/path.yaml')
    mock_process_mult.assert_called_once_with(
        env_paths=['the/template/env_file_1', 'the/template/env_file_2'],
        env_list_tracker=mock.ANY)
    # The stack must receive both env files (listed and inlined),
    # the template files, and the extracted parameters.
    mock_hc.stacks.create.assert_called_once_with(
        environment_files=['file:///the/template/env_file_1',
                           'file:///the/template/env_file_2'],
        files={
            'tmpl_file_1': 'some content',
            'tmpl_file_2': 'some more content',
            'file:///the/template/env_file_1':
                'content of file:///the/template/env_file_1',
            'file:///the/template/env_file_2':
                'content of file:///the/template/env_file_2'
        },
        parameters={'heat_param_1': 'foo', 'heat_param_2': 'bar'},
        stack_name=('%s-short_id' % cluster.name),
        template='some template yaml',
        timeout_mins=timeout)
    self.assertEqual(node_count, cluster.node_count)
    self.assertEqual(node_count, cluster.default_ng_worker.node_count)
    self.assertEqual(master_count, cluster.master_count)
    self.assertEqual(master_count, cluster.default_ng_master.node_count)
def setUp(self):
    """Build five in-progress clusters and a mocked heat status driver.

    Fake heat stacks provide the target statuses the periodic task
    should sync to. Cluster 4's stack ('44') is deliberately absent
    from the stack list to exercise the stack-not-found path.
    """
    super(PeriodicTestCase, self).setUp()
    self.context = context.make_admin_context()
    # Can be identical for all clusters.
    trust_attrs = {
        'trustee_username': '******',
        'trustee_password': '******',
        'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
    }
    trust_attrs.update({
        'id': 1, 'stack_id': '11',
        'status': cluster_status.CREATE_IN_PROGRESS,
        'status_reason': 'no change'
    })
    cluster1 = utils.get_test_cluster(**trust_attrs)
    trust_attrs.update({
        'id': 2, 'stack_id': '22',
        'status': cluster_status.DELETE_IN_PROGRESS,
        'status_reason': 'no change'
    })
    cluster2 = utils.get_test_cluster(**trust_attrs)
    trust_attrs.update({
        'id': 3, 'stack_id': '33',
        'status': cluster_status.UPDATE_IN_PROGRESS,
        'status_reason': 'no change'
    })
    cluster3 = utils.get_test_cluster(**trust_attrs)
    trust_attrs.update({
        'id': 4, 'stack_id': '44',
        'status': cluster_status.DELETE_IN_PROGRESS,
        'status_reason': 'no change'
    })
    cluster4 = utils.get_test_cluster(**trust_attrs)
    trust_attrs.update({
        'id': 5, 'stack_id': '55',
        'status': cluster_status.ROLLBACK_IN_PROGRESS,
        'status_reason': 'no change'
    })
    cluster5 = utils.get_test_cluster(**trust_attrs)
    self.cluster1 = objects.Cluster(self.context, **cluster1)
    self.cluster2 = objects.Cluster(self.context, **cluster2)
    self.cluster3 = objects.Cluster(self.context, **cluster3)
    self.cluster4 = objects.Cluster(self.context, **cluster4)
    self.cluster5 = objects.Cluster(self.context, **cluster5)
    # these tests are based on the basic behavior of our standard
    # Heat-based drivers, but drivers based on other orchestration
    # methods should generally behave in a similar fashion as far
    # as the actual calls go. It is up to the driver implementor
    # to ensure their implementation of update_cluster_status behaves
    # as expected regardless of how the periodic updater task works
    self.mock_heat_client = mock.MagicMock()
    self.stack1 = fake_stack(
        id='11', stack_status=cluster_status.CREATE_COMPLETE,
        stack_status_reason='fake_reason_11')
    self.stack2 = fake_stack(
        id='22', stack_status=cluster_status.DELETE_IN_PROGRESS,
        stack_status_reason='fake_reason_11')
    self.stack3 = fake_stack(
        id='33', stack_status=cluster_status.UPDATE_COMPLETE,
        stack_status_reason='fake_reason_33')
    self.stack5 = fake_stack(
        id='55', stack_status=cluster_status.ROLLBACK_COMPLETE,
        stack_status_reason='fake_reason_55')
    self.mock_heat_client.stacks.list.return_value = [
        self.stack1, self.stack2, self.stack3, self.stack5
    ]
    self.get_stacks = {
        '11': self.stack1,
        '22': self.stack2,
        '33': self.stack3,
        '55': self.stack5
    }
    self.mock_driver = mock.MagicMock(spec=driver.Driver)

    def _mock_update_status(context, cluster):
        # Mimic the heat driver's status sync: copy the stack status
        # over, or mark the cluster FAILED/DELETE_COMPLETE when the
        # stack no longer exists.
        try:
            stack = self.get_stacks[cluster.stack_id]
        except KeyError:
            cluster.status_reason = "Stack %s not found" % cluster.stack_id
            if cluster.status == "DELETE_IN_PROGRESS":
                cluster.status = cluster_status.DELETE_COMPLETE
            else:
                cluster.status = cluster.status.replace(
                    "IN_PROGRESS", "FAILED")
                cluster.status = cluster.status.replace(
                    "COMPLETE", "FAILED")
        else:
            if cluster.status != stack.stack_status:
                cluster.status = stack.stack_status
                cluster.status_reason = stack.stack_status_reason

    self.mock_driver.update_cluster_status.side_effect = (
        _mock_update_status)
def setUp(self):
    """Prepare five clusters (each with master/worker nodegroups) in
    transitional states, plus the mocked Heat client and cluster
    driver used by the periodic-task tests.
    """
    super(PeriodicTestCase, self).setUp()

    self.context = context.make_admin_context()

    # Trustee/trust data can be identical for all clusters.
    trust_attrs = {
        'trustee_username': '******',
        'trustee_password': '******',
        'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
    }

    # (cluster attribute number, heat stack id, starting status)
    cluster_specs = [
        (1, '11', cluster_status.CREATE_IN_PROGRESS),
        (2, '22', cluster_status.DELETE_IN_PROGRESS),
        (3, '33', cluster_status.UPDATE_IN_PROGRESS),
        (4, '44', cluster_status.DELETE_IN_PROGRESS),
        (5, '55', cluster_status.ROLLBACK_IN_PROGRESS),
    ]
    for num, stack_id, status in cluster_specs:
        trust_attrs.update({'id': num, 'stack_id': stack_id,
                            'uuid': uuidutils.generate_uuid(),
                            'status': status,
                            'status_reason': 'no change',
                            'keypair': 'keipair1',
                            'health_status': None})
        record = utils.get_test_cluster(**trust_attrs)
        ngs = utils.get_nodegroups_for_cluster()
        setattr(self, 'nodegroups%d' % num, [
            objects.NodeGroup(self.context, **ngs['master']),
            objects.NodeGroup(self.context, **ngs['worker']),
        ])
        setattr(self, 'cluster%d' % num,
                objects.Cluster(self.context, **record))

    # This is used to mock the get_cluster_nodegroups from
    # magnum.db.api.  It's not the greatest way to do it, but we have
    # to populate the dictionary at runtime (or have statically
    # defined uuids per nodegroup).
    global cluster_ngs
    cluster_ngs = {
        getattr(self, 'cluster%d' % num).uuid:
            getattr(self, 'nodegroups%d' % num)
        for num, _, _ in cluster_specs
    }

    # These tests are based on the basic behavior of our standard
    # Heat-based drivers, but drivers based on other orchestration
    # methods should generally behave in a similar fashion as far
    # as the actual calls go.  It is up to the driver implementor
    # to ensure their implementation of update_cluster_status
    # behaves as expected regardless of how the periodic updater
    # task works.
    self.mock_heat_client = mock.MagicMock()

    # NOTE(review): stack '22' reuses 'fake_reason_11'; kept verbatim
    # since existing assertions may depend on that exact string.
    stack_specs = [
        ('11', cluster_status.CREATE_COMPLETE, 'fake_reason_11'),
        ('22', cluster_status.DELETE_IN_PROGRESS, 'fake_reason_11'),
        ('33', cluster_status.UPDATE_COMPLETE, 'fake_reason_33'),
        ('55', cluster_status.ROLLBACK_COMPLETE, 'fake_reason_55'),
    ]
    self.get_stacks = {}
    for stack_id, stack_state, reason in stack_specs:
        stack = fake_stack(id=stack_id, stack_status=stack_state,
                           stack_status_reason=reason)
        self.get_stacks[stack_id] = stack
        # '11' -> self.stack1, '22' -> self.stack2, ... (no stack 4)
        setattr(self, 'stack%s' % stack_id[0], stack)

    self.mock_heat_client.stacks.list.return_value = [
        self.stack1, self.stack2, self.stack3, self.stack5]

    self.mock_driver = mock.MagicMock(spec=driver.Driver)

    def _mock_update_status(context, cluster):
        # Mimic the Heat driver's reconciliation: adopt the stack's
        # status when the stack exists; otherwise complete an
        # in-progress delete, or mark the cluster failed.
        stack = self.get_stacks.get(cluster.stack_id)
        if stack is None:
            cluster.status_reason = "Stack %s not found" % cluster.stack_id
            if cluster.status == "DELETE_IN_PROGRESS":
                cluster.status = cluster_status.DELETE_COMPLETE
            else:
                cluster.status = cluster.status.replace(
                    "IN_PROGRESS", "FAILED")
                cluster.status = cluster.status.replace(
                    "COMPLETE", "FAILED")
        elif cluster.status != stack.stack_status:
            cluster.status = stack.stack_status
            cluster.status_reason = stack.stack_status_reason

    self.mock_driver.update_cluster_status.side_effect = (
        _mock_update_status)
def setUp(self):
    """Prepare five clusters in transitional states plus the mocked
    Heat client and cluster driver shared by the periodic-task tests.
    """
    super(PeriodicTestCase, self).setUp()

    self.context = context.make_admin_context()

    # Trustee/trust data can be identical for all clusters.
    trust_attrs = {
        'trustee_username': '******',
        'trustee_password': '******',
        'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
    }

    # Cluster N gets stack id str(N * 11): '11', '22', ..., '55'.
    statuses = (cluster_status.CREATE_IN_PROGRESS,
                cluster_status.DELETE_IN_PROGRESS,
                cluster_status.UPDATE_IN_PROGRESS,
                cluster_status.DELETE_IN_PROGRESS,
                cluster_status.ROLLBACK_IN_PROGRESS)
    for num, status in enumerate(statuses, start=1):
        trust_attrs.update({'id': num,
                            'stack_id': str(num * 11),
                            'status': status,
                            'status_reason': 'no change'})
        record = utils.get_test_cluster(**trust_attrs)
        setattr(self, 'cluster%d' % num,
                objects.Cluster(self.context, **record))

    # These tests are based on the basic behavior of our standard
    # Heat-based drivers, but drivers based on other orchestration
    # methods should generally behave in a similar fashion as far
    # as the actual calls go.  It is up to the driver implementor
    # to ensure their implementation of update_cluster_status
    # behaves as expected regardless of how the periodic updater
    # task works.
    self.mock_heat_client = mock.MagicMock()

    # NOTE(review): stack '22' reuses 'fake_reason_11'; kept verbatim
    # since existing assertions may depend on that exact string.
    self.stack1 = fake_stack(
        id='11', stack_status=cluster_status.CREATE_COMPLETE,
        stack_status_reason='fake_reason_11')
    self.stack2 = fake_stack(
        id='22', stack_status=cluster_status.DELETE_IN_PROGRESS,
        stack_status_reason='fake_reason_11')
    self.stack3 = fake_stack(
        id='33', stack_status=cluster_status.UPDATE_COMPLETE,
        stack_status_reason='fake_reason_33')
    self.stack5 = fake_stack(
        id='55', stack_status=cluster_status.ROLLBACK_COMPLETE,
        stack_status_reason='fake_reason_55')

    # Stack '44' is deliberately absent so tests can exercise the
    # "stack not found" path below.
    self.get_stacks = {stack.id: stack
                       for stack in (self.stack1, self.stack2,
                                     self.stack3, self.stack5)}
    self.mock_heat_client.stacks.list.return_value = list(
        self.get_stacks.values())

    self.mock_driver = mock.MagicMock(spec=driver.Driver)

    def _mock_update_status(context, cluster):
        # Mimic the Heat driver's reconciliation: adopt the stack's
        # status when the stack exists; otherwise complete an
        # in-progress delete, or mark the cluster failed.
        stack = self.get_stacks.get(cluster.stack_id)
        if stack is None:
            cluster.status_reason = "Stack %s not found" % cluster.stack_id
            if cluster.status == "DELETE_IN_PROGRESS":
                cluster.status = cluster_status.DELETE_COMPLETE
            else:
                cluster.status = cluster.status.replace(
                    "IN_PROGRESS", "FAILED")
                cluster.status = cluster.status.replace(
                    "COMPLETE", "FAILED")
        elif cluster.status != stack.stack_status:
            cluster.status = stack.stack_status
            cluster.status_reason = stack.stack_status_reason

    self.mock_driver.update_cluster_status.side_effect = (
        _mock_update_status)