def test_NIC_adds_by_agent(self):
    """A NIC reported by the agent after registration shows up via the API.

    Registers a node with one interface, then sends an agent PUT whose
    metadata contains an extra interface, and checks that NodeNICsHandler
    returns both NICs with the reported speeds and empty network lists.
    """
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': '12345', 'current_speed': 1}])
    node = self.env.create_node(api=True, meta=meta)

    # Agent now reports one more interface than was registered.
    meta['interfaces'].append({'name': 'new_nic', 'mac': '643'})
    node_data = {'mac': node['mac'], 'is_agent': True, 'meta': meta}
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        json.dumps([node_data]),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)
    reported_nics = json.loads(resp.body)
    self.assertEquals(len(reported_nics), len(meta['interfaces']))

    for nic in meta['interfaces']:
        matching = [i for i in reported_nics if i['mac'] == nic['mac']]
        resp_nic = matching[0]
        self.assertEquals(resp_nic['mac'], nic['mac'])
        self.assertEquals(resp_nic['current_speed'],
                          nic.get('current_speed'))
        self.assertEquals(resp_nic['max_speed'], nic.get('max_speed'))
        # A freshly discovered NIC must have no networks attached.
        for conn in ('assigned_networks', 'allowed_networks'):
            self.assertEquals(resp_nic[conn], [])
def test_network_checking_fails_if_public_floating_not_on_one_nic(self):
    """Splitting 'public' and 'floating' across NICs must fail the check.

    Moves the 'public' network from its NIC onto the admin NIC (leaving
    'floating' behind) and verifies the cluster-changes task reports the
    expected error message.
    """
    self.find_net_by_name('public')["vlan_start"] = 111
    self.find_net_by_name('floating')["vlan_start"] = 111
    self.update_nova_networks_success(self.cluster.id, self.nets)

    node_db = self.env.nodes[0]
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node_db.id}),
        headers=self.default_headers)
    nics = json.loads(resp.body)

    # Locate the admin NIC, the NIC carrying the other networks, and the
    # 'public' network entry itself.
    for nic in nics:
        for net in nic['assigned_networks']:
            if net['name'] == 'fuelweb_admin':
                admin_nic = nic
            else:
                other_nic = nic
            if net['name'] == 'public':
                public = net

    # Move 'public' onto the admin NIC, away from 'floating'.
    other_nic['assigned_networks'].remove(public)
    admin_nic['assigned_networks'].append(public)

    resp = self.app.put(
        reverse('NodeNICsHandler', kwargs={'node_id': node_db.id}),
        json.dumps(nics),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)

    task = self.set_cluster_changes_w_error(self.cluster.id)
    self.assertIn(
        "Public and floating networks are not assigned to the "
        "same physical interface. These networks must be assigned "
        "to the same physical interface. Affected nodes:\nUntitled",
        task['message']
    )
def test_node_timestamp_updated_only_by_agent(self):
    """Only agent-originated PUTs may refresh a node's timestamp.

    A plain API update must leave the timestamp untouched; the same
    update flagged with 'is_agent' must advance it and apply the data.
    """
    node = self.env.create_node(api=False)
    original_ts = node.timestamp

    # Non-agent update: data accepted, timestamp unchanged.
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        json.dumps([
            {'mac': node.mac, 'status': 'discover',
             'manufacturer': 'old'}
        ]),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)
    node = self.db.query(Node).get(node.id)
    self.assertEquals(node.timestamp, original_ts)

    # Agent update: timestamp must move and the new data must stick.
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        json.dumps([
            {'mac': node.mac, 'status': 'discover',
             'manufacturer': 'new', 'is_agent': True}
        ]),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)
    node = self.db.query(Node).get(node.id)
    self.assertNotEquals(node.timestamp, original_ts)
    self.assertEquals('new', node.manufacturer)
def test_cluster_node_list_update(self):
    """PUTting a cluster's 'nodes' list replaces its membership.

    Assigning node1 then node2 must leave exactly one node attached to
    the cluster after each update.
    """
    node1 = self.env.create_node(api=False)
    node2 = self.env.create_node(api=False)
    cluster = self.env.create_cluster(api=False)

    resp = self.app.put(
        reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
        jsonutils.dumps({'nodes': [node1.id]}),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 200)

    members = self.db.query(Node).filter(Node.cluster == cluster).all()
    self.assertEqual(1, len(members))
    self.assertEqual(members[0].id, node1.id)

    # Replacing the list with node2 must drop node1's membership.
    resp = self.app.put(
        reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
        jsonutils.dumps({'nodes': [node2.id]}),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)

    members_query = self.db.query(Node).filter(Node.cluster == cluster)
    self.assertEqual(1, members_query.count())
def test_discovered_node_unified_name(self):
    """Node names are unified regardless of the MAC's letter case.

    Creates the node twice — once with the MAC upper-cased, once
    lower-cased — and expects the same 'Untitled (...)' name each time.
    """
    node_mac = self.env.generate_random_mac()

    def node_name_test(mac):
        self.env.create_node(
            api=True,
            **{'mac': mac}
        )
        node = json.loads(
            self.app.get(reverse('NodeCollectionHandler')).body
        )[0]
        # Intentionally compares against the original-case node_mac from
        # the closure, not the (possibly re-cased) argument: the name is
        # expected to be unified.
        self.assertEqual(node['name'],
                         'Untitled ({0})'.format(node_mac[-5:]))

    node_name_test(node_mac.upper())

    # Remove the node so it can be re-discovered with the other casing.
    node_id = json.loads(
        self.app.get(reverse('NodeCollectionHandler')).body
    )[0]['id']
    self.app.delete(
        reverse('NodeHandler', {'obj_id': node_id})
    )

    node_name_test(node_mac.lower())
def test_network_verify_if_old_task_is_running(self):
    """Verification is rejected (400) while a previous verify task runs."""
    resp = self.app.get(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': self.env.clusters[0].id}
        ),
        headers=self.default_headers
    )
    nets = resp.body

    # Simulate an unfinished verification started earlier.
    self.env.create_task(
        name="verify_networks",
        status=consts.TASK_STATUSES.running,
        cluster_id=self.env.clusters[0].id
    )

    resp = self.app.put(
        reverse(
            'NovaNetworkConfigurationVerifyHandler',
            kwargs={'cluster_id': self.env.clusters[0].id}),
        nets,
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)
def test_get_handler_with_invalid_data(self):
    """Invalid 'interfaces' metadata is rejected and leaves NICs empty.

    For each malformed payload the collection PUT must answer 400 and
    the node must still expose an empty NIC list.
    """
    meta = self.env.default_metadata()
    meta["interfaces"] = []
    node = self.env.create_node(api=True, meta=meta)

    invalid_payloads = [
        {'interfaces': None},
        {'interfaces': {}}
    ]
    for nic_meta in invalid_payloads:
        meta = self.env.default_metadata()
        meta.update(nic_meta)
        node_data = {'mac': node['mac'], 'is_agent': True, 'meta': meta}
        resp = self.app.put(
            reverse('NodeCollectionHandler'),
            json.dumps([node_data]),
            expect_errors=True,
            headers=self.default_headers
        )
        self.assertEquals(resp.status, 400)

        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers
        )
        self.assertEquals(resp.status, 200)
        self.assertEquals(json.loads(resp.body), [])
def test_network_verify_if_old_task_is_running(self, macs_mock):
    """Verification is rejected (400) while a previous verify task runs.

    Variant with the master-node MAC discovery mocked out.
    """
    macs_mock.return_value = self.master_macs

    resp = self.app.get(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': self.env.clusters[0].id}
        ),
        headers=self.default_headers
    )
    nets = resp.body

    # Simulate an unfinished verification started earlier.
    self.env.create_task(
        name="verify_networks",
        status="running",
        cluster_id=self.env.clusters[0].id
    )

    resp = self.app.put(
        reverse(
            'NovaNetworkConfigurationVerifyHandler',
            kwargs={'cluster_id': self.env.clusters[0].id}),
        nets,
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEquals(400, resp.status)
def test_put_handler_with_one_node(self):
    """Reassigning networks between a node's NICs via collection PUT.

    Moves every assigned network off the primary NIC onto the other one
    and checks the handler echoes the submitted configuration back.
    """
    cluster = self.env.create_cluster(api=True)
    mac = '123'
    meta = {'interfaces': [
        {'name': 'eth0', 'mac': mac},
        {'name': 'eth1', 'mac': '654'},
    ]}
    node = self.env.create_node(api=True, meta=meta, mac=mac,
                                cluster_id=cluster['id'])

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)
    nics = json.loads(resp.body)

    # Networks currently assigned to the primary (mac-matching) NIC.
    a_nets = [n for n in nics if n['mac'] == mac][0]['assigned_networks']
    for resp_nic in nics:
        if resp_nic['mac'] == mac:
            resp_nic['assigned_networks'] = []
        else:
            resp_nic['assigned_networks'] = a_nets

    node_json = {'id': node['id'], 'interfaces': nics}
    resp = self.app.put(
        reverse('NodeCollectionNICsHandler'),
        json.dumps([node_json]),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)
    self.assertEquals(json.loads(resp.body), [node_json])
def test_NIC_adds_by_agent(self):
    """A NIC reported by the agent after registration shows up via the API.

    NodeAgentHandler variant: the agent PUT adds one interface and the
    NICs endpoint must return both, including speed and state fields.
    """
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': '12345', 'current_speed': 1,
         'state': 'up'}])
    node = self.env.create_node(api=True, meta=meta)

    # Agent now reports one more interface than was registered.
    meta['interfaces'].append({'name': 'new_nic', 'mac': '643'})
    node_data = {'mac': node['mac'], 'meta': meta}
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(node_data),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(len(resp.json_body), len(meta['interfaces']))

    for nic in meta['interfaces']:
        matching = [i for i in resp.json_body if i['mac'] == nic['mac']]
        resp_nic = matching[0]
        self.assertEqual(resp_nic['mac'], nic['mac'])
        self.assertEqual(resp_nic['current_speed'],
                         nic.get('current_speed'))
        self.assertEqual(resp_nic['max_speed'], nic.get('max_speed'))
        self.assertEqual(resp_nic['state'], nic.get('state'))
        # A freshly discovered NIC must have no networks attached.
        for conn in ('assigned_networks', ):
            self.assertEqual(resp_nic[conn], [])
def test_attributes_update(self):
    """Cluster attributes PUT: editable updates OK, invalid shapes 400.

    'editable' updates are persisted; touching 'generated' or sending a
    non-dict 'editable' must be rejected with 400.
    """
    cluster_id = self.env.create_cluster(api=True)["id"]

    resp = self.app.get(
        reverse("ClusterAttributesHandler",
                kwargs={"cluster_id": cluster_id}),
        headers=self.default_headers
    )
    self.assertEquals(200, resp.status)

    resp = self.app.put(
        reverse("ClusterAttributesHandler",
                kwargs={"cluster_id": cluster_id}),
        params=json.dumps({"editable": {"foo": "bar"}}),
        headers=self.default_headers,
    )
    self.assertEquals(200, resp.status)
    attrs = self.db.query(Attributes).filter(
        Attributes.cluster_id == cluster_id).first()
    self.assertEquals("bar", attrs.editable["foo"])

    # 400 on generated update
    resp = self.app.put(
        reverse("ClusterAttributesHandler",
                kwargs={"cluster_id": cluster_id}),
        params=json.dumps({"generated": {"foo": "bar"}}),
        headers=self.default_headers,
        expect_errors=True,
    )
    self.assertEquals(400, resp.status)

    # 400 if editable is not dict
    resp = self.app.put(
        reverse("ClusterAttributesHandler",
                kwargs={"cluster_id": cluster_id}),
        params=json.dumps({"editable": ["foo", "bar"]}),
        headers=self.default_headers,
        expect_errors=True,
    )
    self.assertEquals(400, resp.status)
def test_node_agent_api(self):
    """Agent PUT updates an existing node; POST registers a new one."""
    self.env.create_node(
        api=False,
        status='provisioning',
        meta=self.env.default_metadata()
    )
    node_db = self.env.nodes[0]

    # Update the existing node through the agent endpoint.
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(
            {'mac': node_db.mac,
             'status': 'discover', 'manufacturer': 'new'}
        ),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)

    # Register a brand-new node; expect 201 Created.
    node_id = '080000000003'
    resp = self.app.post(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps({'id': node_id,
                         'mac': self.env.generate_random_mac(),
                         'status': 'discover'}),
        headers=self.default_headers)
    self.assertEqual(201, resp.status_code)
def test_put_handler_with_one_node(self):
    """Reassigning networks between a node's NICs via collection PUT.

    Moves every assigned network off the primary NIC onto the other
    one(s), sorts each resulting list, and checks the handler echoes the
    submitted configuration back.
    """
    cluster = self.env.create_cluster(api=True)
    mac = self.env.generate_random_mac()
    meta = {}
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': mac},
        {'name': 'eth1', 'mac': self.env.generate_random_mac()}])
    node = self.env.create_node(api=True, meta=meta, mac=mac,
                                cluster_id=cluster['id'])

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEquals(resp.status_code, 200)
    nics = json.loads(resp.body)

    # Networks currently assigned to the primary (mac-matching) NIC.
    a_nets = [n for n in nics if n['mac'] == mac][0]['assigned_networks']
    for resp_nic in nics:
        if resp_nic['mac'] == mac:
            resp_nic['assigned_networks'] = []
        else:
            resp_nic['assigned_networks'].extend(a_nets)
            resp_nic['assigned_networks'].sort()

    nodes_list = [{'id': node['id'], 'interfaces': nics}]
    resp = self.app.put(
        reverse('NodeCollectionNICsHandler'),
        json.dumps(nodes_list),
        headers=self.default_headers)
    self.assertEquals(resp.status_code, 200)
    self.assertEquals(json.loads(resp.body), nodes_list)
def test_network_assignment_when_node_added(self):
    """Adding a node to a cluster auto-assigns networks to its NICs.

    The admin NIC (matching the node's registration MAC) must carry
    'fuelweb_admin'; the other NIC must carry 'public'; every NIC gets
    at least one network.
    """
    cluster = self.env.create_cluster(api=True)
    mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta,
        [{'name': 'eth0', 'mac': mac},
         {'name': 'eth1', 'mac': self.env.generate_random_mac()}])
    node = self.env.create_node(api=True, meta=meta, mac=mac)

    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'id': node['id'],
                          'cluster_id': cluster['id']}]),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    for resp_nic in resp.json_body:
        net_names = [net['name']
                     for net in resp_nic['assigned_networks']]
        if resp_nic['mac'] == mac:
            self.assertTrue("fuelweb_admin" in net_names)
        else:
            self.assertTrue("public" in net_names)
        self.assertGreater(len(resp_nic['assigned_networks']), 0)
def test_network_changing_adds_pending_changes(self):
    """Editing network configuration records a 'networks' pending change."""
    cluster = self.env.create_cluster(api=True)
    cluster_db = self.env.clusters[0]

    # Start from a clean slate: no pending changes at all.
    objects.Cluster.clear_pending_changes(cluster_db)
    self.assertEqual(len(self.db.query(ClusterChanges).all()), 0)

    resp = self.app.get(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': cluster['id']}),
        headers=self.default_headers
    )
    net_id = resp.json_body['networks'][0]["id"]

    resp = self.app.put(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': cluster['id']}),
        jsonutils.dumps({'networks': [{
            "id": net_id, "access": "restricted"}
        ]}),
        headers=self.default_headers
    )

    pending_changes = self.db.query(ClusterChanges).filter_by(
        name="networks"
    ).all()
    self.assertEqual(len(pending_changes), 1)
def test_forced_task_deletion(self):
    """A running task can only be deleted with the force flag.

    DELETE with force=0 must answer 400; force=1 must answer 204.
    """
    self.env.create(
        nodes_kwargs=[
            {"roles": ["controller"]}
        ]
    )
    running_task = Task(
        name='deployment',
        cluster=self.env.clusters[0],
        status='running',
        progress=10
    )
    self.db.add(running_task)
    self.db.commit()

    # Unforced deletion of a running task is refused.
    resp = self.app.delete(
        reverse(
            'TaskHandler',
            kwargs={'obj_id': running_task.id}
        ) + "?force=0",
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status, 400)

    # Forced deletion succeeds.
    resp = self.app.delete(
        reverse(
            'TaskHandler',
            kwargs={'obj_id': running_task.id}
        ) + "?force=1",
        headers=self.default_headers
    )
    self.assertEquals(resp.status, 204)
def test_attributes_update_patch(self):
    """PATCHing 'editable' merges new keys without dropping existing ones."""
    cluster_id = self.env.create_cluster(api=True)['id']
    cluster_db = self.env.clusters[0]

    resp = self.app.get(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        headers=self.default_headers
    )
    self.assertEquals(200, resp.status_code)

    resp = self.app.patch(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        params=json.dumps({
            'editable': {
                "foo": "bar"
            },
        }),
        headers=self.default_headers
    )
    self.assertEquals(200, resp.status_code)

    attrs = objects.Cluster.get_attributes(cluster_db)
    self.assertEquals("bar", attrs.editable["foo"])
    # PATCH is a merge: removing our key must still leave the
    # pre-existing editable attributes behind.
    attrs.editable.pop('foo')
    self.assertNotEqual(attrs.editable, {})
def check_info_handler(self, handler_name, get_info):
    """Exercise PUT/GET/DELETE round-trip of an orchestrator-info handler.

    :param handler_name: reversible handler name to hit
    :param get_info: callable returning the currently stored info
    """
    # updating provisioning info
    orchestrator_data = {"field": "test"}
    put_resp = self.app.put(
        reverse(handler_name,
                kwargs={'cluster_id': self.cluster.id}),
        json.dumps(orchestrator_data),
        headers=self.default_headers)
    self.assertEquals(put_resp.status, 200)
    self.assertEquals(get_info(), orchestrator_data)

    # getting provisioning info
    get_resp = self.app.get(
        reverse(handler_name,
                kwargs={'cluster_id': self.cluster.id}),
        headers=self.default_headers)
    self.assertEquals(get_resp.status, 200)
    self.datadiff(orchestrator_data, json.loads(get_resp.body))

    # deleting provisioning info
    delete_resp = self.app.delete(
        reverse(handler_name,
                kwargs={'cluster_id': self.cluster.id}),
        headers=self.default_headers)
    self.assertEquals(delete_resp.status, 202)
    self.assertEqual(get_info(), {})
def test_network_assignment_when_node_added(self):
    """Adding a node to a cluster auto-assigns networks to its NICs.

    Variant parsing the response body explicitly with jsonutils.
    """
    cluster = self.env.create_cluster(api=True)
    mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta,
        [{"name": "eth0", "mac": mac},
         {"name": "eth1", "mac": self.env.generate_random_mac()}]
    )
    node = self.env.create_node(api=True, meta=meta, mac=mac)

    resp = self.app.put(
        reverse("NodeCollectionHandler"),
        jsonutils.dumps([{"id": node["id"],
                          "cluster_id": cluster["id"]}]),
        headers=self.default_headers,
    )
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse("NodeNICsHandler", kwargs={"node_id": node["id"]}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    for resp_nic in jsonutils.loads(resp.body):
        net_names = [net["name"]
                     for net in resp_nic["assigned_networks"]]
        if resp_nic["mac"] == mac:
            self.assertTrue("fuelweb_admin" in net_names)
        else:
            self.assertTrue("public" in net_names)
        self.assertGreater(len(resp_nic["assigned_networks"]), 0)
def test_roles_add_duplicated_to_db_directly(self):
    """Inserting a duplicate role directly into the DB must be rejected.

    Adds a Role row whose name duplicates an existing release role and
    expects an IntegrityError; the release's role list must then be
    unchanged when re-read through the API.
    """
    self.env.create_release()
    resp = self.app.get(
        reverse('ReleaseCollectionHandler'),
        headers=self.default_headers
    )
    release_json = json.loads(resp.body)[0]
    old_roles = list(release_json["roles"])

    # Duplicate the first existing role name for the same release.
    role = Role(name=old_roles[0], release_id=release_json["id"])
    added = True
    try:
        db().add(role)
        db().commit()
    except IntegrityError:
        # Fix: roll back through the session factory, consistent with
        # the db().add(...) / db().commit() calls above (previously
        # db.rollback(), which relied on scoped_session attribute
        # proxying).
        db().rollback()
        added = False
    self.assertFalse(added)

    # The API must still report the original role list.
    resp = self.app.get(
        reverse('ReleaseCollectionHandler'),
        headers=self.default_headers
    )
    release_json = json.loads(resp.body)[0]
    new_roles = list(release_json["roles"])
    self.assertEqual(old_roles, new_roles)
def test_NIC_updates_by_agent(self):
    """An agent PUT with changed NIC data replaces the stored NIC info.

    The NIC keeps its MAC but gets a new name and speeds; the API must
    return exactly one NIC reflecting the new values.
    """
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': '12345', 'current_speed': 1}])
    node = self.env.create_node(api=True, meta=meta)

    # Same MAC, different name/speeds — simulates hardware re-detection.
    new_meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(new_meta, [
        {'name': 'new_nic', 'mac': '12345', 'current_speed': 10,
         'max_speed': 10}])
    node_data = {'mac': node['mac'], 'is_agent': True, 'meta': new_meta}
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        json.dumps([node_data]),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)
    nics = json.loads(resp.body)
    self.assertEquals(len(nics), 1)

    resp_nic = nics[0]
    nic = new_meta['interfaces'][0]
    self.assertEquals(resp_nic['mac'], nic['mac'])
    self.assertEquals(resp_nic['current_speed'], nic['current_speed'])
    self.assertEquals(resp_nic['max_speed'], nic['max_speed'])
    for conn in ('assigned_networks', 'allowed_networks'):
        self.assertEquals(resp_nic[conn], [])
def test_deletion_during_deployment(self):
    """Deleting a cluster mid-deployment eventually removes it.

    Starts a deployment, issues the cluster DELETE, then polls until the
    deletion task disappears (or a timeout elapses) and asserts the
    cluster row is gone.
    """
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[{"status": "ready", "pending_addition": True}])
    cluster_id = self.env.clusters[0].id

    resp = self.app.put(
        reverse("ClusterChangesHandler",
                kwargs={"cluster_id": cluster_id}),
        headers=self.default_headers
    )
    deploy_uuid = json.loads(resp.body)["uuid"]

    resp = self.app.delete(
        reverse("ClusterHandler", kwargs={"cluster_id": cluster_id}),
        headers=self.default_headers
    )

    # Poll until the deletion task vanishes; bail out after `timeout`
    # seconds rather than hanging the suite.
    timeout = 120
    timer = time.time()
    while True:
        task_deploy = self.db.query(Task).filter_by(
            uuid=deploy_uuid).first()
        task_delete = self.db.query(Task).filter_by(
            cluster_id=cluster_id, name="cluster_deletion").first()
        if not task_delete:
            break
        # Force fresh reads on the next iteration.
        self.db.expire(task_deploy)
        self.db.expire(task_delete)
        if (time.time() - timer) > timeout:
            break
        time.sleep(0.24)

    self.assertIsNone(self.db.query(Cluster).get(cluster_id))
def test_stats_sending_enabled(self):
    """must_send_stats follows the saved master-node statistics settings."""
    self.assertEqual(StatsSender().must_send_stats(), False)

    resp = self.app.get(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    data = resp.json_body

    # emulate user confirmed settings in UI
    data["settings"]["statistics"]["user_choice_saved"]["value"] = True
    resp = self.app.put(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers,
        params=jsonutils.dumps(data)
    )
    self.assertEqual(200, resp.status_code)
    self.assertEqual(StatsSender().must_send_stats(), True)

    # emulate user disabled statistics sending
    data["settings"]["statistics"]["send_anonymous_statistic"]["value"] = \
        False
    resp = self.app.put(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers,
        params=jsonutils.dumps(data)
    )
    self.assertEqual(200, resp.status_code)
    self.assertEqual(StatsSender().must_send_stats(), False)
def test_partial_user_contacts_info(self):
    """Saved contact settings surface in the installation info.

    Enables contact-info sharing with only name and email set and
    checks get_installation_info reports them with an empty company.
    """
    resp = self.app.get(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    data = resp.json_body

    # emulate user enabled contact info sending to support team
    data["settings"]["statistics"]["user_choice_saved"]["value"] = True
    data["settings"]["statistics"]["send_user_info"]["value"] = \
        True
    name = "user"
    email = "*****@*****.**"
    data["settings"]["statistics"]["name"]["value"] = name
    data["settings"]["statistics"]["email"]["value"] = email

    resp = self.app.put(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers,
        params=jsonutils.dumps(data)
    )
    self.assertEqual(200, resp.status_code)

    self.assertDictEqual(
        InstallationInfo().get_installation_info()['user_information'],
        {
            'contact_info_provided': True,
            'name': name,
            'email': email,
            'company': ''
        }
    )
def test_attributes_set_defaults(self):
    """The defaults handler restores release-defined editable attributes."""
    cluster = self.env.create_cluster(api=True)
    cluster_db = self.env.clusters[0]

    # Change editable attributes.
    resp = self.app.put(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster['id']}),
        params=json.dumps({
            'editable': {
                "foo": "bar"
            },
        }),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEquals(200, resp.status_code)
    attrs = objects.Cluster.get_attributes(cluster_db)
    self.assertEquals("bar", attrs.editable["foo"])

    # Set attributes to defaults.
    resp = self.app.put(
        reverse(
            'ClusterAttributesDefaultsHandler',
            kwargs={'cluster_id': cluster['id']}),
        headers=self.default_headers
    )
    self.assertEquals(200, resp.status_code)

    # The response must match the release's default editable metadata.
    release = self.db.query(Release).get(
        cluster['release_id']
    )
    self.assertEquals(
        json.loads(resp.body)['editable'],
        release.attributes_metadata['editable']
    )
def test_get_handler_with_incompleted_iface_data(self):
    """Interfaces missing a name or MAC are silently dropped.

    The agent PUT still succeeds (200), but none of the incomplete
    interfaces appear in the node's NIC list.
    """
    meta = self.env.default_metadata()
    meta["interfaces"] = []
    node = self.env.create_node(api=True, meta=meta)

    incomplete_payloads = [
        {'interfaces': [{'name': '', 'mac': '00:00:00'}]},
        {'interfaces': [{'name': 'eth0', 'mac': ''}]},
        {'interfaces': [{'mac': '00:00:00'}]},
        {'interfaces': [{'name': 'eth0'}]}
    ]
    for nic_meta in incomplete_payloads:
        meta = self.env.default_metadata()
        meta.update(nic_meta)
        node_data = {'mac': node['mac'], 'meta': meta}
        resp = self.app.put(
            reverse('NodeAgentHandler'),
            jsonutils.dumps(node_data),
            expect_errors=True,
            headers=self.default_headers
        )
        self.assertEqual(resp.status_code, 200)

        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers
        )
        self.assertEqual(resp.json_body, [])
def test_NIC_updates_by_agent(self):
    """An agent PUT with changed NIC data replaces the stored NIC info.

    NodeAgentHandler variant: the NIC keeps its MAC but gets a new name,
    speeds and state; the API must return exactly one NIC reflecting the
    new values.
    """
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': '12345', 'current_speed': 1,
         'state': 'up'}])
    node = self.env.create_node(api=True, meta=meta)

    # Same MAC, different name/speeds/state — simulates re-detection.
    new_meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(new_meta, [
        {'name': 'new_nic', 'mac': '12345', 'current_speed': 10,
         'max_speed': 10, 'state': 'down'}])
    node_data = {'mac': node['mac'], 'meta': new_meta}
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(node_data),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(len(resp.json_body), 1)

    resp_nic = resp.json_body[0]
    nic = new_meta['interfaces'][0]
    self.assertEqual(resp_nic['mac'], nic['mac'])
    self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
    self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
    self.assertEqual(resp_nic['state'], nic['state'])
    for conn in ('assigned_networks', ):
        self.assertEqual(resp_nic[conn], [])
def test_release_put_orchestrator_data_w_masks(self):
    """{MASTER_IP} placeholders in orchestrator data are expanded on read."""
    release = self.env.create_release(api=False)
    orchestrator_data = {
        'repo_metadata': {
            '5.1': 'http://{MASTER_IP}:8080/centos/x86_64',
            '5.1-user': '******',
        },
        'puppet_modules_source': 'rsync://{MASTER_IP}:/puppet/modules/',
        'puppet_manifests_source':
            'rsync://{MASTER_IP}:/puppet/manifests/'
    }

    resp = self.app.put(
        reverse('ReleaseHandler', kwargs={'obj_id': release.id}),
        params=jsonutils.dumps(
            {'orchestrator_data': orchestrator_data}),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)

    resp = self.app.get(
        reverse('ReleaseHandler', kwargs={'obj_id': release.id}),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)

    # On read-back every {MASTER_IP} mask must be substituted.
    orchestrator_data = resp.json_body['orchestrator_data']
    self.assertEqual(orchestrator_data['repo_metadata'], {
        '5.1': 'http://127.0.0.1:8080/centos/x86_64',
        '5.1-user': '******'})
    self.assertEqual(
        orchestrator_data['puppet_modules_source'],
        'rsync://127.0.0.1:/puppet/modules/')
    self.assertEqual(
        orchestrator_data['puppet_manifests_source'],
        'rsync://127.0.0.1:/puppet/manifests/')
def test_roles_failed_to_delete_assigned(self):
    """A role assigned to nodes cannot be removed from its release."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"status": "ready", "roles": ["controller"]}
        ]
    )

    resp = self.app.get(
        reverse('ReleaseCollectionHandler'),
        headers=self.default_headers
    )
    release_json = json.loads(resp.body)[0]

    # Try to publish the release without the in-use 'controller' role.
    remaining_roles = set(release_json["roles"])
    remaining_roles.remove("controller")
    release_json["roles"] = list(remaining_roles)

    resp = self.app.put(
        reverse(
            'ReleaseHandler',
            kwargs={
                "release_id": release_json["id"]
            }
        ),
        json.dumps(release_json),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status, 400)
    self.assertEqual(
        resp.body,
        "Cannot delete roles already assigned to nodes: controller"
    )
def test_deletion_during_deployment(self):
    """Deleting a cluster mid-deployment eventually removes it.

    Uses a non-ASCII cluster name; polls for cluster removal and raises
    on timeout instead of silently passing.
    """
    self.env.create(
        cluster_kwargs={
            "name": u"Вася"
        },
        nodes_kwargs=[
            {"status": "ready", "pending_addition": True},
        ]
    )
    cluster_id = self.env.clusters[0].id

    self.app.put(
        reverse(
            'ClusterChangesHandler',
            kwargs={'cluster_id': cluster_id}),
        headers=self.default_headers
    )
    self.app.delete(
        reverse(
            'ClusterHandler',
            kwargs={'obj_id': cluster_id}),
        headers=self.default_headers
    )

    # Poll for removal; fail loudly if it takes too long.
    timeout = 10
    timer = time.time()
    while True:
        if not self.db.query(Cluster).get(cluster_id):
            break
        if (time.time() - timer) > timeout:
            raise Exception("Cluster deletion timeout")
        time.sleep(0.24)

    self.assertIsNone(self.db.query(Cluster).get(cluster_id))
def test_get_handler_with_invalid_speed_data(self):
    """Invalid NIC speed values are normalized to None, not rejected.

    Negative, float and string speeds must all be accepted (200) with
    both speed fields nulled in the stored interface.
    """
    meta = self.env.default_metadata()
    meta["interfaces"] = []
    node = self.env.create_node(api=True, meta=meta)

    bad_speed_payloads = [
        {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                         'max_speed': -100}]},
        {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                         'current_speed': -100}]},
        {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                         'current_speed': '100'}]},
        {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                         'max_speed': 10.0}]},
        {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                         'max_speed': '100'}]},
        {'interfaces': [{'name': 'eth0', 'mac': '00:00:00',
                         'current_speed': 10.0}]}
    ]
    for nic_meta in bad_speed_payloads:
        meta = self.env.default_metadata()
        meta.update(nic_meta)
        node_data = {'mac': node['mac'], 'is_agent': True, 'meta': meta}
        resp = self.app.put(
            reverse('NodeCollectionHandler'),
            json.dumps([node_data]),
            expect_errors=True,
            headers=self.default_headers
        )
        self.assertEquals(resp.status, 200)
        ifaces = json.loads(resp.body)[0]["meta"]["interfaces"]
        self.assertEquals(
            ifaces,
            [
                {'name': 'eth0', 'mac': '00:00:00',
                 'max_speed': None, 'current_speed': None}
            ]
        )
def test_deletion_cluster_task_manager(self):
    """Cluster deletion finishes, notifies, and leaves no tasks behind."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"status": "ready", "progress": 100},
            {"roles": ["compute"], "status": "ready", "progress": 100},
            {"roles": ["compute"], "pending_addition": True},
        ]
    )
    cluster_id = self.env.clusters[0].id
    cluster_name = self.env.clusters[0].name

    resp = self.app.delete(
        reverse(
            'ClusterHandler',
            kwargs={'cluster_id': cluster_id}),
        headers=self.default_headers
    )
    self.assertEquals(202, resp.status)

    # Wait for the cluster row to disappear; a refresh failure means it
    # was deleted under us, which is the success condition.
    timer = time.time()
    timeout = 15
    clstr = self.db.query(Cluster).get(cluster_id)
    while clstr:
        time.sleep(1)
        try:
            self.db.refresh(clstr)
        except Exception:
            break
        if time.time() - timer > timeout:
            raise Exception("Cluster deletion seems to be hanged")

    # A success notification must have been emitted...
    notification = self.db.query(Notification)\
        .filter(Notification.topic == "done")\
        .filter(Notification.message == "Environment '%s' and all its "
                "nodes are deleted" % cluster_name).first()
    self.assertIsNotNone(notification)

    # ...and no tasks may remain.
    self.assertEqual(self.db.query(Task).all(), [])
def test_delete_bond_and_networks_state_on_unassigmnet(self):
    """Test verifies that
    1. bond configuration will be deleted
    2. network unassigned from node interfaces
    when node unnasigned from cluster
    """
    cluster = self.env.create(nodes_kwargs=[{}])
    node = self.env.nodes[0]

    # Give the node a bond over all of its physical NICs.
    node.bond_interfaces.append(
        NodeBondInterface(name='ovs-bond0',
                          slaves=node.nic_interfaces))
    self.db.flush()

    resp = self.app.post(
        reverse('NodeUnassignmentHandler',
                kwargs={'cluster_id': cluster['id']}),
        json.dumps([{'id': node.id}]),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    # Bond gone, and every remaining interface stripped of networks.
    self.assertEqual(node.bond_interfaces, [])
    for interface in node.interfaces:
        self.assertEqual(interface.assigned_networks_list, [])
def test_network_verify_fails_if_untagged_intersection(self, mocked_rpc):
    """Untagged networks sharing a NIC must fail verification locally.

    Clearing the storage VLAN makes it collide with other untagged
    networks; the task must error without ever reaching RPC.
    """
    resp = self.app.get(reverse(
        'NovaNetworkConfigurationHandler',
        kwargs={'cluster_id': self.env.clusters[0].id}),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    nets = jsonutils.loads(resp.body)

    # Make 'storage' untagged so it intersects on the same interface.
    for net in nets['networks']:
        if net['name'] in ('storage', ):
            net['vlan_start'] = None

    task = self.env.launch_verify_networks(nets)
    self.env.wait_error(task, 30)
    self.assertIn(
        'Some untagged networks are assigned to the same physical '
        'interface. You should assign them to different physical '
        'interfaces. Affected:\n', task.message)
    for n in self.env.nodes:
        self.assertIn('"storage"', task.message)

    # Validation failed before any RPC call was made.
    self.assertEqual(mocked_rpc.called, False)
def test_vlan_manager(self):
    """Serialized facts reflect the VlanManager network configuration."""
    cluster = self.create_env('multinode')

    # Switch the cluster to VlanManager via the API.
    url = reverse('NovaNetworkConfigurationHandler',
                  kwargs={'cluster_id': cluster.id})
    self.app.put(url,
                 json.dumps({'net_manager': 'VlanManager'}),
                 headers=self.default_headers,
                 expect_errors=False)

    facts = self.serializer.serialize(cluster, cluster.nodes)
    for fact in facts:
        self.assertEquals(fact['vlan_interface'], 'eth0')
        self.assertEquals(fact['fixed_interface'], 'eth0')
        self.assertEquals(
            fact['novanetwork_parameters']['network_manager'],
            'VlanManager')
        self.assertEquals(
            fact['novanetwork_parameters']['num_networks'], 1)
        self.assertEquals(
            fact['novanetwork_parameters']['vlan_start'], 103)
        self.assertEquals(
            fact['novanetwork_parameters']['network_size'], 256)
def test_getting_default_nic_information_for_node(self):
    """NodeNICsDefaultHandler returns all of the node's interfaces."""
    cluster = self.env.create_cluster(api=True)
    macs = ('123', 'abc')
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': macs[0]},
        {'name': 'eth1', 'mac': macs[1]}
    ])
    node = self.env.create_node(api=True, meta=meta, mac=macs[0],
                                cluster_id=cluster['id'])

    resp = self.app.get(
        reverse('NodeNICsDefaultHandler',
                kwargs={'node_id': node['id']}),
        headers=self.default_headers)

    resp_macs = [iface["mac"] for iface in json.loads(resp.body)]
    self.assertEquals(resp.status, 200)
    self.assertItemsEqual(macs, resp_macs)
def test_all_api_urls_404_or_405(self):
    """Handlers for missing objects answer 404/405 for every HTTP verb."""
    urls = {
        'ClusterHandler': {'cluster_id': 1},
        'NodeHandler': {'node_id': 1},
        'ReleaseHandler': {'release_id': 1},
    }
    for handler, kwargs in urls.items():
        test_url = reverse(handler, kwargs)
        # None of these objects exist, so every method must 404 (or 405
        # if the method itself is not allowed on the resource).
        for do_request in (self.app.get, self.app.delete,
                           self.app.put, self.app.post):
            resp = do_request(test_url, expect_errors=True)
            self.assertTrue(resp.status in [404, 405])
def test_node_get_with_cluster_and_assigned_ip_addrs(self):
    """Listing nodes works after management IPs have been assigned."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"pending_addition": True, "api": True},
            {"pending_addition": True, "api": True}
        ]
    )

    # Assign management-network IPs to both nodes.
    self.env.network_manager.assign_ips(
        [n.id for n in self.env.nodes],
        "management"
    )

    resp = self.app.get(
        reverse('NodeCollectionHandler'),
        headers=self.default_headers
    )
    self.assertEquals(200, resp.status)
    self.assertEquals(2, len(json.loads(resp.body)))
def test_spawn_vms(self):
    """SpawnVmsHandler starts a spawn_vms task with two subtasks."""
    cluster = self.env.create(
        nodes_kwargs=[
            {"status": "ready",
             "pending_addition": True,
             "roles": ["virt"]},
        ]
    )
    # Give the virt node a VM definition to spawn.
    cluster.nodes[0].vms_conf = [{'id': 1, 'cluster_id': cluster.id}]

    resp = self.app.put(
        reverse(
            'SpawnVmsHandler',
            kwargs={'cluster_id': cluster.id}),
        headers=self.default_headers
    )

    task_deploy = objects.Task.get_by_uuid(resp.json_body['uuid'])
    self.assertEqual(task_deploy.name, consts.TASK_NAMES.spawn_vms)
    self.assertNotEqual(task_deploy.status, consts.TASK_STATUSES.error)
    self.assertEqual(len(task_deploy.subtasks), 2)
def test_network_verify_fails_if_admin_intersection(
        self, mocked_rpc, macs_mock):
    """Verification must fail when a network overlaps the admin CIDR."""
    macs_mock.return_value = self.master_macs
    resp = self.app.get(
        reverse('NovaNetworkConfigurationHandler',
                kwargs={'cluster_id': self.env.clusters[0].id}),
        headers=self.default_headers)
    self.assertEquals(200, resp.status)
    net_config = json.loads(resp.body)
    admin_ng = self.env.network_manager.get_admin_network_group()
    # Force a network onto the admin CIDR to provoke the intersection.
    net_config['networks'][-2]['cidr'] = admin_ng.cidr
    task = self.env.launch_verify_networks(net_config)
    self.env.wait_error(task, 30)
    self.assertIn("Address space intersection between networks:\n",
                  task.message)
    self.assertIn("admin (PXE)", task.message)
    self.assertIn("fixed", task.message)
    # The check must fail before any RPC call is made.
    self.assertEquals(mocked_rpc.called, False)
def test_node_get_with_cluster(self):
    """Filtering the node collection by cluster_id returns only members."""
    self.env.create(
        cluster_kwargs={"api": False},
        nodes_kwargs=[
            {"cluster_id": None},  # unassigned node, must be filtered out
            {},                    # node attached to the created cluster
        ]
    )
    cluster = self.env.clusters[0]
    resp = self.app.get(
        reverse('NodeCollectionHandler'),
        params={'cluster_id': cluster.id},
        headers=self.default_headers
    )
    self.assertEquals(200, resp.status)
    listed = json.loads(resp.body)
    self.assertEquals(1, len(listed))
    self.assertEquals(self.env.nodes[1].id, listed[0]['id'])
def test_version_handler(self):
    """VersionHandler merges release version files into its payload."""
    # Fake a single release-versions YAML file on disk.
    patched_env = nested(
        patch('nailgun.utils.glob.glob',
              Mock(return_value=["test.yaml"])),
        patch('__builtin__.open',
              mock_open(read_data='test_data'), create=True))
    with patched_env:
        resp = self.app.get(reverse('VersionHandler'),
                            headers=self.default_headers)
        self.assertEqual(200, resp.status_code)
        self.assertEqual(
            resp.json_body,
            {
                "release": "0.1b",
                "nailgun_sha": "12345",
                "astute_sha": "Unknown build",
                "fuellib_sha": "Unknown build",
                "ostf_sha": "Unknown build",
                "auth_required": True,
                "release_versions": {"test": "test_data"},
            })
def test_do_not_create_notification_if_disks_meta_is_empty(self):
    """Agent updates with empty 'disks' meta must not spam notifications."""

    def notifications_for(**kwargs):
        # Count notifications matching the given filter.
        return objects.NotificationCollection.count(
            objects.NotificationCollection.filter_by(None, **kwargs))

    # add node to environment: this makes us possible to reach
    # buggy code
    self.env.create(nodes_kwargs=[
        {'roles': ['controller'], 'pending_addition': True},
    ])

    # prepare data to put
    node_db = self.env.nodes[0]
    node_db.meta['disks'] = []
    payload = {
        'id': node_db.id,
        'meta': node_db.meta,
        'mac': node_db.mac,
        'status': node_db.status,
    }

    # get node info
    before_count = notifications_for(node_id=payload['id'])

    # put new info several times in a row
    for _ in range(5):
        resp = self.app.put(reverse('NodeAgentHandler'),
                            jsonutils.dumps(payload),
                            headers=self.default_headers)
        self.assertEqual(resp.status_code, 200)

    # check there's no new notification created
    after_count = notifications_for(node_id=payload['id'])
    self.assertEqual(before_count, after_count)
def test_cluster_locking_after_deployment(self):
    """After deployment, network and attribute changes must be rejected."""
    self.env.create(
        cluster_kwargs={"mode": "ha_compact"},
        nodes_kwargs=[
            {"pending_addition": True},
            {"pending_addition": True},
            {"pending_deletion": True},
        ])
    supertask = self.env.launch_deployment()
    self.env.wait_ready(supertask, 60)
    cluster_id = self.env.clusters[0].id
    test_nets = json.loads(
        self.env.nova_networks_get(cluster_id).body)
    resp_nova_net = self.env.nova_networks_put(
        cluster_id, test_nets, expect_errors=True)
    resp_neutron_net = self.env.neutron_networks_put(
        cluster_id, test_nets, expect_errors=True)
    resp_cluster = self.app.put(
        reverse('ClusterAttributesHandler',
                kwargs={'cluster_id': cluster_id}),
        json.dumps({'editable': {"foo": "bar"}}),
        headers=self.default_headers,
        expect_errors=True)
    self.assertEquals(resp_nova_net.status, 403)
    # it's 400 because we used Nova network
    self.assertEquals(resp_neutron_net.status, 400)
    self.assertEquals(resp_cluster.status, 403)
def test_version_handler(self):
    """FuelKeyHandler must return a signed, base64-encoded key blob."""
    resp = self.app.get(
        reverse('FuelKeyHandler'),
        headers=self.default_headers
    )
    fuel_release = "0.1"
    key_data = {
        "sha": "12345",
        "release": fuel_release,
        "uuid": "uuid"
    }
    # The signature is the b64 digest of the key data WITHOUT the
    # signature field itself; it is then embedded into the payload.
    key_data["signature"] = base64.b64encode(json.dumps(key_data))
    self.assertEqual(200, resp.status_code)
    decoded = json.loads(resp.body)
    self.assertEqual(
        decoded,
        {"key": base64.b64encode(json.dumps(key_data))}
    )
    key_payload = json.loads(base64.b64decode(decoded["key"]))
    self.assertEqual(key_payload["release"], fuel_release)
def test_reset_cluster_name_when_unassign_node(self):
    """Unassigning a node resets its custom name to the default one."""
    self.env.create(
        nodes_kwargs=[
            {'pending_roles': ['controller'],
             'pending_addition': True,
             'name': 'new_node'}])
    node = self.env.nodes[0]
    # Default node name is derived from the tail of the MAC address.
    default_name = 'Untitled ({0})'.format(node.mac[-5:])
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        json.dumps([{'id': node.id,
                     'cluster_id': None,
                     'pending_roles': []}]),
        headers=self.default_headers)
    self.assertEquals(200, resp.status)
    updated = json.loads(resp.body)
    self.assertEquals(1, len(updated))
    self.assertEquals(node.id, updated[0]['id'])
    self.assertEquals(node.name, default_name)
    self.assertEquals(node.cluster, None)
    self.assertEquals(node.pending_roles, [])
def test_assigned_networks_when_node_added(self):
    """Networks are auto-assigned only to the NIC matching the node MAC."""
    admin_mac = '123'
    meta = self.env.default_metadata()
    meta['interfaces'] = [
        {'name': 'eth0', 'mac': admin_mac},
        {'name': 'eth1', 'mac': '654'},
    ]
    node = self.env.create_node(api=True, meta=meta, mac=admin_mac)
    self.env.create_cluster(api=True, nodes=[node['id']])
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEquals(resp.status, 200)
    for nic in json.loads(resp.body):
        if nic['mac'] == admin_mac:
            # The NIC the node registered with carries the networks.
            self.assertGreater(len(nic['assigned_networks']), 0)
        else:
            self.assertEquals(nic['assigned_networks'], [])
def test_log_package_handler_ok(self):
    """LogPackageHandler returns the dump task produced by its manager.

    Fix: the original started the patchers and only stopped them after
    the request; if ``self.app.put`` raised, the active patches leaked
    into every subsequent test.  The stop calls now live in ``finally``
    so they always run.
    """
    task = json.dumps({
        "status": "running",
        "name": "dump",
        "progress": 0,
        "message": None,
        "id": 1,
        "uuid": "00000000-0000-0000-0000-000000000000"
    })
    tm_patcher = patch('nailgun.api.handlers.logs.DumpTaskManager')
    th_patcher = patch('nailgun.api.handlers.logs.TaskHandler')
    tm_mocked = tm_patcher.start()
    th_mocked = th_patcher.start()
    try:
        tm_instance = tm_mocked.return_value
        tm_instance.execute.return_value = task
        # render() must pass the task through untouched.
        th_mocked.render.side_effect = lambda x: x
        resp = self.app.put(reverse('LogPackageHandler'), "[]",
                            headers=self.default_headers)
    finally:
        # Always undo the patches, even if the request raised.
        tm_patcher.stop()
        th_patcher.stop()
    self.assertEquals(task, resp.body)
    self.assertEquals(resp.status, 200)
def assertHttpPut(self, name, arguments, data, expected_status):
    """Helper assert for checking HTTP PUT.

    :param name: a handler name, for reversing url
    :param arguments: arguments for reversing url
    :param data: a data to be PUT
    :param expected_status: expected HTTP response code, or a list of
        acceptable codes
    """
    response = self.app.put(base.reverse(name, kwargs=arguments),
                            jsonutils.dumps(data),
                            headers=self.default_headers)
    accepted = (expected_status if isinstance(expected_status, list)
                else [expected_status])
    self.assertIn(response.status_code, accepted)
    # Heuristic: task-like payloads carry progress/status/uuid keys.
    body = response.json_body
    looks_like_task = all(
        key in body for key in ('progress', 'status', 'uuid'))
    if looks_like_task:
        self.assertNotEqual(body['status'],
                            consts.TASK_STATUSES.error)
def test_node_volumes_clears_after_deletion_from_cluster(self):
    """Removing a node from a cluster drops its pending 'disks' change."""
    cluster = self.env.create_cluster(api=True)
    self.env.create_node(api=True, cluster_id=cluster["id"])
    node_db = self.env.nodes[0]

    def disks_changes():
        # Pending 'disks' changes currently registered for this node.
        return self.db.query(ClusterChanges).filter_by(
            name="disks",
            node_id=node_db.id
        ).all()

    self.assertEqual(len(disks_changes()), 1)
    self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{"id": node_db.id, "cluster_id": None}]),
        headers=self.default_headers
    )
    self.env.refresh_clusters()
    self.assertEqual(len(disks_changes()), 0)
def test_get_particular_role_for_cluster_w_plugin(self):
    """A plugin-provided role is served by ClusterRolesHandler."""
    plugin_data = self.env.get_default_plugin_metadata()
    plugin_data['roles_metadata'] = self.ROLES
    plugin_data['volumes_metadata'] = self.VOLUMES
    plugin = objects.Plugin.create(plugin_data)
    self.cluster.plugins.append(plugin)
    self.db.flush()

    plugin_adapter = adapters.wrap_plugin(plugin)
    role_url = base.reverse('ClusterRolesHandler', {
        'cluster_id': self.cluster.id,
        'role_name': 'test_role',
    })
    role = self.app.get(url=role_url).json

    self.assertEqual(role['name'], 'test_role')
    self.assertDictEqual(
        role['meta'],
        plugin_adapter.normalized_roles_metadata['test_role'])
    self.assertItemsEqual(
        role['volumes_roles_mapping'],
        plugin_adapter.volumes_metadata['volumes_roles_mapping']
        ['test_role'])
def test_network_checking_fails_if_admin_intersection(self):
    """Network check errors out on intersection with the admin network."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"pending_addition": True}])
    cluster = self.env.clusters[0]
    nets = self.env.generate_ui_networks(cluster.id)
    # Point the last network at the excluded (admin) address space.
    nets['networks'][-1]["cidr"] = settings.NET_EXCLUDE[0]
    resp = self.app.put(reverse('NetworkConfigurationHandler',
                                kwargs={'cluster_id': cluster.id}),
                        json.dumps(nets),
                        headers=self.default_headers,
                        expect_errors=True)
    self.assertEquals(resp.status, 202)
    task = json.loads(resp.body)
    self.assertEquals(task['status'], 'error')
    self.assertEquals(task['progress'], 100)
    self.assertEquals(task['name'], 'check_networks')
    self.assertEquals(
        task['message'],
        "Intersection with admin "
        "network(s) '{0}' found".format(settings.NET_EXCLUDE))
def add_disk_to_node(self, node, size):
    """Append a fake disk of ``size`` MB to the node's hardware meta.

    The new disk takes the next drive letter after the last disk already
    present (sda -> sdb -> ...), and the change is pushed through the
    agent handler like a regular hardware update.
    """
    new_meta = node.meta.copy()
    # Letter of the last existing disk, e.g. 'a' for 'sda'.
    last_letter = [d['name'][-1] for d in new_meta['disks']][-1]
    next_letter = string.letters[string.letters.index(last_letter) + 1]
    new_meta['disks'].append({
        # convert mbytes to bytes
        'size': size * (1024 ** 2),
        'model': 'SAMSUNG B00B135',
        'name': 'sd%s' % next_letter,
        'disk': 'disk/id/%s00b135' % next_letter,
    })
    self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps({'mac': node.mac, 'meta': new_meta}),
        headers=self.default_headers)
def test_network_checking(self):
    """A valid network config passes check_networks and is persisted."""
    self.env.create(cluster_kwargs={},
                    nodes_kwargs=[{"pending_addition": True}])
    cluster = self.env.clusters[0]
    nets = self.env.generate_ui_networks(cluster.id)
    resp = self.app.put(reverse('NetworkConfigurationHandler',
                                kwargs={'cluster_id': cluster.id}),
                        json.dumps(nets),
                        headers=self.default_headers)
    self.assertEquals(resp.status, 202)
    task = json.loads(resp.body)
    self.assertEquals(task['status'], 'ready')
    self.assertEquals(task['progress'], 100)
    self.assertEquals(task['name'], 'check_networks')
    # Every submitted network group must now exist in the DB.
    submitted_names = [n['name'] for n in nets['networks']]
    ngs_created = self.db.query(NetworkGroup).filter(
        NetworkGroup.name.in_(submitted_names)).all()
    self.assertEquals(len(ngs_created), len(nets['networks']))
def test_node_volumes_modification_adds_pending_changes(self):
    """Adding a node registers a pending 'disks' change on the cluster."""
    cluster = self.env.create_cluster(api=True)
    self.env.create_node(api=True, cluster_id=cluster["id"])
    node_db = self.env.nodes[0]
    pending_disk_changes = self.db.query(ClusterChanges).filter_by(
        name="disks",
        node_id=node_db.id
    ).all()
    self.assertEquals(len(pending_disk_changes), 1)
    resp = self.app.get(
        reverse('ClusterHandler', kwargs={'cluster_id': cluster['id']}),
        headers=self.default_headers
    )
    cluster_json = json.loads(resp.body)
    # The cluster API must report the same pending change.
    self.assertIn(["disks", node_db.id], cluster_json["changes"])
def test_log_package_handler_ok(self):
    """LogPackageHandler serializes the dump task and replies 202.

    Fix: the original used manual ``patcher.start()``/``stop()`` pairs
    with no try/finally, so a failing request leaked active patches into
    subsequent tests.  Context managers guarantee the patches are always
    undone.
    """
    task = {
        "status": "running",
        "name": "dump",
        "progress": 0,
        "message": None,
        "id": 1,
        "uuid": "00000000-0000-0000-0000-000000000000"
    }
    with mock.patch(
            'nailgun.api.v1.handlers.logs.DumpTaskManager'
    ) as tm_mocked, mock.patch(
            'nailgun.api.v1.handlers.logs.objects.Task'
    ) as th_mocked:
        # The manager instance yields a task object with these fields...
        tm_mocked.return_value.execute.return_value = mock.Mock(**task)
        # ...and serialization returns the plain dict.
        th_mocked.to_json.side_effect = lambda x: task
        resp = self.app.put(reverse('LogPackageHandler'), "[]",
                            headers=self.default_headers)
    self.assertEqual(resp.status_code, 202)
    self.assertDictEqual(task, resp.json_body)
def test_batch_node_deletion_and_attributes_clearing(self):
    """Batch DELETE of nodes spawns a task and empties the cluster."""
    self.env.create(nodes_kwargs=[{"pending_addition": True}])
    self.env.launch_deployment()
    cluster = self.env.clusters[0]
    ids = [node.id for node in cluster.nodes]
    # Node ids are passed as a comma-separated query string.
    url = '{0}?ids={1}'.format(reverse('NodeCollectionHandler'),
                               ','.join(str(node_id) for node_id in ids))
    resp = self.app.delete(url, headers=self.default_headers)
    self.assertEqual(202, resp.status_code)
    deletion_task = objects.Task.get_by_uuid(resp.json_body['uuid'])
    self.env.wait_ready(deletion_task)
    # All of the cluster's nodes must be gone from the DB.
    remaining = self.db.query(Node).filter_by(cluster_id=cluster.id)
    self.assertEquals(remaining.count(), 0)
def test_redeploy_nodes_in_ready_status_if_cluster_attrs_were_changed(
        self
):
    """Changing cluster attributes redeploys nodes already in 'ready'."""
    self.env.create(
        nodes_kwargs=[
            {"status": "ready"},
            {"pending_addition": True}])
    cluster_db = self.env.clusters[0]
    cluster_db.clear_pending_changes()
    # Touch the attributes; this must register exactly one change.
    resp = self.app.put(
        reverse('ClusterAttributesHandler',
                kwargs={'cluster_id': cluster_db.id}),
        headers=self.default_headers,
        params=json.dumps({'cluster_type': 'both'})
    )
    self.assertEquals(resp.status, 200)
    self.assertEquals(len(cluster_db.changes), 1)
    supertask = self.env.launch_deployment()
    self.assertEquals(supertask.name, 'deploy')
    self.assertIn(supertask.status, ('running', 'ready'))
    self.env.wait_for_nodes_status(
        self.env.nodes, ['provisioned', 'provisioning'])
    self.env.wait_ready(supertask)
    self.env.refresh_nodes()
    # Every node — including the previously 'ready' one — was redeployed.
    for node in self.env.nodes:
        self.assertEquals(node.status, 'ready')
        self.assertEquals(node.progress, 100)
    self.env.db.refresh(cluster_db)
    self.assertEquals(len(cluster_db.changes), 0)
def test_occurs_error_not_enough_ip_addresses(self):
    """Deployment fails when the public range can't cover all nodes."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {'pending_addition': True},
            {'pending_addition': True},
            {'pending_addition': True},
        ])
    cluster = self.env.clusters[0]
    public_network = self.db.query(NetworkGroup).filter_by(
        name='public').first()
    # Shrink the public range to two addresses for three nodes.
    net_data = {
        "networks": [{
            'id': public_network.id,
            'cidr': '220.0.1.0/24',
            'gateway': '220.0.1.1',
            'ip_ranges': [['220.0.1.2', '220.0.1.3']],
        }]
    }
    self.app.put(reverse('NovaNetworkConfigurationHandler',
                         kwargs={'cluster_id': cluster.id}),
                 json.dumps(net_data),
                 headers=self.default_headers,
                 expect_errors=True)
    task = self.env.launch_deployment()
    self.assertEquals(task.status, 'error')
    self.assertEquals(
        task.message,
        'Not enough IP addresses. Public network must have at least '
        '3 IP addresses for the current environment.')
def test_get_default_nic_networkgroups(self):
    """When the agent reports another MAC, networks move to that NIC."""
    cluster = self.env.create_cluster(api=True)
    node = self.env.create_node(api=True)
    node_db = self.env.nodes[0]
    main_nic_id = self.env.network_manager.get_main_nic(node_db.id)
    # Pick any interface other than the one matching the node's MAC.
    other_iface = self.db.query(NodeNICInterface).filter_by(
        node_id=node['id']
    ).filter(
        not_(NodeNICInterface.mac == node_db.mac)
    ).first()
    self.app.put(
        reverse('NodeCollectionHandler'),
        json.dumps([{
            'mac': other_iface.mac,
            'meta': node_db.meta,
            'is_agent': True,
            'cluster_id': cluster["id"]
        }]),
        headers=self.default_headers,
        expect_errors=True
    )
    # The main NIC must now be the one the agent reported.
    new_main_nic_id = self.env.network_manager.get_main_nic(node_db.id)
    self.assertEquals(new_main_nic_id, other_iface.id)
    self.assertEquals(
        [ng.id for ng in other_iface.assigned_networks],
        self.env.network_manager.get_default_nic_networkgroups(
            node_db.id, other_iface.id
        )
    )
    # The previous main NIC must have lost all of its networks.
    self.assertEquals(
        self.db.query(NodeNICInterface).get(main_nic_id).assigned_networks,
        []
    )