def test_openstack_config_execute_force(self, _):
    """Config execution on non-ready nodes fails unless 'force' is given."""
    # Turn node 2 into provisioned state
    self.env.nodes[2].status = consts.NODE_STATUSES.provisioned
    self.db.flush()
    # Try to update OpenStack configuration for cluster
    data = {"cluster_id": self.clusters[0].id}
    resp = self.app.put(
        reverse("OpenstackConfigExecuteHandler"),
        jsonutils.dumps(data),
        headers=self.default_headers,
        expect_errors=True,
    )
    # Request shouldn't pass a validation
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        "Nodes '{0}' are not in status 'ready' and "
        "can not be updated directly."
        "".format(self.env.nodes[2].uid),
        resp.json_body["message"],
    )
    # Try to update OpenStack configuration for cluster with 'force' key
    data = {"cluster_id": self.clusters[0].id, "force": True}
    resp = self.app.put(
        reverse("OpenstackConfigExecuteHandler"),
        jsonutils.dumps(data),
        headers=self.default_headers
    )
    # Update OpenStack configuration executed successfully
    self.assertEqual(resp.status_code, 202)
def test_get_deployment_tasks_task_based(self):
    """Neither 'start' nor 'end' filters are allowed for task-based deploy."""
    expected_message = (
        'Both "start" and "end" parameters are not allowed for task-based '
        'deployment.')
    # Each filter alone must be rejected with the same validation error.
    for query in ({'start': 'task'}, {'end': 'task'}):
        resp = self.app.get(
            reverse('ClusterDeploymentTasksHandler',
                    kwargs={'obj_id': self.cluster.id}),
            params=query,
            headers=self.default_headers,
            expect_errors=True
        )
        self.assertEqual(400, resp.status_code)
        self.assertEqual(resp.json_body['message'], expected_message)
def test_not_acceptable_if_cluster_has_not_support_vmware(self):
    """Read and update of vmware attributes fail for non-vmware clusters."""
    url = reverse(
        'VmwareAttributesHandler',
        kwargs={'cluster_id': self.cluster['id']})
    expected_message = "Cluster doesn't support vmware configuration"

    # GET must be rejected.
    resp = self.app.get(
        url,
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)
    self.assertEqual(expected_message, resp.json_body["message"])

    # PUT must be rejected as well.
    resp = self.app.put(
        url,
        params=jsonutils.dumps({
            "editable": {
                "value": {"foo": "bar"}
            }
        }),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)
    self.assertEqual(expected_message, resp.json_body["message"])
def test_roles_failed_to_delete_assigned(self):
    """A role cannot be removed from a release while nodes still use it."""
    self.env.create(
        nodes_kwargs=[
            {"status": "ready", "roles": ["controller"]}
        ]
    )
    resp = self.app.get(
        reverse('ReleaseCollectionHandler'),
        headers=self.default_headers
    )
    release_json = resp.json_body[0]
    # Drop the assigned 'controller' role from the release payload
    old_roles = set(release_json["roles"])
    old_roles.remove("controller")
    release_json["roles"] = list(old_roles)
    resp = self.app.put(
        reverse(
            'ReleaseHandler',
            kwargs={
                "obj_id": release_json["id"]
            }
        ),
        jsonutils.dumps(release_json),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        resp.json_body["message"],
        "Cannot delete roles already assigned to nodes: controller"
    )
def test_network_changing_adds_pending_changes(self):
    """Updating nova network config records a 'networks' pending change."""
    cluster = self.env.create_cluster(api=True)
    cluster_db = self.env.clusters[0]
    # Start from a clean slate: no pending changes at all
    objects.Cluster.clear_pending_changes(cluster_db)
    all_changes = self.db.query(ClusterChanges).all()
    self.assertEqual(len(all_changes), 0)
    resp = self.app.get(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': cluster['id']}),
        headers=self.default_headers
    )
    net_id = resp.json_body['networks'][0]["id"]
    # Change the gateway of the first network
    resp = self.app.put(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': cluster['id']}),
        jsonutils.dumps({'networks': [{
            "id": net_id, "gateway": "10.0.0.1"}
        ]}),
        headers=self.default_headers
    )
    pending_changes = self.db.query(ClusterChanges).filter_by(
        name="networks"
    ).all()
    self.assertEqual(len(pending_changes), 1)
def test_attributes_vcenter_neutron_fails(self):
    """Enabling vCenter is rejected when the cluster uses Neutron."""
    cluster_id = self.env.create_cluster(api=True,
                                         net_provider='neutron')['id']
    resp = self.app.get(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    # Try to turn vCenter on: must fail since vCenter needs nova-network
    resp = self.app.patch(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        params=jsonutils.dumps({
            'editable': {
                'common': {
                    'use_vcenter': {
                        'type': 'hidden',
                        'value': True,
                        'weight': 30,
                    },
                },
            },
        }),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)
    self.assertEqual('vCenter requires Nova Network to be set '
                     'as a network provider',
                     resp.json_body.get('message'))
def test_attributes_set_defaults(self):
    """PUT to the defaults handler resets editable attrs to release defaults."""
    cluster = self.env.create_cluster(api=True)
    cluster_db = self.env.clusters[0]
    # Change editable attributes.
    resp = self.app.put(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster['id']}),
        params=jsonutils.dumps({
            'editable': {
                "foo": "bar"
            },
        }),
        headers=self.default_headers,
        expect_errors=True
    )
    # expect_errors=True so resp.body can be shown on assertion failure
    self.assertEqual(200, resp.status_code, resp.body)
    attrs = objects.Cluster.get_attributes(cluster_db)
    self.assertEqual("bar", attrs.editable["foo"])
    # Set attributes to defaults.
    resp = self.app.put(
        reverse(
            'ClusterAttributesDefaultsHandler',
            kwargs={'cluster_id': cluster['id']}),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    release = self.db.query(Release).get(
        cluster['release_id']
    )
    # The response must now match the release's default editable metadata
    self._compare_editable(
        release.attributes_metadata['editable'],
        resp.json_body['editable'],
        cluster_db
    )
def test_attributes_update_patch(self):
    """PATCH merges new keys into editable attrs without dropping others."""
    cluster_id = self.env.create_cluster(api=True)['id']
    cluster_db = self.env.clusters[0]
    resp = self.app.get(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    resp = self.app.patch(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        params=jsonutils.dumps({
            'editable': {
                "foo": "bar"
            },
        }),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    attrs = objects.Cluster.get_attributes(cluster_db)
    self.assertEqual("bar", attrs.editable["foo"])
    # The patch must not have wiped out the pre-existing attributes
    attrs.editable.pop('foo')
    self.assertNotEqual(attrs.editable, {})
def test_failing_attributes_put(self):
    """Attribute update with a value of the wrong type is rejected."""
    cluster_id = self.env.create_cluster(api=True)['id']
    resp = self.app.get(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    # 'osd_pool_size' is declared as text, so a boolean value is invalid
    resp = self.app.patch(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        params=jsonutils.dumps({
            'editable': {
                'storage': {
                    'osd_pool_size': {
                        'description': 'desc',
                        'label': 'OSD Pool Size',
                        'type': 'text',
                        'value': True,
                        'weight': 80,
                    },
                },
            },
        }),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)
def test_openstack_config_delete(self):
    """Deleting a config deactivates it; a second delete is an error."""
    obj_id = self.configs[0].id
    resp = self.app.delete(
        reverse('OpenstackConfigHandler',
                {'obj_id': obj_id}),
        expect_errors=True)
    self.assertEqual(resp.status_code, 204)
    # The object is soft-deleted: still readable but no longer active
    resp = self.app.get(
        reverse('OpenstackConfigHandler',
                {'obj_id': obj_id}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.json_body['is_active'], False)
    # Try delete already deleted object
    resp = self.app.delete(
        reverse('OpenstackConfigHandler',
                {'obj_id': obj_id}),
        headers=self.default_headers,
        expect_errors=True)
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        resp.json_body['message'],
        "Configuration '{0}' has been already disabled.".format(obj_id))
def test_network_assignment_when_node_added(self):
    """With public-on-all-nodes, secondary NICs receive the public network."""
    cluster = self.env.create_cluster(
        api=True,
        editable_attributes={'public_network_assignment': {
            'assign_to_all_nodes': {'value': True}}})
    mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta,
        [{'name': 'eth0', 'mac': mac},
         {'name': 'eth1', 'mac': self.env.generate_random_mac()}])
    node = self.env.create_node(api=True, meta=meta, mac=mac)
    # Assigning the node to the cluster triggers network assignment
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'id': node['id'], 'cluster_id': cluster['id']}]),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    response = resp.json_body
    for resp_nic in response:
        net_names = [net['name'] for net in resp_nic['assigned_networks']]
        if resp_nic['mac'] == mac:
            # The node's own MAC identifies the admin (PXE) interface
            self.assertIn("fuelweb_admin", net_names)
        else:
            self.assertIn("public", net_names)
        self.assertGreater(len(resp_nic['assigned_networks']), 0)
def test_get_handler_with_incompleted_iface_data(self):
    """Agent updates with incomplete NIC data leave the node without NICs."""
    meta = self.env.default_metadata()
    meta["interfaces"] = []
    node = self.env.create_node(api=True, meta=meta)
    # Each variant is missing a required interface field (name or mac)
    meta_clean_list = [
        {'interfaces': [{'name': '', 'mac': '00:00:00:00:00:00'}]},
        {'interfaces': [{'mac': '00:00:00:00:00:00'}]},
        {'interfaces': [{'name': 'eth0'}]}
    ]
    for nic_meta in meta_clean_list:
        meta = self.env.default_metadata()
        meta.update(nic_meta)
        node_data = {'mac': node['mac'], 'meta': meta}
        resp = self.app.put(
            reverse('NodeAgentHandler'),
            jsonutils.dumps(node_data),
            expect_errors=True,
            headers=self.default_headers
        )
        self.assertEqual(resp.status_code, 200)
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers
        )
        # Incomplete interfaces must not be persisted
        self.assertEqual(resp.json_body, [])
def test_openstack_config_upload_override_multinode(self):
    """Uploading node configs deactivates the previous ones for those nodes."""
    data = {
        'cluster_id': self.clusters[0].id,
        'node_ids': [self.nodes[1].id, self.nodes[2].id],
        'configuration': {
            'nova_config': 'overridden_value'
        }
    }
    resp = self.app.post(
        reverse('OpenstackConfigCollectionHandler'),
        jsonutils.dumps(data),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 201)
    configs = resp.json_body
    self.assertEqual(configs[0]['node_id'], self.nodes[1].id)
    self.assertEqual(configs[1]['node_id'], self.nodes[2].id)
    # The older per-node configs must have been disabled by the upload
    resp = self.app.get(
        reverse('OpenstackConfigHandler',
                {'obj_id': self.configs[1].id}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.json_body['is_active'], False)
    resp = self.app.get(
        reverse('OpenstackConfigHandler',
                {'obj_id': self.configs[2].id}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.json_body['is_active'], False)
def test_NIC_updates_by_agent(self):
    """Agent-reported NIC changes must be reflected by the NICs handler."""
    initial_meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(initial_meta, [
        {'name': 'eth0', 'mac': '00:00:00:00:00:00', 'current_speed': 1,
         'state': 'up'}])
    node = self.env.create_node(api=True, meta=initial_meta)

    # The agent reports the same MAC under a new name with new properties.
    updated_meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(updated_meta, [
        {'name': 'new_nic', 'mac': '00:00:00:00:00:00',
         'current_speed': 10, 'max_speed': 10, 'state': 'down'}])
    agent_payload = {'mac': node['mac'], 'meta': updated_meta}
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(agent_payload),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(len(resp.json_body), 1)

    reported = updated_meta['interfaces'][0]
    actual = resp.json_body[0]
    for attr in ('mac', 'current_speed', 'max_speed', 'state'):
        self.assertEqual(actual[attr], reported[attr])
    # No networks can be assigned yet: the node is not in a cluster.
    self.assertEqual(actual['assigned_networks'], [])
def test_nic_adds_by_agent(self):
    """A NIC newly reported by the agent appears in the NICs handler output."""
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': '00:00:00:00:00:00', 'current_speed': 1,
         'pxe': True, 'state': 'up'}])
    node = self.env.create_node(api=True, meta=meta)

    # The agent reports one additional interface.
    meta['interfaces'].append({
        'name': 'new_nic', 'mac': '00:00:00:00:00:01'})
    node_data = {'mac': node['mac'], 'meta': meta}
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(node_data),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(len(resp.json_body), len(meta['interfaces']))
    for nic in meta['interfaces']:
        # NOTE: a list comprehension is used instead of filter(...)[0],
        # which breaks on Python 3 where filter() returns an iterator.
        filtered_nics = [i for i in resp.json_body
                         if i['mac'] == nic['mac']]
        resp_nic = filtered_nics[0]
        self.assertEqual(resp_nic['mac'], nic['mac'])
        # Optional fields are compared via .get() so the new NIC (which
        # reported no speed/state) matches the handler's null values.
        self.assertEqual(resp_nic['current_speed'],
                         nic.get('current_speed'))
        self.assertEqual(resp_nic['max_speed'], nic.get('max_speed'))
        self.assertEqual(resp_nic['state'], nic.get('state'))
        for conn in ('assigned_networks', ):
            self.assertEqual(resp_nic[conn], [])
def test_network_verify_if_old_task_is_running(self):
    """Network verification is rejected while another one is running."""
    resp = self.app.get(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': self.env.clusters[0].id}
        ),
        headers=self.default_headers
    )
    nets = resp.body
    # Simulate a verification task that is still in progress
    self.env.create_task(
        name="verify_networks",
        status=consts.TASK_STATUSES.running,
        cluster_id=self.env.clusters[0].id
    )
    resp = self.app.put(
        reverse(
            'NovaNetworkConfigurationVerifyHandler',
            kwargs={'cluster_id': self.env.clusters[0].id}),
        nets,
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)
def test_deletion_during_deployment(self):
    """A cluster deleted mid-deployment eventually disappears from the DB."""
    self.env.create(
        cluster_kwargs={
            "name": u"Вася"
        },
        nodes_kwargs=[
            {"status": "ready", "pending_addition": True},
        ]
    )
    cluster_id = self.env.clusters[0].id
    # Kick off deployment...
    self.app.put(
        reverse(
            'ClusterChangesHandler',
            kwargs={'cluster_id': cluster_id}),
        headers=self.default_headers
    )
    # ...and immediately request deletion of the same cluster
    self.app.delete(
        reverse(
            'ClusterHandler',
            kwargs={'obj_id': cluster_id}),
        headers=self.default_headers
    )
    # Deletion is asynchronous: poll the DB until the cluster is gone
    timeout = 10
    timer = time.time()
    while True:
        cluster = self.db.query(Cluster).filter_by(id=cluster_id).first()
        if not cluster:
            break
        if (time.time() - timer) > timeout:
            raise Exception("Cluster deletion timeout")
        time.sleep(0.24)
    self.assertIsNone(cluster)
def test_stats_sending_enabled(self):
    """must_send_stats() follows the user's saved statistics settings."""
    self.assertEqual(objects.MasterNodeSettings.must_send_stats(), False)
    resp = self.app.get(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    data = resp.json_body
    # emulate user confirmed settings in UI
    data["settings"]["statistics"]["user_choice_saved"]["value"] = True
    resp = self.app.put(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers,
        params=jsonutils.dumps(data)
    )
    self.assertEqual(200, resp.status_code)
    self.assertTrue(objects.MasterNodeSettings.must_send_stats())
    # emulate user disabled statistics sending
    data["settings"]["statistics"]["send_anonymous_statistic"]["value"] = \
        False
    resp = self.app.put(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers,
        params=jsonutils.dumps(data)
    )
    self.assertEqual(200, resp.status_code)
    self.assertFalse(objects.MasterNodeSettings.must_send_stats())
def test_node_handlers_deletion_bad_request(self):
    """Node deletion is forbidden while another controller is in error."""
    cluster = self.env.create(nodes_kwargs=[
        {'roles': ['controller'], 'status': consts.NODE_STATUSES.error}
    ])
    node_to_delete = self.env.create_node(
        cluster_id=cluster.id,
        roles=['controller'],
        status=consts.NODE_STATUSES.ready
    )
    err_msg = ("One of the cluster controllers is in error state, "
               "please, eliminate the problem prior to proceeding further")
    # Single-node handler must refuse with 403
    resp = self.app.delete(
        reverse(
            'NodeHandler',
            kwargs={'obj_id': node_to_delete.id}),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 403)
    self.assertIn(err_msg, resp.body)
    # Collection handler with an ids query string must refuse too
    url = reverse('NodeCollectionHandler')
    query_str = 'ids={0}'.format(node_to_delete.id)
    resp = self.app.delete(
        '{0}?{1}'.format(url, query_str),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 403)
    self.assertIn(err_msg, resp.body)
def test_change_mac_of_assigned_nics(self):
    """Changing MACs of non-admin NICs keeps the node visible via the API.

    The same MAC-rotation update is pushed through every node-updating
    handler (agent, single-node, collection) in turn.
    """
    def get_nodes():
        # Fetch the cluster's node list as the API reports it
        resp = self.app.get(
            reverse('NodeCollectionHandler',
                    kwargs={'cluster_id': cluster.id}),
            headers=self.default_headers,
        )
        return resp.json_body

    meta = self.env.default_metadata()
    meta["interfaces"] = [
        {'name': 'eth0', 'mac': self.env.generate_random_mac(),
         'pxe': True},
        {'name': 'eth1', 'mac': self.env.generate_random_mac()},
        {'name': 'eth2', 'mac': self.env.generate_random_mac()},
        {'name': 'eth3', 'mac': self.env.generate_random_mac()},
        {'name': 'eth4', 'mac': self.env.generate_random_mac()},
    ]
    cluster = self.env.create(nodes_kwargs=[{'api': True, 'meta': meta}])

    # check all possible handlers
    for handler in ('NodeAgentHandler',
                    'NodeHandler',
                    'NodeCollectionHandler'):
        # create node and check it availability
        nodes_data = get_nodes()
        self.assertEqual(len(nodes_data), 1)
        node_db = objects.Node.get_by_uid(nodes_data[0]['id'])
        # change mac address of interfaces except admin one
        adm_eth = self.env.network_manager._get_interface_by_network_name(
            node_db, 'fuelweb_admin')
        for iface in nodes_data[0]['meta']['interfaces']:
            if iface['name'] != adm_eth.name:
                iface['mac'] = self.env.generate_random_mac()
        # prepare put request
        data = {
            'id': nodes_data[0]['id'],
            'meta': nodes_data[0]['meta'],
        }
        # The collection handler expects a list payload
        if handler in ('NodeCollectionHandler', ):
            data = [data]
        if handler in ('NodeHandler', ):
            endpoint = reverse(handler, kwargs={'obj_id': data['id']})
        else:
            endpoint = reverse(handler)
        self.app.put(
            endpoint,
            jsonutils.dumps(data),
            headers=self.default_headers,
        )
        # check the node is visible for api
        nodes_data = get_nodes()
        self.assertEqual(len(nodes_data), 1)
def test_public_network_assigment_to_wrong_node(self):
    """Assigning the public network to a compute node must be rejected."""
    cluster = self.env.create(api=True)
    node = self.env.create_node(
        api=True,
        cluster_id=cluster.id,
        roles=['controller'])
    resp = self.app.get(
        reverse("NodeNICsHandler", kwargs={"node_id": node['id']}))
    # Networks assigned to the controller's second NIC (includes public)
    networks = resp.json_body[1]['assigned_networks']
    compute = self.env.create_node(
        api=True,
        cluster_id=cluster.id,
        roles=['compute'])
    resp = self.app.get(
        reverse("NodeNICsHandler", kwargs={"node_id": compute['id']}))
    data = resp.json_body
    # Try to move the controller's networks onto the compute node
    data[1]['assigned_networks'] = networks
    resp = self.app.put(
        reverse("NodeNICsHandler", kwargs={"node_id": compute['id']}),
        jsonutils.dumps(data),
        expect_errors=True,
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 400)
    # NOTE: resp.json_body is used consistently instead of resp.json and
    # jsonutils.loads(resp.body) — all three are equivalent in webtest,
    # and json_body is the convention used across this test suite.
    message = resp.json_body['message']
    self.assertEqual(
        message,
        'Trying to assign public network to Node \'%d\' which should '
        'not have public network' % compute['id'])
def test_update_node_with_wrong_ip(self):
    """Agent-reported IPs outside the admin network flag the node as error."""
    node = self.env.create_node(
        api=False, ip='10.20.0.2',
        status=consts.NODE_STATUSES.deploying)
    # An IP outside of the admin network: node must be moved to 'error'
    ipaddress = '192.168.0.10'
    self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps({'id': node.id,
                         'ip': ipaddress,
                         'status': consts.NODE_STATUSES.discover}),
        headers=self.default_headers)
    self.assertEqual(node.ip, ipaddress)
    self.assertEqual(node.status, consts.NODE_STATUSES.error)
    # An error notification must have been produced for the node
    notif = self.db.query(Notification).filter_by(
        node_id=node.id,
        topic='error'
    ).first()
    self.assertRegexpMatches(notif.message,
                             "that does not match any Admin network")
    # An IP from the admin network range restores the 'discover' status
    admin_ng = objects.NetworkGroup.get_admin_network_group(node)
    ipaddress = str(netaddr.IPRange(admin_ng.ip_ranges[0].first,
                                    admin_ng.ip_ranges[0].last)[1])
    self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps({'id': node.id,
                         'ip': ipaddress}),
        headers=self.default_headers)
    self.assertEqual(node.ip, ipaddress)
    self.assertEqual(node.status, consts.NODE_STATUSES.discover)
def test_network_assignment_when_node_added(self):
    """With public-on-all-nodes, secondary NICs receive the public network."""
    cluster = self.env.create_cluster(
        api=True,
        editable_attributes={"public_network_assignment":
                             {"assign_to_all_nodes": {"value": True}}}
    )
    mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta,
        [{"name": "eth0", "mac": mac},
         {"name": "eth1", "mac": self.env.generate_random_mac()}]
    )
    node = self.env.create_node(api=True, meta=meta, mac=mac)
    # Assigning the node to the cluster triggers network assignment
    resp = self.app.put(
        reverse("NodeCollectionHandler"),
        jsonutils.dumps([{"id": node["id"], "cluster_id": cluster["id"]}]),
        headers=self.default_headers,
    )
    self.assertEqual(resp.status_code, 200)
    resp = self.app.get(
        reverse("NodeNICsHandler", kwargs={"node_id": node["id"]}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    response = resp.json_body
    for resp_nic in response:
        net_names = [net["name"] for net in resp_nic["assigned_networks"]]
        if resp_nic["mac"] == mac:
            # NOTE: assertIn replaces assertTrue(... in ...) — it gives a
            # clearer failure message and matches the sibling test above.
            self.assertIn("fuelweb_admin", net_names)
        else:
            self.assertIn("public", net_names)
        self.assertGreater(len(resp_nic["assigned_networks"]), 0)
def test_log_entry_collection_handler(self):
    """Log entries are returned newest-first for master and node sources."""
    node_ip = "10.20.30.40"
    log_entries = [
        [time.strftime(settings.UI_LOG_DATE_FORMAT),
         "LEVEL111",
         "text1"],
        [time.strftime(settings.UI_LOG_DATE_FORMAT),
         "LEVEL222",
         "text2"],
    ]
    self.env.create_cluster()
    cluster = self.env.clusters[0]
    node = self.env.create_node(cluster_id=cluster.id, ip=node_ip)
    # One logfile for the master-node source and one for the slave node
    self._create_logfile_for_node(settings.LOGS[0], log_entries)
    self._create_logfile_for_node(settings.LOGS[1], log_entries, node)
    resp = self.app.get(
        reverse("LogEntryCollectionHandler"),
        params={"source": settings.LOGS[0]["id"]},
        headers=self.default_headers,
    )
    self.assertEqual(200, resp.status_code)
    response = resp.json_body
    # The API returns entries in reverse chronological order
    response["entries"].reverse()
    self.assertEqual(response["entries"], log_entries)
    resp = self.app.get(
        reverse("LogEntryCollectionHandler"),
        params={"node": node.id,
                "source": settings.LOGS[1]["id"]},
        headers=self.default_headers,
    )
    self.assertEqual(200, resp.status_code)
    response = resp.json_body
    response["entries"].reverse()
    self.assertEqual(response["entries"], log_entries)
def test_cluster_node_list_update(self):
    """PUTting a new node list replaces assignment and resets hostnames."""
    node1 = self.env.create_node(api=False, hostname='name1')
    cluster = self.env.create_cluster(api=False)
    resp = self.app.put(
        reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
        jsonutils.dumps({'nodes': [node1.id]}),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 200)
    # Second node with the same hostname; it is not in the cluster yet
    node2 = self.env.create_node(api=False, hostname='name1')
    nodes = self.db.query(Node).filter(Node.cluster == cluster).all()
    self.assertEqual(1, len(nodes))
    self.assertEqual(nodes[0].id, node1.id)
    # Replace the cluster's node list with node2 only
    resp = self.app.put(
        reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
        jsonutils.dumps({'nodes': [node2.id]}),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)
    # The removed node's hostname is reset to the default 'node-<id>'
    self.assertEqual('node-{0}'.format(node1.id), node1.hostname)
    nodes = self.db.query(Node).filter(Node.cluster == cluster)
    self.assertEqual(1, nodes.count())
def test_get_notification_status(self):
    """Notification stats reflect totals of read and unread notifications."""
    def fetch_stats():
        return self.app.get(
            reverse(
                'NotificationCollectionStatsHandler',
            ),
            headers=self.default_headers
        )

    # No notifications yet.
    resp = fetch_stats()
    self.assertEqual({'total': 0, 'read': 0, 'unread': 0}, resp.json_body)
    self.assertEqual(200, resp.status_code)

    # One unread notification.
    self.env.create_notification()
    resp = fetch_stats()
    self.assertEqual({'total': 1, 'read': 0, 'unread': 1}, resp.json_body)

    # Two more notifications, already read.
    self.env.create_notification(status='read')
    self.env.create_notification(status='read')
    resp = fetch_stats()
    self.assertEqual({'total': 3, 'read': 2, 'unread': 1}, resp.json_body)
def test_partial_user_contacts_info(self):
    """Saved contact info is exposed via the installation info structure."""
    resp = self.app.get(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    data = resp.json_body
    # emulate user enabled contact info sending to support team
    data["settings"]["statistics"]["user_choice_saved"]["value"] = True
    data["settings"]["statistics"]["send_user_info"]["value"] = \
        True
    name = "user"
    email = "*****@*****.**"
    data["settings"]["statistics"]["name"]["value"] = name
    data["settings"]["statistics"]["email"]["value"] = email
    resp = self.app.put(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers,
        params=jsonutils.dumps(data)
    )
    self.assertEqual(200, resp.status_code)
    # 'company' was never provided, so it must come back empty
    self.assertDictEqual(
        InstallationInfo().get_installation_info()['user_information'],
        {
            'contact_info_provided': True,
            'name': name,
            'email': email,
            'company': ''
        }
    )
def launch_verify_networks(self, data=None):
    """Start network verification for the first cluster and return its Task.

    :param data: optional network configuration; when omitted, the current
        configuration is fetched from the API and verified as-is
    :returns: the Task DB row matching the uuid returned by the handler
    :raises NotImplementedError: if no cluster has been created yet
    """
    if self.clusters:
        # Handler names differ between the two network providers
        net_urls = {
            "nova_network": {
                "config": "NovaNetworkConfigurationHandler",
                "verify": "NovaNetworkConfigurationVerifyHandler",
            },
            "neutron": {
                "config": "NeutronNetworkConfigurationHandler",
                "verify": "NeutronNetworkConfigurationVerifyHandler",
            },
        }
        provider = self.clusters[0].net_provider
        if data:
            nets = jsonutils.dumps(data)
        else:
            resp = self.app.get(
                reverse(net_urls[provider]["config"],
                        kwargs={"cluster_id": self.clusters[0].id}),
                headers=self.default_headers,
            )
            self.tester.assertEqual(200, resp.status_code)
            nets = resp.body
        resp = self.app.put(
            reverse(net_urls[provider]["verify"],
                    kwargs={"cluster_id": self.clusters[0].id}),
            nets,
            headers=self.default_headers,
        )
        task_uuid = resp.json_body["uuid"]
        return self.db.query(Task).filter_by(uuid=task_uuid).first()
    else:
        raise NotImplementedError("Nothing to verify - try creating cluster")
def test_roles_add_duplicated_to_db_directly(self):
    """A duplicate role insert hits the DB constraint and changes nothing."""
    self.env.create_release()
    resp = self.app.get(
        reverse('ReleaseCollectionHandler'),
        headers=self.default_headers
    )
    release_json = resp.json_body[0]
    old_roles = list(release_json["roles"])
    # Insert a role with an already-existing name straight into the DB
    role = Role(name=old_roles[0], release_id=release_json["id"])
    added = True
    try:
        self.db.add(role)
        self.db.commit()
    except IntegrityError:
        self.db.rollback()
        added = False
    self.assertFalse(added)
    # The API must still report the original, unchanged role list
    resp = self.app.get(
        reverse('ReleaseCollectionHandler'),
        headers=self.default_headers
    )
    release_json = resp.json_body[0]
    new_roles = list(release_json["roles"])
    self.assertEqual(old_roles, new_roles)
def test_enable_sriov_failed_with_non_kvm_hypervisor(self):
    """Enabling SR-IOV is rejected when the hypervisor is not KVM."""
    node = self.env.create_node(api=True, roles=['compute'])
    self.env.create_cluster(
        api=True,
        nodes=[node['id']],
        editable_attributes={
            'common': {
                'libvirt_type': {
                    'value': consts.HYPERVISORS.qemu
                }
            }
        }
    )
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    nics = resp.json_body
    # Try to switch SR-IOV on for the first interface
    nics[0]['interface_properties']['sriov']['enabled'] = True
    resp = self.app.put(
        reverse("NodeNICsHandler", kwargs={"node_id": node['id']}),
        jsonutils.dumps(nics),
        expect_errors=True,
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        "Only KVM hypervisor works with SR-IOV.",
        resp.json_body['message']
    )
def test_openstack_config_put(self):
    """PUT is not an allowed method on a single OpenStack config object."""
    url = reverse('OpenstackConfigHandler',
                  {'obj_id': self.configs[0].id})
    resp = self.app.put(url, expect_errors=True)
    self.assertEqual(resp.status_code, 405)
def test_end_passed_correctly_for_cluster(self):
    """The 'end' parameter reaches the cluster deployment tasks handler."""
    url = reverse('ClusterDeploymentTasksHandler',
                  kwargs={'obj_id': self.cluster.id})
    self.assert_passed_correctly(url, end='task')
def test_start_end_passed_correctly_release(self):
    """Both 'start' and 'end' reach the release deployment tasks handler."""
    url = reverse('ReleaseDeploymentTasksHandler',
                  kwargs={'obj_id': self.cluster.release.id})
    self.assert_passed_correctly(url, end='task', start='another_task')
def test_delete_tasks(self):
    """DELETE is not a supported method for cluster deployment tasks."""
    url = reverse('ClusterDeploymentTasksHandler',
                  kwargs={'obj_id': self.cluster.id})
    resp = self.app.delete(url,
                           headers=self.default_headers,
                           expect_errors=True)
    self.assertEqual(resp.status_code, 405)
def delete(self, cluster_id):
    """Issue a DELETE for the given cluster and return the raw response."""
    url = reverse('ClusterHandler', kwargs={'obj_id': cluster_id})
    return self.app.delete(url, headers=self.default_headers)
def _make_filter_url(cls, **kwargs):
    """Build the config collection URL with *kwargs* as query parameters."""
    base_url = reverse('OpenstackConfigCollectionHandler')
    query = urlparse.urlencode(kwargs)
    return '{0}?{1}'.format(base_url, query)
def test_openstack_config_delete_fail_deploy_running(self):
    """Deleting a config is forbidden while a deployment task is running."""
    deploy_task_id = self.create_running_deployment_task()
    url = reverse('OpenstackConfigHandler',
                  {'obj_id': self.configs[0].id})
    resp = self.app.delete(url, expect_errors=True)
    self.check_fail_deploy_running(deploy_task_id, resp)
def test_if_cluster_creates_correct_networks(self):
    """Creating a cluster produces the expected default network groups."""
    # Build a minimal release directly in the DB
    release = Release()
    release.version = "1111-6.0"
    release.name = u"release_name_" + str(release.version)
    release.description = u"release_desc" + str(release.version)
    release.operating_system = "CentOS"
    release.networks_metadata = self.env.get_default_networks_metadata()
    release.attributes_metadata = {
        "editable": {
            "keystone": {
                "admin_tenant": "admin"
            }
        },
        "generated": {
            "mysql": {
                "root_password": ""
            }
        }
    }
    release.vmware_attributes_metadata = {}
    self.db.add(release)
    self.db.commit()
    resp = self.app.post(
        reverse('ClusterCollectionHandler'),
        jsonutils.dumps({
            'name': 'cluster-name',
            'release': release.id,
        }),
        headers=self.default_headers)
    self.assertEqual(201, resp.status_code)
    # The admin network is global, so it is excluded from the comparison
    nets = self.db.query(NetworkGroup).filter(
        not_(NetworkGroup.name == "fuelweb_admin")).all()
    obtained = []
    for net in nets:
        obtained.append({
            'release': net.release,
            'name': net.name,
            'vlan_id': net.vlan_start,
            'cidr': net.cidr,
            'gateway': net.gateway
        })
    expected = [{
        'release': release.id,
        'name': u'public',
        'vlan_id': None,
        'cidr': '172.16.0.0/24',
        'gateway': '172.16.0.1'
    }, {
        'release': release.id,
        'name': u'fixed',
        'vlan_id': None,
        'cidr': None,
        'gateway': None
    }, {
        'release': release.id,
        'name': u'storage',
        'vlan_id': 102,
        'cidr': '192.168.1.0/24',
        'gateway': None
    }, {
        'release': release.id,
        'name': u'management',
        'vlan_id': 101,
        'cidr': '192.168.0.0/24',
        'gateway': None
    }]
    # Order-insensitive comparison of the network group descriptions
    self.assertItemsEqual(expected, obtained)
def test_check_public_networks(self):
    """Public IP range capacity check accounts for nodes and VIPs."""
    cluster = self.cluster
    self.env.create_nodes(
        2, api=True, roles=['controller'], cluster_id=cluster.id)
    self.env.create_nodes(
        2, api=True, roles=['compute'], cluster_id=cluster.id)
    # we have 3 controllers now
    self.assertEqual(
        sum('controller' in n.all_roles for n in self.env.nodes),
        3
    )
    attrs = cluster.attributes.editable
    self.assertEqual(
        attrs['public_network_assignment']['assign_to_all_nodes']['value'],
        False
    )
    self.assertFalse(
        objects.Cluster.should_assign_public_to_all_nodes(cluster))
    resp = self.env.neutron_networks_get(cluster.id)
    nets = resp.json_body
    # not enough IPs for 3 nodes and 2 VIPs
    self.find_net_by_name(nets, 'public')['ip_ranges'] = \
        [["172.16.0.2", "172.16.0.5"]]
    resp = self.env.neutron_networks_put(cluster.id, nets)
    self.assertEqual(resp.status_code, 200)
    self.assertRaises(
        errors.NetworkCheckError,
        task.CheckBeforeDeploymentTask._check_public_network,
        self.task)
    # enough IPs for 3 nodes and 2 VIPs
    self.find_net_by_name(nets, 'public')['ip_ranges'] = \
        [["172.16.0.2", "172.16.0.6"]]
    resp = self.env.neutron_networks_put(cluster.id, nets)
    self.assertEqual(resp.status_code, 200)
    self.assertNotRaises(
        errors.NetworkCheckError,
        task.CheckBeforeDeploymentTask._check_public_network,
        self.task)
    # with public assigned to every node the same range is too small again
    attrs['public_network_assignment']['assign_to_all_nodes']['value'] = \
        True
    resp = self.app.patch(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster.id}),
        params=jsonutils.dumps({'editable': attrs}),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    self.assertTrue(
        objects.Cluster.should_assign_public_to_all_nodes(cluster))
    self.assertRaises(
        errors.NetworkCheckError,
        task.CheckBeforeDeploymentTask._check_public_network,
        self.task)
def test_cluster_list_empty(self):
    """The cluster collection is an empty list when no clusters exist."""
    url = reverse('ClusterCollectionHandler')
    resp = self.app.get(url, headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    self.assertEqual([], resp.json_body)
def delete_handler(self, handler_name, handler_kwargs=None):
    """DELETE the reversed *handler_name* URL and assert a success status.

    :param handler_name: name of the URL handler to reverse
    :param handler_kwargs: optional kwargs for URL reversing
    :returns: the webtest response
    """
    # NOTE: handler_kwargs defaults to None instead of {} — a mutable
    # default argument is shared between calls, a classic Python pitfall.
    resp = self.app.delete(
        reverse(handler_name, kwargs=handler_kwargs or {}),
        headers=self.default_headers)
    self.assertIn(resp.status_code, (200, 202, 204))
    return resp
def test_graph_update(self):
    """PUT replaces the whole graph; PATCH updates only the given fields."""
    resp = self.app.put(
        reverse('DeploymentGraphHandler',
                kwargs={'obj_id': self.custom_graph.id}),
        jsonutils.dumps({
            'name': 'updated-graph-name',
            'node_filter': '$.status != "new"',
            'tasks': [{
                'id': 'test-task2',
                'type': 'puppet',
                'version': '2.0.0'
            }]
        }),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    # The response echoes the stored graph, including derived 'task_name'
    self.assertEqual(
        {
            'name': 'updated-graph-name',
            'node_filter': '$.status != "new"',
            'tasks': [{
                'id': 'test-task2',
                'type': 'puppet',
                'task_name': 'test-task2',
                'version': '2.0.0'
            }],
            'relations': [{
                'model': 'cluster',
                'model_id': self.cluster.id,
                'type': 'custom-graph'
            }],
            'id': self.custom_graph.id
        },
        resp.json_body)
    # PATCH changes 'name' and adds 'on_stop' but keeps tasks/node_filter
    resp = self.app.patch(
        reverse('DeploymentGraphHandler',
                kwargs={'obj_id': self.custom_graph.id}),
        jsonutils.dumps({
            'name': 'updated-graph-name2',
            'on_stop': {}
        }),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    self.assertEqual(
        {
            'name': 'updated-graph-name2',
            'node_filter': '$.status != "new"',
            'on_stop': {},
            'tasks': [{
                'id': 'test-task2',
                'type': 'puppet',
                'task_name': 'test-task2',
                'version': '2.0.0'
            }],
            'relations': [{
                'model': 'cluster',
                'model_id': self.cluster.id,
                'type': 'custom-graph'
            }],
            'id': self.custom_graph.id
        },
        resp.json_body)
def patch_handler(self, handler_name, request_params, handler_kwargs=None):
    """Issue a PATCH with a JSON body to the named handler.

    :param handler_name: handler name understood by ``reverse``
    :param request_params: JSON-serializable request payload
    :param handler_kwargs: optional URL kwargs for ``reverse``
    :returns: the webtest response object
    """
    # Fix: mutable default argument ({}) replaced with None sentinel
    # to avoid a single dict shared between calls; callers are
    # unaffected.
    if handler_kwargs is None:
        handler_kwargs = {}
    resp = self.app.patch(
        reverse(handler_name, kwargs=handler_kwargs),
        params=jsonutils.dumps(request_params),
        headers=self.default_headers)
    # Both synchronous (200) and async-accepted (202) are success here.
    self.assertIn(resp.status_code, (200, 202))
    return resp
def test_wrong_cluster(self):
    """A request for a nonexistent cluster id must yield 404."""
    url = reverse(self.handler_name, kwargs={'cluster_id': 99999})
    response = self.app.get(
        url,
        headers=self.default_headers,
        expect_errors=True)
    self.assertEqual(404, response.status_code)
def test_neutron_assignment_when_network_cfg_changed_then_node_added(self):
    """Default NIC-to-network assignment must follow VLAN tagging.

    Untagged networks need their own NIC; tagged networks can share
    one NIC. The layout is checked twice: after untagging
    'management', and after tagging 'public' and 'management'.
    """
    cluster = self.env.create_cluster(api=True,
                                      net_provider='neutron',
                                      net_segment_type='vlan')
    resp = self.env.neutron_networks_get(cluster['id'])
    nets = resp.json_body
    # Make 'management' untagged so it requires a dedicated NIC.
    for net in nets['networks']:
        if net['name'] == 'management':
            net['vlan_start'] = None
    resp = self.env.neutron_networks_put(cluster['id'], nets)
    self.assertEqual(resp.status_code, 200)
    task = resp.json_body
    self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
    # Register a node with three NICs; its first NIC MAC doubles as
    # the node MAC so it becomes the admin interface.
    mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta,
        [{'name': 'eth0', 'mac': mac},
         {'name': 'eth1', 'mac': self.env.generate_random_mac()},
         {'name': 'eth2', 'mac': self.env.generate_random_mac()}])
    node = self.env.create_node(api=True, meta=meta, mac=mac)
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'id': node['id'],
                          'cluster_id': cluster['id']}]),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    response = resp.json_body
    # Untagged 'public' and 'management' each claim their own NIC;
    # the tagged networks share the admin NIC.
    net_name_per_nic = [['fuelweb_admin', 'storage', 'private'],
                        ['public'],
                        ['management']]
    for i, nic in enumerate(sorted(response, key=lambda x: x['name'])):
        net_names = set([net['name']
                         for net in nic['assigned_networks']])
        self.assertEqual(set(net_name_per_nic[i]), net_names)
    # Second round: tag 'public' and 'management' with VLANs, then add
    # another node and expect all networks packed onto one NIC.
    for net in nets['networks']:
        if net['name'] == 'public':
            net['vlan_start'] = 111
        if net['name'] == 'management':
            net['vlan_start'] = 112
    resp = self.env.neutron_networks_put(cluster['id'], nets)
    self.assertEqual(resp.status_code, 200)
    task = resp.json_body
    self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
    mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta,
        [{'name': 'eth0', 'mac': mac},
         {'name': 'eth1', 'mac': self.env.generate_random_mac()},
         {'name': 'eth2', 'mac': self.env.generate_random_mac()}])
    node = self.env.create_node(api=True, meta=meta, mac=mac)
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'id': node['id'],
                          'cluster_id': cluster['id']}]),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    response = resp.json_body
    # All networks are now tagged (or admin), so everything lands on
    # the first NIC and the other two stay empty.
    net_name_per_nic = [[
        'fuelweb_admin', 'storage', 'public', 'management', 'private'
    ], [], []]
    for i, nic in enumerate(sorted(response, key=lambda x: x['name'])):
        net_names = set([net['name']
                         for net in nic['assigned_networks']])
        self.assertEqual(set(net_name_per_nic[i]), net_names)
def post_handler(self, handler_name, obj_data, handler_kwargs=None):
    """Issue a POST with a JSON body to the named handler.

    :param handler_name: handler name understood by ``reverse``
    :param obj_data: JSON-serializable object to create
    :param handler_kwargs: optional URL kwargs for ``reverse``
    :returns: the webtest response object
    """
    # Fix: mutable default argument ({}) replaced with None sentinel;
    # a shared default dict is a classic Python pitfall. Behavior for
    # existing callers is unchanged.
    if handler_kwargs is None:
        handler_kwargs = {}
    resp = self.app.post(
        reverse(handler_name, kwargs=handler_kwargs),
        jsonutils.dumps(obj_data),
        headers=self.default_headers)
    # Accept both plain success and resource-created statuses.
    self.assertIn(resp.status_code, (200, 201))
    return resp
def make_action_url(self, handler_name, node_uids):
    """Build the handler URL for this cluster with a nodes=... query."""
    base_url = reverse(
        handler_name, kwargs={'cluster_id': self.cluster.id})
    return base_url + make_query(nodes=node_uids)
def get_handler(self, handler_name, handler_kwargs=None):
    """Issue a GET to the named handler and assert a 200 response.

    :param handler_name: handler name understood by ``reverse``
    :param handler_kwargs: optional URL kwargs for ``reverse``
    :returns: the webtest response object
    """
    # Fix: mutable default argument ({}) replaced with None sentinel
    # so no dict instance is shared across calls; callers see the
    # same behavior.
    if handler_kwargs is None:
        handler_kwargs = {}
    resp = self.app.get(
        reverse(handler_name, kwargs=handler_kwargs),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    return resp
def test_deletion_clusters_one_by_one(self):
    """Two clusters deleted back to back are fully removed.

    Polls the DB until both cluster rows disappear, then checks the
    'done' notifications and that no tasks remain.
    """
    self.env.create(nodes_kwargs=[
        {
            "roles": ["compute"],
            "status": "ready",
            "progress": 100
        },
        {
            "roles": ["compute"],
            "status": "ready",
            "progress": 100
        },
        {
            "roles": ["compute"],
            "status": "ready",
            "progress": 100
        },
        {
            "roles": ["controller"],
            "status": "ready",
            "progress": 100
        },
        {
            "roles": ["controller"],
            "status": "ready",
            "progress": 100
        },
        {
            "roles": ["cinder"],
            "status": "ready",
            "progress": 100
        },
    ])
    cluster1_id = self.env.clusters[0].id
    self.env.create_cluster(api=True)
    cluster2_id = self.env.clusters[1].id
    # Capture names now; cluster objects vanish once deleted.
    cluster_names = [cluster.name for cluster in self.env.clusters]
    resp = self.app.delete(
        reverse('ClusterHandler', kwargs={'obj_id': cluster1_id}),
        headers=self.default_headers)
    self.assertEqual(202, resp.status_code)
    resp = self.app.delete(
        reverse('ClusterHandler', kwargs={'obj_id': cluster2_id}),
        headers=self.default_headers)
    self.assertEqual(202, resp.status_code)
    timer = time.time()
    timeout = 15
    clstr1 = self.db.query(models.Cluster).get(cluster1_id)
    clstr2 = self.db.query(models.Cluster).get(cluster2_id)
    # Poll until both rows are gone: refresh() raises once the row
    # has been deleted by the async deletion task, breaking the loop.
    while clstr1 or clstr2:
        time.sleep(1)
        try:
            self.db.refresh(clstr1 or clstr2)
        except Exception:
            break
        if time.time() - timer > timeout:
            raise Exception("Cluster deletion seems to be hanged")
    for name in cluster_names:
        # NOTE(review): this builds a Query object, which is never
        # None, so assertIsNotNone is vacuously true; presumably
        # .first() was intended -- confirm before strengthening.
        notification = self.db.query(models.Notification)\
            .filter(models.Notification.topic == "done")\
            .filter(models.Notification.message == "Environment '%s' and "
                    "all its nodes are deleted" % name)
        self.assertIsNotNone(notification)
    # All deletion tasks must have been cleaned up as well.
    tasks = self.db.query(models.Task).all()
    self.assertEqual(tasks, [])
def get_serialized_tasks(self, cluster_id, **kwargs):
    """GET serialized tasks for a cluster; kwargs become the query string.

    Errors are not raised so callers can assert on failure responses.
    """
    base_url = reverse(
        "SerializedTasksHandler", kwargs={'cluster_id': cluster_id})
    return self.app.get(base_url + make_query(**kwargs),
                        expect_errors=True)
def test_put_handler_with_one_node(self):
    """Networks moved off one NIC via collection PUT are persisted.

    Reads the node's NIC layout, moves every network from the node-MAC
    NIC onto the other NIC, PUTs the result and verifies the response
    mirrors the submitted layout.
    """
    cluster = self.env.create_cluster(api=True)
    mac = self.env.generate_random_mac()
    meta = {}
    self.env.set_interfaces_in_meta(
        meta,
        [{'name': 'eth0', 'mac': mac, 'pxe': True},
         {'name': 'eth1', 'mac': self.env.generate_random_mac()}])
    node = self.env.create_node(api=True, meta=meta, mac=mac,
                                cluster_id=cluster['id'])
    resp_get = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp_get.status_code, 200)
    # Networks currently assigned to the NIC whose MAC is the node MAC.
    # NOTE(review): filter(...)[0] only works on Python 2 where filter
    # returns a list -- confirm this suite is py2-only.
    a_nets = filter(lambda nic: nic['mac'] == mac,
                    resp_get.json_body)[0]['assigned_networks']
    # Strip the node-MAC NIC and pile its networks onto the other NIC.
    for resp_nic in resp_get.json_body:
        if resp_nic['mac'] == mac:
            resp_nic['assigned_networks'] = []
        else:
            resp_nic['assigned_networks'].extend(a_nets)
            resp_nic['assigned_networks'].sort()
    nodes_list = [{'id': node['id'], 'interfaces': resp_get.json_body}]
    resp_put = self.app.put(
        reverse('NodeCollectionNICsHandler'),
        jsonutils.dumps(nodes_list),
        headers=self.default_headers)
    self.assertEqual(resp_put.status_code, 200)
    self.assertEqual(len(resp_put.json_body), 1)
    resp_data = resp_put.json_body[0]
    self.assertEqual(resp_data['id'], nodes_list[0]['id'])

    def id_key(elem):
        # Sort key for pairing interfaces/networks by id.
        return elem['id']

    for resp_iface, node_iface in zip(
            sorted(resp_data['interfaces'], key=id_key),
            sorted(nodes_list[0]['interfaces'], key=id_key)):
        # Assert that the dicts are equal, except for assigned_networks
        # which is more complex structure and requires other checks.
        self.datadiff(resp_iface, node_iface,
                      ignore_keys='assigned_networks')
        for resp_net, node_net in zip(
                sorted(resp_iface['assigned_networks'], key=id_key),
                sorted(node_iface['assigned_networks'], key=id_key)):
            self.assertDictEqual(resp_net, node_net)
def _get_capacity_log_json(self):
    """Fetch the capacity log via the API and return its JSON body."""
    response = self.app.get(
        reverse('CapacityLogHandler'),
        headers=self.default_headers)
    return response.json_body
def test_NIC_change_offloading_modes(self):
    """Offloading modes reported by the agent can be overridden via API."""
    # Register a node with no interfaces at all.
    meta = self.env.default_metadata()
    meta["interfaces"] = []
    node = self.env.create_node(api=True, meta=meta)
    # Agent update introduces one NIC with three offloading modes,
    # all in the default (None) state.
    new_meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(new_meta, [
        {'name': 'new_nic',
         'mac': '00:00:00:00:00:00',
         'offloading_modes': [
             {
                 'name': 'mode_1',
                 'state': None,
                 "sub": []
             },
             {
                 'name': 'mode_2',
                 'state': None,
                 "sub": []
             },
             {
                 'name': 'mode_3',
                 'state': None,
                 "sub": []
             }
         ]}])
    node_data = {'mac': node['mac'], 'meta': new_meta}
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(node_data),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    # NIC handler must echo the agent-reported modes unchanged.
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(len(resp.json_body), 1)
    resp_nic = resp.json_body[0]
    nic = new_meta['interfaces'][0]
    self.assertEqual(resp_nic['offloading_modes'],
                     nic['offloading_modes'])
    # NOTE(review): 'node_id' is not an obvious kwarg for a collection
    # handler; presumably reverse() tolerates it here -- confirm.
    resp = self.app.get(
        reverse('NodeCollectionHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(len(resp.json_body), 1)
    resp_node = resp.json_body[0]
    # User-facing update: enable mode_1, disable mode_2, leave mode_3
    # at its default.
    new_nic = {
        'name': 'new_nic',
        'mac': '00:00:00:00:00:00',
        'offloading_modes': [
            {
                'name': 'mode_1',
                'state': True,
                "sub": []
            },
            {
                'name': 'mode_2',
                'state': False,
                "sub": []
            },
            {
                'name': 'mode_3',
                'state': None,
                "sub": []
            }
        ]
    }
    self.env.set_interfaces_in_meta(resp_node["meta"], [new_nic])
    resp_node.pop('group_id')
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([resp_node]),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    # The overridden states must now be visible via the NIC handler.
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(len(resp.json_body), 1)
    resp_nic = resp.json_body[0]
    self.assertEqual(
        resp_nic['offloading_modes'],
        new_nic['offloading_modes'])
def update_node_roles(roles):
    """Set the pending roles of the enclosing node via collection PUT."""
    payload = [{'id': node_db.id, 'pending_roles': roles}]
    response = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps(payload),
        headers=self.default_headers)
    self.assertEqual(200, response.status_code)
def test_nic_mac_swap(self):
    """Network assignment must follow the MAC when NIC MACs are swapped.

    After the agent reports swapped (and upper-cased) MACs, each MAC
    keeps its assigned networks while the NIC names are exchanged.
    """
    mac_eth0 = '00:11:22:dd:ee:ff'
    mac_eth1 = 'aa:bb:cc:33:44:55'
    eth0 = {
        'name': 'eth0',
        'mac': mac_eth0,
        'current_speed': 1,
        'state': 'up',
        'pxe': True
    }
    eth1 = {
        'name': 'eth1',
        'mac': mac_eth1,
        'current_speed': 1,
        'state': 'up',
        'pxe': False
    }
    # prepare metadata with our interfaces
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [eth0, eth1])
    # NOTE(prmtl) hack to have all mac set as we want
    # crete_node() will generate random mac for 1st iface
    # if we will not set it like that
    node_mac = meta['interfaces'][0]['mac']
    node = self.env.create_node(api=True, meta=meta, mac=node_mac)
    self.env.create_cluster(api=True, nodes=[node['id']])
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    original_nic_info = resp.json
    # swap macs, make them uppercase to check that we handle that correctly
    eth0['mac'], eth1['mac'] = eth1['mac'].upper(), eth0['mac'].upper()
    # update nodes with swapped macs
    new_meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(new_meta, [eth0, eth1])
    node_data = {'mac': node['mac'], 'meta': new_meta}
    self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(node_data),
        headers=self.default_headers)
    # check that networks are assigned to the same interfaces
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    updated_nic_info = resp.json
    for orig_iface in original_nic_info:
        updated_iface = next(
            iface for iface in updated_nic_info
            if iface['mac'] == orig_iface['mac'])
        # Fix: compare the original assignment with the UPDATED one.
        # The previous code compared orig_iface with itself, which is
        # vacuously true and verified nothing.
        self.assertEqual(
            orig_iface['assigned_networks'],
            updated_iface['assigned_networks'])
        # nic names were swapped
        self.assertNotEqual(orig_iface['name'], updated_iface['name'])
def test_remove_assigned_interface(self):
    """Dropping a NIC from node metadata must not hide the node.

    For each handler capable of updating a node, strip all interfaces
    except the admin one, confirm the node stays visible with one NIC,
    then restore the original interfaces via the agent handler.
    """
    def get_nodes():
        # List the first cluster's nodes through the API.
        resp = self.app.get(
            reverse('NodeCollectionHandler',
                    kwargs={'cluster_id': self.env.clusters[0].id}),
            headers=self.default_headers,
        )
        return resp.json_body

    self.env.create(nodes_kwargs=[{'api': True}])

    # check all possible handlers
    for handler in ('NodeAgentHandler',
                    'NodeHandler',
                    'NodeCollectionHandler'):

        # create node and check it availability
        nodes_data = get_nodes()
        self.assertEqual(len(nodes_data), 1)
        node_db = objects.Node.get_by_uid(nodes_data[0]['id'])

        # remove all interfaces except admin one
        adm_eth = self.env.network_manager._get_interface_by_network_name(
            node_db, 'fuelweb_admin')
        ifaces = list(nodes_data[0]['meta']['interfaces'])
        nodes_data[0]['meta']['interfaces'] = \
            [i for i in ifaces if i['name'] == adm_eth.name]

        # prepare put request
        data = {
            'id': nodes_data[0]['id'],
            'meta': nodes_data[0]['meta'],
        }
        # The collection handler expects a list of node dicts.
        if handler in ('NodeCollectionHandler', ):
            data = [data]
        if handler in ('NodeHandler', ):
            endpoint = reverse(handler, kwargs={'obj_id': data['id']})
        else:
            endpoint = reverse(handler)
        self.app.put(
            endpoint,
            jsonutils.dumps(data),
            headers=self.default_headers,
        )

        # check the node is visible for api
        nodes_data = get_nodes()
        self.assertEqual(len(nodes_data), 1)
        self.assertEqual(len(nodes_data[0]['meta']['interfaces']), 1)

        # restore removed interfaces
        nodes_data[0]['meta']['interfaces'] = ifaces
        self.app.put(
            reverse(
                'NodeAgentHandler',
            ),
            jsonutils.dumps({
                'id': nodes_data[0]['id'],
                'meta': nodes_data[0]['meta'],
            }),
            headers=self.default_headers,
        )

        # check node availability
        nodes_data = get_nodes()
        self.assertEqual(len(nodes_data), 1)
        self.assertItemsEqual(nodes_data[0]['meta']['interfaces'], ifaces)
def node_name_test(mac):
    """Create a node with *mac* and verify its auto-generated name.

    A freshly created node gets a default name of
    ``Untitled (<last 5 chars of its MAC>)``.
    """
    self.env.create_node(api=True, mac=mac)
    node = self.app.get(reverse('NodeCollectionHandler')).json_body[0]
    # Fix: derive the expected suffix from the 'mac' parameter rather
    # than an outer-scope 'node_mac', so the helper actually checks
    # the node it just created for any MAC passed in. Also replaced
    # the needless **{'mac': mac} with a plain keyword argument.
    self.assertEqual(node['name'], 'Untitled ({0})'.format(mac[-5:]))
def test_get_handler_with_wrong_nodeid(self):
    """The NIC handler answers 404 for an unknown node id."""
    url = reverse('NodeNICsHandler', kwargs={'node_id': 1})
    response = self.app.get(
        url,
        expect_errors=True,
        headers=self.default_headers)
    self.assertEqual(response.status_code, 404)
def test_incremental_older_fetch(self):
    """Older entries should be fetched incrementally."""
    # Three entries, oldest first in the file; each fetch below pages
    # exactly one entry further back (max_entries=1).
    log_entries = [
        [
            time.strftime(settings.UI_LOG_DATE_FORMAT),
            'LEVEL111',
            'text1',
        ],
        [
            time.strftime(settings.UI_LOG_DATE_FORMAT),
            'LEVEL222',
            'text2',
        ],
        [
            time.strftime(settings.UI_LOG_DATE_FORMAT),
            'LEVEL333',
            'text3',
        ],
    ]
    self.env.create_cluster(api=False)
    self._create_logfile_for_node(settings.LOGS[0], log_entries)
    # Byte length of the whole log file; used to verify the
    # 'from'/'to' offsets returned by the handler.
    total_len = len(''.join(map(self._format_log_entry, log_entries)))
    # Initial fetch: newest entry only.
    resp = self.app.get(
        reverse('LogEntryCollectionHandler'),
        params={
            'max_entries': 1,
            'source': settings.LOGS[0]['id'],
        },
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    response = resp.json_body
    self.assertEqual(response['entries'], [log_entries[2]])
    self.assertTrue(response['has_more'])
    self.assertEqual(response['to'], total_len)
    self.assertEqual(
        response['from'],
        total_len - len(self._format_log_entry(log_entries[2])))
    # First older fetch: middle entry; 'from' moves back by its size.
    resp = self.app.get(
        reverse('LogEntryCollectionHandler'),
        params={
            'fetch_older': True,
            'from': response['from'],
            'to': response['to'],
            'max_entries': 1,
            'source': settings.LOGS[0]['id'],
        },
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    response = resp.json_body
    self.assertEqual(response['entries'], [log_entries[1]])
    self.assertTrue(response['has_more'])
    self.assertEqual(response['to'], total_len)
    self.assertEqual(
        response['from'],
        total_len - len(self._format_log_entry(log_entries[2])) -
        len(self._format_log_entry(log_entries[1])))
    # Final older fetch drains the log: oldest entry is returned,
    # has_more flips to False, and the window spans the whole file.
    resp = self.app.get(
        reverse('LogEntryCollectionHandler'),
        params={
            'fetch_older': True,
            'from': response['from'],
            'to': response['to'],
            'max_entries': 1,
            'source': settings.LOGS[0]['id'],
        },
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    response = resp.json_body
    self.assertEqual(response['entries'], [log_entries[0]])
    self.assertFalse(response['has_more'])
    self.assertEqual(response['to'], total_len)
    self.assertEqual(response['from'], 0)
def test_node_update_empty_mac_or_id(self):
    """Collection PUT requires a MAC or an ID and validates MAC type."""
    node = self.env.create_node(api=False)
    # Neither 'mac' nor 'id' present -> validation error.
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'manufacturer': 'man0'}]),
        headers=self.default_headers,
        expect_errors=True)
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        resp.json_body["message"],
        "Neither MAC nor ID is specified"
    )
    # Explicit null MAC fails the JSON-schema type check even though
    # a valid id is supplied alongside it.
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'id': node.id,
                          'mac': None,
                          'manufacturer': 'man4'}]),
        headers=self.default_headers,
        expect_errors=True)
    self.assertEqual(resp.status_code, 400)
    self.assertIn(
        "schema['properties']['mac']",
        resp.json_body["message"]
    )
    self.assertIn(
        "None is not of type 'string'",
        resp.json_body["message"]
    )
    # MAC alone is a sufficient identifier.
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'mac': node.mac, 'manufacturer': 'man5'}]),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)
    # ID alone is a sufficient identifier.
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'id': node.id, 'manufacturer': 'man6'}]),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'mac': node.mac, 'manufacturer': 'man7'}]),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    # Both identifiers together are also accepted.
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'id': node.id,
                          'mac': node.mac,
                          'manufacturer': 'man8'}]),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)