def PUT(self, cluster_id):
    """Validate and apply a network configuration update for a cluster.

    :returns: JSONized Task object.
    :http: * 202 (network checking task created)
           * 404 (cluster not found in db)
    """
    data = jsonutils.loads(web.data())
    cluster = self.get_object_or_404(objects.Cluster, cluster_id)
    self.check_net_provider(cluster)
    self.check_if_network_configuration_locked(cluster)
    task_manager = CheckNetworksTaskManager(cluster_id=cluster.id)
    task = task_manager.execute(data)
    if task.status != consts.TASK_STATUSES.error:
        try:
            if "networks" in data:
                self.validator.validate_networks_update(
                    jsonutils.dumps(data))
            if "networking_parameters" in data:
                self.validator.validate_neutron_params(
                    jsonutils.dumps(data), cluster_id=cluster_id)
            objects.Cluster.get_network_manager(cluster).update(
                cluster, data)
        except Exception as exc:
            # set task status to error and update its corresponding data
            # NOTE: use the consts enum instead of the bare "error" string
            # for consistency with the status comparison above
            data = {"status": consts.TASK_STATUSES.error,
                    "progress": 100,
                    "message": six.text_type(exc)}
            objects.Task.update(task, data)
            logger.error(traceback.format_exc())
    raise self.http(202, objects.Task.to_json(task))
def test_cluster_node_list_update(self):
    """PUTting a new node list replaces the cluster's node membership."""
    node1 = self.env.create_node(api=False)
    node2 = self.env.create_node(api=False)
    cluster = self.env.create_cluster(api=False)

    # Assign the first node.  The request is expected to succeed, so the
    # previous expect_errors=True flag was misleading and is dropped --
    # a non-2xx response now fails fast inside app.put.
    resp = self.app.put(
        reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
        jsonutils.dumps({'nodes': [node1.id]}),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)
    nodes = self.db.query(Node).filter(Node.cluster == cluster).all()
    self.assertEqual(1, len(nodes))
    self.assertEqual(nodes[0].id, node1.id)

    # Replace the node list with the second node only.
    resp = self.app.put(
        reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
        jsonutils.dumps({'nodes': [node2.id]}),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)
    nodes = self.db.query(Node).filter(Node.cluster == cluster)
    self.assertEqual(1, nodes.count())
def PUT(self, cluster_id):
    """:returns: JSONized Task object.

    :http: * 202 (network checking task created)
           * 404 (cluster not found in db)
    """
    data = jsonutils.loads(web.data())
    if data.get("networks"):
        # the admin network is not editable via this handler
        data["networks"] = [
            net for net in data["networks"]
            if net.get("name") != "fuelweb_admin"
        ]
    cluster = self.get_object_or_404(objects.Cluster, cluster_id)
    self.check_net_provider(cluster)
    self.check_if_network_configuration_locked(cluster)
    task = CheckNetworksTaskManager(cluster_id=cluster.id).execute(data)
    if task.status != consts.TASK_STATUSES.error:
        try:
            serialized = jsonutils.dumps(data)
            if "networks" in data:
                self.validator.validate_networks_update(serialized)
            if "dns_nameservers" in data:
                self.validator.validate_dns_servers_update(serialized)
            net_manager = objects.Cluster.get_network_manager(cluster)
            net_manager.update(cluster, data)
        except Exception as exc:
            # mark the task as failed, keeping the failure details
            objects.Task.update(task, {
                "status": consts.TASK_STATUSES.error,
                "progress": 100,
                "message": six.text_type(exc),
            })
            logger.error(traceback.format_exc())
    raise self.http(202, objects.Task.to_json(task))
def test_node_timestamp_updated_only_by_agent(self):
    """Only the agent handler is allowed to refresh a node's timestamp."""
    node = self.env.create_node(api=False)
    initial_ts = node.timestamp

    # a regular collection PUT must not touch the timestamp
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([
            {'mac': node.mac, 'status': 'discover', 'manufacturer': 'old'}
        ]),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    node = self.db.query(Node).get(node.id)
    self.assertEqual(node.timestamp, initial_ts)

    # an agent PUT is expected to refresh the timestamp
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(
            {'mac': node.mac, 'status': 'discover', 'manufacturer': 'new'}
        ),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    node = self.db.query(Node).get(node.id)
    self.assertNotEqual(node.timestamp, initial_ts)
    self.assertEqual('new', node.manufacturer)
def upgrade_releases(connection):
    """Rewrite every release row with 5.1-format metadata."""
    select = text(
        """SELECT id, attributes_metadata, roles_metadata from releases"""
    )
    update = text(
        """UPDATE releases
        SET attributes_metadata = :attrs,
            roles_metadata = :roles,
            wizard_metadata = :wiz_meta
        WHERE id = :id"""
    )
    releases = connection.execute(select)

    # reading fixture files in loop is in general a bad idea and as long as
    # wizard_metadata is the same for all existing releases getting it can
    # be moved outside of the loop
    wizard_meta = upgrade_release_wizard_metadata_50_to_51()

    for rel_id, attrs_json, roles_json in releases:
        upgraded_attrs = upgrade_release_attributes_50_to_51(
            jsonutils.loads(attrs_json))
        upgraded_roles = upgrade_release_roles_50_to_51(
            jsonutils.loads(roles_json))
        connection.execute(
            update,
            id=rel_id,
            attrs=jsonutils.dumps(upgraded_attrs),
            roles=jsonutils.dumps(upgraded_roles),
            wiz_meta=jsonutils.dumps(wizard_meta),
        )
def test_stats_sending_enabled(self):
    """must_send_stats follows the user's choices in master node settings."""
    self.assertEqual(StatsSender().must_send_stats(), False)

    resp = self.app.get(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    settings_data = resp.json_body

    def push_settings(payload):
        # PUT the settings back and make sure the API accepted them.
        put_resp = self.app.put(
            reverse("MasterNodeSettingsHandler"),
            headers=self.default_headers,
            params=jsonutils.dumps(payload)
        )
        self.assertEqual(200, put_resp.status_code)

    statistics = settings_data["settings"]["statistics"]

    # emulate user confirmed settings in UI
    statistics["user_choice_saved"]["value"] = True
    push_settings(settings_data)
    self.assertEqual(StatsSender().must_send_stats(), True)

    # emulate user disabled statistics sending
    statistics["send_anonymous_statistic"]["value"] = False
    push_settings(settings_data)
    self.assertEqual(StatsSender().must_send_stats(), False)
def test_node_agent_api(self):
    """Agent PUT updates an existing node; POST registers a new one."""
    self.env.create_node(
        api=False,
        status='provisioning',
        meta=self.env.default_metadata()
    )
    node_db = self.env.nodes[0]

    # agent update of the already-known node
    agent_payload = {
        'mac': node_db.mac,
        'status': 'discover',
        'manufacturer': 'new'
    }
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(agent_payload),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)

    # registration of a completely new node
    node_id = '080000000003'
    resp = self.app.post(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps({
            'id': node_id,
            'mac': self.env.generate_random_mac(),
            'status': 'discover'
        }),
        headers=self.default_headers)
    self.assertEqual(201, resp.status_code)
def cast(name, message, service=False):
    """Publish *message* to the orchestrator queue via RPC.

    :param name: routing key of the target queue
    :param message: payload to be JSON-serialized and published
    :param service: if True, use the service exchange/queue pair
    """
    logger.debug(
        "RPC cast to orchestrator:\n{0}".format(
            jsonutils.dumps(message, indent=4)
        )
    )
    # NOTE(review): debug dump of the outgoing message to a fixed path --
    # looks like a testing leftover; consider removing it.  Converted to a
    # context manager so the handle is closed even if write() raises.
    with open('/opt/queuemsg.txt', 'w') as dump_file:
        dump_file.write(jsonutils.dumps(message, indent=4))

    use_queue = naily_queue if not service else naily_service_queue
    use_exchange = naily_exchange if not service else naily_service_exchange
    with Connection(conn_str) as conn:
        with conn.Producer(serializer='json') as producer:
            publish = functools.partial(
                producer.publish, message,
                exchange=use_exchange,
                routing_key=name,
                declare=[use_queue])
            try:
                publish()
            except amqp_exceptions.PreconditionFailed as e:
                logger.warning(six.text_type(e))
                # (dshulyak) we should drop both exchanges/queues in order
                # for astute to be able to recover temporary queues
                utils.delete_entities(
                    conn, naily_service_exchange, naily_service_queue,
                    naily_exchange, naily_queue)
                publish()
def test_unassignment(self):
    """Node unassignment: happy path plus invalid node/cluster ids."""
    cluster = self.env.create(
        cluster_kwargs={"api": True},
        nodes_kwargs=[{}]
    )
    node = self.env.nodes[0]

    def unassign(cluster_id, node_ids, expect_errors=False):
        # POST an unassignment request for the given node ids.
        return self.app.post(
            reverse(
                'NodeUnassignmentHandler',
                kwargs={'cluster_id': cluster_id}
            ),
            jsonutils.dumps([{'id': nid} for nid in node_ids]),
            headers=self.default_headers,
            expect_errors=expect_errors
        )

    # correct unassignment
    resp = unassign(cluster['id'], [node.id])
    self.assertEqual(200, resp.status_code)
    self.assertEqual(node.cluster, None)
    self.assertEqual(node.pending_roles, [])

    # Test with invalid node ids
    for bad_id in (0, node.id + 50):
        resp = unassign(cluster['id'], [bad_id], expect_errors=True)
        self.assertEqual(400, resp.status_code)

    # Test with invalid cluster id
    resp = unassign(cluster['id'] + 5, [node.id], expect_errors=True)
    self.assertEqual(resp.status_code, 404)

    # Test with wrong cluster id (node belongs to another cluster)
    self.env.create(
        cluster_kwargs={"api": True},
        nodes_kwargs=[{}]
    )
    other_node = self.env.clusters[1].nodes[0]
    resp = unassign(cluster['id'], [other_node.id], expect_errors=True)
    self.assertEqual(resp.status_code, 400)
def test_attributes_update_put(self):
    """PUT on cluster attributes: editable ok, generated/bad shape -> 400."""
    cluster_id = self.env.create_cluster(api=True)['id']
    cluster_db = self.env.clusters[0]
    attrs_url = reverse(
        'ClusterAttributesHandler',
        kwargs={'cluster_id': cluster_id})

    resp = self.app.get(attrs_url, headers=self.default_headers)
    self.assertEqual(200, resp.status_code)

    # editable attributes can be replaced
    resp = self.app.put(
        attrs_url,
        params=jsonutils.dumps({'editable': {"foo": "bar"}}),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    attrs = objects.Cluster.get_attributes(cluster_db)
    self.assertEqual("bar", attrs.editable["foo"])
    attrs.editable.pop('foo')
    self.assertEqual(attrs.editable, {})

    # 400 on generated update
    resp = self.app.put(
        attrs_url,
        params=jsonutils.dumps({'generated': {"foo": "bar"}}),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)

    # 400 if editable is not dict
    resp = self.app.put(
        attrs_url,
        params=jsonutils.dumps({'editable': ["foo", "bar"]}),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)
def GET(self):
    """Returns Fuel Key data

    :returns: base64 of FUEL commit SHA, release version and Fuel UUID.
    :http: * 200 (OK)
    """
    key_data = {
        "sha": str(settings.VERSION['nailgun_sha']),
        "release": str(settings.VERSION['release']),
        "uuid": str(settings.FUEL_KEY)
    }
    # sign the unsigned payload first, then embed the signature into it
    # (the right-hand side is evaluated before the key is assigned)
    key_data["signature"] = base64.b64encode(jsonutils.dumps(key_data))
    return {"key": base64.b64encode(jsonutils.dumps(key_data))}
def test_version_handler(self):
    """FuelKeyHandler returns the expected signed, base64-encoded key."""
    resp = self.app.get(
        reverse("FuelKeyHandler"),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)

    fuel_release = "0.1"
    key_data = {"sha": "12345", "release": fuel_release, "uuid": "uuid"}
    # the signature is the base64 of the payload *without* the signature
    # (right-hand side is evaluated before the key is assigned)
    key_data["signature"] = base64.b64encode(jsonutils.dumps(key_data))

    response = resp.json_body
    self.assertEqual(
        response,
        {"key": base64.b64encode(jsonutils.dumps(key_data))})
    decoded = jsonutils.loads(base64.b64decode(response["key"]))
    self.assertEqual(decoded["release"], fuel_release)
def test_remove_assigned_interface(self):
    """Removing all non-admin NICs via each handler keeps the node visible,
    and restoring the interfaces through the agent brings them all back.
    """
    def get_nodes():
        # Fetch the node collection of the first cluster via the REST API.
        resp = self.app.get(
            reverse("NodeCollectionHandler",
                    kwargs={"cluster_id": self.env.clusters[0].id}),
            headers=self.default_headers,
        )
        return jsonutils.loads(resp.body)

    self.env.create(nodes_kwargs=[{"api": True}])

    # check all possible handlers
    for handler in ("NodeAgentHandler",
                    "NodeHandler",
                    "NodeCollectionHandler"):

        # create node and check it availability
        nodes_data = get_nodes()
        self.assertEqual(len(nodes_data), 1)

        # remove all interfaces except admin one
        adm_eth = self.env.network_manager._get_interface_by_network_name(
            nodes_data[0]["id"], "fuelweb_admin")
        ifaces = list(nodes_data[0]["meta"]["interfaces"])
        nodes_data[0]["meta"]["interfaces"] = [
            i for i in ifaces if i["name"] == adm_eth.name]

        # prepare put request
        data = {
            "id": nodes_data[0]["id"],
            "meta": nodes_data[0]["meta"],
        }
        # the collection handler expects a list of nodes, not a single dict
        if handler in ("NodeCollectionHandler",):
            data = [data]

        # NodeHandler addresses a single node by its id in the URL
        if handler in ("NodeHandler",):
            endpoint = reverse(handler, kwargs={"obj_id": data["id"]})
        else:
            endpoint = reverse(handler)

        self.app.put(
            endpoint,
            jsonutils.dumps(data),
            headers=self.default_headers)

        # check the node is visible for api
        nodes_data = get_nodes()
        self.assertEqual(len(nodes_data), 1)
        self.assertEqual(len(nodes_data[0]["meta"]["interfaces"]), 1)

        # restore removed interfaces
        nodes_data[0]["meta"]["interfaces"] = ifaces
        self.app.put(
            reverse("NodeAgentHandler"),
            jsonutils.dumps({
                "id": nodes_data[0]["id"],
                "meta": nodes_data[0]["meta"],
            }),
            headers=self.default_headers,
        )

        # check node availability
        nodes_data = get_nodes()
        self.assertEqual(len(nodes_data), 1)
        self.assertItemsEqual(nodes_data[0]["meta"]["interfaces"], ifaces)
def PUT(self, cluster_id):
    """:returns: JSONized Task object.

    :http: * 202 (network checking task created)
           * 404 (cluster not found in db)
    """
    data = jsonutils.loads(web.data())
    if data.get("networks"):
        # the admin network must not be updated through this handler
        data["networks"] = [
            n for n in data["networks"]
            if n.get("name") != "fuelweb_admin"
        ]
    cluster = self.get_object_or_404(Cluster, cluster_id)
    self.check_net_provider(cluster)
    self.check_if_network_configuration_locked(cluster)
    task_manager = CheckNetworksTaskManager(cluster_id=cluster.id)
    task = task_manager.execute(data)
    # only apply the update when the verification task did not fail
    if task.status != 'error':
        try:
            if 'networks' in data:
                self.validator.validate_networks_update(
                    jsonutils.dumps(data)
                )
            if 'dns_nameservers' in data:
                self.validator.validate_dns_servers_update(
                    jsonutils.dumps(data)
                )
            objects.Cluster.get_network_manager(
                cluster
            ).update(cluster, data)
        except Exception as exc:
            # set task status to error and update its corresponding data
            data = {'status': 'error',
                    'progress': 100,
                    'message': six.text_type(exc)}
            objects.Task.update(task, data)
            logger.error(traceback.format_exc())

            # TODO(enchantner): research this behaviour
            # NOTE(review): task.status was just set to 'error' above, so
            # the else/commit branch appears unreachable here -- confirm.
            if task.status == 'error':
                db().rollback()
            else:
                db().commit()

    raise self.http(202, Task.to_json(task))
def test_node_create_mac_validation(self):
    """POSTing nodes with various MAC formats yields the expected codes."""
    # entry format: (mac_address, http_response_code)
    cases = (
        # invalid macaddresses
        ('60a44c3528ff', 400),
        ('60:a4:4c:35:28', 400),
        ('60:a4:4c:35:28:fg', 400),
        ('76:DC:7C:CA:G4:75', 400),
        ('76-DC-7C-CA-G4-75', 400),
        # valid macaddresses
        ('60:a4:4c:35:28:ff', 201),
        ('48-2C-6A-1E-59-3D', 201),
    )
    for mac, expected_code in cases:
        response = self.app.post(
            reverse('NodeCollectionHandler'),
            jsonutils.dumps({
                'mac': mac,
                'status': 'discover',
            }),
            headers=self.default_headers,
            expect_errors=(expected_code != 201)
        )
        self.assertEqual(response.status_code, expected_code)
def test_put_handler_with_one_node(self):
    """Reassigning networks between NICs of a single node via PUT."""
    cluster = self.env.create_cluster(api=True)
    admin_mac = self.env.generate_random_mac()
    meta = {}
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': admin_mac},
        {'name': 'eth1', 'mac': self.env.generate_random_mac()}])
    node = self.env.create_node(api=True, meta=meta, mac=admin_mac,
                                cluster_id=cluster['id'])

    resp_get = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp_get.status_code, 200)

    nics = resp_get.json_body
    # networks currently assigned to the admin NIC
    admin_nets = filter(
        lambda nic: nic['mac'] == admin_mac, nics)[0]['assigned_networks']
    # move everything off the admin NIC onto the other one(s)
    for nic in nics:
        if nic['mac'] == admin_mac:
            nic['assigned_networks'] = []
        else:
            nic['assigned_networks'].extend(admin_nets)
            nic['assigned_networks'].sort()

    nodes_list = [{'id': node['id'], 'interfaces': nics}]
    resp_put = self.app.put(
        reverse('NodeCollectionNICsHandler'),
        jsonutils.dumps(nodes_list),
        headers=self.default_headers)
    self.assertEqual(resp_put.status_code, 200)
    self.assertEqual(resp_put.json_body, nodes_list)
def test_reset_node_pending_statuses(self):
    """Environment reset re-arms pending_addition and drops pending_deletion."""
    self.env.create(
        cluster_kwargs={},
        nodes_kwargs=[
            {"pending_addition": True},
        ]
    )
    cluster = self.env.clusters[0]
    node = self.env.nodes[0]

    # deploy the environment and wait for completion
    deployment = self.env.launch_deployment()
    self.env.wait_ready(deployment, 60)

    # mark the node as pending_deletion via the collection handler
    self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{
            'id': node.id,
            'cluster_id': cluster.id,
            'pending_deletion': True,
        }]),
        headers=self.default_headers
    )

    # reset the environment and wait for completion
    reset = self.env.reset_environment()
    self.env.wait_ready(reset, 60)

    # the node must again be pending addition, not deletion
    self.env.refresh_nodes()
    self.assertEqual(node.pending_addition, True)
    self.assertEqual(node.pending_deletion, False)
def test_node_update_ext_mac(self):
    """Updating a node with a new MAC persists the new MAC and the meta."""
    meta = self.env.default_metadata()
    node1 = self.env.create_node(
        api=False,
        mac=meta["interfaces"][0]["mac"],
        meta={}
    )
    node1_json = {
        "mac": self.env.generate_random_mac(),
        "meta": meta
    }
    # We want to be sure that new mac is not equal to old one
    self.assertNotEqual(node1.mac, node1_json["mac"])

    # Here we are trying to update node.  The request is expected to
    # succeed, so the previous expect_errors=True flag was misleading and
    # is dropped -- a non-2xx response now fails fast inside app.put.
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([node1_json]),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)

    # Here we are checking if node mac is successfully updated
    self.assertEqual(node1_json["mac"], resp.json_body[0]["mac"])
    self.assertEqual(meta, resp.json_body[0]["meta"])
def test_volumes_update_after_roles_assignment(self):
    """Assigning roles to a node must regenerate its volume groups."""
    self.env.create(
        nodes_kwargs=[
            {"cluster_id": None}
        ]
    )
    node_db = self.env.nodes[0]
    before = self.get(node_db.id)

    # assign compute + cinder roles to the node
    self.app.post(
        reverse(
            'NodeAssignmentHandler',
            kwargs={'cluster_id': self.env.clusters[0].id}
        ),
        jsonutils.dumps([
            {"id": node_db.id, "roles": ['compute', 'cinder']}
        ]),
        headers=self.default_headers
    )

    after = self.get(node_db.id)
    self.assertNotEqual(self.get_vgs(before), self.get_vgs(after))
def dump_environment_resp(cls, **kwargs):
    """Process an RPC reply for the diagnostic snapshot (dump) task."""
    logger.info(
        "RPC method dump_environment_resp received: %s" %
        jsonutils.dumps(kwargs)
    )
    task = objects.Task.get_by_uuid(
        kwargs.get('task_uuid'), fail_if_not_found=True)
    status = kwargs.get('status')

    if status == 'error':
        error = kwargs.get('error')
        notifier.notify('error', error)
        objects.Task.update(task, {
            'status': status,
            'progress': 100,
            'message': error,
        })
    elif status == 'ready':
        # expose only the file name of the produced snapshot
        dumpfile = os.path.basename(kwargs.get('msg'))
        notifier.notify('done', 'Snapshot is ready. '
                        'Visit Support page to download')
        objects.Task.update(task, {
            'status': status,
            'progress': kwargs.get('progress'),
            'message': '/dump/{0}'.format(dumpfile),
        })
def test_occurs_error_not_enough_osds_for_ceph(self):
    """Deployment fails when OSD count is below the replication factor."""
    cluster = self.env.create(
        nodes_kwargs=[
            {'roles': ['controller', 'ceph-osd'],
             'pending_addition': True}])

    # enable Ceph volumes with a replication factor of 3
    storage_settings = {
        'editable': {
            'storage': {
                'volumes_ceph': {'value': True},
                'osd_pool_size': {'value': 3},
            },
        },
    }
    self.app.patch(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster['id']}),
        params=jsonutils.dumps(storage_settings),
        headers=self.default_headers)

    task = self.env.launch_deployment()
    self.assertEqual(task.status, 'error')
    self.assertEqual(
        task.message,
        'Number of OSD nodes (1) cannot be less than '
        'the Ceph object replication factor (3). '
        'Please either assign ceph-osd role to more nodes, '
        'or reduce Ceph replication factor in the Settings tab.')
def test_node_disk_amount_regenerates_volumes_info_if_new_disk_added(self):
    """Reporting an extra disk via the agent regenerates the volume layout."""
    cluster = self.env.create_cluster(api=True)
    self.env.create_node(
        api=True,
        roles=['compute'],  # vgs: os, vm
        cluster_id=cluster['id'])
    node_db = self.env.nodes[0]
    self.assertEqual(len(self.get(node_db.id)), 6)

    # report one more physical disk through the agent handler
    new_meta = node_db.meta.copy()
    new_meta['disks'].append({
        'size': 1000022933376,
        'model': 'SAMSUNG B00B135',
        'name': 'sda',
        'disk': 'disk/id/b00b135'})
    self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps({
            "mac": node_db.mac,
            "meta": new_meta}),
        headers=self.default_headers)
    self.env.refresh_nodes()

    disks = self.get(node_db.id)
    self.assertEqual(len(disks), 7)

    # check all groups on all disks
    expected_vgs = ['os', 'vm']
    for disk in disks:
        self.assertEqual(len(disk['volumes']), len(expected_vgs))
def test_get_handler_with_incompleted_iface_data(self):
    """NICs with incomplete name/mac data are filtered out of the response."""
    meta = self.env.default_metadata()
    meta["interfaces"] = []
    node = self.env.create_node(api=True, meta=meta)

    incomplete_iface_sets = [
        {'interfaces': [{'name': '', 'mac': '00:00:00'}]},
        {'interfaces': [{'name': 'eth0', 'mac': ''}]},
        {'interfaces': [{'mac': '00:00:00'}]},
        {'interfaces': [{'name': 'eth0'}]}
    ]
    for iface_set in incomplete_iface_sets:
        # push the broken interface description through the agent API
        meta = self.env.default_metadata()
        meta.update(iface_set)
        resp = self.app.put(
            reverse('NodeAgentHandler'),
            jsonutils.dumps({'mac': node['mac'], 'meta': meta}),
            expect_errors=True,
            headers=self.default_headers
        )
        self.assertEqual(resp.status_code, 200)

        # the NICs endpoint must not expose any of the broken interfaces
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers
        )
        self.assertEqual(resp.json_body, [])
def test_network_assignment_when_node_added(self):
    """Networks are auto-assigned to NICs when a node joins a cluster."""
    cluster = self.env.create_cluster(api=True)
    admin_mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta,
        [{"name": "eth0", "mac": admin_mac},
         {"name": "eth1", "mac": self.env.generate_random_mac()}]
    )
    node = self.env.create_node(api=True, meta=meta, mac=admin_mac)

    # attach the node to the cluster
    resp = self.app.put(
        reverse("NodeCollectionHandler"),
        jsonutils.dumps([{"id": node["id"], "cluster_id": cluster["id"]}]),
        headers=self.default_headers,
    )
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse("NodeNICsHandler", kwargs={"node_id": node["id"]}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    for nic in jsonutils.loads(resp.body):
        assigned_names = [net["name"] for net in nic["assigned_networks"]]
        if nic["mac"] == admin_mac:
            # the admin network lands on the NIC matching the node's mac
            self.assertTrue("fuelweb_admin" in assigned_names)
        else:
            self.assertTrue("public" in assigned_names)
        self.assertGreater(len(nic["assigned_networks"]), 0)
def test_NIC_updates_by_agent(self):
    """Agent-reported NIC changes overwrite the stored interface data."""
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': '12345', 'current_speed': 1,
         'state': 'up'}])
    node = self.env.create_node(api=True, meta=meta)

    # same mac, but renamed and with different speed/state attributes
    new_meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(new_meta, [
        {'name': 'new_nic', 'mac': '12345', 'current_speed': 10,
         'max_speed': 10, 'state': 'down'}])
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps({'mac': node['mac'], 'meta': new_meta}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(len(resp.json_body), 1)

    reported = new_meta['interfaces'][0]
    stored = resp.json_body[0]
    for attr in ('mac', 'current_speed', 'max_speed', 'state'):
        self.assertEqual(stored[attr], reported[attr])
    for conn in ('assigned_networks', ):
        self.assertEqual(stored[conn], [])
def test_NIC_adds_by_agent(self):
    """A NIC newly reported by the agent appears in the node's NIC list."""
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': '12345', 'current_speed': 1,
         'state': 'up'}])
    node = self.env.create_node(api=True, meta=meta)

    # the agent now reports one extra interface
    meta['interfaces'].append({'name': 'new_nic', 'mac': '643'})
    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps({'mac': node['mac'], 'meta': meta}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(len(resp.json_body), len(meta['interfaces']))

    for nic in meta['interfaces']:
        stored = filter(
            lambda i: i['mac'] == nic['mac'], resp.json_body
        )[0]
        self.assertEqual(stored['mac'], nic['mac'])
        # compared via .get so attributes absent from the agent payload
        # default to None on the expected side
        self.assertEqual(stored['current_speed'], nic.get('current_speed'))
        self.assertEqual(stored['max_speed'], nic.get('max_speed'))
        self.assertEqual(stored['state'], nic.get('state'))
        for conn in ('assigned_networks', ):
            self.assertEqual(stored[conn], [])
def test_roles_failed_to_delete_assigned(self):
    """A role assigned to nodes cannot be removed from its release."""
    self.env.create(
        nodes_kwargs=[
            {"status": "ready", "roles": ["controller"]}
        ]
    )
    resp = self.app.get(
        reverse('ReleaseCollectionHandler'),
        headers=self.default_headers
    )
    release = jsonutils.loads(resp.body)[0]

    # try to drop the "controller" role from the release definition
    remaining_roles = set(release["roles"])
    remaining_roles.remove("controller")
    release["roles"] = list(remaining_roles)

    resp = self.app.put(
        reverse(
            'ReleaseHandler',
            kwargs={"obj_id": release["id"]}
        ),
        jsonutils.dumps(release),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        resp.body,
        "Cannot delete roles already assigned to nodes: controller"
    )
def test_network_assignment_when_node_added(self):
    """Default network-to-NIC assignment happens on cluster membership."""
    cluster = self.env.create_cluster(api=True)
    mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta,
        [{'name': 'eth0', 'mac': mac},
         {'name': 'eth1', 'mac': self.env.generate_random_mac()}])
    node = self.env.create_node(api=True, meta=meta, mac=mac)

    # attach the node to the cluster
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'id': node['id'], 'cluster_id': cluster['id']}]),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    for nic in resp.json_body:
        names = [net['name'] for net in nic['assigned_networks']]
        # admin network on the matching NIC, public on the rest
        expected = 'fuelweb_admin' if nic['mac'] == mac else 'public'
        self.assertTrue(expected in names)
        self.assertGreater(len(nic['assigned_networks']), 0)
def test_update(self):
    """Bulk PUT marks several notifications as read."""
    cluster = self.env.create_cluster(api=False)
    n0 = self.env.create_notification()
    n1 = self.env.create_notification(cluster_id=cluster.id)

    payload = [
        {'id': n0.id, 'status': 'read'},
        {'id': n1.id, 'status': 'read'},
    ]
    resp = self.app.put(
        reverse('NotificationCollectionHandler'),
        jsonutils.dumps(payload),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    self.assertEqual(len(resp.json_body), 2)

    # responses may come back in any order; map them by id
    by_id = dict((item['id'], item) for item in resp.json_body)
    rn0 = by_id[n0.id]
    rn1 = by_id[n1.id]

    self.assertEqual(rn1['cluster'], n1.cluster_id)
    self.assertEqual(rn1['status'], 'read')
    self.assertIsNone(rn0.get('cluster', None))
    self.assertEqual(rn0['status'], 'read')
def test_partial_user_contacts_info(self):
    """Saved contact details show up in the reported installation info."""
    resp = self.app.get(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    settings_data = resp.json_body

    # emulate user enabled contact info sending to support team
    stats = settings_data["settings"]["statistics"]
    stats["user_choice_saved"]["value"] = True
    stats["send_user_info"]["value"] = True
    name = "user"
    email = "*****@*****.**"
    stats["name"]["value"] = name
    stats["email"]["value"] = email

    resp = self.app.put(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers,
        params=jsonutils.dumps(settings_data)
    )
    self.assertEqual(200, resp.status_code)
    self.assertDictEqual(
        InstallationInfo().get_installation_info()['user_information'],
        {
            'contact_info_provided': True,
            'name': name,
            'email': email,
            'company': ''
        }
    )
def test_node_update_empty_mac_or_id(self):
    """Node updates must carry a usable MAC and/or ID reference.

    The nine near-identical request/assert pairs of the original are
    folded into a helper plus two payload tables; the request sequence
    and assertions are unchanged.
    """
    node = self.env.create_node(api=False)

    def put_single(node_data, expect_errors=False):
        # PUT a one-element node list and return the response.
        return self.app.put(
            reverse('NodeCollectionHandler'),
            jsonutils.dumps([node_data]),
            headers=self.default_headers,
            expect_errors=expect_errors)

    # neither identifier at all -> explicit error message
    resp = put_single({'manufacturer': 'man0'}, expect_errors=True)
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(resp.body, "Neither MAC nor ID is specified")

    # explicit None identifiers are rejected as well
    invalid_payloads = [
        {'mac': None, 'manufacturer': 'man1'},
        {'id': None, 'manufacturer': 'man2'},
        {'mac': None, 'id': None, 'manufacturer': 'man3'},
        {'id': node.id, 'mac': None, 'manufacturer': 'man4'},
    ]
    for payload in invalid_payloads:
        resp = put_single(payload, expect_errors=True)
        self.assertEqual(resp.status_code, 400)

    # any real identifier (mac, id or both) is accepted
    valid_payloads = [
        {'mac': node.mac, 'manufacturer': 'man5'},
        {'id': node.id, 'manufacturer': 'man6'},
        {'mac': node.mac, 'manufacturer': 'man7'},
        {'id': node.id, 'mac': node.mac, 'manufacturer': 'man8'},
    ]
    for payload in valid_payloads:
        resp = put_single(payload)
        self.assertEqual(resp.status_code, 200)
def get_release(self, release):
    """Serialize *release* to its JSON string representation."""
    serialized = jsonutils.dumps(release)
    return serialized
def build_json_response(data):
    """Set the JSON content type and serialize *data* when needed.

    :param data: response payload; dicts and lists are JSON-encoded,
        anything else is returned unchanged (assumed pre-serialized)
    :returns: the JSON string, or the original object untouched
    """
    web.header('Content-Type', 'application/json')
    # isinstance instead of a type() membership test: also covers
    # dict/list subclasses (e.g. OrderedDict), backward-compatible
    if isinstance(data, (dict, list)):
        return jsonutils.dumps(data)
    return data
def test_neutron_deploy_cast_with_right_args(self, mocked_rpc):
    """End-to-end check of the RPC payload produced by a Neutron/GRE deploy.

    Builds a 6-node HA cluster, independently reconstructs the expected
    ``provision`` and ``deploy`` RPC messages, launches deployment, and
    diffs the reconstructed messages against what was actually cast to
    ``nailgun.task.manager.rpc.cast`` (``mocked_rpc``).
    """
    self.env.create(
        release_kwargs={'version': "2014.1.1-5.1"},
        cluster_kwargs={
            'net_provider': 'neutron',
            'net_segment_type': 'gre'
        },
        nodes_kwargs=[
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller', 'cinder'], 'pending_addition': True},
            {'roles': ['compute', 'cinder'], 'pending_addition': True},
            {'roles': ['compute'], 'pending_addition': True},
            {'roles': ['cinder'], 'pending_addition': True}
        ])
    cluster_db = self.env.clusters[0]

    # Enable public network on every node via the attributes handler.
    attrs = cluster_db.attributes.editable
    attrs['public_network_assignment']['assign_to_all_nodes']['value'] = \
        True
    resp = self.app.patch(
        reverse('ClusterAttributesHandler',
                kwargs={'cluster_id': cluster_db.id}),
        params=jsonutils.dumps({'editable': attrs}),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)

    # Expected attributes shared by every node's deployment entry.
    common_attrs = {
        'deployment_mode': 'ha_compact',
        'management_vip': '192.168.0.1',
        'public_vip': '172.16.0.2',
        'management_network_range': '192.168.0.0/24',
        'storage_network_range': '192.168.1.0/24',
        'mp': [
            {'weight': '1', 'point': '1'},
            {'weight': '2', 'point': '2'}
        ],
        'quantum': True,
        'quantum_settings': {},
        'master_ip': '127.0.0.1',
        'use_cinder': True,
        'deployment_id': cluster_db.id,
        'openstack_version_prev': None,
        'openstack_version': cluster_db.release.version,
        'fuel_version': cluster_db.fuel_version
    }
    common_attrs.update(
        objects.Release.get_orchestrator_data_dict(cluster_db.release))
    cluster_attrs = objects.Attributes.merged_attrs_values(
        cluster_db.attributes)
    common_attrs.update(cluster_attrs)

    # Expected Neutron (quantum) L2/L3 settings and predefined networks.
    L2 = {
        "base_mac": "fa:16:3e:00:00:00",
        "segmentation_type": "gre",
        "phys_nets": {},
        "tunnel_id_ranges": "2:65535"
    }
    L3 = {"use_namespaces": True}
    predefined_networks = {
        "net04_ext": {
            'shared': False,
            'L2': {
                'router_ext': True,
                'network_type': 'local',
                'physnet': None,
                'segment_id': None
            },
            'L3': {
                'subnet': u'172.16.0.0/24',
                'enable_dhcp': False,
                'nameservers': [],
                'floating': '172.16.0.130:172.16.0.254',
                'gateway': '172.16.0.1'
            },
            'tenant': 'admin'
        },
        "net04": {
            'shared': False,
            'L2': {
                'router_ext': False,
                'network_type': 'gre',
                'physnet': None,
                'segment_id': None
            },
            'L3': {
                'subnet': u'192.168.111.0/24',
                'enable_dhcp': True,
                'nameservers': ['8.8.4.4', '8.8.8.8'],
                'floating': None,
                'gateway': '192.168.111.1'
            },
            'tenant': 'admin'
        }
    }
    common_attrs['quantum_settings'].update(
        L2=L2, L3=L3, predefined_networks=predefined_networks)

    # Common attrs calculation: expected per-role node entries with the
    # IPs the network manager is expected to assign deterministically.
    nodes_list = []
    nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
    assigned_ips = {}
    i = 0
    # Admin IPs are popped from the tail, i.e. handed out in ascending order.
    admin_ips = [
        '10.20.0.134/24', '10.20.0.133/24', '10.20.0.132/24',
        '10.20.0.131/24', '10.20.0.130/24', '10.20.0.129/24'
    ]
    for node in nodes_db:
        node_id = node.id
        admin_ip = admin_ips.pop()
        # A node with several roles gets one entry per role, all sharing
        # the same addresses.
        for role in sorted(node.roles + node.pending_roles):
            assigned_ips[node_id] = {}
            assigned_ips[node_id]['management'] = '192.168.0.%d' % (i + 2)
            assigned_ips[node_id]['public'] = '172.16.0.%d' % (i + 3)
            assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 1)
            assigned_ips[node_id]['admin'] = admin_ip
            nodes_list.append({
                'role': role,
                'internal_address': assigned_ips[node_id]['management'],
                'public_address': assigned_ips[node_id]['public'],
                'storage_address': assigned_ips[node_id]['storage'],
                'internal_netmask': '255.255.255.0',
                'public_netmask': '255.255.255.0',
                'storage_netmask': '255.255.255.0',
                'uid': str(node_id),
                'swift_zone': str(node_id),
                'name': 'node-%d' % node_id,
                'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)
            })
        i += 1
    # NOTE: py2 filter() returns a list here.
    controller_nodes = filter(lambda node: node['role'] == 'controller',
                              deepcopy(nodes_list))
    common_attrs['nodes'] = nodes_list
    common_attrs['nodes'][0]['role'] = 'primary-controller'
    common_attrs['last_controller'] = controller_nodes[-1]['name']
    common_attrs['storage']['pg_num'] = 128

    common_attrs['test_vm_image'] = {
        'container_format': 'bare',
        'public': 'true',
        'disk_format': 'qcow2',
        'img_name': 'TestVM',
        'img_path': '/opt/vm/cirros-x86_64-disk.img',
        'os_name': 'cirros',
        'min_ram': 64,
        'glance_properties': (
            """--property murano_image_info="""
            """'{"title": "Murano Demo", "type": "cirros.demo"}'"""),
    }

    # Individual attrs calculation and
    # merging with common attrs
    priority_mapping = {
        # Controllers get descending priorities popped from this list.
        'controller': [600, 600, 500],
        'cinder': 700,
        'compute': 700
    }
    critical_mapping = {
        'primary-controller': True,
        'controller': False,
        'cinder': False,
        'compute': False
    }
    deployment_info = []
    for node in nodes_db:
        ips = assigned_ips[node.id]
        for role in sorted(node.roles):
            priority = priority_mapping[role]
            is_critical = critical_mapping[role]
            if isinstance(priority, list):
                # pop() consumes from the tail: 500, 600, 600.
                priority = priority.pop()
            individual_atts = {
                'uid': str(node.id),
                'status': node.status,
                'role': role,
                'online': node.online,
                'fail_if_error': is_critical,
                'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                'priority': priority,
                # Expected OVS network scheme for this node.
                'network_scheme': {
                    "version": "1.0",
                    "provider": "ovs",
                    "interfaces": {
                        "eth0": {
                            "L2": {"vlan_splinters": "off"},
                            "mtu": 1500
                        },
                        "eth1": {
                            "L2": {"vlan_splinters": "off"},
                            "mtu": 1500
                        },
                        "eth2": {
                            "L2": {"vlan_splinters": "off"},
                            "mtu": 1500
                        },
                    },
                    "endpoints": {
                        "br-mgmt": {"IP": [ips['management'] + "/24"]},
                        "br-ex": {
                            "IP": [ips['public'] + "/24"],
                            "gateway": "172.16.0.1"
                        },
                        "br-storage": {"IP": [ips['storage'] + "/24"]},
                        "br-fw-admin": {"IP": [ips['admin']]},
                    },
                    "roles": {
                        "management": "br-mgmt",
                        "mesh": "br-mgmt",
                        "ex": "br-ex",
                        "storage": "br-storage",
                        "fw-admin": "br-fw-admin",
                    },
                    "transformations": [
                        {"action": "add-br", "name": u"br-eth0"},
                        {"action": "add-port", "bridge": u"br-eth0",
                         "name": u"eth0"},
                        {"action": "add-br", "name": u"br-eth1"},
                        {"action": "add-port", "bridge": u"br-eth1",
                         "name": u"eth1"},
                        {"action": "add-br", "name": "br-mgmt"},
                        {"action": "add-br", "name": "br-storage"},
                        {"action": "add-br", "name": "br-fw-admin"},
                        {"action": "add-br", "name": "br-ex"},
                        {"action": "add-patch",
                         "bridges": [u"br-eth0", "br-storage"],
                         "tags": [102, 0]},
                        {"action": "add-patch",
                         "bridges": [u"br-eth0", "br-mgmt"],
                         "tags": [101, 0]},
                        {"action": "add-patch",
                         "bridges": [u"br-eth1", "br-fw-admin"],
                         "trunks": [0]},
                        {"action": "add-patch",
                         "bridges": [u"br-eth0", "br-ex"],
                         "trunks": [0]},
                    ]
                }
            }
            individual_atts.update(common_attrs)
            individual_atts['glance']['image_cache_max_size'] = str(
                manager.calc_glance_cache_size(node.attributes.volumes))
            deployment_info.append(deepcopy(individual_atts))

    # First controller entry is promoted to primary and made critical.
    controller_nodes = filter(lambda node: node['role'] == 'controller',
                              deployment_info)
    controller_nodes[0]['role'] = 'primary-controller'
    controller_nodes[0]['fail_if_error'] = True

    supertask = self.env.launch_deployment()
    deploy_task_uuid = [
        x.uuid for x in supertask.subtasks if x.name == 'deployment'
    ][0]

    # Expected 'deploy' RPC message.
    deployment_msg = {
        'api_version': '1',
        'method': 'deploy',
        'respond_to': 'deploy_resp',
        'args': {}
    }
    deployment_msg['args']['task_uuid'] = deploy_task_uuid
    deployment_msg['args']['deployment_info'] = deployment_info

    # Expected per-node provisioning (Cobbler) data.
    provision_nodes = []
    admin_net = self.env.network_manager.get_admin_network_group()
    for n in sorted(self.env.nodes, key=lambda n: n.id):
        udev_interfaces_mapping = ','.join(
            ['{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
        eth1_mac = [i.mac for i in n.interfaces if i.name == 'eth1'][0]
        pnd = {
            'profile': cluster_attrs['cobbler']['profile'],
            'power_type': 'ssh',
            'power_user': '******',
            'kernel_options': {
                'netcfg/choose_interface': eth1_mac,
                'udevrules': udev_interfaces_mapping
            },
            'power_address': n.ip,
            'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
            'name': objects.Node.make_slave_name(n),
            'hostname': n.fqdn,
            'name_servers': '\"%s\"' % settings.DNS_SERVERS,
            'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
            'netboot_enabled': '1',
            'ks_meta': {
                'fuel_version': cluster_db.fuel_version,
                'puppet_auto_setup': 1,
                'puppet_master': settings.PUPPET_MASTER_HOST,
                'puppet_enable': 0,
                'mco_auto_setup': 1,
                'install_log_2_syslog': 1,
                'mco_pskey': settings.MCO_PSKEY,
                'mco_vhost': settings.MCO_VHOST,
                'mco_host': settings.MCO_HOST,
                'mco_user': settings.MCO_USER,
                'mco_password': settings.MCO_PASSWORD,
                'mco_connector': settings.MCO_CONNECTOR,
                'mco_enable': 1,
                'pm_data': {
                    'ks_spaces': n.attributes.volumes,
                    'kernel_params': objects.Node.get_kernel_params(n),
                },
                'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                'mlnx_vf_num': "16",
                'mlnx_plugin_mode': "disabled",
                'mlnx_iser_enabled': False,
            }
        }
        orchestrator_data = objects.Release.get_orchestrator_data_dict(
            cluster_db.release)
        if orchestrator_data:
            pnd['ks_meta']['repo_metadata'] = \
                orchestrator_data['repo_metadata']
        vlan_splinters = cluster_attrs.get('vlan_splinters', None)
        if vlan_splinters == 'kernel_lt':
            pnd['ks_meta']['kernel_lt'] = 1

        NetworkManager.assign_admin_ips(n.id, 1)
        admin_ip = self.env.network_manager.get_admin_ip_for_node(n)

        for i in n.meta.get('interfaces', []):
            if 'interfaces' not in pnd:
                pnd['interfaces'] = {}
            pnd['interfaces'][i['name']] = {
                'mac_address': i['mac'],
                'static': '0',
            }
            if 'interfaces_extra' not in pnd:
                pnd['interfaces_extra'] = {}
            pnd['interfaces_extra'][i['name']] = {
                'peerdns': 'no',
                'onboot': 'no'
            }
            # The admin (PXE) interface is the one matching the node MAC.
            if i['mac'] == n.mac:
                pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
                pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'
                pnd['interfaces'][i['name']]['ip_address'] = admin_ip
                pnd['interfaces'][i['name']]['netmask'] = str(
                    netaddr.IPNetwork(admin_net.cidr).netmask)

        provision_nodes.append(pnd)

    # Expected 'provision' RPC message.
    provision_task_uuid = filter(
        lambda t: t.name == 'provision',
        supertask.subtasks)[0].uuid
    provision_msg = {
        'api_version': '1',
        'method': 'provision',
        'respond_to': 'provision_resp',
        'args': {
            'task_uuid': provision_task_uuid,
            'provisioning_info': {
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                    'master_ip': settings.MASTER_IP
                },
                'nodes': provision_nodes
            }
        }
    }

    # Compare the reconstructed messages with what was actually cast.
    args, kwargs = nailgun.task.manager.rpc.cast.call_args
    self.assertEqual(len(args), 2)
    self.assertEqual(len(args[1]), 2)
    self.datadiff(args[1][0], provision_msg)
    self.datadiff(args[1][1], deployment_msg)
def PUT(self):
    # Periodic agent endpoint used to track changes on every node;
    # called roughly once every 120 seconds.
    """:returns: node id.
    :http: * 200 (node are successfully updated)
           * 304 (node data not changed since last request)
           * 400 (invalid nodes data specified)
           * 404 (node not found)
    """
    data = jsonutils.loads(web.data())
    meta = data.get('meta', {})
    ip = data.get("ip")
    # Resolve the node record: first by IP, then by power IP, then by
    # MAC/UID; if nothing matches, a new node record is created.
    nodeinstance = self.collection.single.get_by_ip(ip)
    if nodeinstance is None:
        logger.info("数据库中不存在ip为 {0} 的机器,使用powerip查询..".format(ip))
        # Executed when a node in 'discover' state sends the PUT request.
        nodeinstance = self.collection.single.get_by_powerip(ip)
        if nodeinstance is None:
            logger.info("数据库中不存在ip和powerip为 {0} 的机器,使用mac查询..".format(ip))
            # Resetting an environment does not delete rows from the
            # nodes table, yet the agent still reports here; blindly
            # creating a new record would cause a MAC conflict, so look
            # the node up by MAC/UID instead.
            self.collection.single.copyfile_to_agent(data["ip"])
            data["nochange_powerip"] = True
            nodeinstance = self.collection.single.get_by_mac_or_uid(
                data['mac'])
            if nodeinstance is None:
                logger.info("数据库中不存在当前提交过来的node数据,调用新增节点函数...")
                self.collection.single.create(data)
                return
    # ovsbind may submit a MAC address that does not exist, so always
    # use the MAC stored on the resolved node record.
    data['mac'] = nodeinstance.mac
    # don't update interfaces information, if agent has sent an empty array
    # NOTE(review): after a cluster is deleted nodes are re-discovered
    # through this handler, but since cluster deletion removes rows from
    # the nodes table first, nodeinstance may be None here and raise —
    # confirm this code path.
    if meta and len(
            meta.get('interfaces', [])) == 0 and nodeinstance.meta.get(
            'interfaces'):
        logger.warning(
            u'Node {0} has received an empty interfaces array - '
            u'interfaces information will not be updated'.format(
                nodeinstance.human_readable_name))
        meta['interfaces'] = nodeinstance.meta['interfaces']
    nd = self.checked_data(self.validator.validate_update,
                           data=jsonutils.dumps(data))
    node = self.collection.single.get_by_meta(nd)
    if not node:
        raise self.http(404, "Can't find node: {0}".format(nd))
    node.timestamp = datetime.now()
    # test copyfile
    # self.collection.single.copyfile_to_agent(node)
    if not node.online:
        # The node was previously marked offline — bring it back online
        # and notify about the rediscovery.
        node.online = True
        msg = u"节点 '{0}' 已经重新上线".format(node.name)
        logger.info(msg)
        notifier.notify("discover", msg, node_id=node.id)
    db().flush()
    # Short-circuit when the agent payload is unchanged since last report.
    if 'agent_checksum' in nd and (
            node.agent_checksum == nd['agent_checksum']):
        return {'id': node.id, 'cached': True}
    self.collection.single.update_by_agent(node, nd)
    return {"id": node.id}