def upgrade_releases(connection):
    """Migrate every release row to the 5.1 metadata format.

    Rewrites ``attributes_metadata`` and ``roles_metadata`` per release and
    stamps the (release-independent) ``wizard_metadata``.

    :param connection: DB-API/SQLAlchemy connection used for all queries
    """
    select_query = text(
        """SELECT id, attributes_metadata, roles_metadata from releases""")
    update_query = text(
        """UPDATE releases SET attributes_metadata = :attrs, roles_metadata = :roles, wizard_metadata = :wiz_meta WHERE id = :id""")
    # reading fixture files in loop is in general a bad idea and as long as
    # wizard_metadata is the same for all existing releases getting it can
    # be moved outside of the loop
    wizard_meta = upgrade_release_wizard_metadata_50_to_51()
    serialized_wizard = jsonutils.dumps(wizard_meta)
    for row in connection.execute(select_query):
        upgraded_attrs = upgrade_release_attributes_50_to_51(
            jsonutils.loads(row[1]))
        upgraded_roles = upgrade_release_roles_50_to_51(
            jsonutils.loads(row[2]))
        connection.execute(
            update_query,
            id=row[0],
            attrs=jsonutils.dumps(upgraded_attrs),
            roles=jsonutils.dumps(upgraded_roles),
            wiz_meta=serialized_wizard,
        )
def upgrade_releases(connection):
    """Migrate every release row to the 5.1 metadata format.

    Rewrites ``attributes_metadata`` and ``roles_metadata`` per release and
    stamps the shared ``wizard_metadata``.

    Fix: the loop body referenced ``_wizard_meta``, which was never defined
    in this function (NameError on the first iteration). The wizard metadata
    is now fetched once before the loop — as the original comment intended —
    and serialized once, since it is identical for all releases.

    :param connection: DB-API/SQLAlchemy connection used for all queries
    """
    select = text(
        """SELECT id, attributes_metadata, roles_metadata from releases""")
    update = text(
        """UPDATE releases SET attributes_metadata = :attrs, roles_metadata = :roles, wizard_metadata = :wiz_meta WHERE id = :id""")
    r = connection.execute(select)
    # reading fixture files in loop is in general a bad idea and as long as
    # wizard_metadata is the same for all existing releases getting it can
    # be moved outside of the loop
    wizard_meta = upgrade_release_wizard_metadata_50_to_51()
    wiz_meta_json = jsonutils.dumps(wizard_meta)
    for release in r:
        attrs_meta = upgrade_release_attributes_50_to_51(
            jsonutils.loads(release[1]))
        roles_meta = upgrade_release_roles_50_to_51(
            jsonutils.loads(release[2]))
        connection.execute(
            update,
            id=release[0],
            attrs=jsonutils.dumps(attrs_meta),
            roles=jsonutils.dumps(roles_meta),
            wiz_meta=wiz_meta_json
        )
def upgrade_clusters_replaced_info(connection):
    """Push replaced provisioning/deployment info from clusters to nodes.

    Each node receives its own slice of the cluster-level replaced info
    (matched by node uid); the cluster keeps only the provisioning info
    without the per-node entries, and its deployment info is emptied.

    :param connection: DB-API/SQLAlchemy connection used for all queries
    """
    clusters_select = text(
        """SELECT id, replaced_provisioning_info, replaced_deployment_info FROM clusters""")
    # queries are loop-invariant, so build them once up front
    nodes_select = text("""SELECT id FROM nodes WHERE cluster_id=:id""")
    update_node = text("""UPDATE nodes SET replaced_deployment_info = :deploy, replaced_provisioning_info = :provision WHERE id = :id""")
    update_cluster = text("""UPDATE clusters SET replaced_deployment_info = :deploy, replaced_provisioning_info = :provision WHERE id = :id""")
    for cluster in connection.execute(clusters_select):
        cluster_id = cluster[0]
        provisioning_info = jsonutils.loads(cluster[1])
        deployment_nodes = jsonutils.loads(cluster[2])
        # detach the per-node provisioning entries from the cluster payload
        provisioning_nodes = provisioning_info.pop('nodes', [])
        for node in connection.execute(nodes_select, id=cluster_id):
            uid = str(node[0])
            node_deploy = [d for d in deployment_nodes if d['uid'] == uid]
            node_provision = next(
                (d for d in provisioning_nodes if d['uid'] == uid), {})
            connection.execute(update_node,
                               deploy=jsonutils.dumps(node_deploy),
                               provision=jsonutils.dumps(node_provision),
                               id=node[0])
        connection.execute(update_cluster,
                           deploy=jsonutils.dumps({}),
                           provision=jsonutils.dumps(provisioning_info),
                           id=cluster_id)
def test_deploy_neutron_error_not_enough_ip_addresses(self, mocked_rpc):
    """Deployment must fail when the public IP range is smaller than the
    number of nodes being deployed."""
    self.env.create(
        cluster_kwargs={'net_provider': 'neutron',
                        'net_segment_type': 'gre'},
        nodes_kwargs=[{"pending_addition": True} for _ in range(3)])
    cluster_id = self.env.clusters[0].id
    net_data = jsonutils.loads(
        self.env.neutron_networks_get(cluster_id).body)
    # shrink the public range to only two addresses for three nodes
    public = [ng for ng in net_data['networks']
              if ng['name'] == 'public'][0]
    public['ip_ranges'] = [['172.16.0.10', '172.16.0.11']]
    resp = self.env.neutron_networks_put(cluster_id, net_data)
    self.assertEqual(resp.status_code, 202)
    task = jsonutils.loads(resp.body)
    self.assertEqual(task['status'], 'ready')
    task = self.env.launch_deployment()
    self.assertEqual(task.status, 'error')
    self.assertEqual(
        task.message,
        'Not enough IP addresses. Public network must have at least '
        '3 IP addresses for the current environment.')
def test_roles_add_duplicated_to_db_directly(self):
    """Inserting a role whose name already exists for the release must hit
    the DB unique constraint and leave the role list unchanged."""
    self.env.create_release()

    def fetch_release():
        # first (and only) release as seen through the REST API
        resp = self.app.get(
            reverse('ReleaseCollectionHandler'),
            headers=self.default_headers
        )
        return jsonutils.loads(resp.body)[0]

    release_json = fetch_release()
    old_roles = list(release_json["roles"])
    duplicate = Role(name=old_roles[0], release_id=release_json["id"])
    added = True
    try:
        self.db.add(duplicate)
        self.db.commit()
    except IntegrityError:
        self.db.rollback()
        added = False
    self.assertFalse(added)
    new_roles = list(fetch_release()["roles"])
    self.assertEqual(old_roles, new_roles)
def test_put_handler_with_one_node(self):
    """PUT of reshuffled NIC network assignments for one node is accepted
    and the handler echoes the submitted configuration back."""
    cluster = self.env.create_cluster(api=True)
    mac = self.env.generate_random_mac()
    meta = {}
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': mac},
        {'name': 'eth1', 'mac': self.env.generate_random_mac()}])
    node = self.env.create_node(api=True, meta=meta, mac=mac,
                                cluster_id=cluster['id'])
    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    interfaces = jsonutils.loads(resp.body)
    # capture the networks currently assigned to the node's primary NIC,
    # then move them all onto the other NIC(s)
    moved_nets = [nic for nic in interfaces
                  if nic['mac'] == mac][0]['assigned_networks']
    for nic in interfaces:
        if nic['mac'] == mac:
            nic['assigned_networks'] = []
        else:
            nic['assigned_networks'].extend(moved_nets)
            nic['assigned_networks'].sort()
    nodes_list = [{'id': node['id'], 'interfaces': interfaces}]
    resp = self.app.put(
        reverse('NodeCollectionNICsHandler'),
        jsonutils.dumps(nodes_list),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(jsonutils.loads(resp.body), nodes_list)
def test_deploy_neutron_gre_w_changed_public_cidr(self, mocked_rpc):
    """After changing the public CIDR/range, deployed nodes must receive
    public addresses from the new range."""
    self.env.create(
        cluster_kwargs={'net_provider': 'neutron',
                        'net_segment_type': 'gre'},
        nodes_kwargs=[{"pending_addition": True} for _ in range(2)]
    )
    cluster_id = self.env.clusters[0].id
    net_data = jsonutils.loads(
        self.env.neutron_networks_get(cluster_id).body)
    public = [ng for ng in net_data['networks']
              if ng['name'] == 'public'][0]
    public.update({'ip_ranges': [['172.16.10.10', '172.16.10.122']],
                   'cidr': '172.16.10.0/24',
                   'gateway': '172.16.10.1'})
    net_data['networking_parameters']['floating_ranges'] = \
        [['172.16.10.130', '172.16.10.254']]
    resp = self.env.neutron_networks_put(cluster_id, net_data)
    self.assertEqual(resp.status_code, 202)
    task = jsonutils.loads(resp.body)
    self.assertEqual(task['status'], 'ready')
    self.env.launch_deployment()
    # inspect the astute RPC payload produced by the deployment task
    args, kwargs = nailgun.task.manager.rpc.cast.call_args
    self.assertEqual(len(args), 2)
    self.assertEqual(len(args[1]), 2)
    n_rpc_deploy = args[1][1]['args']['deployment_info']
    self.assertEqual(len(n_rpc_deploy), 2)
    expected_ips = ['172.16.10.11', '172.16.10.12']
    for node_info in n_rpc_deploy:
        for i, common_args in enumerate(node_info['nodes']):
            self.assertEqual(common_args['public_address'],
                             expected_ips[i])
def test_deploy_neutron_error_not_enough_ip_addresses(self, mocked_rpc):
    """Three pending nodes with only a two-address public range must make
    the deployment task fail with an explanatory message."""
    node_kwargs = {"pending_addition": True}
    self.env.create(
        cluster_kwargs={'net_provider': 'neutron',
                        'net_segment_type': 'gre'},
        nodes_kwargs=[dict(node_kwargs), dict(node_kwargs),
                      dict(node_kwargs)]
    )
    cluster_id = self.env.clusters[0].id
    net_data = jsonutils.loads(
        self.env.neutron_networks_get(cluster_id).body)
    public_net = [ng for ng in net_data['networks']
                  if ng['name'] == 'public'][0]
    public_net.update({'ip_ranges': [['172.16.0.10', '172.16.0.11']]})
    resp = self.env.neutron_networks_put(cluster_id, net_data)
    self.assertEqual(resp.status_code, 202)
    save_task = jsonutils.loads(resp.body)
    self.assertEqual(save_task['status'], 'ready')
    deploy_task = self.env.launch_deployment()
    self.assertEqual(deploy_task.status, 'error')
    self.assertEqual(
        deploy_task.message,
        'Not enough IP addresses. Public network must have at least '
        '3 IP addresses for the current environment.')
def save_oldpass(self, oldclusterSetting, newcontent):
    """Carry generated secrets from the old cluster settings into new ones.

    Parses the previous settings (JSON string under ``cluster_setting``)
    and the new settings content, copies every password/secret/token that
    must survive a settings update, and returns the merged settings as a
    JSON string.

    Fix/cleanup: the original repeated the same copy statement ~30 times;
    it is now driven by a single (section, key) table, copying exactly the
    same fields.

    :param oldclusterSetting: mapping with key "cluster_setting" holding a
        JSON string of the previous settings
    :param newcontent: JSON string with the new cluster settings
    :returns: JSON string of the new settings with old secrets restored
    """
    old_settings = jsonutils.loads(oldclusterSetting["cluster_setting"])
    new_settings = jsonutils.loads(newcontent)

    # (section, key) pairs whose values must be preserved across updates
    preserved_keys = (
        ("mysql", "root_password"),
        ("rabbitmq", "rabbit_password"),
        ("mongodb", "mongo_password"),
        ("keystone", "keystone_mysql_password"),
        ("glance", "glance_mysql_password"),
        ("nova-api", "nova_mysql_password"),
        ("nova-api", "metadata_secret"),
        ("neutron-server", "neutron_mysql_password"),
        ("cinder-api", "cinder_mysql_password"),
        ("cinder-storage", "cinder_mysql_password"),
        ("ceilometer", "ceilometer_mongo_password"),
        ("ceilometer", "metadata_secret"),
        ("heat", "heat_mysql_password"),
        ("global", "keystone_admin_password"),
        ("global", "keystone_glance_password"),
        ("global", "keystone_nova_password"),
        ("global", "keystone_neutron_password"),
        ("global", "keystone_ceilometer_password"),
        ("global", "keystone_heat_password"),
        ("global", "keystone_cinder_password"),
        ("global", "admin_token"),
        ("global", "keystone_dbpass"),
        ("global", "glance_dbpass"),
        ("global", "nova_dbpass"),
        ("global", "neutron_dbpass"),
        ("global", "cinder_dbpass"),
        ("global", "ceilometer_dbpass"),
        ("global", "heat_dbpass"),
        ("haproxy-keepalived", "virtual_router_id"),
        ("haproxy-keepalived", "auth_pass"),
    )
    for section, key in preserved_keys:
        new_settings[section][key] = old_settings[section][key]
    return jsonutils.dumps(new_settings)
def test_verify_networks(self, mocked_rpc):
    """Re-saving the unchanged nova network config must yield a 202 with
    a 'ready' check task."""
    cluster = self.env.create_cluster(api=True)
    net_config = jsonutils.loads(
        self.env.nova_networks_get(cluster['id']).body)
    put_resp = self.env.nova_networks_put(cluster['id'], net_config)
    self.assertEqual(202, put_resp.status_code)
    check_task = jsonutils.loads(put_resp.body)
    self.assertEqual(check_task['status'], 'ready')
def test_netconfig_error_when_admin_cidr_match_other_network_cidr(self):
    """Saving a config where the admin CIDR intersects another network
    must produce an errored check_networks task."""
    get_resp = self.env.nova_networks_get(self.cluster['id'])
    net_config = jsonutils.loads(get_resp.body)
    put_resp = self.env.nova_networks_put(self.cluster['id'], net_config,
                                          expect_errors=True)
    self.assertEqual(put_resp.status_code, 202)
    task = jsonutils.loads(put_resp.body)
    self.assertEqual(task['status'], 'error')
    self.assertEqual(task['progress'], 100)
    self.assertEqual(task['name'], 'check_networks')
    self.assertIn("Address space intersection between networks:\n"
                  "admin (PXE), management.",
                  task['message'])
def test_do_not_update_net_segmentation_type(self):
    """Changing 'segmentation_type' via PUT must be rejected with an
    errored task carrying the prohibition message."""
    net_config = jsonutils.loads(
        self.env.neutron_networks_get(self.cluster.id).body)
    net_config['networking_parameters']['segmentation_type'] = 'vlan'
    resp = self.env.neutron_networks_put(self.cluster.id, net_config,
                                         expect_errors=True)
    self.assertEqual(202, resp.status_code)
    task = jsonutils.loads(resp.body)
    self.assertEqual(task['status'], 'error')
    self.assertEqual(
        task['message'],
        "Change of 'segmentation_type' is prohibited"
    )
def test_version_handler(self):
    """FuelKeyHandler must return the expected signed, base64-encoded
    key payload."""
    resp = self.app.get(reverse('FuelKeyHandler'),
                        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    fuel_release = "0.1"
    key_data = {"sha": "12345", "release": fuel_release, "uuid": "uuid"}
    # signature is computed over the payload *before* the signature field
    # itself is added
    key_data["signature"] = base64.b64encode(jsonutils.dumps(key_data))
    response = jsonutils.loads(resp.body)
    self.assertEqual(
        response,
        {"key": base64.b64encode(jsonutils.dumps(key_data))})
    decoded_key = jsonutils.loads(base64.b64decode(response["key"]))
    self.assertEqual(decoded_key["release"], fuel_release)
def test_deploy_ha_neutron_gre_w_custom_public_ranges(self, mocked_rpc):
    """In HA mode with two custom public ranges, the VIP takes the first
    address and node addresses are allocated across both ranges in order."""
    self.env.create(
        cluster_kwargs={'mode': 'ha_compact',
                        'net_provider': 'neutron',
                        'net_segment_type': 'gre'},
        nodes_kwargs=[{"pending_addition": True} for _ in range(5)])
    cluster_id = self.env.clusters[0].id
    net_data = jsonutils.loads(
        self.env.neutron_networks_get(cluster_id).body)
    public = [ng for ng in net_data['networks']
              if ng['name'] == 'public'][0]
    public.update({'ip_ranges': [['172.16.0.10', '172.16.0.12'],
                                 ['172.16.0.20', '172.16.0.22']]})
    resp = self.env.neutron_networks_put(cluster_id, net_data)
    self.assertEqual(resp.status_code, 202)
    task = jsonutils.loads(resp.body)
    self.assertEqual(task['status'], 'ready')
    self.env.launch_deployment()
    args, kwargs = nailgun.task.manager.rpc.cast.call_args
    self.assertEqual(len(args), 2)
    self.assertEqual(len(args[1]), 2)
    n_rpc_deploy = args[1][1]['args']['deployment_info']
    self.assertEqual(len(n_rpc_deploy), 5)
    # .10 is consumed by the VIP, so node IPs start at .11
    expected_ips = ['172.16.0.11', '172.16.0.12', '172.16.0.20',
                    '172.16.0.21', '172.16.0.22']
    for node_info in n_rpc_deploy:
        self.assertEqual(node_info['public_vip'], '172.16.0.10')
        for i, common_args in enumerate(node_info['nodes']):
            self.assertEqual(common_args['public_address'],
                             expected_ips[i])
def nics_bond_create(self, put_func):
    """Create an OVS bond over two NICs via *put_func* and verify exactly
    one bond named 'ovs-bond0' appears in the node's interfaces."""
    bond = {
        "name": 'ovs-bond0',
        "type": NETWORK_INTERFACE_TYPES.bond,
        "mode": OVS_BOND_MODES.balance_slb,
        "slaves": [{"name": self.other_nic["name"]},
                   {"name": self.empty_nic["name"]}],
        # the bond takes over the networks currently on the other NIC
        "assigned_networks": self.other_nic["assigned_networks"]
    }
    self.data.append(bond)
    # the NIC itself must no longer carry the networks; the bond keeps a
    # reference to the original list captured above
    self.other_nic["assigned_networks"] = []
    resp = put_func()
    self.assertEqual(resp.status_code, 200)
    resp = self.env.node_nics_get(self.env.nodes[0]["id"])
    self.assertEqual(resp.status_code, 200)
    interfaces = jsonutils.loads(resp.body)
    bonds = [iface for iface in interfaces
             if iface["type"] == NETWORK_INTERFACE_TYPES.bond]
    self.assertEqual(len(bonds), 1)
    self.assertEqual(bonds[0]["name"], 'ovs-bond0')
def test_version_handler(self):
    """VersionHandler must report build SHAs and the release versions
    gathered from the YAML files on disk (both mocked here)."""
    glob_patch = patch(
        'nailgun.api.v1.handlers.version.glob.glob',
        Mock(return_value=["test.yaml"])
    )
    open_patch = patch(
        '__builtin__.open',
        mock_open(read_data='test_data'),
        create=True
    )
    with nested(glob_patch, open_patch):
        resp = self.app.get(
            reverse('VersionHandler'),
            headers=self.default_headers
        )
        self.assertEqual(200, resp.status_code)
        self.assertEqual(
            jsonutils.loads(resp.body),
            {
                "release": "0.1b",
                "nailgun_sha": "12345",
                "astute_sha": "Unknown build",
                "fuellib_sha": "Unknown build",
                "ostf_sha": "Unknown build",
                "auth_required": True,
                "release_versions": {"test": "test_data"}
            }
        )
def test_multiline_log_entry(self):
    """With 'multiline' enabled, a log entry whose text spans several
    lines must come back from the handler as a single entry."""
    settings.LOGS[0]['multiline'] = True
    timestamp = time.strftime(settings.UI_LOG_DATE_FORMAT)
    log_entries = [
        [timestamp, 'LEVEL111', 'text1'],
        [timestamp, 'LEVEL222', 'text\nmulti\nline'],
        [timestamp, 'LEVEL333', 'text3'],
    ]
    self.env.create_cluster(api=False)
    self._create_logfile_for_node(settings.LOGS[0], log_entries)
    resp = self.app.get(
        reverse('LogEntryCollectionHandler'),
        params={'source': settings.LOGS[0]['id']},
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    response = jsonutils.loads(resp.body)
    # handler returns newest-first; reverse to match insertion order
    response['entries'].reverse()
    self.assertEqual(response['entries'], log_entries)
    # restore the module-level setting for other tests
    settings.LOGS[0]['multiline'] = False
def test_get_handler_with_invalid_data(self):
    """Agent updates with malformed 'interfaces' must be rejected (400)
    and the node's NIC list must stay empty."""
    meta = self.env.default_metadata()
    meta["interfaces"] = []
    node = self.env.create_node(api=True, meta=meta)
    for bad_interfaces in ({'interfaces': None}, {'interfaces': {}}):
        invalid_meta = self.env.default_metadata()
        invalid_meta.update(bad_interfaces)
        agent_payload = {'mac': node['mac'], 'meta': invalid_meta}
        put_resp = self.app.put(
            reverse('NodeAgentHandler'),
            jsonutils.dumps(agent_payload),
            expect_errors=True,
            headers=self.default_headers
        )
        self.assertEqual(put_resp.status_code, 400)
        get_resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers
        )
        self.assertEqual(get_resp.status_code, 200)
        self.assertEqual(jsonutils.loads(get_resp.body), [])
def _get_cluster_networks(self, cluster_id):
    """Return the 'networks' list from the cluster's nova network config.

    :param cluster_id: id of the cluster to query
    :returns: list of network dicts from the handler's JSON response
    """
    resp = self.app.get(
        reverse('NovaNetworkConfigurationHandler',
                {"cluster_id": cluster_id}),
        headers=self.default_headers,
    )
    return jsonutils.loads(resp.body)["networks"]
def test_assignment_is_removed_when_delete_node_from_cluster(self):
    """Removing a node from its cluster must clear all network
    assignments from the node's NICs.

    Cleanup: the original assigned ``meta = self.env.default_metadata()``
    twice in a row; the redundant duplicate statement is removed.
    """
    mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta, [{
            'name': 'eth0',
            'mac': mac
        }, {
            'name': 'eth1',
            'mac': self.env.generate_random_mac()
        }])
    node = self.env.create_node(api=True, meta=meta, mac=mac)
    cluster = self.env.create_cluster(api=True, nodes=[node['id']])
    # detach the node by saving the cluster with an empty node list
    resp = self.app.put(reverse('ClusterHandler',
                                kwargs={'obj_id': cluster['id']}),
                        jsonutils.dumps({'nodes': []}),
                        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    resp = self.app.get(reverse('NodeNICsHandler',
                                kwargs={'node_id': node['id']}),
                        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    response = jsonutils.loads(resp.body)
    for resp_nic in response:
        self.assertEqual(resp_nic['assigned_networks'], [])
def test_NIC_updates_by_agent(self):
    """An agent update for an existing MAC must refresh that NIC's speed
    and state while leaving its network assignments empty."""
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': '12345', 'current_speed': 1,
         'state': 'up'}])
    node = self.env.create_node(api=True, meta=meta)
    # same MAC, different name/speeds/state
    new_meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(new_meta, [
        {'name': 'new_nic', 'mac': '12345', 'current_speed': 10,
         'max_speed': 10, 'state': 'down'}])
    put_resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps({'mac': node['mac'], 'meta': new_meta}),
        headers=self.default_headers)
    self.assertEqual(put_resp.status_code, 200)
    get_resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(get_resp.status_code, 200)
    nics = jsonutils.loads(get_resp.body)
    self.assertEqual(len(nics), 1)
    reported = nics[0]
    expected = new_meta['interfaces'][0]
    self.assertEqual(reported['mac'], expected['mac'])
    self.assertEqual(reported['current_speed'],
                     expected['current_speed'])
    self.assertEqual(reported['max_speed'], expected['max_speed'])
    self.assertEqual(reported['state'], expected['state'])
    for conn in ('assigned_networks', ):
        self.assertEqual(reported[conn], [])
def test_network_verify_when_env_not_ready(self):
    """Network verification must error out while the cluster is deploying
    or updating."""
    cluster_db = self.env.clusters[0]
    for blocking_status in (CLUSTER_STATUSES.deployment,
                            CLUSTER_STATUSES.update):
        cluster_db.status = blocking_status
        self.db.commit()
        resp = self.app.get(
            reverse(
                'NovaNetworkConfigurationHandler',
                kwargs={'cluster_id': self.env.clusters[0].id}
            ),
            headers=self.default_headers
        )
        nets = jsonutils.loads(resp.body)
        task = self.env.launch_verify_networks(nets)
        self.db.refresh(task)
        self.assertEqual(task.status, "error")
        self.assertEqual(
            task.message,
            "Environment is not ready to run network verification "
            "because it is in '{0}' state.".format(blocking_status)
        )
def test_network_verify_fails_if_untagged_intersection(self, mocked_rpc):
    """Untagging the storage network so it collides with another untagged
    network on the same interface must fail verification before any RPC."""
    resp = self.app.get(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': self.env.clusters[0].id}
        ),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    nets = jsonutils.loads(resp.body)
    for net in nets['networks']:
        if net['name'] in ('storage',):
            net['vlan_start'] = None   # make it untagged
    task = self.env.launch_verify_networks(nets)
    self.env.wait_error(task, 30)
    self.assertIn(
        'Some untagged networks are assigned to the same physical '
        'interface. You should assign them to different physical '
        'interfaces. Affected:\n',
        task.message
    )
    for _node in self.env.nodes:
        self.assertIn('"storage"', task.message)
    # verification must fail before reaching astute
    self.assertEqual(mocked_rpc.called, False)
def test_get_handler_with_invalid_speed_data(self):
    """Invalid NIC speed values sent by the agent must be sanitized to
    None rather than rejected or stored as-is."""
    meta = self.env.default_metadata()
    meta["interfaces"] = []
    node = self.env.create_node(api=True, meta=meta)
    # each entry carries one invalid speed field (negative, float, string)
    invalid_speed_metas = [
        {"interfaces": [{"name": "eth0", "mac": "00:00:00",
                         "max_speed": -100}]},
        {"interfaces": [{"name": "eth0", "mac": "00:00:00",
                         "current_speed": -100}]},
        {"interfaces": [{"name": "eth0", "mac": "00:00:00",
                         "current_speed": "100"}]},
        {"interfaces": [{"name": "eth0", "mac": "00:00:00",
                         "max_speed": 10.0}]},
        {"interfaces": [{"name": "eth0", "mac": "00:00:00",
                         "max_speed": "100"}]},
        {"interfaces": [{"name": "eth0", "mac": "00:00:00",
                         "current_speed": 10.0}]},
    ]
    for nic_meta in invalid_speed_metas:
        agent_meta = self.env.default_metadata()
        agent_meta.update(nic_meta)
        put_resp = self.app.put(
            reverse("NodeAgentHandler"),
            jsonutils.dumps({"mac": node["mac"], "meta": agent_meta}),
            expect_errors=True,
            headers=self.default_headers,
        )
        self.assertEqual(put_resp.status_code, 200)
        get_resp = self.app.get(
            reverse("NodeHandler", kwargs={"obj_id": node["id"]}),
            headers=self.default_headers)
        ifaces = jsonutils.loads(get_resp.body)["meta"]["interfaces"]
        self.assertEqual(
            ifaces,
            [{"name": "eth0", "mac": "00:00:00",
              "max_speed": None, "current_speed": None}])
def make_bond_via_api(self, bond_name, bond_mode, nic_names, node_id=None):
    """Bond the named NICs of a node through the NIC handlers.

    Moves all networks assigned to *nic_names* onto a new bond interface
    and PUTs the resulting configuration.

    :param bond_name: name of the bond interface to create
    :param bond_mode: bond mode constant
    :param nic_names: names of NICs to enslave
    :param node_id: target node; defaults to the first known node
    """
    if not node_id:
        node_id = self.nodes[0]["id"]
    resp = self.app.get(
        reverse("NodeNICsHandler", kwargs={"node_id": node_id}),
        headers=self.default_headers)
    self.tester.assertEqual(resp.status_code, 200)
    data = jsonutils.loads(resp.body)
    # sanity check: all requested NICs exist in the DB for this node
    nics = self.db.query(NodeNICInterface).filter(
        NodeNICInterface.name.in_(nic_names)
    ).filter(
        NodeNICInterface.node_id == node_id
    )
    self.tester.assertEqual(nics.count(), len(nic_names))
    assigned_nets = []
    slaves = []
    for nic in data:
        if nic['name'] not in nic_names:
            continue
        assigned_nets.extend(nic['assigned_networks'])
        slaves.append({'name': nic['name']})
        nic['assigned_networks'] = []
    data.append({
        "name": bond_name,
        "type": NETWORK_INTERFACE_TYPES.bond,
        "mode": bond_mode,
        "slaves": slaves,
        "assigned_networks": assigned_nets
    })
    resp = self.node_nics_put(node_id, data)
    self.tester.assertEqual(resp.status_code, 200)
def create_cluster(self, api=True, exclude=None, **kwargs):
    """Create a cluster (via REST API or directly) and register it.

    :param api: when True, POST to ClusterCollectionHandler; otherwise
        create the DB object directly
    :param exclude: optional list of keys to strip from the payload
    :param kwargs: extra cluster fields; a release is created on demand
        if 'release_id' is not supplied
    :returns: cluster dict (api=True) or Cluster object (api=False)
    """
    cluster_data = {
        'name': 'cluster-api-' + str(randint(0, 1000000)),
    }
    if kwargs:
        cluster_data.update(kwargs)
    if 'release_id' not in cluster_data:
        cluster_data['release_id'] = self.create_release(api=False).id
    if exclude and isinstance(exclude, list):
        for key in exclude:
            try:
                del cluster_data[key]
            except KeyError as err:
                logger.warning(err)
    if not api:
        cluster = Cluster.create(cluster_data)
        db().commit()
        self.clusters.append(cluster)
        return cluster
    resp = self.app.post(
        reverse('ClusterCollectionHandler'),
        jsonutils.dumps(cluster_data),
        headers=self.default_headers,
        expect_errors=True
    )
    self.tester.assertEqual(resp.status_code, 201)
    cluster = jsonutils.loads(resp.body)
    self.clusters.append(Cluster.get_by_uid(cluster['id']))
    return cluster
def create_release(self, api=False, **kwargs):
    """Create a release (via REST API or directly) and register it.

    :param api: when True, POST to ReleaseCollectionHandler; otherwise
        create the DB object directly
    :param kwargs: overrides for the generated release fields
    :returns: release dict (api=True) or Release object (api=False)
    """
    version = kwargs.get(
        'version', '{0}-5.1'.format(randint(0, 100000000)))
    release_data = {
        'name': u"release_name_" + version,
        'version': version,
        'description': u"release_desc" + version,
        'operating_system': 'CentOS',
        'roles': self.get_default_roles(),
        'networks_metadata': self.get_default_networks_metadata(),
        'attributes_metadata': self.get_default_attributes_metadata(),
        'volumes_metadata': self.get_default_volumes_metadata()
    }
    if kwargs:
        release_data.update(kwargs)
    if not api:
        release = Release.create(release_data)
        db().commit()
        self.releases.append(release)
        return release
    resp = self.app.post(
        reverse('ReleaseCollectionHandler'),
        params=jsonutils.dumps(release_data),
        headers=self.default_headers
    )
    self.tester.assertEqual(resp.status_code, 201)
    release = jsonutils.loads(resp.body)
    self.releases.append(self.db.query(Release).get(release['id']))
    return release
def test_network_changing_adds_pending_changes(self):
    """A network configuration PUT must record exactly one 'networks'
    pending change for the cluster."""
    cluster = self.env.create_cluster(api=True)
    objects.Cluster.clear_pending_changes(self.env.clusters[0])
    self.assertEqual(len(self.db.query(ClusterChanges).all()), 0)
    get_resp = self.app.get(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': cluster['id']}),
        headers=self.default_headers
    )
    net_id = jsonutils.loads(get_resp.body)['networks'][0]["id"]
    self.app.put(
        reverse(
            'NovaNetworkConfigurationHandler',
            kwargs={'cluster_id': cluster['id']}),
        jsonutils.dumps({'networks': [{
            "id": net_id, "access": "restricted"}
        ]}),
        headers=self.default_headers
    )
    pending_changes = self.db.query(ClusterChanges).filter_by(
        name="networks"
    ).all()
    self.assertEqual(len(pending_changes), 1)
def test_NIC_adds_by_agent(self):
    """A NIC newly reported by the agent must show up in the node's NIC
    list with its reported attributes and no assigned networks."""
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': '12345', 'current_speed': 1,
         'state': 'up'}])
    node = self.env.create_node(api=True, meta=meta)
    meta['interfaces'].append({'name': 'new_nic', 'mac': '643'})
    put_resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps({'mac': node['mac'], 'meta': meta}),
        headers=self.default_headers)
    self.assertEqual(put_resp.status_code, 200)
    get_resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(get_resp.status_code, 200)
    reported_nics = jsonutils.loads(get_resp.body)
    self.assertEqual(len(reported_nics), len(meta['interfaces']))
    for nic in meta['interfaces']:
        matching = [i for i in reported_nics if i['mac'] == nic['mac']]
        reported = matching[0]
        self.assertEqual(reported['mac'], nic['mac'])
        # .get(): speed/state keys may be absent in the agent payload
        self.assertEqual(reported['current_speed'],
                         nic.get('current_speed'))
        self.assertEqual(reported['max_speed'], nic.get('max_speed'))
        self.assertEqual(reported['state'], nic.get('state'))
        for conn in ('assigned_networks', ):
            self.assertEqual(reported[conn], [])
def check_info_handler(
        self, handler_name, get_info, orchestrator_data, default=None):
    """PUT/GET/DELETE round-trip check for an orchestrator info handler.

    Fix: the original used a mutable default argument (``default=[]``),
    which is shared across calls and can be mutated; replaced with the
    ``None`` sentinel idiom. Callers passing ``default`` explicitly are
    unaffected.

    :param handler_name: reversible handler route name
    :param get_info: callable returning the currently stored info
    :param orchestrator_data: payload to PUT and expect back
    :param default: expected stored value after DELETE (defaults to [])
    """
    if default is None:
        default = []
    # updating provisioning info
    put_resp = self.app.put(
        reverse(handler_name, kwargs={'cluster_id': self.cluster.id}),
        jsonutils.dumps(orchestrator_data),
        headers=self.default_headers)
    self.assertEqual(put_resp.status_code, 200)
    self.assertEqual(get_info(), orchestrator_data)
    # getting provisioning info
    get_resp = self.app.get(
        reverse(handler_name, kwargs={'cluster_id': self.cluster.id}),
        headers=self.default_headers)
    self.assertEqual(get_resp.status_code, 200)
    self.datadiff(orchestrator_data, jsonutils.loads(get_resp.body))
    # deleting provisioning info
    delete_resp = self.app.delete(
        reverse(handler_name, kwargs={'cluster_id': self.cluster.id}),
        headers=self.default_headers)
    self.assertEqual(delete_resp.status_code, 202)
    self.assertEqual(get_info(), default)
def test_network_verification_neutron_with_vlan_segmentation(
        self, mocked_rpc):
    """The verify task must probe every VLAN of the Neutron L2 range on
    each node's private-network interface."""
    # get Neutron L2 VLAN ID range
    vlan_range_bounds = self.env.clusters[0].network_config.vlan_range
    expected_vlans = set(
        range(vlan_range_bounds[0], vlan_range_bounds[1] + 1))
    # map node id -> interface that carries the private network
    resp = self.app.get(reverse('NodeCollectionHandler'),
                        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    private_ifaces = {}
    for node in jsonutils.loads(resp.body):
        for net in node['network_data']:
            if net['name'] == 'private':
                private_ifaces[node['id']] = net['dev']
                break
    # check private VLAN range for nodes in the verify task parameters
    task = self.env.launch_verify_networks()
    self.assertEqual(task.status, 'running')
    for node in task.cache['args']['nodes']:
        for net in node['networks']:
            if net['iface'] == private_ifaces[node['uid']]:
                self.assertTrue(expected_vlans <= set(net['vlans']))
                break
def test_assigned_networks_when_node_added(self):
    """When a node joins a cluster, the admin network must land on the
    admin NIC and public on the other, with no NIC left unassigned."""
    mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(
        meta, [{
            'name': 'eth0',
            'mac': mac
        }, {
            'name': 'eth1',
            'mac': self.env.generate_random_mac()
        }])
    node = self.env.create_node(api=True, meta=meta, mac=mac)
    self.env.create_cluster(api=True, nodes=[node['id']])
    resp = self.app.get(reverse('NodeNICsHandler',
                                kwargs={'node_id': node['id']}),
                        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    for nic in jsonutils.loads(resp.body):
        net_names = [net['name'] for net in nic['assigned_networks']]
        if nic['mac'] == mac:
            # the NIC matching the node MAC is the admin/PXE interface
            self.assertTrue("fuelweb_admin" in net_names)
        else:
            self.assertTrue("public" in net_names)
        self.assertGreater(len(nic['assigned_networks']), 0)
def PUT(self, cluster_id):
    """Validate and apply a network configuration for a cluster.

    Runs the check-networks task; if it did not fail, validates the
    payload and applies it through the cluster's network manager. Any
    exception during apply marks the task as errored and re-raises.

    :returns: JSONized Task object via HTTP 202
    """
    data = jsonutils.loads(web.data())
    cluster = self.get_object_or_404(objects.Cluster, cluster_id)
    self.check_net_provider(cluster)
    self.check_if_network_configuration_locked(cluster)
    task = CheckNetworksTaskManager(cluster_id=cluster.id).execute(data)
    if task.status != consts.TASK_STATUSES.error:
        try:
            if "networks" in data:
                self.validator.validate_networks_update(
                    jsonutils.dumps(data))
            if "networking_parameters" in data:
                self.validator.validate_neutron_params(
                    jsonutils.dumps(data), cluster_id=cluster_id)
            objects.Cluster.get_network_manager(cluster).update(
                cluster, data)
        except Exception as exc:
            # record the failure on the task, then propagate
            error_data = {"status": "error",
                          "progress": 100,
                          "message": six.text_type(exc)}
            objects.Task.update(task, error_data)
            logger.error(traceback.format_exc())
            raise
    raise self.http(202, objects.Task.to_json(task))
def test_discovered_node_unified_name(self):
    """The generated node name must use the MAC suffix regardless of the
    case in which the agent reported the MAC."""
    node_mac = self.env.generate_random_mac()

    def assert_node_name(mac):
        self.env.create_node(api=True, **{'mac': mac})
        created = jsonutils.loads(
            self.app.get(reverse('NodeCollectionHandler')).body
        )[0]
        self.assertEqual(created['name'],
                         'Untitled ({0})'.format(node_mac[-5:]))

    assert_node_name(node_mac.upper())
    # remove the node and re-register with the lowercase MAC
    node_id = jsonutils.loads(
        self.app.get(reverse('NodeCollectionHandler')).body
    )[0]['id']
    self.app.delete(
        reverse('NodeHandler', {'obj_id': node_id})
    )
    assert_node_name(node_mac.lower())
def PUT(self, cluster_id):
    """Validate and apply a (nova) network configuration for a cluster.

    The admin (fuelweb_admin) network is stripped from the payload before
    checking, since it is not user-editable. Runs the check-networks
    task; on success validates and applies the remaining data, marking
    the task as errored (and re-raising) if the apply step fails.

    :returns: JSONized Task object.
    :http: * 202 (network checking task created)
           * 404 (cluster not found in db)
    """
    data = jsonutils.loads(web.data())
    if data.get("networks"):
        data["networks"] = [n for n in data["networks"]
                            if n.get("name") != "fuelweb_admin"]
    cluster = self.get_object_or_404(objects.Cluster, cluster_id)
    self.check_net_provider(cluster)
    self.check_if_network_configuration_locked(cluster)
    task = CheckNetworksTaskManager(cluster_id=cluster.id).execute(data)
    if task.status != consts.TASK_STATUSES.error:
        try:
            if "networks" in data:
                self.validator.validate_networks_update(
                    jsonutils.dumps(data))
            if "dns_nameservers" in data:
                self.validator.validate_dns_servers_update(
                    jsonutils.dumps(data))
            objects.Cluster.get_network_manager(cluster).update(
                cluster, data)
        except Exception as exc:
            # record the failure on the task, then propagate
            error_data = {"status": consts.TASK_STATUSES.error,
                          "progress": 100,
                          "message": six.text_type(exc)}
            objects.Task.update(task, error_data)
            logger.error(traceback.format_exc())
            raise
    raise self.http(202, objects.Task.to_json(task))
def test_update(self):
    """Bulk-updating notification statuses via PUT marks both as read."""
    cluster = self.env.create_cluster(api=False)
    unbound = self.env.create_notification()
    bound = self.env.create_notification(cluster_id=cluster.id)

    resp = self.app.put(
        reverse('NotificationCollectionHandler'),
        jsonutils.dumps([
            {'id': unbound.id, 'status': 'read'},
            {'id': bound.id, 'status': 'read'},
        ]),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)

    response = jsonutils.loads(resp.body)
    self.assertEqual(len(response), 2)

    # The handler gives no ordering guarantee, so match entries by id.
    if response[0]['id'] == unbound.id:
        resp_unbound, resp_bound = response
    else:
        resp_bound, resp_unbound = response

    self.assertEqual(resp_bound['cluster'], bound.cluster_id)
    self.assertEqual(resp_bound['status'], 'read')
    self.assertIsNone(resp_unbound.get('cluster', None))
    self.assertEqual(resp_unbound['status'], 'read')
def get(self, node_id):
    """Fetch a node's disk layout via the API and return it deserialized."""
    url = reverse('NodeDisksHandler', kwargs={'node_id': node_id})
    resp = self.app.get(url, headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    return jsonutils.loads(resp.body)
def node_name_test(mac):
    # Register a node with the given MAC and verify the generated
    # "Untitled (...)" name uses the tail of the original MAC.
    self.env.create_node(api=True, **{'mac': mac})
    nodes = jsonutils.loads(
        self.app.get(reverse('NodeCollectionHandler')).body)
    self.assertEqual(nodes[0]['name'],
                     'Untitled ({0})'.format(node_mac[-5:]))
def test_interface_changes_added(self):
    """An 'interfaces' change appears only after NICs are PUT post-deploy."""
    # Creating cluster with node
    self.env.create_cluster()
    cluster = self.env.clusters[0]
    self.env.create_node(
        roles=['controller'],
        pending_addition=True,
        cluster_id=cluster.id)
    # Deploying cluster
    deployment_task = self.env.launch_deployment()
    self.env.wait_ready(deployment_task)

    def filter_changes(chg_type, chg_list):
        # Keep only the cluster changes of the requested type.
        return filter(lambda x: x.get('name') == chg_type, chg_list)

    changes = filter_changes(
        consts.CLUSTER_CHANGES.interfaces,
        cluster['changes']
    )
    # Checking no interfaces change after cluster deployed.
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(0, len(changes))

    node_id = self.env.nodes[0].id
    # Getting nics
    resp = self.env.node_nics_get(node_id)
    interfaces = jsonutils.loads(resp.body)
    # Updating nics
    self.env.node_nics_put(node_id, interfaces)
    # Checking 'interfaces' change in cluster changes
    changes = filter_changes(
        consts.CLUSTER_CHANGES.interfaces,
        cluster['changes']
    )
    self.assertEqual(1, len(changes))
def test_multiline_log_entry(self):
    """Log entries containing newlines survive the API round trip."""
    settings.LOGS[0]['multiline'] = True
    log_entries = [
        [time.strftime(settings.UI_LOG_DATE_FORMAT),
         'LEVEL111',
         'text1'],
        [time.strftime(settings.UI_LOG_DATE_FORMAT),
         'LEVEL222',
         'text\nmulti\nline'],
        [time.strftime(settings.UI_LOG_DATE_FORMAT),
         'LEVEL333',
         'text3'],
    ]
    self.env.create_cluster(api=False)
    self._create_logfile_for_node(settings.LOGS[0], log_entries)

    resp = self.app.get(
        reverse('LogEntryCollectionHandler'),
        params={'source': settings.LOGS[0]['id']},
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)

    response = jsonutils.loads(resp.body)
    # The handler returns entries in reverse order; restore file order
    # before comparing against what was written.
    response['entries'].reverse()
    self.assertEqual(response['entries'], log_entries)

    # Restore the global setting mutated at the top of the test.
    settings.LOGS[0]['multiline'] = False
def test_get_handler_with_incompleted_iface_data(self):
    """Interfaces with a missing or empty name/MAC yield an empty NIC list."""
    meta = self.env.default_metadata()
    meta["interfaces"] = []
    node = self.env.create_node(api=True, meta=meta)

    incomplete_metas = [
        {'interfaces': [{'name': '', 'mac': '00:00:00'}]},
        {'interfaces': [{'name': 'eth0', 'mac': ''}]},
        {'interfaces': [{'mac': '00:00:00'}]},
        {'interfaces': [{'name': 'eth0'}]},
    ]

    for nic_meta in incomplete_metas:
        meta = self.env.default_metadata()
        meta.update(nic_meta)
        resp = self.app.put(
            reverse('NodeAgentHandler'),
            jsonutils.dumps({'mac': node['mac'], 'meta': meta}),
            expect_errors=True,
            headers=self.default_headers
        )
        self.assertEqual(resp.status_code, 200)

        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
            headers=self.default_headers
        )
        self.assertEqual(jsonutils.loads(resp.body), [])
def test_mgmt_storage_networks_have_no_gateway(self):
    """Management and storage networks come back without a gateway set."""
    resp = self.env.neutron_networks_get(self.cluster.id)
    self.assertEqual(200, resp.status_code)
    networks = jsonutils.loads(resp.body)['networks']
    for network in networks:
        if network['name'] in ('management', 'storage'):
            self.assertIsNone(network['gateway'])
def test_roles_failed_to_delete_assigned(self):
    """Deleting a role still assigned to a node is rejected with 400."""
    self.env.create(
        nodes_kwargs=[
            {"status": "ready", "roles": ["controller"]}
        ]
    )
    resp = self.app.get(
        reverse('ReleaseCollectionHandler'),
        headers=self.default_headers
    )
    release = jsonutils.loads(resp.body)[0]

    # Strip the assigned "controller" role and try to save the release.
    remaining = set(release["roles"])
    remaining.remove("controller")
    release["roles"] = list(remaining)

    resp = self.app.put(
        reverse('ReleaseHandler', kwargs={"obj_id": release["id"]}),
        jsonutils.dumps(release),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        resp.body,
        "Cannot delete roles already assigned to nodes: controller"
    )
def test_network_assignment_when_node_added(self):
    """The admin network is assigned to the NIC carrying the node's MAC."""
    cluster = self.env.create_cluster(api=True)
    admin_mac = self.env.generate_random_mac()
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {"name": "eth0", "mac": admin_mac},
        {"name": "eth1", "mac": self.env.generate_random_mac()},
    ])
    node = self.env.create_node(api=True, meta=meta, mac=admin_mac)

    resp = self.app.put(
        reverse("NodeCollectionHandler"),
        jsonutils.dumps([{"id": node["id"], "cluster_id": cluster["id"]}]),
        headers=self.default_headers,
    )
    self.assertEqual(resp.status_code, 200)

    resp = self.app.get(
        reverse("NodeNICsHandler", kwargs={"node_id": node["id"]}),
        headers=self.default_headers,
    )
    self.assertEqual(resp.status_code, 200)

    for nic in jsonutils.loads(resp.body):
        assigned = [net["name"] for net in nic["assigned_networks"]]
        if nic["mac"] == admin_mac:
            self.assertTrue("fuelweb_admin" in assigned)
        else:
            self.assertTrue("public" in assigned)
        # Every NIC must have at least one network assigned.
        self.assertGreater(len(assigned), 0)
def test_admin_untagged_intersection(self):
    """Untagging the management network makes the deployment fail."""
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {"mac": "00:00:00:00:00:66", "max_speed": 1000,
         "name": "eth0", "current_speed": 1000},
        {"mac": "00:00:00:00:00:77", "max_speed": 1000,
         "name": "eth1", "current_speed": None},
    ])
    self.env.create(
        nodes_kwargs=[{
            'api': True,
            'roles': ['controller'],
            'pending_addition': True,
            'meta': meta,
            'mac': "00:00:00:00:00:66",
        }]
    )
    cluster_id = self.env.clusters[0].id

    nets = jsonutils.loads(self.env.nova_networks_get(cluster_id).body)
    # Clear the VLAN tag on the management network only.
    for net in nets["networks"]:
        if net["name"] == "management":
            net["vlan_start"] = None
    self.env.nova_networks_put(cluster_id, nets)

    supertask = self.env.launch_deployment()
    self.env.wait_error(supertask)
def PUT(self, cluster_id):
    """Validate and apply updated network settings for a cluster.

    :returns: JSONized Task object.
    :http: * 202 (network checking task created)
           * 404 (cluster not found in db)
    """
    data = jsonutils.loads(web.data())
    cluster = self.get_object_or_404(objects.Cluster, cluster_id)
    self.check_net_provider(cluster)
    self.check_if_network_configuration_locked(cluster)
    task_manager = CheckNetworksTaskManager(cluster_id=cluster.id)
    task = task_manager.execute(data)
    if task.status != consts.TASK_STATUSES.error:
        try:
            if 'networks' in data:
                self.validator.validate_networks_update(
                    jsonutils.dumps(data))
            if 'networking_parameters' in data:
                self.validator.validate_neutron_params(
                    jsonutils.dumps(data), cluster_id=cluster_id)
            objects.Cluster.get_network_manager(cluster).update(
                cluster, data)
        except Exception as exc:
            # Mark the checking task as failed.  Use the status enum
            # rather than the bare 'error' literal (matches the check
            # above), and avoid shadowing the request payload `data`.
            error_info = {
                'status': consts.TASK_STATUSES.error,
                'progress': 100,
                'message': six.text_type(exc)
            }
            objects.Task.update(task, error_info)
            logger.error(traceback.format_exc())
    raise self.http(202, objects.Task.to_json(task))
def test_get_handler_with_NICs(self):
    """GET on NodeNICsHandler returns every NIC with its speed data."""
    meta = self.env.default_metadata()
    self.env.set_interfaces_in_meta(meta, [
        {'name': 'eth0', 'mac': self.env.generate_random_mac(),
         'current_speed': 1, 'max_speed': 1},
        {'name': 'eth1', 'mac': self.env.generate_random_mac(),
         'current_speed': 1, 'max_speed': 1}])
    self.env.create_node(api=True, meta=meta)
    node_db = self.env.nodes[0]

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node_db.id}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    response = jsonutils.loads(resp.body)

    # Same set of interface ids as stored in the DB (comprehensions
    # instead of map(), which returns an iterator on py3).
    self.assertItemsEqual(
        [i['id'] for i in response],
        [i.id for i in node_db.interfaces]
    )
    for nic in meta['interfaces']:
        # next() over a generator works on both py2 and py3, unlike
        # indexing the result of filter().
        resp_nic = next(i for i in response if i['mac'] == nic['mac'])
        self.assertEqual(resp_nic['mac'], nic['mac'])
        self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
        self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
        for conn in ('assigned_networks', ):
            self.assertEqual(resp_nic[conn], [])
def http_get(self, name, arguments):
    """Makes a GET request to a resource with `name`.
    Returns a deserialized dict.
    """
    url = base.reverse(name, kwargs=arguments)
    response = self.app.get(url, headers=self.default_headers)
    return json.loads(response.body)
def create_node_and_check_assignment(self):
    """Create a node whose admin IP sits on eth0 and verify that the
    'public' network ends up assigned to eth1.
    """
    admin_cidr = self.env.network_manager.get_admin_network_group().cidr
    admin_ip = str(IPNetwork(admin_cidr)[0])

    meta = self.env.default_metadata()
    meta['interfaces'] = [
        {'name': 'eth3', 'mac': self.env.generate_random_mac()},
        {'name': 'eth2', 'mac': self.env.generate_random_mac()},
        {'name': 'eth0', 'mac': self.env.generate_random_mac(),
         'ip': admin_ip},
        {'name': 'eth1', 'mac': self.env.generate_random_mac()},
    ]
    node = self.env.create_node(
        api=True, meta=meta, cluster_id=self.env.clusters[0].id)

    resp = self.app.get(
        reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)

    nics = jsonutils.loads(resp.body)
    eth1 = [nic for nic in nics if nic['name'] == 'eth1']
    self.assertEqual(len(eth1), 1)
    public_nets = [n for n in eth1[0]['assigned_networks']
                   if n['name'] == 'public']
    self.assertEqual(len(public_nets), 1)
def get_nodes():
    # Fetch the first cluster's node collection, deserialized.
    url = reverse('NodeCollectionHandler',
                  kwargs={'cluster_id': self.env.clusters[0].id})
    resp = self.app.get(url, headers=self.default_headers)
    return jsonutils.loads(resp.body)
def create_release(self, api=False, **kwargs):
    """Create a release — via the REST API when `api` is true, otherwise
    directly in the DB — track it in self.releases, and return it.
    Extra/overriding fields may be supplied through kwargs.
    """
    version = str(randint(0, 100000000))
    release_data = {
        'name': u"release_name_" + version,
        'version': version,
        'description': u"release_desc" + version,
        'operating_system': 'CentOS',
        'roles': self.get_default_roles(),
        'networks_metadata': self.get_default_networks_metadata(),
        'attributes_metadata': self.get_default_attributes_metadata(),
        'volumes_metadata': self.get_default_volumes_metadata(),
    }
    # update() with an empty mapping is a no-op, so no guard is needed.
    release_data.update(kwargs)

    if api:
        resp = self.app.post(
            reverse('ReleaseCollectionHandler'),
            params=jsonutils.dumps(release_data),
            headers=self.default_headers
        )
        self.tester.assertEqual(resp.status_code, 201)
        release = jsonutils.loads(resp.body)
        # Track the DB-backed object, not the API response dict.
        self.releases.append(
            self.db.query(Release).get(release['id'])
        )
    else:
        release = Release.create(release_data)
        db().commit()
        self.releases.append(release)
    return release
def test_node_update_ext_mac(self):
    """A node matched by interface MAC gets its node MAC updated on PUT."""
    meta = self.env.default_metadata()
    node = self.env.create_node(
        api=False,
        mac=meta["interfaces"][0]["mac"],
        meta={}
    )
    update = {
        "mac": self.env.generate_random_mac(),
        "meta": meta
    }
    # We want to be sure that new mac is not equal to old one
    self.assertNotEqual(node.mac, update["mac"])

    # Here we are trying to update node
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([update]),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 200)

    response = jsonutils.loads(resp.body)
    # Here we are checking if node mac is successfully updated
    self.assertEqual(update["mac"], response[0]["mac"])
    self.assertEqual(meta, response[0]["meta"])