def test_provisioning_serialization_ignore_customized(self):
    """serialize() with ignore_customized=True passes the raw serializer
    output to the provisioning-data callback, untouched by any node's
    replaced_provisioning_info."""
    cluster = self._create_cluster_with_extensions()
    serialized = {"nodes": cluster.nodes}

    serializer_mock = mock.MagicMock()
    serializer_mock.serialize.return_value = serialized

    patch_serializer = mock.patch(
        'nailgun.orchestrator.provisioning_serializers.'
        'get_serializer_for_cluster',
        return_value=serializer_mock)
    patch_callback = mock.patch(
        'nailgun.orchestrator.provisioning_serializers.'
        'fire_callback_on_provisioning_data_serialization')
    # Give node 0 customized info; it must be ignored below.
    patch_replaced = mock.patch.object(
        cluster.nodes[0], 'replaced_provisioning_info',
        new_callable=mock.Mock(return_value={"it's": "something"}))

    with patch_serializer, patch_callback as mfire_callback, patch_replaced:
        provisioning_serializers.serialize(
            cluster, cluster.nodes, ignore_customized=True)

    mfire_callback.assert_called_once_with(
        serialized, cluster, cluster.nodes)
def test_provisioning_serialization_ignore_customized_false(self):
    """serialize() with ignore_customized=False must drop nodes that carry
    replaced_provisioning_info from the data handed to the callback."""
    controller = {'roles': ['controller'], 'pending_addition': True}
    cluster = self._create_cluster_with_extensions(
        nodes_kwargs=[dict(controller) for _ in range(4)])

    serialized = {"nodes": [{"uid": n.uid} for n in cluster.nodes]}
    # Node 0 is customized, so only nodes 1..3 should reach the callback.
    expected_data = {"nodes": copy.deepcopy(serialized["nodes"][1:])}

    serializer_mock = mock.MagicMock()
    serializer_mock.serialize.return_value = serialized

    patch_serializer = mock.patch(
        'nailgun.orchestrator.provisioning_serializers.'
        'get_serializer_for_cluster',
        return_value=serializer_mock)
    patch_callback = mock.patch(
        'nailgun.orchestrator.provisioning_serializers.'
        'fire_callback_on_provisioning_data_serialization')
    patch_replaced = mock.patch.object(
        cluster.nodes[0], 'replaced_provisioning_info',
        new_callable=mock.Mock(return_value={"it's": "something"}))

    with patch_serializer, patch_callback as mfire_callback, patch_replaced:
        provisioning_serializers.serialize(
            cluster, cluster.nodes, ignore_customized=False)

    self.assertEqual(mfire_callback.call_args[0][0], expected_data)
    self.assertIs(mfire_callback.call_args[0][1], cluster)
    self.assertItemsEqual(
        mfire_callback.call_args[0][2], cluster.nodes[1:])
def message(cls, task, nodes_to_provisioning):
    """Build the provisioning RPC message for Astute.

    Re-reads the task under a DB row lock and locks the node rows so the
    serialized data stays consistent with what actually gets provisioned.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    # Re-fetch under lock; the passed-in task object may be stale.
    task = objects.Task.get_by_uid(
        task.id,
        fail_if_not_found=True,
        lock_for_update=True
    )
    objects.NodeCollection.lock_nodes(nodes_to_provisioning)
    serialized_cluster = provisioning_serializers.serialize(
        task.cluster, nodes_to_provisioning)
    for node in nodes_to_provisioning:
        # Fake-task modes never touch real syslog directories.
        if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
            continue
        logs_utils.prepare_syslog_dir(node)
    rpc_message = make_astute_message(
        task,
        cls._get_provision_method(task.cluster),
        'provision_resp',
        {
            'provisioning_info': serialized_cluster
        }
    )
    # Persist the locked/updated rows before handing the message off.
    db().commit()
    return rpc_message
def message(cls, task, nodes_to_provisioning):
    """Build the 'provision' RPC message for Astute.

    Re-reads the task under a DB row lock and locks the node rows so the
    serialized data stays consistent with what actually gets provisioned.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    # Re-fetch under lock; the passed-in task object may be stale.
    task = objects.Task.get_by_uid(
        task.id,
        fail_if_not_found=True,
        lock_for_update=True
    )
    objects.NodeCollection.lock_nodes(nodes_to_provisioning)
    serialized_cluster = provisioning_serializers.serialize(
        task.cluster, nodes_to_provisioning)
    for node in nodes_to_provisioning:
        # Fake-task modes never touch real syslog directories.
        if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
            continue
        admin_net_id = objects.Node.get_network_manager(
            node
        ).get_admin_network_group_id(node.id)
        TaskHelper.prepare_syslog_dir(node, admin_net_id)
    rpc_message = make_astute_message(
        task,
        'provision',
        'provision_resp',
        {
            'provisioning_info': serialized_cluster
        }
    )
    # Persist the locked/updated rows before handing the message off.
    db().commit()
    return rpc_message
def message(cls, task, nodes_to_provisioning):
    """Build the 'provision' RPC message (legacy signature).

    Manually replaced provisioning info on the cluster, if present, wins
    over a fresh serialization.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    serialized_cluster = task.cluster.replaced_provisioning_info or \
        provisioning_serializers.serialize(
            task.cluster, nodes_to_provisioning)
    for node in nodes_to_provisioning:
        # Fake-task modes skip syslog directory preparation.
        if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
            continue
        # NOTE(review): get_admin_network_group_id() is called without a
        # node id here -- confirm this matches the network manager API
        # version in use.
        admin_net_id = objects.Node.get_network_manager(
            node
        ).get_admin_network_group_id()
        TaskHelper.prepare_syslog_dir(node, admin_net_id)
    return make_astute_message(
        'provision',
        'provision_resp',
        {
            'task_uuid': task.uuid,
            'provisioning_info': serialized_cluster
        }
    )
def test_node_serialization_w_bonded_admin_iface(self):
    """The serialized 'netcfg/choose_interface' kernel option must carry
    the admin (PXE) NIC's MAC even when that NIC is part of a bond."""
    # create additional node to test bonding
    admin_mac = self.env.generate_random_mac()
    meta = {
        'interfaces': [{
            'name': 'eth1',
            'mac': admin_mac,
            # eth1 is the PXE/admin interface
            'pxe': True
        }, {
            'name': 'eth2',
            'mac': self.env.generate_random_mac()
        }, {
            'name': 'eth3',
            'mac': self.env.generate_random_mac()
        }, {
            'name': 'eth4',
            'mac': self.env.generate_random_mac()
        }]
    }
    node = self.env.create_node(pending_addition=True,
                                cluster_id=self.cluster_db.id,
                                meta=meta,
                                mac=admin_mac)
    # get node from db
    node_db = objects.Node.get_by_uid(node['id'])
    # bond admin iface (eth1) together with eth4
    self.env.make_bond_via_api(
        'lnx_bond', '', ['eth1', 'eth4'], node['id'],
        bond_properties={'mode': consts.BOND_MODES.balance_rr})
    # check serialized data
    serialized_node = ps.serialize(self.cluster_db, [node_db])['nodes'][0]
    out_mac = serialized_node['kernel_options']['netcfg/choose_interface']
    self.assertEqual(out_mac, admin_mac)
def test_node_serialization_w_bonded_admin_iface(self): self.cluster_db = self.env.clusters[0] # create additional node to test bonding admin_mac = self.env.generate_random_mac() meta = { 'interfaces': [ {'name': 'eth1', 'mac': self.env.generate_random_mac()}, {'name': 'eth2', 'mac': self.env.generate_random_mac()}, {'name': 'eth3', 'mac': self.env.generate_random_mac()}, {'name': 'eth4', 'mac': self.env.generate_random_mac()} ] } node = self.env.create_node( pending_addition=True, cluster_id=self.cluster_db.id, meta=meta, mac=admin_mac ) # get node from db node_db = objects.Node.get_by_uid(node['id']) # bond admin iface self.env.make_bond_via_api('lnx_bond', '', ['eth1', 'eth4'], node['id'], bond_properties={ 'mode': consts.BOND_MODES.balance_rr }) # check serialized data serialized_node = ps.serialize(self.cluster_db, [node_db])['nodes'][0] out_mac = serialized_node['kernel_options']['netcfg/choose_interface'] self.assertEqual(out_mac, admin_mac)
def message(cls, task):
    """Build the legacy 'provision' RPC message for every node pending
    provisioning in the task's cluster.

    Raises errors.NodeOffline if any real (non-fake) node is offline.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    nodes = TaskHelper.nodes_to_provision(task.cluster)
    USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

    # We need to assign admin ips
    # and only after that prepare syslog
    # directories
    task.cluster.prepare_for_provisioning()

    for node in nodes:
        # Fake-task modes skip the offline check and syslog preparation.
        if USE_FAKE:
            continue
        if node.offline:
            raise errors.NodeOffline(
                u'Node "%s" is offline.'
                " Remove it from environment and try again." %
                node.full_name
            )
        TaskHelper.prepare_syslog_dir(node)

    # Manually replaced provisioning info (if any) wins over a fresh
    # serialization.
    serialized_cluster = task.cluster.replaced_provisioning_info or \
        provisioning_serializers.serialize(task.cluster)

    message = {
        "method": "provision",
        "respond_to": "provision_resp",
        "args": {"task_uuid": task.uuid,
                 "provisioning_info": serialized_cluster},
    }
    return message
def test_provisioning_serialization_ignore_customized(self):
    """With ignore_customized=True the callback receives the serializer's
    raw output even though a node has replaced_provisioning_info."""
    cluster = self._create_cluster_with_extensions()
    raw_output = {"nodes": cluster.nodes}

    fake_serializer = mock.MagicMock()
    fake_serializer.serialize.return_value = raw_output

    get_serializer_patch = mock.patch(
        'nailgun.orchestrator.provisioning_serializers.'
        'get_serializer_for_cluster',
        return_value=fake_serializer)
    callback_patch = mock.patch(
        'nailgun.orchestrator.provisioning_serializers.'
        'fire_callback_on_provisioning_data_serialization')
    # Customize node 0; the flag must make serialize() ignore this.
    replaced_patch = mock.patch.object(
        cluster.nodes[0], 'replaced_provisioning_info',
        new_callable=mock.Mock(return_value={"it's": "something"}))

    with get_serializer_patch, callback_patch as mfire_callback, \
            replaced_patch:
        provisioning_serializers.serialize(
            cluster, cluster.nodes, ignore_customized=True)

    mfire_callback.assert_called_once_with(
        raw_output, cluster, cluster.nodes)
def test_provisioning_serialization_ignore_customized_false(self):
    """With ignore_customized=False, nodes carrying
    replaced_provisioning_info are removed from the callback payload."""
    kwargs = {'roles': ['controller'], 'pending_addition': True}
    cluster = self._create_cluster_with_extensions(
        nodes_kwargs=[dict(kwargs) for _ in range(4)])

    raw_output = {"nodes": [{"uid": n.uid} for n in cluster.nodes]}
    # Node 0 is customized; only the remaining three should survive.
    expected_data = {"nodes": copy.deepcopy(raw_output["nodes"][1:])}

    fake_serializer = mock.MagicMock()
    fake_serializer.serialize.return_value = raw_output

    get_serializer_patch = mock.patch(
        'nailgun.orchestrator.provisioning_serializers.'
        'get_serializer_for_cluster',
        return_value=fake_serializer)
    callback_patch = mock.patch(
        'nailgun.orchestrator.provisioning_serializers.'
        'fire_callback_on_provisioning_data_serialization')
    replaced_patch = mock.patch.object(
        cluster.nodes[0], 'replaced_provisioning_info',
        new_callable=mock.Mock(return_value={"it's": "something"}))

    with get_serializer_patch, callback_patch as mfire_callback, \
            replaced_patch:
        provisioning_serializers.serialize(
            cluster, cluster.nodes, ignore_customized=False)

    self.assertEqual(mfire_callback.call_args[0][0], expected_data)
    self.assertIs(mfire_callback.call_args[0][1], cluster)
    self.assertItemsEqual(
        mfire_callback.call_args[0][2], cluster.nodes[1:])
def test_pipeline_change_data(self):
    """Every pipeline of an enabled extension must be applied, in order,
    to the serialized provisioning data.

    Fix: the original assigned ``cluster.extensions =
    [self.extension.name]`` and flushed, only to overwrite it a few lines
    later with the locally defined extension -- that dead assignment and
    flush are removed.
    """
    self.env.create(
        cluster_kwargs={'api': False},
        nodes_kwargs=[{'roles': ['controller'],
                       'pending_addition': True}])
    cluster = self.env.clusters[0]

    class PipelinePlus1(BasePipeline):
        @classmethod
        def process_provisioning(cls, data, cluster, nodes, **kwargs):
            data['key'] += 1
            return data

    class PipelinePlus2(BasePipeline):
        @classmethod
        def process_provisioning(cls, data, cluster, nodes, **kwargs):
            data['key'] += 2
            return data

    class Extension(BaseExtension):
        name = 'ext_name'
        version = '1.0.0'
        description = 'ext description'
        data_pipelines = (PipelinePlus1, PipelinePlus2)

    extension = Extension()
    cluster.extensions = [extension.name]
    self.db.flush()

    data = {'key': 0, 'nodes': []}
    mserializer = mock.MagicMock()
    mserializer.serialize.return_value = data

    with mock.patch('nailgun.extensions.manager.get_all_extensions',
                    return_value=[extension]):
        with mock.patch(
                'nailgun.orchestrator.provisioning_serializers.'
                'get_serializer_for_cluster',
                return_value=mserializer):
            new_data = provisioning_serializers.serialize(
                cluster, cluster.nodes)

    # PipelinePlus1 adds 1 and PipelinePlus2 adds 2.
    self.assertEqual(new_data['key'], 3)
def test_pipeline_change_data(self):
    """Every pipeline of an enabled extension must be applied, in order,
    to the serialized provisioning data.

    Fix: the original assigned ``cluster.extensions =
    [self.extension.name]`` and flushed, only to overwrite it a few lines
    later with the locally defined extension -- that dead assignment and
    flush are removed.
    """
    cluster = self.env.create(
        cluster_kwargs={'api': False},
        nodes_kwargs=[{'roles': ['controller'],
                       'pending_addition': True}]
    )

    class PipelinePlus1(BasePipeline):
        @classmethod
        def process_provisioning(cls, data, cluster, nodes, **kwargs):
            data['key'] += 1
            return data

    class PipelinePlus2(BasePipeline):
        @classmethod
        def process_provisioning(cls, data, cluster, nodes, **kwargs):
            data['key'] += 2
            return data

    class Extension(BaseExtension):
        name = 'ext_name'
        version = '1.0.0'
        description = 'ext description'
        data_pipelines = (PipelinePlus1, PipelinePlus2)

    extension = Extension()
    cluster.extensions = [extension.name]
    self.db.flush()

    data = {'key': 0, 'nodes': []}
    mserializer = mock.MagicMock()
    mserializer.serialize.return_value = data

    with mock.patch('nailgun.extensions.manager.get_all_extensions',
                    return_value=[extension]):
        with mock.patch('nailgun.orchestrator.provisioning_serializers.'
                        'get_serializer_for_cluster',
                        return_value=mserializer):
            new_data = provisioning_serializers.serialize(
                cluster, cluster.nodes)

    # PipelinePlus1 adds 1 and PipelinePlus2 adds 2.
    self.assertEqual(new_data['key'], 3)
def test_ubuntu_serializer(self):
    """For an Ubuntu release, each serialized node's
    'netcfg/choose_interface' kernel option must be its admin NIC MAC."""
    release = self.env.create_release(
        api=False, operating_system="Ubuntu")
    self.env.create(
        cluster_kwargs={"release_id": release.id},
        nodes_kwargs=[
            {"roles": ["controller"], "pending_addition": True},
            {"roles": ["compute"], "pending_addition": True},
        ],
    )
    cluster_db = self.env.clusters[0]
    serialized_cluster = serialize(cluster_db, cluster_db.nodes)

    for serialized_node in serialized_cluster["nodes"]:
        db_node = self.db.query(Node).filter_by(
            fqdn=serialized_node["hostname"]).first()
        choose_iface = serialized_node["kernel_options"][
            "netcfg/choose_interface"]
        self.assertEqual(choose_iface, db_node.admin_interface.mac)
def message(cls, task, nodes_to_provisioning):
    """Build the 'provision' message for the given nodes.

    Manually replaced provisioning info on the cluster, if present, wins
    over a fresh serialization.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    serialized_cluster = task.cluster.replaced_provisioning_info or \
        provisioning_serializers.serialize(
            task.cluster, nodes_to_provisioning)
    for node in nodes_to_provisioning:
        # Fake-task modes skip syslog directory preparation.
        if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
            continue
        TaskHelper.prepare_syslog_dir(node)
    return make_astute_message('provision', 'provision_resp', {
        'task_uuid': task.uuid,
        'provisioning_info': serialized_cluster
    })
def setUp(self):
    """Create a cluster with one controller and one compute node, then
    serialize it once for use by the tests."""
    super(TestProvisioningSerializer, self).setUp()
    self.cluster_db = self.env.create()
    # One single-interface node per role.
    for node_roles in (['controller'], ['compute']):
        self.env.create_nodes_w_interfaces_count(
            1, 1,
            roles=node_roles,
            pending_addition=True,
            cluster_id=self.cluster_db.id)
    self.attributes = self.cluster_db.attributes.editable
    self.serialized_cluster = ps.serialize(
        self.cluster_db, self.cluster_db.nodes)
def test_ubuntu_serializer(self):
    """For an Ubuntu release, each serialized node's
    'netcfg/choose_interface' kernel option must be its admin NIC MAC."""
    release = self.env.create_release(
        api=False, operating_system='Ubuntu')
    self.env.create(
        cluster_kwargs={'release_id': release.id},
        nodes_kwargs=[
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['compute'], 'pending_addition': True},
        ])
    cluster_db = self.env.clusters[0]
    serialized_cluster = serialize(cluster_db, cluster_db.nodes)

    for serialized_node in serialized_cluster['nodes']:
        db_node = self.db.query(Node).filter_by(
            fqdn=serialized_node['hostname']).first()
        choose_iface = serialized_node['kernel_options'][
            'netcfg/choose_interface']
        self.assertEqual(choose_iface, db_node.admin_interface.mac)
def test_ubuntu_serializer(self):
    """For an Ubuntu release, each serialized node's
    'netcfg/choose_interface' kernel option must be its admin NIC name.

    Fix: replace the deprecated ``assertEquals`` alias with
    ``assertEqual``.
    """
    release = self.env.create_release(
        api=False, operating_system='Ubuntu')
    cluster = self.env.create(
        cluster_kwargs={
            'mode': 'multinode',
            'release': release.id},
        nodes_kwargs=[
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['compute'], 'pending_addition': True}])
    cluster_db = self.db.query(Cluster).get(cluster['id'])
    serialized_cluster = serialize(cluster_db)
    for node in serialized_cluster['nodes']:
        node_db = db().query(Node).filter_by(
            fqdn=node['hostname']).first()
        # assertEquals is a deprecated unittest alias of assertEqual.
        self.assertEqual(
            node['kernel_options']['netcfg/choose_interface'],
            node_db.admin_interface.name)
def message(cls, task, nodes_to_provisioning):
    """Build the 'provision' message dict for the given nodes.

    Manually replaced provisioning info on the cluster, if present, wins
    over a fresh serialization.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    serialized_cluster = task.cluster.replaced_provisioning_info or \
        provisioning_serializers.serialize(
            task.cluster, nodes_to_provisioning)
    for node in nodes_to_provisioning:
        # Fake-task modes skip syslog directory preparation.
        if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
            continue
        TaskHelper.prepare_syslog_dir(node)
    message = {
        'method': 'provision',
        'respond_to': 'provision_resp',
        'args': {
            'task_uuid': task.uuid,
            'provisioning_info': serialized_cluster}}
    return message
def message(cls, task, nodes_to_provisioning):
    """Build the 'provision' message dict for the given nodes.

    Fails fast if any of the nodes is offline; manually replaced
    provisioning info on the cluster wins over a fresh serialization.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    # Abort before serialization if any target node is unreachable.
    TaskHelper.raise_if_node_offline(nodes_to_provisioning)
    serialized_cluster = task.cluster.replaced_provisioning_info or \
        provisioning_serializers.serialize(
            task.cluster, nodes_to_provisioning)
    for node in nodes_to_provisioning:
        # Fake-task modes skip syslog directory preparation.
        if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
            continue
        TaskHelper.prepare_syslog_dir(node)
    message = {
        'method': 'provision',
        'respond_to': 'provision_resp',
        'args': {
            'task_uuid': task.uuid,
            'provisioning_info': serialized_cluster}}
    return message
def setUp(self):
    """Create a cluster with one controller and one compute node, then
    serialize it once for use by the tests."""
    super(TestProvisioningSerializer, self).setUp()
    self.cluster_db = self.env.create()
    # One single-interface node per role.
    for node_roles in (['controller'], ['compute']):
        self.env.create_nodes_w_interfaces_count(
            1, 1,
            roles=node_roles,
            pending_addition=True,
            cluster_id=self.cluster_db.id)
    self.attributes = self.cluster_db.attributes.editable
    self.serialized_cluster = ps.serialize(
        self.cluster_db, self.cluster_db.nodes)
def message(cls, task):
    """Build the legacy 'provision' RPC message for every node pending
    provisioning in the task's cluster.

    Raises errors.NodeOffline if any real (non-fake) node is offline;
    marks each real node as 'provisioning' and commits per node.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    nodes = TaskHelper.nodes_to_provision(task.cluster)
    USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

    # We need to assign admin ips
    # and only after that prepare syslog
    # directories
    task.cluster.prepare_for_provisioning()

    for node in nodes:
        # Fake-task modes skip the offline check and status update.
        if USE_FAKE:
            continue
        if node.offline:
            raise errors.NodeOffline(
                u'Node "%s" is offline.'
                ' Remove it from environment and try again.' %
                node.full_name)
        TaskHelper.prepare_syslog_dir(node)

        node.status = 'provisioning'
        db().commit()

    # Manually replaced provisioning info (if any) wins over a fresh
    # serialization.
    serialized_cluster = task.cluster.replaced_provisioning_info or \
        provisioning_serializers.serialize(task.cluster)

    message = {
        'method': 'provision',
        'respond_to': 'provision_resp',
        'args': {
            'task_uuid': task.uuid,
            'provisioning_info': serialized_cluster
        }
    }
    return message
def message(cls, task, nodes_to_provisioning):
    """Build the 'provision' RPC message for Astute.

    Re-reads the task under a DB row lock and locks the node rows so the
    serialized data stays consistent with what actually gets provisioned.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    # Re-fetch under lock; the passed-in task object may be stale.
    task = objects.Task.get_by_uid(task.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
    objects.NodeCollection.lock_nodes(nodes_to_provisioning)
    serialized_cluster = provisioning_serializers.serialize(
        task.cluster, nodes_to_provisioning)
    for node in nodes_to_provisioning:
        # Fake-task modes never touch real syslog directories.
        if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
            continue
        admin_net_id = objects.Node.get_network_manager(
            node).get_admin_network_group_id(node.id)
        TaskHelper.prepare_syslog_dir(node, admin_net_id)
    rpc_message = make_astute_message(
        task,
        'provision',
        'provision_resp',
        {'provisioning_info': serialized_cluster})
    # Persist the locked/updated rows before handing the message off.
    db().commit()
    return rpc_message
def message(cls, task):
    """Build the legacy 'provision' RPC message for every node pending
    provisioning in the task's cluster.

    Raises errors.NodeOffline if any real (non-fake) node is offline;
    marks each real node as 'provisioning' and commits per node.
    """
    logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
    nodes = TaskHelper.nodes_to_provision(task.cluster)
    USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

    # We need to assign admin ips
    # and only after that prepare syslog
    # directories
    task.cluster.prepare_for_provisioning()

    for node in nodes:
        # Fake-task modes skip the offline check and status update.
        if USE_FAKE:
            continue
        if node.offline:
            raise errors.NodeOffline(
                u'Node "%s" is offline.'
                ' Remove it from environment and try again.' %
                node.full_name)
        TaskHelper.prepare_syslog_dir(node)

        node.status = 'provisioning'
        db().commit()

    # Manually replaced provisioning info (if any) wins over a fresh
    # serialization.
    serialized_cluster = task.cluster.replaced_provisioning_info or \
        provisioning_serializers.serialize(task.cluster)

    message = {
        'method': 'provision',
        'respond_to': 'provision_resp',
        'args': {
            'task_uuid': task.uuid,
            'provisioning_info': serialized_cluster}}
    return message
def _serialize(self, cluster, nodes):
    """Serialize provisioning info for *nodes*, always ignoring any
    user-customized (replaced) provisioning data."""
    serialized = provisioning_serializers.serialize(
        cluster, nodes, ignore_customized=True)
    return serialized
def _serialize(self, cluster, nodes):
    """Serialize provisioning info for *nodes*, always ignoring any
    user-customized (replaced) provisioning data."""
    serialized = provisioning_serializers.serialize(
        cluster,
        nodes,
        ignore_customized=True)
    return serialized