class TestRefreshSecrets(PservTestCase):
    """Tests for the `refresh_secrets` task."""

    resources = (
        ("celery", FixtureResource(CeleryFixture())),
    )

    def test_does_not_require_arguments(self):
        refresh_secrets()
        # Nothing is refreshed, but there is no error either.

    def test_breaks_on_unknown_item(self):
        self.assertRaises(AssertionError, refresh_secrets, not_an_item=None)

    def test_works_as_a_task(self):
        self.assertTrue(refresh_secrets.delay().successful())

    def test_updates_api_credentials(self):
        credentials = make_api_credentials()
        refresh_secrets(
            api_credentials=convert_tuple_to_string(credentials))
        self.assertEqual(credentials, auth.get_recorded_api_credentials())

    def test_updates_nodegroup_uuid(self):
        nodegroup_uuid = factory.make_name('nodegroupuuid')
        refresh_secrets(nodegroup_uuid=nodegroup_uuid)
        self.assertEqual(nodegroup_uuid, cache.cache.get('nodegroup_uuid'))
class TestNodeGroupsAPI(MultipleUsersScenarios, MAASServerTestCase):

    scenarios = [
        ('anon', dict(userfactory=lambda: AnonymousUser())),
        ('user', dict(userfactory=factory.make_user)),
        ('admin', dict(userfactory=factory.make_admin)),
    ]

    resources = (
        ('celery', FixtureResource(CeleryFixture())),
    )

    def test_handler_path(self):
        self.assertEqual(
            '/api/1.0/nodegroups/', reverse('nodegroups_handler'))

    def test_reverse_points_to_nodegroups_api(self):
        self.assertEqual(
            reverse('nodegroups_handler'), reverse('nodegroups_handler'))

    def test_nodegroups_index_lists_nodegroups(self):
        # The nodegroups index lists node groups for the MAAS.
        nodegroup = factory.make_node_group()
        response = self.client.get(
            reverse('nodegroups_handler'), {'op': 'list'})
        self.assertEqual(httplib.OK, response.status_code)
        self.assertEqual([{
            'uuid': nodegroup.uuid,
            'status': nodegroup.status,
            'name': nodegroup.name,
            'cluster_name': nodegroup.cluster_name,
        }], json.loads(response.content))
class TestBootImagesTasks(PservTestCase):

    resources = (
        ("celery", FixtureResource(CeleryFixture())),
    )

    def test_sends_boot_images_to_server(self):
        self.useFixture(ConfigFixture({'tftp': {'root': self.make_dir()}}))
        self.set_maas_url()
        auth.record_api_credentials(':'.join(make_api_credentials()))
        image = make_boot_image_params()
        self.patch(tftppath, 'list_boot_images', Mock(return_value=[image]))
        self.patch(boot_images, "get_cluster_uuid")
        self.patch(MAASClient, 'post')

        report_boot_images.delay()

        args, kwargs = MAASClient.post.call_args
        self.assertItemsEqual([image], json.loads(kwargs['images']))
class TestAnonNodeGroupsAPI(AnonAPITestCase):

    resources = (
        ('celery', FixtureResource(CeleryFixture())),
    )

    def test_refresh_calls_refresh_worker(self):
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
        response = self.client.post(
            reverse('nodegroups_handler'), {'op': 'refresh_workers'})
        self.assertEqual(httplib.OK, response.status_code)
        self.assertEqual(nodegroup.uuid, get_recorded_nodegroup_uuid())

    def test_refresh_does_not_return_secrets(self):
        # The response from "refresh" contains only an innocuous
        # confirmation. Anyone can call this method, so it mustn't
        # reveal anything sensitive.
        response = self.client.post(
            reverse('nodegroups_handler'), {'op': 'refresh_workers'})
        self.assertEqual(
            (httplib.OK, "Sending worker refresh."),
            (response.status_code, response.content))
class TestCleanupOldNonces(MAASTestCase):

    resources = (
        ("celery", FixtureResource(CeleryFixture())),
    )

    def test_cleanup_old_nonces_calls_cleanup_old_nonces(self):
        logger = self.useFixture(FakeLogger('maasserver'))
        nb_cleanups = 3
        fake = self.patch(tasks, 'nonces_cleanup')
        fake.cleanup_old_nonces.return_value = nb_cleanups

        tasks.cleanup_old_nonces()

        self.assertEqual([mock.call()], fake.cleanup_old_nonces.mock_calls)
        message = "%d expired nonce(s) cleaned up." % nb_cleanups
        self.assertThat(logger.output, Contains(message))

    def test_import_boot_images_on_schedule_imports_images(self):
        self.patch(NodeGroup, 'import_boot_images')
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
        tasks.import_boot_images_on_schedule()
        self.assertEqual(
            [mock.call()], nodegroup.import_boot_images.mock_calls)
class TestPowerTasks(PservTestCase):

    resources = (
        ("celery", FixtureResource(CeleryFixture())),
    )

    def test_ether_wake_power_on_with_not_enough_template_args(self):
        # In eager test mode the assertion is raised immediately rather
        # than being stored in the AsyncResult, so we need to test for
        # that instead of using result.get().
        self.assertRaises(
            PowerActionFail, power_on.delay, POWER_TYPE.WAKE_ON_LAN)

    def test_ether_wake_power_on(self):
        result = power_on.delay(
            POWER_TYPE.WAKE_ON_LAN, mac_address=arbitrary_mac)
        self.assertTrue(result.successful())

    def test_ether_wake_does_not_support_power_off(self):
        self.assertRaises(
            PowerActionFail, power_off.delay,
            POWER_TYPE.WAKE_ON_LAN, mac=arbitrary_mac)
class TestTagTasks(PservTestCase):

    resources = (
        ("celery", FixtureResource(CeleryFixture())),
    )

    def test_update_node_tags_can_be_retried(self):
        self.set_secrets()
        # The update_node_tags task can be retried.
        # Simulate a temporary failure.
        number_of_failures = UPDATE_NODE_TAGS_MAX_RETRY
        raised_exception = MissingCredentials(
            factory.make_name('exception'), random.randint(100, 200))
        simulate_failures = MultiFakeMethod(
            [FakeMethod(failure=raised_exception)] * number_of_failures +
            [FakeMethod()])
        self.patch(tags, 'process_node_tags', simulate_failures)
        tag = factory.getRandomString()
        result = update_node_tags.delay(
            tag, '//node', tag_nsmap=None, retry=True)
        self.assertTrue(result.successful())

    def test_update_node_tags_is_retried_a_limited_number_of_times(self):
        self.set_secrets()
        # If we simulate UPDATE_NODE_TAGS_MAX_RETRY + 1 failures, the
        # task fails.
        number_of_failures = UPDATE_NODE_TAGS_MAX_RETRY + 1
        raised_exception = MissingCredentials(
            factory.make_name('exception'), random.randint(100, 200))
        simulate_failures = MultiFakeMethod(
            [FakeMethod(failure=raised_exception)] * number_of_failures +
            [FakeMethod()])
        self.patch(tags, 'process_node_tags', simulate_failures)
        tag = factory.getRandomString()
        self.assertRaises(
            MissingCredentials, update_node_tags.delay, tag, '//node',
            tag_nsmap=None, retry=True)
class IntegrationTest(UnitTest):

    run_tests_with = AsynchronousDeferredRunTestForBrokenTwisted.make_factory(
        timeout=10)

    resources = [('rabbit', FixtureResource(RabbitServerWithoutReset()))]
class TestStartUp(MAASServerTestCase):
    """Testing for the method `start_up`."""

    resources = (
        ('celery', FixtureResource(CeleryFixture())),
    )

    def setUp(self):
        super(TestStartUp, self).setUp()
        self.patch(start_up, 'LOCK_FILE_NAME', self.make_file())

    def test_start_up_calls_setup_maas_avahi_service(self):
        recorder = FakeMethod()
        self.patch(start_up, 'setup_maas_avahi_service', recorder)
        start_up.start_up()
        self.assertEqual(
            (1, [()]), (recorder.call_count, recorder.extract_args()))

    def test_start_up_calls_write_full_dns_config(self):
        recorder = FakeMethod()
        self.patch(start_up, 'write_full_dns_config', recorder)
        start_up.start_up()
        self.assertEqual(
            (1, [()]), (recorder.call_count, recorder.extract_args()))

    def test_start_up_creates_master_nodegroup(self):
        start_up.start_up()
        self.assertEqual(1, NodeGroup.objects.all().count())

    def test_start_up_refreshes_workers(self):
        patched_handlers = tasks.refresh_functions.copy()
        patched_handlers['nodegroup_uuid'] = Mock()
        self.patch(tasks, 'refresh_functions', patched_handlers)
        start_up.start_up()
        patched_handlers['nodegroup_uuid'].assert_called_once_with(
            NodeGroup.objects.ensure_master().uuid)

    def test_start_up_refreshes_workers_outside_lock(self):
        lock_checker = LockChecker()
        self.patch(NodeGroup.objects, 'refresh_workers', lock_checker)
        start_up.start_up()
        self.assertEqual(False, lock_checker.lock_was_held)

    def test_start_up_runs_in_exclusion(self):
        lock_checker = LockChecker()
        self.patch(start_up, 'inner_start_up', lock_checker)
        start_up.start_up()
        self.assertEqual(1, lock_checker.call_count)
        self.assertEqual(True, lock_checker.lock_was_held)

    def test_start_up_respects_timeout_to_acquire_lock(self):
        recorder = FakeMethod()
        self.patch(start_up, 'inner_start_up', recorder)
        # Use a timeout more suitable for automated testing.
        self.patch(start_up, 'LOCK_TIMEOUT', 0.1)
        # Manually create a lock.
        self.make_file(FileLock(start_up.LOCK_FILE_NAME).lock_file)
        self.assertRaises(LockTimeout, start_up.start_up)
        self.assertEqual(0, recorder.call_count)

    def test_start_up_warns_about_missing_boot_images(self):
        # If no boot images have been registered yet, that may mean that
        # the import script has not been successfully run yet, or that
        # the master worker is having trouble reporting its images. And
        # so start_up registers a persistent warning about this.
        BootImage.objects.all().delete()
        discard_persistent_error(COMPONENT.IMPORT_PXE_FILES)
        recorder = self.patch(start_up, 'register_persistent_error')

        start_up.start_up()

        self.assertIn(
            COMPONENT.IMPORT_PXE_FILES,
            [args[0][0] for args in recorder.call_args_list])

    def test_start_up_does_not_warn_if_boot_images_are_known(self):
        # If boot images are known, there is no warning about the import
        # script.
        factory.make_boot_image()
        recorder = self.patch(start_up, 'register_persistent_error')

        start_up.start_up()

        self.assertNotIn(
            COMPONENT.IMPORT_PXE_FILES,
            [args[0][0] for args in recorder.call_args_list])

    def test_start_up_does_not_warn_if_already_warning(self):
        # If there already is a warning about missing boot images, it is
        # based on more precise knowledge of whether we ever heard from
        # the region worker at all. It will not be replaced by a less
        # knowledgeable warning.
        BootImage.objects.all().delete()
        register_persistent_error(
            COMPONENT.IMPORT_PXE_FILES, factory.getRandomString())
        recorder = self.patch(start_up, 'register_persistent_error')

        start_up.start_up()

        self.assertNotIn(
            COMPONENT.IMPORT_PXE_FILES,
            [args[0][0] for args in recorder.call_args_list])

    def test_start_up_registers_atexit_lock_cleanup(self):
        filelock_mock = MagicMock()
        self.patch(start_up, 'FileLock', Mock(side_effect=filelock_mock))
        # Patch atexit.register to assert it's called with the right
        # argument.
        atexit_mock = self.patch(start_up, 'atexit')
        start_up.start_up()
        self.assertEqual(
            [call.register(
                filelock_mock(start_up.LOCK_FILE_NAME).break_lock)],
            atexit_mock.mock_calls)
class TestDNSTasks(PservTestCase):

    resources = (
        ("celery", FixtureResource(CeleryFixture())),
    )

    def setUp(self):
        super(TestDNSTasks, self).setUp()
        # Patch DNS_CONFIG_DIR so that the configuration files will be
        # written in a temporary directory.
        self.dns_conf_dir = self.make_dir()
        self.patch(conf, 'DNS_CONFIG_DIR', self.dns_conf_dir)
        # Record the calls to 'execute_rndc_command' (instead of
        # executing real rndc commands).
        self.rndc_recorder = FakeMethod()
        self.patch(tasks, 'execute_rndc_command', self.rndc_recorder)

    def test_write_dns_config_writes_file(self):
        zone_names = [random.randint(1, 100), random.randint(1, 100)]
        command = factory.getRandomString()
        result = write_dns_config.delay(
            zone_names=zone_names,
            callback=rndc_command.subtask(args=[command]))

        self.assertThat(
            (
                result.successful(),
                os.path.join(self.dns_conf_dir, MAAS_NAMED_CONF_NAME),
                self.rndc_recorder.calls,
            ),
            MatchesListwise(
                (
                    Equals(True),
                    FileExists(),
                    Equals([((command, ), {})]),
                )),
            result)

    def test_write_dns_config_attached_to_dns_worker_queue(self):
        self.assertEqual(
            write_dns_config.queue, celery_config.WORKER_QUEUE_DNS)

    def test_write_dns_zone_config_writes_file(self):
        command = factory.getRandomString()
        domain = factory.getRandomString()
        network = IPNetwork('192.168.0.3/24')
        ip = factory.getRandomIPInNetwork(network)
        forward_zone = DNSForwardZoneConfig(
            domain, serial=random.randint(1, 100),
            mapping={factory.getRandomString(): ip}, networks=[network])
        reverse_zone = DNSReverseZoneConfig(
            domain, serial=random.randint(1, 100),
            mapping={factory.getRandomString(): ip}, network=network)
        result = write_dns_zone_config.delay(
            zones=[forward_zone, reverse_zone],
            callback=rndc_command.subtask(args=[command]))

        forward_file_name = 'zone.%s' % domain
        reverse_file_name = 'zone.0.168.192.in-addr.arpa'
        self.assertThat(
            (
                result.successful(),
                os.path.join(self.dns_conf_dir, forward_file_name),
                os.path.join(self.dns_conf_dir, reverse_file_name),
                self.rndc_recorder.calls,
            ),
            MatchesListwise(
                (
                    Equals(True),
                    FileExists(),
                    FileExists(),
                    Equals([((command, ), {})]),
                )),
            result)

    def test_write_dns_zone_config_attached_to_dns_worker_queue(self):
        self.assertEqual(
            write_dns_zone_config.queue, celery_config.WORKER_QUEUE_DNS)

    def test_setup_rndc_configuration_writes_files(self):
        command = factory.getRandomString()
        result = setup_rndc_configuration.delay(
            callback=rndc_command.subtask(args=[command]))

        self.assertThat(
            (
                result.successful(),
                os.path.join(self.dns_conf_dir, MAAS_RNDC_CONF_NAME),
                os.path.join(self.dns_conf_dir, MAAS_NAMED_RNDC_CONF_NAME),
                self.rndc_recorder.calls,
            ),
            MatchesListwise(
                (
                    Equals(True),
                    FileExists(),
                    FileExists(),
                    Equals([((command, ), {})]),
                )),
            result)

    def test_setup_rndc_configuration_attached_to_dns_worker_queue(self):
        self.assertEqual(
            setup_rndc_configuration.queue, celery_config.WORKER_QUEUE_DNS)

    def test_rndc_command_execute_command(self):
        command = factory.getRandomString()
        result = rndc_command.delay(command)

        self.assertThat(
            (result.successful(), self.rndc_recorder.calls),
            MatchesListwise(
                (
                    Equals(True),
                    Equals([((command, ), {})]),
                )))

    def test_rndc_command_can_be_retried(self):
        # The rndc_command task can be retried.
        # Simulate a temporary failure.
        number_of_failures = RNDC_COMMAND_MAX_RETRY
        raised_exception = CalledProcessError(
            factory.make_name('exception'), random.randint(100, 200))
        simulate_failures = MultiFakeMethod(
            [FakeMethod(failure=raised_exception)] * number_of_failures +
            [FakeMethod()])
        self.patch(tasks, 'execute_rndc_command', simulate_failures)
        command = factory.getRandomString()
        result = rndc_command.delay(command, retry=True)
        self.assertTrue(result.successful())

    def test_rndc_command_is_retried_a_limited_number_of_times(self):
        # If we simulate RNDC_COMMAND_MAX_RETRY + 1 failures, the
        # task fails.
        number_of_failures = RNDC_COMMAND_MAX_RETRY + 1
        raised_exception = utils.ExternalProcessError(
            random.randint(100, 200), factory.make_name('exception'))
        simulate_failures = MultiFakeMethod(
            [FakeMethod(failure=raised_exception)] * number_of_failures +
            [FakeMethod()])
        self.patch(tasks, 'execute_rndc_command', simulate_failures)
        command = factory.getRandomString()
        self.assertRaises(
            utils.ExternalProcessError, rndc_command.delay,
            command, retry=True)

    def test_rndc_command_attached_to_dns_worker_queue(self):
        self.assertEqual(rndc_command.queue, celery_config.WORKER_QUEUE_DNS)

    def test_write_full_dns_config_sets_up_config(self):
        # write_full_dns_config writes the config file, writes
        # the zone files, and reloads the dns service.
        domain = factory.getRandomString()
        network = IPNetwork('192.168.0.3/24')
        ip = factory.getRandomIPInNetwork(network)
        zones = [
            DNSForwardZoneConfig(
                domain, serial=random.randint(1, 100),
                mapping={factory.getRandomString(): ip},
                networks=[network]),
            DNSReverseZoneConfig(
                domain, serial=random.randint(1, 100),
                mapping={factory.getRandomString(): ip},
                network=network),
        ]
        command = factory.getRandomString()
        result = write_full_dns_config.delay(
            zones=zones,
            callback=rndc_command.subtask(args=[command]),
            upstream_dns=factory.getRandomIPAddress())

        forward_file_name = 'zone.%s' % domain
        reverse_file_name = 'zone.0.168.192.in-addr.arpa'
        self.assertThat(
            (
                result.successful(),
                self.rndc_recorder.calls,
                os.path.join(self.dns_conf_dir, forward_file_name),
                os.path.join(self.dns_conf_dir, reverse_file_name),
                os.path.join(self.dns_conf_dir, MAAS_NAMED_CONF_NAME),
                os.path.join(
                    self.dns_conf_dir, MAAS_NAMED_CONF_OPTIONS_INSIDE_NAME),
            ),
            MatchesListwise(
                (
                    Equals(True),
                    Equals([((command, ), {})]),
                    FileExists(),
                    FileExists(),
                    FileExists(),
                    FileExists(),
                )))

    def test_write_full_dns_attached_to_dns_worker_queue(self):
        self.assertEqual(
            write_full_dns_config.queue, celery_config.WORKER_QUEUE_DNS)
class TestDHCPTasks(PservTestCase):

    resources = (
        ("celery", FixtureResource(CeleryFixture())),
    )

    def assertRecordedStdin(self, recorder, *args):
        # Helper to check that the function recorder "recorder" has all
        # of the items mentioned in "args" which are extracted from
        # stdin. We can just check that all the parameters that were
        # passed are being used.
        self.assertThat(recorder.extract_args()[0][0], ContainsAll(args))

    def make_dhcp_config_params(self):
        """Fake up a dict of dhcp configuration parameters."""
        param_names = [
            'interface',
            'omapi_key',
            'subnet',
            'subnet_mask',
            'broadcast_ip',
            'dns_servers',
            'domain_name',
            'router_ip',
            'ip_range_low',
            'ip_range_high',
        ]
        return {param: factory.getRandomString() for param in param_names}

    def test_upload_dhcp_leases(self):
        self.patch(
            leases, 'parse_leases_file',
            Mock(return_value=(datetime.utcnow(), {})))
        self.patch(leases, 'process_leases', Mock())
        tasks.upload_dhcp_leases.delay()
        self.assertEqual(1, leases.process_leases.call_count)

    def test_add_new_dhcp_host_map(self):
        # We don't want to actually run omshell in the task, so we stub
        # out the wrapper class's _run method and record what it would
        # do.
        mac = factory.getRandomMACAddress()
        ip = factory.getRandomIPAddress()
        server_address = factory.getRandomString()
        key = factory.getRandomString()
        recorder = FakeMethod(result=(0, "hardware-type"))
        self.patch(Omshell, '_run', recorder)

        add_new_dhcp_host_map.delay({ip: mac}, server_address, key)

        self.assertRecordedStdin(recorder, ip, mac, server_address, key)

    def test_add_new_dhcp_host_map_failure(self):
        # Check that task failures are caught. Nothing much happens in
        # the Task code right now though.
        mac = factory.getRandomMACAddress()
        ip = factory.getRandomIPAddress()
        server_address = factory.getRandomString()
        key = factory.getRandomString()
        self.patch(Omshell, '_run', FakeMethod(result=(0, "this_will_fail")))
        self.assertRaises(
            CalledProcessError, add_new_dhcp_host_map.delay,
            {mac: ip}, server_address, key)

    def test_remove_dhcp_host_map(self):
        # We don't want to actually run omshell in the task, so we stub
        # out the wrapper class's _run method and record what it would
        # do.
        ip = factory.getRandomIPAddress()
        server_address = factory.getRandomString()
        key = factory.getRandomString()
        recorder = FakeMethod(result=(0, "obj: <null>"))
        self.patch(Omshell, '_run', recorder)

        remove_dhcp_host_map.delay(ip, server_address, key)

        self.assertRecordedStdin(recorder, ip, server_address, key)

    def test_remove_dhcp_host_map_failure(self):
        # Check that task failures are caught. Nothing much happens in
        # the Task code right now though.
        ip = factory.getRandomIPAddress()
        server_address = factory.getRandomString()
        key = factory.getRandomString()
        self.patch(Omshell, '_run', FakeMethod(result=(0, "this_will_fail")))
        self.assertRaises(
            CalledProcessError, remove_dhcp_host_map.delay,
            ip, server_address, key)

    def test_write_dhcp_config_invokes_script_correctly(self):
        mocked_proc = Mock()
        mocked_proc.returncode = 0
        mocked_proc.communicate = Mock(
            return_value=('output', 'error output'))
        mocked_popen = self.patch(
            utils, "Popen", Mock(return_value=mocked_proc))

        config_params = self.make_dhcp_config_params()
        write_dhcp_config(**config_params)

        # It should construct Popen with the right parameters.
        mocked_popen.assert_any_call(
            [
                "sudo", "-n", "maas-provision", "atomic-write", "--filename",
                celery_config.DHCP_CONFIG_FILE, "--mode", "0644",
            ],
            stdin=PIPE)

        # It should then pass the content to communicate().
        content = config.get_config(**config_params).encode("ascii")
        mocked_proc.communicate.assert_any_call(content)

        # Similarly, it also writes the DHCPD interfaces to
        # /var/lib/maas/dhcpd-interfaces.
        mocked_popen.assert_any_call(
            [
                "sudo", "-n", "maas-provision", "atomic-write", "--filename",
                celery_config.DHCP_INTERFACES_FILE, "--mode", "0644",
            ],
            stdin=PIPE)

    def test_restart_dhcp_server_sends_command(self):
        recorder = FakeMethod()
        self.patch(tasks, 'call_and_check', recorder)
        restart_dhcp_server()
        self.assertEqual(
            (1, (['sudo', '-n', 'service', 'maas-dhcp-server', 'restart'],)),
            (recorder.call_count, recorder.extract_args()[0]))
class TestNodeGroup(MAASServerTestCase):

    resources = (
        ('celery', FixtureResource(CeleryFixture())),
    )

    def test_delete_cluster_with_nodes(self):
        nodegroup = factory.make_node_group()
        factory.make_node(nodegroup=nodegroup)
        nodegroup.delete()
        self.assertEqual(nodegroup.uuid, nodegroup.work_queue)
        self.assertFalse(NodeGroup.objects.filter(id=nodegroup.id).exists())

    def test_work_queue_returns_uuid(self):
        nodegroup = factory.make_node_group()
        self.assertEqual(nodegroup.uuid, nodegroup.work_queue)

    def test_add_dhcp_host_maps_adds_maps_if_managing_dhcp(self):
        self.patch(Omshell, 'create', FakeMethod())
        nodegroup = factory.make_node_group()
        leases = factory.make_random_leases()
        nodegroup.add_dhcp_host_maps(leases)
        self.assertEqual(
            [(leases.keys()[0], leases.values()[0])],
            Omshell.create.extract_args())

    def test_add_dhcp_host_maps_does_nothing_if_not_managing_dhcp(self):
        self.patch(Omshell, 'create', FakeMethod())
        nodegroup = factory.make_node_group(
            management=NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED)
        leases = factory.make_random_leases()
        nodegroup.add_dhcp_host_maps(leases)
        self.assertEqual([], Omshell.create.extract_args())

    def test_fires_tasks_routed_to_nodegroup_worker(self):
        nodegroup = factory.make_node_group()
        task = self.patch(nodegroup_module, 'add_new_dhcp_host_map')
        leases = factory.make_random_leases()
        nodegroup.add_dhcp_host_maps(leases)
        args, kwargs = task.apply_async.call_args
        self.assertEqual(nodegroup.work_queue, kwargs['queue'])

    def test_get_managed_interface_returns_managed_interface(self):
        nodegroup = factory.make_node_group()
        interface = nodegroup.nodegroupinterface_set.all()[0]
        self.assertEqual(interface, nodegroup.get_managed_interface())

    def test_get_managed_interface_does_not_return_unmanaged_interface(self):
        nodegroup = factory.make_node_group()
        interface = nodegroup.nodegroupinterface_set.all()[0]
        interface.management = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
        interface.save()
        self.assertIsNone(nodegroup.get_managed_interface())

    def test_get_managed_interface_does_not_return_unrelated_interface(self):
        nodegroup = factory.make_node_group()
        # Create another nodegroup with a managed interface.
        factory.make_node_group()
        interface = nodegroup.nodegroupinterface_set.all()[0]
        interface.management = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
        interface.save()
        self.assertIsNone(nodegroup.get_managed_interface())

    def test_accept_node_changes_status(self):
        nodegroup = factory.make_node_group(
            status=factory.getRandomEnum(NODEGROUP_STATUS))
        nodegroup.accept()
        self.assertEqual(nodegroup.status, NODEGROUP_STATUS.ACCEPTED)

    def test_reject_node_changes_status(self):
        nodegroup = factory.make_node_group(
            status=factory.getRandomEnum(NODEGROUP_STATUS))
        nodegroup.reject()
        self.assertEqual(nodegroup.status, NODEGROUP_STATUS.REJECTED)

    def test_ensure_dhcp_key_creates_key(self):
        nodegroup = factory.make_node_group(dhcp_key='')
        nodegroup.ensure_dhcp_key()
        # Check that the dhcp_key is not empty and looks valid.
        self.assertThat(nodegroup.dhcp_key, EndsWith("=="))
        # The key is persisted.
        self.assertThat(reload_object(nodegroup).dhcp_key, EndsWith("=="))

    def test_ensure_dhcp_key_preserves_existing_key(self):
        key = factory.make_name('dhcp-key')
        nodegroup = factory.make_node_group(dhcp_key=key)
        nodegroup.ensure_dhcp_key()
        self.assertEqual(key, nodegroup.dhcp_key)

    def test_ensure_dhcp_key_creates_different_keys(self):
        nodegroup1 = factory.make_node_group(dhcp_key='')
        nodegroup2 = factory.make_node_group(dhcp_key='')
        nodegroup1.ensure_dhcp_key()
        nodegroup2.ensure_dhcp_key()
        self.assertNotEqual(nodegroup1.dhcp_key, nodegroup2.dhcp_key)

    def test_import_boot_images_calls_script_with_proxy(self):
        recorder = self.patch(tasks, 'call_and_check')
        proxy = factory.make_name('proxy')
        Config.objects.set_config('http_proxy', proxy)
        nodegroup = factory.make_node_group()

        nodegroup.import_boot_images()

        args, kwargs = recorder.call_args
        env = kwargs['env']
        self.assertEqual(
            (proxy, proxy),
            (env.get('http_proxy'), env.get('https_proxy')))

    def test_import_boot_images_selects_archive_locations_from_config(self):
        recorder = self.patch(nodegroup_module, 'import_boot_images')
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)

        archives = {
            'main_archive': make_archive_url('main'),
            'ports_archive': make_archive_url('ports'),
            'cloud_images_archive': make_archive_url('cloud_images'),
        }
        for key, value in archives.items():
            Config.objects.set_config(key, value)

        nodegroup.import_boot_images()

        kwargs = recorder.apply_async.call_args[1]['kwargs']
        archive_options = {arg: kwargs.get(arg) for arg in archives}
        self.assertEqual(archives, archive_options)

    def test_import_boot_images_sent_to_nodegroup_queue(self):
        recorder = self.patch(nodegroup_module, 'import_boot_images', Mock())
        nodegroup = factory.make_node_group()
        proxy = factory.make_name('proxy')
        Config.objects.set_config('http_proxy', proxy)

        nodegroup.import_boot_images()

        args, kwargs = recorder.apply_async.call_args
        self.assertEqual(nodegroup.uuid, kwargs['queue'])
from testresources import FixtureResource
from fixtures import FakeLogger
from txfixtures import Reactor

from fakejuju.fixture import FakeJuju

# Resource tree
logger = FixtureResource(FakeLogger())
reactor = FixtureResource(Reactor())
fakejuju = FixtureResource(FakeJuju(reactor.fixture))
fakejuju.resources = [("logger", logger), ("reactor", reactor)]
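A minimal usage sketch of the resource tree above, assuming it is importable as shown: testresources attaches each named resource to the test instance before the test runs, and because `fakejuju` lists `logger` and `reactor` as its dependencies they are built first and only torn down once no remaining test needs them. The class and test names below are hypothetical and not part of any suite in this document.

from testresources import ResourcedTestCase


class FakeJujuUsageExample(ResourcedTestCase):
    # Hypothetical example test case: it only demonstrates how a test
    # would declare and consume the "fakejuju" resource defined above.

    resources = [("fakejuju", fakejuju)]

    def test_resource_is_attached(self):
        # ResourcedTestCase exposes each finished resource as an
        # attribute named after the resource ("self.fakejuju" here).
        self.assertIsNotNone(self.fakejuju)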
class TestNodeGroupAPI(APITestCase):

    resources = (
        ('celery', FixtureResource(CeleryFixture())),
    )

    def test_handler_path(self):
        self.assertEqual(
            '/api/1.0/nodegroups/name/',
            reverse('nodegroup_handler', args=['name']))

    def test_GET_returns_node_group(self):
        nodegroup = factory.make_node_group()
        response = self.client.get(
            reverse('nodegroup_handler', args=[nodegroup.uuid]))
        self.assertEqual(httplib.OK, response.status_code)
        self.assertEqual(
            nodegroup.uuid, json.loads(response.content).get('uuid'))

    def test_GET_returns_404_for_unknown_node_group(self):
        response = self.client.get(
            reverse(
                'nodegroup_handler',
                args=[factory.make_name('nodegroup')]))
        self.assertEqual(httplib.NOT_FOUND, response.status_code)

    def test_PUT_reserved_to_admin_users(self):
        nodegroup = factory.make_node_group()
        response = self.client_put(
            reverse('nodegroup_handler', args=[nodegroup.uuid]),
            {'name': factory.make_name("new-name")})
        self.assertEqual(httplib.FORBIDDEN, response.status_code)

    def test_PUT_updates_nodegroup(self):
        # The api allows the updating of a NodeGroup.
        nodegroup = factory.make_node_group()
        self.become_admin()
        new_name = factory.make_name("new-name")
        new_cluster_name = factory.make_name("new-cluster-name")
        new_status = factory.getRandomChoice(
            NODEGROUP_STATUS_CHOICES, but_not=[nodegroup.status])

        response = self.client_put(
            reverse('nodegroup_handler', args=[nodegroup.uuid]),
            {
                'name': new_name,
                'cluster_name': new_cluster_name,
                'status': new_status,
            })

        self.assertEqual(httplib.OK, response.status_code, response.content)
        nodegroup = reload_object(nodegroup)
        self.assertEqual(
            (new_name, new_cluster_name, new_status),
            (nodegroup.name, nodegroup.cluster_name, nodegroup.status))

    def test_PUT_updates_nodegroup_validates_data(self):
        nodegroup, _ = factory.make_unrenamable_nodegroup_with_node()
        self.become_admin()
        new_name = factory.make_name("new-name")
        response = self.client_put(
            reverse('nodegroup_handler', args=[nodegroup.uuid]),
            {'name': new_name})
        parsed_result = json.loads(response.content)
        self.assertEqual(httplib.BAD_REQUEST, response.status_code)
        self.assertIn("Can't rename DNS zone", parsed_result['name'][0])

    def test_update_leases_processes_empty_leases_dict(self):
        nodegroup = factory.make_node_group()
        factory.make_dhcp_lease(nodegroup=nodegroup)
        client = make_worker_client(nodegroup)
        response = client.post(
            reverse('nodegroup_handler', args=[nodegroup.uuid]),
            {
                'op': 'update_leases',
                'leases': json.dumps({}),
            })
        self.assertEqual(
            (httplib.OK, "Leases updated."),
            (response.status_code, response.content))
        self.assertItemsEqual(
            [], DHCPLease.objects.filter(nodegroup=nodegroup))

    def test_update_leases_stores_leases(self):
        self.patch(Omshell, 'create')
        nodegroup = factory.make_node_group()
        lease = factory.make_random_leases()
        client = make_worker_client(nodegroup)
        response = client.post(
            reverse('nodegroup_handler', args=[nodegroup.uuid]),
            {
                'op': 'update_leases',
                'leases': json.dumps(lease),
            })
        self.assertEqual(
            (httplib.OK, "Leases updated."),
            (response.status_code, response.content))
        self.assertItemsEqual(
            lease.keys(),
            [
                dhcplease.ip
                for dhcplease in DHCPLease.objects.filter(
                    nodegroup=nodegroup)
            ])

    def test_update_leases_adds_new_leases_on_worker(self):
        nodegroup = factory.make_node_group()
        client = make_worker_client(nodegroup)
        self.patch(Omshell, 'create', FakeMethod())
        new_leases = factory.make_random_leases()
        response = client.post(
            reverse('nodegroup_handler', args=[nodegroup.uuid]),
            {
                'op': 'update_leases',
                'leases': json.dumps(new_leases),
            })
        self.assertEqual(
            (httplib.OK, "Leases updated."),
            (response.status_code, response.content))
        self.assertEqual(
            [(new_leases.keys()[0], new_leases.values()[0])],
            Omshell.create.extract_args())

    def test_update_leases_does_not_add_old_leases(self):
        self.patch(Omshell, 'create')
        nodegroup = factory.make_node_group()
        client = make_worker_client(nodegroup)
        self.patch(tasks, 'add_new_dhcp_host_map', FakeMethod())
        response = client.post(
            reverse('nodegroup_handler', args=[nodegroup.uuid]),
            {
                'op': 'update_leases',
                'leases': json.dumps(factory.make_random_leases()),
            })
        self.assertEqual(
            (httplib.OK, "Leases updated."),
            (response.status_code, response.content))
        self.assertEqual([], tasks.add_new_dhcp_host_map.calls)

    def test_worker_calls_update_leases(self):
        # In bug 1041158, the worker's upload_leases task tried to call
        # the update_leases API at the wrong URL path. It has the right
        # path now.
        self.useFixture(
            EnvironmentVariableFixture(
                "MAAS_URL", settings.DEFAULT_MAAS_URL))
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
        refresh_worker(nodegroup)
        self.patch(MAASClient, 'post', Mock())
        leases = factory.make_random_leases()

        send_leases(leases)

        nodegroup_path = reverse('nodegroup_handler', args=[nodegroup.uuid])
        nodegroup_path = nodegroup_path.decode('ascii').lstrip('/')
        MAASClient.post.assert_called_once_with(
            nodegroup_path, 'update_leases', leases=json.dumps(leases))

    def test_accept_accepts_nodegroup(self):
        nodegroups = [factory.make_node_group() for i in range(3)]
        uuids = [nodegroup.uuid for nodegroup in nodegroups]
        self.become_admin()
        response = self.client.post(
            reverse('nodegroups_handler'),
            {
                'op': 'accept',
                'uuid': uuids,
            })
        self.assertEqual(
            (httplib.OK, "Nodegroup(s) accepted."),
            (response.status_code, response.content))
        self.assertThat(
            [
                nodegroup.status
                for nodegroup in reload_objects(NodeGroup, nodegroups)
            ],
            AllMatch(Equals(NODEGROUP_STATUS.ACCEPTED)))

    def test_accept_reserved_to_admin(self):
        response = self.client.post(
            reverse('nodegroups_handler'),
            {
                'op': 'accept',
                'uuid': factory.getRandomString(),
            })
        self.assertEqual(httplib.FORBIDDEN, response.status_code)

    def test_reject_rejects_nodegroup(self):
        nodegroups = [factory.make_node_group() for i in range(3)]
        uuids = [nodegroup.uuid for nodegroup in nodegroups]
        self.become_admin()
        response = self.client.post(
            reverse('nodegroups_handler'),
            {
                'op': 'reject',
                'uuid': uuids,
            })
        self.assertEqual(
            (httplib.OK, "Nodegroup(s) rejected."),
            (response.status_code, response.content))
        self.assertThat(
            [
                nodegroup.status
                for nodegroup in reload_objects(NodeGroup, nodegroups)
            ],
            AllMatch(Equals(NODEGROUP_STATUS.REJECTED)))

    def test_reject_reserved_to_admin(self):
        response = self.client.post(
            reverse('nodegroups_handler'),
            {
                'op': 'reject',
                'uuid': factory.getRandomString(),
            })
        self.assertEqual(httplib.FORBIDDEN, response.status_code)

    def test_import_boot_images_calls_script_for_all_accepted_clusters(self):
        recorder = self.patch(nodegroup_module, 'import_boot_images')
        proxy = factory.make_name('proxy')
        Config.objects.set_config('http_proxy', proxy)
        accepted_nodegroups = [
            factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED),
            factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED),
        ]
        factory.make_node_group(status=NODEGROUP_STATUS.REJECTED)
        factory.make_node_group(status=NODEGROUP_STATUS.PENDING)
        admin = factory.make_admin()
        client = OAuthAuthenticatedClient(admin)

        response = client.post(
            reverse('nodegroups_handler'), {'op': 'import_boot_images'})

        self.assertEqual(
            httplib.OK, response.status_code,
            explain_unexpected_response(httplib.OK, response))
        queues = [
            kwargs['queue']
            for args, kwargs in recorder.apply_async.call_args_list
        ]
        self.assertItemsEqual(
            [nodegroup.work_queue for nodegroup in accepted_nodegroups],
            queues)

    def test_import_boot_images_denied_if_not_admin(self):
        user = factory.make_user()
        client = OAuthAuthenticatedClient(user)
        response = client.post(
            reverse('nodegroups_handler'), {'op': 'import_boot_images'})
        self.assertEqual(
            httplib.FORBIDDEN, response.status_code,
            explain_unexpected_response(httplib.FORBIDDEN, response))

    def test_report_download_progress_accepts_new_download(self):
        nodegroup = factory.make_node_group()
        filename = factory.getRandomString()
        client = make_worker_client(nodegroup)

        response = client.post(
            reverse('nodegroup_handler', args=[nodegroup.uuid]),
            {
                'op': 'report_download_progress',
                'filename': filename,
            })

        self.assertEqual(
            httplib.OK, response.status_code,
            explain_unexpected_response(httplib.OK, response))
        progress = DownloadProgress.objects.get(nodegroup=nodegroup)
        self.assertEqual(nodegroup, progress.nodegroup)
        self.assertEqual(filename, progress.filename)
        self.assertIsNone(progress.size)
        self.assertIsNone(progress.bytes_downloaded)
        self.assertEqual('', progress.error)

    def test_report_download_progress_updates_ongoing_download(self):
        progress = factory.make_download_progress_incomplete()
        client = make_worker_client(progress.nodegroup)
        new_bytes_downloaded = progress.bytes_downloaded + 1

        response = client.post(
            reverse('nodegroup_handler', args=[progress.nodegroup.uuid]),
            {
                'op': 'report_download_progress',
                'filename': progress.filename,
                'bytes_downloaded': new_bytes_downloaded,
            })

        self.assertEqual(
            httplib.OK, response.status_code,
            explain_unexpected_response(httplib.OK, response))
        progress = reload_object(progress)
        self.assertEqual(new_bytes_downloaded, progress.bytes_downloaded)

    def test_report_download_progress_rejects_invalid_data(self):
        progress = factory.make_download_progress_incomplete()
        client = make_worker_client(progress.nodegroup)

        response = client.post(
            reverse('nodegroup_handler', args=[progress.nodegroup.uuid]),
            {
                'op': 'report_download_progress',
                'filename': progress.filename,
                'bytes_downloaded': -1,
            })

        self.assertEqual(
            httplib.BAD_REQUEST, response.status_code,
            explain_unexpected_response(httplib.BAD_REQUEST, response))
class TestDNSConfigModifications(MAASServerTestCase):

    resources = (
        ("celery", FixtureResource(CeleryFixture())),
    )

    def setUp(self):
        super(TestDNSConfigModifications, self).setUp()
        self.bind = self.useFixture(BINDServer())
        self.patch(conf, 'DNS_CONFIG_DIR', self.bind.config.homedir)
        # Use a random port for rndc.
        self.patch(conf, 'DNS_RNDC_PORT', allocate_ports("localhost")[0])
        # This simulates what should happen when the package is
        # installed:
        # Create MAAS-specific DNS configuration files.
        call_command('set_up_dns')
        # Register MAAS-specific DNS configuration files with the
        # system's BIND instance.
        call_command(
            'get_named_conf', edit=True,
            config_path=self.bind.config.conf_file)
        # Reload BIND.
        self.bind.runner.rndc('reload')

    def create_managed_nodegroup(self):
        return factory.make_node_group(
            network=IPNetwork('192.168.0.1/24'),
            status=NODEGROUP_STATUS.ACCEPTED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS)

    def create_nodegroup_with_lease(self, lease_number=1, nodegroup=None):
        if nodegroup is None:
            nodegroup = self.create_managed_nodegroup()
        interface = nodegroup.get_managed_interface()
        node = factory.make_node(nodegroup=nodegroup)
        mac = factory.make_mac_address(node=node)
        ips = IPRange(interface.ip_range_low, interface.ip_range_high)
        lease_ip = unicode(
            islice(ips, lease_number, lease_number + 1).next())
        lease = factory.make_dhcp_lease(
            nodegroup=nodegroup, mac=mac.mac_address, ip=lease_ip)
        # Simulate that this lease was created by
        # DHCPLease.objects.update_leases: update its DNS config.
        dns.change_dns_zones([nodegroup])
        return nodegroup, node, lease

    def dig_resolve(self, fqdn):
        """Resolve `fqdn` using dig.  Returns a list of results."""
        return dig_call(
            port=self.bind.config.port,
            commands=[fqdn, '+short']).split('\n')

    def dig_reverse_resolve(self, ip):
        """Reverse resolve `ip` using dig.  Returns a list of results."""
        return dig_call(
            port=self.bind.config.port,
            commands=['-x', ip, '+short']).split('\n')

    def assertDNSMatches(self, hostname, domain, ip):
        fqdn = "%s.%s" % (hostname, domain)
        autogenerated_hostname = '%s.' % generated_hostname(ip, domain)
        forward_lookup_result = self.dig_resolve(fqdn)
        if '%s.' % fqdn == autogenerated_hostname:
            # If the fqdn is an autogenerated hostname, it resolves to
            # the IP address (A record).
            expected_results = [ip]
        else:
            # If the fqdn is a custom hostname, it resolves to the
            # autogenerated hostname (CNAME record) and the IP address
            # (A record).
            expected_results = [autogenerated_hostname, ip]
        self.assertEqual(
            expected_results, forward_lookup_result,
            "Failed to resolve '%s' (results: '%s')." % (
                fqdn, ','.join(forward_lookup_result)))
        # A reverse lookup on the IP returns the autogenerated
        # hostname.
        reverse_lookup_result = self.dig_reverse_resolve(ip)
        self.assertEqual(
            [autogenerated_hostname], reverse_lookup_result,
            "Failed to reverse resolve '%s' (results: '%s')." % (
                fqdn, ','.join(reverse_lookup_result)))

    def test_add_zone_loads_dns_zone(self):
        nodegroup, node, lease = self.create_nodegroup_with_lease()
        self.patch(settings, 'DNS_CONNECT', True)
        dns.add_zone(nodegroup)
        self.assertDNSMatches(node.hostname, nodegroup.name, lease.ip)

    def test_change_dns_zone_changes_dns_zone(self):
        nodegroup, _, _ = self.create_nodegroup_with_lease()
        self.patch(settings, 'DNS_CONNECT', True)
        dns.write_full_dns_config()
        nodegroup, new_node, new_lease = self.create_nodegroup_with_lease(
            nodegroup=nodegroup, lease_number=2)
        dns.change_dns_zones(nodegroup)
        self.assertDNSMatches(new_node.hostname, nodegroup.name, new_lease.ip)

    def test_is_dns_enabled_return_false_if_DNS_CONNECT_False(self):
        self.patch(settings, 'DNS_CONNECT', False)
        self.assertFalse(dns.is_dns_enabled())

    def test_is_dns_enabled_return_True_if_DNS_CONNECT_True(self):
        self.patch(settings, 'DNS_CONNECT', True)
        self.assertTrue(dns.is_dns_enabled())

    def test_is_dns_in_use_return_False_no_configured_interface(self):
        self.assertFalse(dns.is_dns_in_use())

    def test_is_dns_in_use_return_True_if_configured_interface(self):
        self.create_managed_nodegroup()
        self.assertTrue(dns.is_dns_in_use())

    def test_write_full_dns_loads_full_dns_config(self):
        nodegroup, node, lease = self.create_nodegroup_with_lease()
        self.patch(settings, 'DNS_CONNECT', True)
        dns.write_full_dns_config()
        self.assertDNSMatches(node.hostname, nodegroup.name, lease.ip)

    def test_write_full_dns_passes_reload_retry_parameter(self):
        self.patch(settings, 'DNS_CONNECT', True)
        recorder = FakeMethod()
        self.create_managed_nodegroup()

        @task
        def recorder_task(*args, **kwargs):
            return recorder(*args, **kwargs)

        self.patch(tasks, 'rndc_command', recorder_task)
        dns.write_full_dns_config(reload_retry=True)
        self.assertEqual([(['reload'], True)], recorder.extract_args())

    def test_write_full_dns_passes_upstream_dns_parameter(self):
        self.patch(settings, 'DNS_CONNECT', True)
        self.create_managed_nodegroup()
        random_ip = factory.getRandomIPAddress()
        Config.objects.set_config("upstream_dns", random_ip)
        patched_task = self.patch(dns.tasks.write_full_dns_config, "delay")
        dns.write_full_dns_config()
        patched_task.assert_called_once_with(
            zones=ANY, callback=ANY, upstream_dns=random_ip)

    def test_write_full_dns_doesnt_call_task_it_no_interface_configured(self):
        self.patch(settings, 'DNS_CONNECT', True)
        patched_task = self.patch(dns.tasks.write_full_dns_config, "delay")
        dns.write_full_dns_config()
        self.assertEqual(0, patched_task.call_count)

    def test_dns_config_has_NS_record(self):
        ip = factory.getRandomIPAddress()
        self.patch(settings, 'DEFAULT_MAAS_URL', 'http://%s/' % ip)
        nodegroup, node, lease = self.create_nodegroup_with_lease()
        self.patch(settings, 'DNS_CONNECT', True)
        dns.write_full_dns_config()
        # Get the NS record for the zone 'nodegroup.name'.
        ns_record = dig_call(
            port=self.bind.config.port,
            commands=[nodegroup.name, 'NS', '+short'])
        # Resolve that hostname.
        ip_of_ns_record = dig_call(
            port=self.bind.config.port, commands=[ns_record, '+short'])
        self.assertEqual(ip, ip_of_ns_record)

    def test_add_nodegroup_creates_DNS_zone(self):
        self.patch(settings, "DNS_CONNECT", True)
        network = IPNetwork('192.168.7.1/24')
        ip = factory.getRandomIPInNetwork(network)
        nodegroup = factory.make_node_group(
            network=network, status=NODEGROUP_STATUS.ACCEPTED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS)
        self.assertDNSMatches(generated_hostname(ip), nodegroup.name, ip)

    def test_edit_nodegroupinterface_updates_DNS_zone(self):
        self.patch(settings, "DNS_CONNECT", True)
        old_network = IPNetwork('192.168.7.1/24')
        old_ip = factory.getRandomIPInNetwork(old_network)
        nodegroup = factory.make_node_group(
            network=old_network, status=NODEGROUP_STATUS.ACCEPTED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS)
        interface = nodegroup.get_managed_interface()
        # Edit nodegroup's network information to '192.168.44.1/24'.
        interface.ip = '192.168.44.7'
        interface.router_ip = '192.168.44.14'
        interface.broadcast_ip = '192.168.44.255'
        interface.netmask = '255.255.255.0'
        interface.ip_range_low = '192.168.44.0'
        interface.ip_range_high = '192.168.44.255'
        interface.save()
        ip = factory.getRandomIPInNetwork(IPNetwork('192.168.44.1/24'))
        # The ip from the old network does not resolve anymore.
        self.assertEqual([''], self.dig_resolve(generated_hostname(old_ip)))
        self.assertEqual([''], self.dig_reverse_resolve(old_ip))
        # The ip from the new network resolves.
        self.assertDNSMatches(generated_hostname(ip), nodegroup.name, ip)

    def test_changing_interface_management_updates_DNS_zone(self):
        self.patch(settings, "DNS_CONNECT", True)
        network = IPNetwork('192.168.7.1/24')
        ip = factory.getRandomIPInNetwork(network)
        nodegroup = factory.make_node_group(
            network=network, status=NODEGROUP_STATUS.ACCEPTED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS)
        interface = nodegroup.get_managed_interface()
        interface.management = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
        interface.save()
        self.assertEqual([''], self.dig_resolve(generated_hostname(ip)))
        self.assertEqual([''], self.dig_reverse_resolve(ip))

    def test_delete_nodegroup_disables_DNS_zone(self):
        self.patch(settings, "DNS_CONNECT", True)
        network = IPNetwork('192.168.7.1/24')
        ip = factory.getRandomIPInNetwork(network)
        nodegroup = factory.make_node_group(
            network=network, status=NODEGROUP_STATUS.ACCEPTED,
            management=NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS)
        nodegroup.delete()
        self.assertEqual([''], self.dig_resolve(generated_hostname(ip)))
        self.assertEqual([''], self.dig_reverse_resolve(ip))

    def test_add_node_updates_zone(self):
        self.patch(settings, "DNS_CONNECT", True)
        nodegroup, node, lease = self.create_nodegroup_with_lease()
        self.assertDNSMatches(node.hostname, nodegroup.name, lease.ip)

    def test_delete_node_updates_zone(self):
        self.patch(settings, "DNS_CONNECT", True)
        nodegroup, node, lease = self.create_nodegroup_with_lease()
        # Prevent omshell task dispatch.
        self.patch(node_module, "remove_dhcp_host_map")
        node.delete()
        fqdn = "%s.%s" % (node.hostname, nodegroup.name)
        self.assertEqual([''], self.dig_resolve(fqdn))

    def test_change_node_hostname_updates_zone(self):
        self.patch(settings, "DNS_CONNECT", True)
        nodegroup, node, lease = self.create_nodegroup_with_lease()
        node.hostname = factory.make_name('hostname')
        node.save()
        self.assertDNSMatches(node.hostname, nodegroup.name, lease.ip)

    def test_change_node_other_field_does_not_update_zone(self):
        self.patch(settings, "DNS_CONNECT", True)
        nodegroup, node, lease = self.create_nodegroup_with_lease()
        recorder = FakeMethod()
        self.patch(DNSZoneConfigBase, 'write_config', recorder)
        node.error = factory.getRandomString()
        node.save()
        self.assertEqual(0, recorder.call_count)
class TestDHCP(MAASServerTestCase):

    resources = (
        ('celery', FixtureResource(CeleryFixture())),
    )

    def test_is_dhcp_managed_follows_nodegroup_status(self):
        expected_results = {
            NODEGROUP_STATUS.PENDING: False,
            NODEGROUP_STATUS.REJECTED: False,
            NODEGROUP_STATUS.ACCEPTED: True,
        }
        nodegroups = {
            factory.make_node_group(status=status): value
            for status, value in expected_results.items()
        }
        self.patch(settings, "DHCP_CONNECT", True)
        results = {
            nodegroup.status: is_dhcp_managed(nodegroup)
            for nodegroup, value in nodegroups.items()
        }
        self.assertEqual(expected_results, results)

    def test_configure_dhcp_writes_dhcp_config(self):
        mocked_task = self.patch(dhcp, 'write_dhcp_config')
        self.patch(
            settings, 'DEFAULT_MAAS_URL',
            'http://%s/' % factory.getRandomIPAddress())
        nodegroup = factory.make_node_group(
            status=NODEGROUP_STATUS.ACCEPTED,
            dhcp_key=factory.getRandomString(),
            interface=factory.make_name('eth'),
            network=IPNetwork("192.168.102.0/22"))
        self.patch(settings, "DHCP_CONNECT", True)

        configure_dhcp(nodegroup)

        dhcp_params = [
            'subnet_mask',
            'broadcast_ip',
            'router_ip',
            'ip_range_low',
            'ip_range_high',
        ]
        interface = nodegroup.get_managed_interface()
        expected_params = {
            param: getattr(interface, param)
            for param in dhcp_params
        }
        expected_params["omapi_key"] = nodegroup.dhcp_key
        expected_params["dns_servers"] = get_dns_server_address()
        expected_params["ntp_server"] = get_default_config()['ntp_server']
        expected_params["domain_name"] = nodegroup.name
        expected_params["subnet"] = '192.168.100.0'
        expected_params["dhcp_interfaces"] = interface.interface

        args, kwargs = mocked_task.apply_async.call_args
        result_params = kwargs['kwargs']
        # The check that the callback is correct is done in
        # test_configure_dhcp_restart_dhcp_server.
        del result_params['callback']

        self.assertEqual(expected_params, result_params)

    def test_dhcp_config_uses_dns_server_from_cluster_controller(self):
        mocked_task = self.patch(dhcp, 'write_dhcp_config')
        ip = factory.getRandomIPAddress()
        maas_url = 'http://%s/' % ip
        nodegroup = factory.make_node_group(
            maas_url=maas_url,
            status=NODEGROUP_STATUS.ACCEPTED,
            dhcp_key=factory.getRandomString(),
            interface=factory.make_name('eth'),
            network=IPNetwork("192.168.102.0/22"))
        self.patch(settings, "DHCP_CONNECT", True)

        configure_dhcp(nodegroup)

        kwargs = mocked_task.apply_async.call_args[1]['kwargs']
        self.assertEqual(ip, kwargs['dns_servers'])

    def test_configure_dhcp_restart_dhcp_server(self):
        self.patch(tasks, "sudo_write_file")
        mocked_check_call = self.patch(tasks, "call_and_check")
        self.patch(settings, "DHCP_CONNECT", True)
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
        configure_dhcp(nodegroup)
        self.assertEqual(
            mocked_check_call.call_args[0][0],
            ['sudo', '-n', 'service', 'maas-dhcp-server', 'restart'])

    def test_configure_dhcp_is_called_with_valid_dhcp_key(self):
        self.patch(dhcp, 'write_dhcp_config')
        self.patch(settings, "DHCP_CONNECT", True)
        nodegroup = factory.make_node_group(
            status=NODEGROUP_STATUS.ACCEPTED, dhcp_key='')
        configure_dhcp(nodegroup)
        args, kwargs = dhcp.write_dhcp_config.apply_async.call_args
        self.assertThat(kwargs['kwargs']['omapi_key'], EndsWith('=='))

    def test_dhcp_config_gets_written_when_nodegroup_becomes_active(self):
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.PENDING)
        self.patch(settings, "DHCP_CONNECT", True)
        self.patch(dhcp, 'write_dhcp_config')
        nodegroup.accept()
        self.assertEqual(1, dhcp.write_dhcp_config.apply_async.call_count)

    def test_write_dhcp_config_task_routed_to_nodegroup_worker(self):
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.PENDING)
        self.patch(settings, "DHCP_CONNECT", True)
        self.patch(dhcp, 'write_dhcp_config')
        nodegroup.accept()
        args, kwargs = dhcp.write_dhcp_config.apply_async.call_args
        self.assertEqual(nodegroup.work_queue, kwargs['queue'])

    def test_write_dhcp_config_restart_task_routed_to_nodegroup_worker(self):
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.PENDING)
        self.patch(settings, "DHCP_CONNECT", True)
        self.patch(tasks, 'sudo_write_file')
        task = self.patch(dhcp, 'restart_dhcp_server')
        nodegroup.accept()
        args, kwargs = task.subtask.call_args
        self.assertEqual(nodegroup.work_queue, kwargs['options']['queue'])

    def test_dhcp_config_gets_written_when_nodegroupinterface_changes(self):
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
        interface = nodegroup.get_managed_interface()
        self.patch(settings, "DHCP_CONNECT", True)
        self.patch(dhcp, 'write_dhcp_config')
        get_ip_in_network = partial(
            factory.getRandomIPInNetwork, interface.network)
        new_router_ip = next(
            ip for ip in iter(get_ip_in_network, None)
            if ip != interface.router_ip)

        interface.router_ip = new_router_ip
        interface.save()

        args, kwargs = dhcp.write_dhcp_config.apply_async.call_args
        self.assertEqual(
            (1, new_router_ip),
            (
                dhcp.write_dhcp_config.apply_async.call_count,
                kwargs['kwargs']['router_ip'],
            ))

    def test_dhcp_config_gets_written_when_ntp_server_changes(self):
        # When the "ntp_server" Config item is changed, check that all
        # nodegroups get their DHCP config re-written.
        num_active_nodegroups = random.randint(1, 10)
        num_inactive_nodegroups = random.randint(1, 10)
        for x in range(num_active_nodegroups):
            factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
        for x in range(num_inactive_nodegroups):
            factory.make_node_group(status=NODEGROUP_STATUS.PENDING)
        self.patch(settings, "DHCP_CONNECT", True)
        self.patch(dhcp, 'write_dhcp_config')

        Config.objects.set_config(
            "ntp_server", factory.getRandomIPAddress())

        self.assertEqual(
            num_active_nodegroups,
            dhcp.write_dhcp_config.apply_async.call_count)
class TestBootImagesAPI(APITestCase):

    resources = (
        ('celery', FixtureResource(CeleryFixture())),
    )

    def report_images(self, nodegroup, images, client=None):
        if client is None:
            client = self.client
        return client.post(
            reverse('boot_images_handler'),
            {
                'images': json.dumps(images),
                'nodegroup': nodegroup.uuid,
                'op': 'report_boot_images',
            })

    def test_report_boot_images_does_not_work_for_normal_user(self):
        nodegroup = NodeGroup.objects.ensure_master()
        log_in_as_normal_user(self.client)
        response = self.report_images(nodegroup, [])
        self.assertEqual(
            httplib.FORBIDDEN, response.status_code, response.content)

    def test_report_boot_images_works_for_master_worker(self):
        nodegroup = NodeGroup.objects.ensure_master()
        client = make_worker_client(nodegroup)
        response = self.report_images(nodegroup, [], client=client)
        self.assertEqual(httplib.OK, response.status_code)

    def test_report_boot_images_stores_images(self):
        nodegroup = NodeGroup.objects.ensure_master()
        image = make_boot_image_params()
        client = make_worker_client(nodegroup)
        response = self.report_images(nodegroup, [image], client=client)
        self.assertEqual(
            (httplib.OK, "OK"),
            (response.status_code, response.content))
        self.assertTrue(
            BootImage.objects.have_image(nodegroup=nodegroup, **image))

    def test_report_boot_images_ignores_unknown_image_properties(self):
        nodegroup = NodeGroup.objects.ensure_master()
        image = make_boot_image_params()
        image['nonesuch'] = factory.make_name('nonesuch')
        client = make_worker_client(nodegroup)
        response = self.report_images(nodegroup, [image], client=client)
        self.assertEqual(
            (httplib.OK, "OK"),
            (response.status_code, response.content))

    def test_report_boot_images_warns_if_no_images_found(self):
        nodegroup = NodeGroup.objects.ensure_master()
        factory.make_node_group()  # Second nodegroup with no images.
        recorder = self.patch(api, 'register_persistent_error')
        client = make_worker_client(nodegroup)

        response = self.report_images(nodegroup, [], client=client)

        self.assertEqual(
            (httplib.OK, "OK"),
            (response.status_code, response.content))
        self.assertIn(
            COMPONENT.IMPORT_PXE_FILES,
            [args[0][0] for args in recorder.call_args_list])
        # Check that the persistent error message contains a link to the
        # clusters listing.
        self.assertIn(
            "/settings/#accepted-clusters",
            recorder.call_args_list[0][0][1])

    def test_report_boot_images_warns_if_any_nodegroup_has_no_images(self):
        nodegroup = NodeGroup.objects.ensure_master()
        # Second nodegroup with no images.
        factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
        recorder = self.patch(api, 'register_persistent_error')
        client = make_worker_client(nodegroup)
        image = make_boot_image_params()

        response = self.report_images(nodegroup, [image], client=client)

        self.assertEqual(
            (httplib.OK, "OK"),
            (response.status_code, response.content))
        self.assertIn(
            COMPONENT.IMPORT_PXE_FILES,
            [args[0][0] for args in recorder.call_args_list])

    def test_report_boot_images_ignores_non_accepted_groups(self):
        nodegroup = factory.make_node_group(status=NODEGROUP_STATUS.ACCEPTED)
        factory.make_node_group(status=NODEGROUP_STATUS.PENDING)
        factory.make_node_group(status=NODEGROUP_STATUS.REJECTED)
        recorder = self.patch(api, 'register_persistent_error')
        client = make_worker_client(nodegroup)
        image = make_boot_image_params()

        response = self.report_images(nodegroup, [image], client=client)

        self.assertEqual(httplib.OK, response.status_code)
        self.assertEqual(0, recorder.call_count)

    def test_report_boot_images_removes_warning_if_images_found(self):
        self.patch(api, 'register_persistent_error')
        self.patch(api, 'discard_persistent_error')
        nodegroup = factory.make_node_group()
        image = make_boot_image_params()
        client = make_worker_client(nodegroup)

        response = self.report_images(nodegroup, [image], client=client)

        self.assertEqual(
            (httplib.OK, "OK"),
            (response.status_code, response.content))
        self.assertItemsEqual(
            [], api.register_persistent_error.call_args_list)
        api.discard_persistent_error.assert_called_once_with(
            COMPONENT.IMPORT_PXE_FILES)

    def test_worker_calls_report_boot_images(self):
        # report_boot_images() uses the report_boot_images op on the
        # nodes handlers to send image information.
        self.useFixture(
            EnvironmentVariableFixture(
                "MAAS_URL", settings.DEFAULT_MAAS_URL))
        refresh_worker(NodeGroup.objects.ensure_master())
        self.patch(MAASClient, 'post')
        self.patch(tftppath, 'list_boot_images', Mock(return_value=[]))
        self.patch(boot_images, "get_cluster_uuid")

        tasks.report_boot_images.delay()

        # We're not concerned about the payloads (images and nodegroup)
        # here; those are tested in
        # provisioningserver.tests.test_boot_images.
        MAASClient.post.assert_called_once_with(
            reverse('boot_images_handler').lstrip('/'), 'report_boot_images',
            images=ANY, nodegroup=ANY)