def test_convert_JSON_to_memory_with_vars(JSON_of_inv):
    """Variables in the JSON source must land on the in-memory host/group objects."""
    mem_inventory = dict_to_mem_data(JSON_of_inv)
    # Host and group variables are carried over verbatim
    assert mem_inventory.get_host('my_host').variables == {'foo': 'bar'}
    assert mem_inventory.get_group('my_group').variables == {'foobar': 'barfoo'}
    # Host should be child of group
    group_hosts = mem_inventory.get_group('my_group').hosts
    assert mem_inventory.get_host('group_host') in group_hosts
class TestINIImports:
    """End-to-end checks for importing the INI-style test inventory into the DB."""

    @mock.patch.object(inventory_import.AnsibleInventoryLoader, 'load', mock.MagicMock(return_value=TEST_MEM_OBJECTS))
    def test_inventory_single_ini_import(self, inventory, capsys):
        """One import run should populate groups, hosts, vars and an inventory source."""
        command = inventory_import.Command()
        result = command.handle_noargs(inventory_id=inventory.pk, source=__file__, method='backport')
        stdout, stderr = capsys.readouterr()
        assert result is None
        assert stdout == ''

        assert set(inventory.groups.values_list('name', flat=True)) == {
            'servers', 'dbservers', 'webservers', 'others'}
        assert set(inventory.hosts.values_list('name', flat=True)) == {
            'web1.example.com', 'web2.example.com', 'web3.example.com',
            'db1.example.com', 'db2.example.com', '10.11.12.13',
            '10.12.14.16', 'fe80::1610:9fff:fedd:654b',
            'fe80::1610:9fff:fedd:b654', '::1'}

        reloaded_inv = Inventory.objects.get(pk=inventory.pk)
        assert reloaded_inv.variables_dict == {'vara': 'A'}

        # Groups vars are applied to host in the newer versions
        assert Host.objects.get(name='web1.example.com').variables_dict == {'ansible_ssh_host': 'w1.example.net'}
        # Old version uses `ansible_ssh_port` but new version uses `ansible_port`
        assert Host.objects.get(name='web3.example.com').variables_dict == {'ansible_port': 1022}
        assert Host.objects.get(name='fe80::1610:9fff:fedd:b654').variables_dict == {'ansible_port': 1022}
        assert Host.objects.get(name='10.12.14.16').variables_dict == {'ansible_port': 8022}

        servers_group = Group.objects.get(name='servers')
        assert servers_group.variables_dict == {'varb': 'B'}
        assert set(servers_group.children.values_list('name', flat=True)) == {'dbservers', 'webservers'}
        assert servers_group.hosts.count() == 0

        db_group = Group.objects.get(name='dbservers')
        assert db_group.variables_dict == {'dbvar': 'ugh'}
        assert db_group.children.count() == 0
        assert set(db_group.hosts.values_list('name', flat=True)) == {'db1.example.com', 'db2.example.com'}

        web_group = Group.objects.get(name='webservers')
        assert web_group.variables_dict == {'webvar': 'blah'}
        assert web_group.children.count() == 0
        assert set(web_group.hosts.values_list('name', flat=True)) == {
            'web1.example.com', 'web2.example.com', 'web3.example.com'}

        assert reloaded_inv.inventory_sources.filter().count() == 1
        source = reloaded_inv.inventory_sources.first()
        assert source.source == 'file'
        assert source.inventory_updates.count() == 1
        assert source.inventory_updates.first().status == 'successful'

        # Check creation of ad-hoc inventory source - this was not called with one specified
        assert reloaded_inv.inventory_sources.count() == 1
        assert reloaded_inv.inventory_sources.all()[0].source == 'file'

    @mock.patch.object(
        inventory_import, 'load_inventory_source',
        mock.MagicMock(
            return_value=dict_to_mem_data(
                {
                    "_meta": {
                        "hostvars": {"foo": {"some_hostvar": "foobar"}}
                    },
                    "all": {
                        "children": ["ungrouped"]
                    },
                    "ungrouped": {
                        "hosts": ["foo"]
                    }
                }).all_group
        )
    )
    def test_hostvars_are_saved(self, inventory):
        """Per-host vars from _meta.hostvars end up on the saved Host row."""
        command = inventory_import.Command()
        command.handle_noargs(inventory_id=inventory.pk, source='doesnt matter')
        assert inventory.hosts.count() == 1
        saved_host = inventory.hosts.all()[0]
        assert saved_host.name == 'foo'
        assert saved_host.variables_dict == {"some_hostvar": "foobar"}

    @mock.patch.object(
        inventory_import, 'load_inventory_source',
        mock.MagicMock(
            return_value=dict_to_mem_data(
                {
                    "_meta": {
                        "hostvars": {}
                    },
                    "all": {
                        "children": ["fooland", "barland"]
                    },
                    "fooland": {
                        "children": ["barland"]
                    },
                    "barland": {
                        "children": ["fooland"]
                    }
                }).all_group
        )
    )
    def test_recursive_group_error(self, inventory):
        """Importing mutually-nested groups (fooland <-> barland) must not hang or crash."""
        command = inventory_import.Command()
        command.handle_noargs(inventory_id=inventory.pk, source='doesnt matter')
            "web1.example.com": {
                "ansible_ssh_host": "w1.example.net"
            },
            "web2.example.com": {},
            "web3.example.com": {
                "ansible_port": 1022
            }
        },
        "vars": {
            "webvar": "blah"
        }
    }
}

# In-memory inventory built once from the JSON fixture above and shared by
# the test classes in this module (via the mocked AnsibleInventoryLoader.load).
TEST_MEM_OBJECTS = dict_to_mem_data(TEST_INVENTORY_CONTENT)


def mock_logging(self):
    # No-op replacement for Command.set_logging_level (patched in below) so
    # tests do not reconfigure logging.
    pass


@pytest.mark.django_db
@pytest.mark.inventory_import
@mock.patch.object(inventory_import.Command, 'check_license', mock.MagicMock())
@mock.patch.object(inventory_import.Command, 'set_logging_level', mock_logging)
class TestInvalidOptionsFunctional:

    @mock.patch.object(inventory_import.InstanceGroup.objects, 'get', new=mock.MagicMock(return_value=None))
    def test_invalid_options_invalid_source(self, inventory):
        # Give invalid file to the command
def perform_update(self, options, data, inventory_update):
    """Shared method for both awx-manage CLI updates and inventory updates
    from the tasks system.

    This saves the inventory data to the database, calling load_into_database
    but also wraps that method in a host of options processing.

    :param options: update options; could be a parser object or a dict
                    (only ``.get`` is used, so either works).
    :param data: inventory data as a dict, in the ``--list`` JSON layout
                 (converted via dict_to_mem_data below).
    :param inventory_update: the InventoryUpdate-like object driving this run;
                             supplies the inventory and inventory source.
    :raises CommandError: if a group/host filter is not a valid regex.
    :raises PermissionDenied: on license or per-org host-limit failure.
    """
    # outside of normal options, these are needed as part of programatic interface
    self.inventory = inventory_update.inventory
    self.inventory_source = inventory_update.inventory_source
    self.inventory_update = inventory_update

    # the update options, could be parser object or dict
    self.overwrite = bool(options.get('overwrite', False))
    self.overwrite_vars = bool(options.get('overwrite_vars', False))
    self.enabled_var = options.get('enabled_var', None)
    self.enabled_value = options.get('enabled_value', None)
    # Default filters match everything when not provided.
    self.group_filter = options.get('group_filter', None) or r'^.+$'
    self.host_filter = options.get('host_filter', None) or r'^.+$'
    self.exclude_empty_groups = bool(options.get('exclude_empty_groups', False))
    self.instance_id_var = options.get('instance_id_var', None)

    # Validate user-supplied filters up front so a bad regex fails fast.
    try:
        self.group_filter_re = re.compile(self.group_filter)
    except re.error:
        raise CommandError('invalid regular expression for --group-filter')
    try:
        self.host_filter_re = re.compile(self.host_filter)
    except re.error:
        raise CommandError('invalid regular expression for --host-filter')

    begin = time.time()

    # Since perform_update can be invoked either through the awx-manage CLI
    # or from the task system, we need to create a new lock at this level
    # (even though inventory_import.Command.handle -- which calls
    # perform_update -- has its own lock, inventory_ID_import)
    with advisory_lock('inventory_{}_perform_update'.format(self.inventory.id)):
        try:
            self.check_license()
        except PermissionDenied as e:
            self.mark_license_failure(save=True)
            raise e

        try:
            # Check the per-org host limits
            self.check_org_host_limit()
        except PermissionDenied as e:
            self.mark_org_limits_failure(save=True)
            raise e

        if settings.SQL_DEBUG:
            # Snapshot the query count so total queries can be reported at the end.
            queries_before = len(connection.queries)

        # Update inventory update for this command line invocation.
        with ignore_inventory_computed_fields():
            # TODO: move this to before perform_update
            iu = self.inventory_update
            if iu.status != 'running':
                with transaction.atomic():
                    self.inventory_update.status = 'running'
                    self.inventory_update.save()

        logger.info('Processing JSON output...')
        inventory = MemInventory(
            group_filter_re=self.group_filter_re,
            host_filter_re=self.host_filter_re)
        inventory = dict_to_mem_data(data, inventory=inventory)

        logger.info('Loaded %d groups, %d hosts',
                    len(inventory.all_group.all_groups),
                    len(inventory.all_group.all_hosts))

        if self.exclude_empty_groups:
            inventory.delete_empty_groups()

        self.all_group = inventory.all_group

        if settings.DEBUG:
            # depending on inventory source, this output can be
            # *exceedingly* verbose - crawling a deeply nested
            # inventory/group data structure and printing metadata about
            # each host and its memberships
            #
            # it's easy for this scale of data to overwhelm pexpect,
            # (and it's likely only useful for purposes of debugging the
            # actual inventory import code), so only print it if we have to:
            # https://github.com/ansible/ansible-tower/issues/7414#issuecomment-321615104
            self.all_group.debug_tree()

        with batch_role_ancestor_rebuilding():
            # If using with transaction.atomic() with try ... catch,
            # with transaction.atomic() must be inside the try section of the code as per Django docs
            try:
                # Ensure that this is managed as an atomic SQL transaction,
                # and thus properly rolled back if there is an issue.
                with transaction.atomic():
                    # Merge/overwrite inventory into database.
                    if settings.SQL_DEBUG:
                        logger.warning('loading into database...')
                    with ignore_inventory_computed_fields():
                        if getattr(settings, 'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC', True):
                            self.load_into_database()
                        else:
                            with disable_activity_stream():
                                self.load_into_database()
                        if settings.SQL_DEBUG:
                            queries_before2 = len(connection.queries)
                        self.inventory.update_computed_fields()
                    if settings.SQL_DEBUG:
                        logger.warning('update computed fields took %d queries',
                                       len(connection.queries) - queries_before2)

                    # Check if the license is valid.
                    # If the license is not valid, a CommandError will be thrown,
                    # and inventory update will be marked as invalid.
                    # with transaction.atomic() will roll back the changes.
                    # license_fail distinguishes which check raised, so the
                    # except clause below records the right failure kind.
                    license_fail = True
                    self.check_license()

                    # Check the per-org host limits
                    license_fail = False
                    self.check_org_host_limit()
            except PermissionDenied as e:
                if license_fail:
                    self.mark_license_failure(save=True)
                else:
                    self.mark_org_limits_failure(save=True)
                raise e

            if settings.SQL_DEBUG:
                logger.warning('Inventory import completed for %s in %0.1fs',
                               self.inventory_source.name, time.time() - begin)
            else:
                logger.info('Inventory import completed for %s in %0.1fs',
                            self.inventory_source.name, time.time() - begin)

        # If we're in debug mode, then log the queries and time
        # used to do the operation.
        if settings.SQL_DEBUG:
            queries_this_import = connection.queries[queries_before:]
            sqltime = sum(float(x['time']) for x in queries_this_import)
            logger.warning('Inventory import required %d queries '
                           'taking %0.3fs', len(queries_this_import), sqltime)
def test_host_lists_accepted(JSON_with_lists):
    """Group host lists given as JSON lists must still be loaded into memory."""
    mem_inventory = dict_to_mem_data(JSON_with_lists)
    # The group itself exists under its own name
    assert mem_inventory.get_group('marietta').name == 'marietta'
    # Check that marietta's hosts was saved
    saved_host = mem_inventory.get_host('host6.example.com')
    assert saved_host.name == 'host6.example.com'