    def update_computed_fields(self):
        changed = self.update_computed_fields_no_save()
        if not changed:
            return
        emit_channel_notification('schedules-changed', dict(id=self.id, group_name='schedules'))
        # Must save self here before calling unified_job_template computed fields
        # in order for that method to be correct.
        # Including 'modified' in update_fields keeps save() from bumping the
        # modified timestamp on this bookkeeping save.
        super(Schedule, self).save(update_fields=['next_run', 'dtstart', 'dtend', 'modified'])
        with ignore_inventory_computed_fields():
            self.unified_job_template.update_computed_fields()
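A minimal usage sketch for the method above (the pk and rrule values are hypothetical; assumes the AWX Schedule model):

from awx.main.models import Schedule

schedule = Schedule.objects.get(pk=42)  # hypothetical schedule id
schedule.rrule = 'DTSTART:20300101T000000Z RRULE:FREQ=DAILY;INTERVAL=1'
# Recomputes and saves next_run/dtstart/dtend, then cascades to the
# unified_job_template's own computed fields.
schedule.update_computed_fields()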
Example #2
    def _update_host_summary_from_stats(self, hostnames):
        with ignore_inventory_computed_fields():
            if not self.job or not self.job.inventory:
                logger.info(
                    'Event {} missing job or inventory, host summaries not updated'
                    .format(self.pk))
                return
            job = self.job

            from awx.main.models import Host, JobHostSummary  # circular import
            all_hosts = Host.objects.filter(
                pk__in=self.host_map.values()).only('id')
            existing_host_ids = set(h.id for h in all_hosts)

            summaries = dict()
            for host in hostnames:
                host_id = self.host_map.get(host, None)
                if host_id not in existing_host_ids:
                    host_id = None
                host_stats = {}
                for stat in ('changed', 'dark', 'failures', 'ignored', 'ok',
                             'processed', 'rescued', 'skipped'):
                    try:
                        host_stats[stat] = self.event_data.get(stat, {}).get(
                            host, 0)
                    except AttributeError:  # in case event_data[stat] isn't a dict.
                        pass
                summary = JobHostSummary(created=now(),
                                         modified=now(),
                                         job_id=job.id,
                                         host_id=host_id,
                                         host_name=host,
                                         **host_stats)
                summary.failed = bool(summary.dark or summary.failures)
                summaries[(host_id, host)] = summary

            JobHostSummary.objects.bulk_create(summaries.values())

            # update the last_job_id and last_job_host_summary_id
            # in single queries
            host_mapping = dict((summary['host_id'], summary['id'])
                                for summary in JobHostSummary.objects.filter(
                                    job_id=job.id).values('id', 'host_id'))
            for h in all_hosts:
                h.last_job_id = job.id
                if h.id in host_mapping:
                    h.last_job_host_summary_id = host_mapping[h.id]
            Host.objects.bulk_update(
                all_hosts, ['last_job_id', 'last_job_host_summary_id'])
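For context, the per-host counters read above come from the playbook_on_stats event payload, whose shape is roughly (illustrative values):

event_data = {
    'ok':       {'web01': 5, 'db01': 3},
    'changed':  {'web01': 2},
    'failures': {'db01': 1},
    'dark':     {},  # unreachable hosts
    # 'ignored', 'processed', 'rescued' and 'skipped' follow the same
    # {hostname: count} shape, which is why missing stats default to 0.
}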
Example #3
    def update_computed_fields(self):
        future_rs = dateutil.rrule.rrulestr(self.rrule, forceset=True)
        next_run_actual = future_rs.after(now())

        self.next_run = next_run_actual
        try:
            self.dtstart = future_rs[0]
        except IndexError:
            self.dtstart = None
        self.dtend = None
        if 'until' in self.rrule.lower():
            match_until = re.match(r".*?(UNTIL=[0-9]+T[0-9]+Z)", self.rrule)
            if match_until:  # guard against UNTIL formats the pattern does not cover
                until_date = match_until.groups()[0].split("=")[1]
                self.dtend = make_aware(datetime.datetime.strptime(until_date, "%Y%m%dT%H%M%SZ"), get_default_timezone())
        if 'count' in self.rrule.lower():
            self.dtend = future_rs[-1]
        emit_channel_notification('schedules-changed', dict(id=self.id, group_name='schedules'))
        with ignore_inventory_computed_fields():
            self.unified_job_template.update_computed_fields()
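A standalone sketch of the dateutil behavior this method relies on (assumes python-dateutil; the rule string is arbitrary): occurrences come back in order, so index 0 maps to dtstart and index -1 to dtend for COUNT-bounded rules.

import datetime
import dateutil.rrule

rs = dateutil.rrule.rrulestr(
    'DTSTART:20300101T000000Z RRULE:FREQ=DAILY;COUNT=3', forceset=True)
print(rs[0])   # first occurrence -> dtstart
print(rs[-1])  # last occurrence -> dtend when COUNT bounds the rule
# after() returns the first occurrence strictly later than the reference time.
print(rs.after(datetime.datetime(2030, 1, 1, tzinfo=datetime.timezone.utc)))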
Example #4
    def _update_host_summary_from_stats(self, hostnames):
        with ignore_inventory_computed_fields():
            if not self.job or not self.job.inventory:
                logger.info(
                    'Event {} missing job or inventory, host summaries not updated'
                    .format(self.pk))
                return
            qs = self.job.inventory.hosts.filter(name__in=hostnames)
            job = self.job
            for host in hostnames:
                host_stats = {}
                for stat in ('changed', 'dark', 'failures', 'ignored', 'ok',
                             'processed', 'rescued', 'skipped'):
                    try:
                        host_stats[stat] = self.event_data.get(stat, {}).get(
                            host, 0)
                    except AttributeError:  # in case event_data[stat] isn't a dict.
                        pass
                host_actual = qs.filter(name=host).first()
                if host_actual is not None:
                    host_summary, created = job.job_host_summaries.get_or_create(
                        host=host_actual,
                        host_name=host_actual.name,
                        defaults=host_stats)
                else:
                    host_summary, created = job.job_host_summaries.get_or_create(
                        host_name=host, defaults=host_stats)

                if not created:
                    update_fields = []
                    for stat, value in host_stats.items():
                        if getattr(host_summary, stat) != value:
                            setattr(host_summary, stat, value)
                            update_fields.append(stat)
                    if update_fields:
                        host_summary.save(update_fields=update_fields)
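Worth noting for the branch above: get_or_create applies defaults only when it creates the row, which is why stats on a pre-existing summary are diffed and saved explicitly. A minimal illustration, reusing job from the loop above (name and value are hypothetical):

summary, created = job.job_host_summaries.get_or_create(
    host_name='web01',        # lookup field
    defaults={'ok': 5})       # applied only when created is True
if not created and summary.ok != 5:
    summary.ok = 5            # existing rows must be updated by hand
    summary.save(update_fields=['ok'])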
Example #5
    def _update_host_summary_from_stats(self, hostnames):
        with ignore_inventory_computed_fields():
            try:
                if not self.job or not self.job.inventory:
                    logger.info(
                        'Event {} missing job or inventory, host summaries not updated'
                        .format(self.pk))
                    return
            except ObjectDoesNotExist:
                logger.info(
                    'Event {} missing job or inventory, host summaries not updated'
                    .format(self.pk))
                return
            job = self.job

            from awx.main.models import Host, JobHostSummary, HostMetric  # circular import

            all_hosts = Host.objects.filter(
                pk__in=self.host_map.values()).only('id', 'name')
            existing_host_ids = set(h.id for h in all_hosts)

            summaries = dict()
            updated_hosts_list = list()
            for host in hostnames:
                updated_hosts_list.append(host.lower())
                host_id = self.host_map.get(host, None)
                if host_id not in existing_host_ids:
                    host_id = None
                host_stats = {}
                for stat in ('changed', 'dark', 'failures', 'ignored', 'ok',
                             'processed', 'rescued', 'skipped'):
                    try:
                        host_stats[stat] = self.event_data.get(stat, {}).get(
                            host, 0)
                    except AttributeError:  # in case event_data[stat] isn't a dict.
                        pass
                summary = JobHostSummary(created=now(),
                                         modified=now(),
                                         job_id=job.id,
                                         host_id=host_id,
                                         host_name=host,
                                         **host_stats)
                summary.failed = bool(summary.dark or summary.failures)
                summaries[(host_id, host)] = summary

            JobHostSummary.objects.bulk_create(summaries.values())

            # update the last_job_id and last_job_host_summary_id
            # in single queries
            host_mapping = dict((summary['host_id'], summary['id'])
                                for summary in JobHostSummary.objects.filter(
                                    job_id=job.id).values('id', 'host_id'))
            updated_hosts = set()
            for h in all_hosts:
                # if the hostname *shows up* in the playbook_on_stats event
                if h.name in hostnames:
                    h.last_job_id = job.id
                    updated_hosts.add(h)
                if h.id in host_mapping:
                    h.last_job_host_summary_id = host_mapping[h.id]
                    updated_hosts.add(h)

            Host.objects.bulk_update(
                list(updated_hosts),
                ['last_job_id', 'last_job_host_summary_id'],
                batch_size=100)

            # bulk-create
            current_time = now()
            HostMetric.objects.bulk_create([
                HostMetric(hostname=hostname, last_automation=current_time)
                for hostname in updated_hosts_list
            ],
                                           ignore_conflicts=True,
                                           batch_size=100)
            HostMetric.objects.filter(hostname__in=updated_hosts_list).update(
                last_automation=current_time)
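The two HostMetric statements form an insert-then-update upsert: bulk_create with ignore_conflicts=True inserts only the missing hostnames (assuming hostname carries a unique constraint), and the follow-up update() stamps last_automation on every matching row in one query. The same idiom in isolation (hostnames are hypothetical):

from django.utils.timezone import now
from awx.main.models import HostMetric

names = ['web01', 'db01']
ts = now()
HostMetric.objects.bulk_create(
    [HostMetric(hostname=n, last_automation=ts) for n in names],
    ignore_conflicts=True)  # rows that already exist are skipped, not duplicated
HostMetric.objects.filter(hostname__in=names).update(last_automation=ts)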
Example #6
    def perform_update(self, options, data, inventory_update):
        """Shared method for both awx-manage CLI updates and inventory updates
        from the tasks system.

        This saves the inventory data to the database, calling load_into_database
        but also wraps that method in a host of options processing
        """
        # outside of normal options, these are needed as part of programatic interface
        self.inventory = inventory_update.inventory
        self.inventory_source = inventory_update.inventory_source
        self.inventory_update = inventory_update

        # the update options, could be parser object or dict
        self.overwrite = bool(options.get('overwrite', False))
        self.overwrite_vars = bool(options.get('overwrite_vars', False))
        self.enabled_var = options.get('enabled_var', None)
        self.enabled_value = options.get('enabled_value', None)
        self.group_filter = options.get('group_filter', None) or r'^.+$'
        self.host_filter = options.get('host_filter', None) or r'^.+$'
        self.exclude_empty_groups = bool(options.get('exclude_empty_groups', False))
        self.instance_id_var = options.get('instance_id_var', None)

        try:
            self.group_filter_re = re.compile(self.group_filter)
        except re.error:
            raise CommandError('invalid regular expression for --group-filter')
        try:
            self.host_filter_re = re.compile(self.host_filter)
        except re.error:
            raise CommandError('invalid regular expression for --host-filter')

        begin = time.time()

        # Since perform_update can be invoked either through the awx-manage CLI
        # or from the task system, we need to create a new lock at this level
        # (even though inventory_import.Command.handle -- which calls
        # perform_update -- has its own lock, inventory_ID_import)
        with advisory_lock('inventory_{}_perform_update'.format(self.inventory.id)):

            try:
                self.check_license()
            except PermissionDenied as e:
                self.mark_license_failure(save=True)
                raise e

            try:
                # Check the per-org host limits
                self.check_org_host_limit()
            except PermissionDenied as e:
                self.mark_org_limits_failure(save=True)
                raise e

            if settings.SQL_DEBUG:
                queries_before = len(connection.queries)

            # Update inventory update for this command line invocation.
            with ignore_inventory_computed_fields():
                # TODO: move this to before perform_update
                iu = self.inventory_update
                if iu.status != 'running':
                    with transaction.atomic():
                        self.inventory_update.status = 'running'
                        self.inventory_update.save()

            logger.info('Processing JSON output...')
            inventory = MemInventory(group_filter_re=self.group_filter_re, host_filter_re=self.host_filter_re)
            inventory = dict_to_mem_data(data, inventory=inventory)

            logger.info('Loaded %d groups, %d hosts', len(inventory.all_group.all_groups), len(inventory.all_group.all_hosts))

            if self.exclude_empty_groups:
                inventory.delete_empty_groups()

            self.all_group = inventory.all_group

            if settings.DEBUG:
                # depending on inventory source, this output can be
                # *exceedingly* verbose - crawling a deeply nested
                # inventory/group data structure and printing metadata about
                # each host and its memberships
                #
                # it's easy for this scale of data to overwhelm pexpect,
                # (and it's likely only useful for purposes of debugging the
                # actual inventory import code), so only print it if we have to:
                # https://github.com/ansible/ansible-tower/issues/7414#issuecomment-321615104
                self.all_group.debug_tree()

            with batch_role_ancestor_rebuilding():
                # Per the Django docs, when transaction.atomic() is combined
                # with try/except, the atomic block must sit inside the try block.
                try:
                    # Ensure that this is managed as an atomic SQL transaction,
                    # and thus properly rolled back if there is an issue.
                    with transaction.atomic():
                        # Merge/overwrite inventory into database.
                        if settings.SQL_DEBUG:
                            logger.warning('loading into database...')
                        with ignore_inventory_computed_fields():
                            if getattr(settings, 'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC', True):
                                self.load_into_database()
                            else:
                                with disable_activity_stream():
                                    self.load_into_database()
                            if settings.SQL_DEBUG:
                                queries_before2 = len(connection.queries)
                            self.inventory.update_computed_fields()
                        if settings.SQL_DEBUG:
                            logger.warning('update computed fields took %d queries', len(connection.queries) - queries_before2)

                        # Check if the license is valid.
                        # If the license is not valid, PermissionDenied is raised,
                        # the inventory update is marked as invalid, and
                        # with transaction.atomic() rolls back the changes.
                        license_fail = True
                        self.check_license()

                        # Check the per-org host limits
                        license_fail = False
                        self.check_org_host_limit()
                except PermissionDenied as e:
                    if license_fail:
                        self.mark_license_failure(save=True)
                    else:
                        self.mark_org_limits_failure(save=True)
                    raise e

                if settings.SQL_DEBUG:
                    logger.warning('Inventory import completed for %s in %0.1fs', self.inventory_source.name, time.time() - begin)
                else:
                    logger.info('Inventory import completed for %s in %0.1fs', self.inventory_source.name, time.time() - begin)

            # If we're in debug mode, then log the queries and time
            # used to do the operation.
            if settings.SQL_DEBUG:
                queries_this_import = connection.queries[queries_before:]
                sqltime = sum(float(x['time']) for x in queries_this_import)
                logger.warning('Inventory import required %d queries taking %0.3fs', len(queries_this_import), sqltime)
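Because perform_update only reads options through options.get(...), a plain dict satisfies the interface when calling it from the task system; a minimal sketch (values are hypothetical, and data/inventory_update are assumed to be prepared as in handle() below):

options = {
    'overwrite': True,
    'overwrite_vars': False,
    'exclude_empty_groups': True,
    # group_filter, host_filter, enabled_var, etc. fall back to the
    # defaults applied at the top of perform_update when omitted.
}
cmd = Command()  # the management command class defined in this module
cmd.perform_update(options, data, inventory_update)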
Example #7
    def handle(self, *args, **options):
        # Load inventory and related objects from database.
        inventory_name = options.get('inventory_name', None)
        inventory_id = options.get('inventory_id', None)
        if inventory_name and inventory_id:
            raise CommandError('--inventory-name and --inventory-id are mutually exclusive')
        elif not inventory_name and not inventory_id:
            raise CommandError('--inventory-name or --inventory-id is required')

        with advisory_lock('inventory_{}_import'.format(inventory_id)):
            # Obtain rest of the options needed to run update
            raw_source = options.get('source', None)
            if not raw_source:
                raise CommandError('--source is required')
            verbosity = int(options.get('verbosity', 1))
            self.set_logging_level(verbosity)

            # Load inventory object based on name or ID.
            if inventory_id:
                q = dict(id=inventory_id)
            else:
                q = dict(name=inventory_name)
            try:
                inventory = Inventory.objects.get(**q)
            except Inventory.DoesNotExist:
                raise CommandError('Inventory with %s = %s cannot be found' % list(q.items())[0])
            except Inventory.MultipleObjectsReturned:
                raise CommandError('Inventory with %s = %s returned multiple results' % list(q.items())[0])
            logger.info('Updating inventory %d: %s' % (inventory.pk, inventory.name))

            # Create ad-hoc inventory source and inventory update objects
            with ignore_inventory_computed_fields():
                source = Command.get_source_absolute_path(raw_source)

                inventory_source, created = InventorySource.objects.get_or_create(
                    inventory=inventory,
                    source='file',
                    source_path=os.path.abspath(source),
                    overwrite=bool(options.get('overwrite', False)),
                    overwrite_vars=bool(options.get('overwrite_vars', False)),
                )
                inventory_update = inventory_source.create_inventory_update(
                    _eager_fields=dict(job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
                )

            data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()

            logger.debug('Finished loading from source: %s', source)

            status, tb, exc = 'error', '', None
            try:
                self.perform_update(options, data, inventory_update)
                status = 'successful'
            except Exception as e:
                exc = e
                if isinstance(e, KeyboardInterrupt):
                    status = 'canceled'
                else:
                    tb = traceback.format_exc()

            with ignore_inventory_computed_fields():
                inventory_update = InventoryUpdate.objects.get(pk=inventory_update.pk)
                inventory_update.result_traceback = tb
                inventory_update.status = status
                inventory_update.save(update_fields=['status', 'result_traceback'])
                inventory_source.status = status
                inventory_source.save(update_fields=['status'])

        if exc:
            logger.error(str(exc))
            if isinstance(exc, CommandError):
                sys.exit(1)
            raise exc
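Assuming this handle() belongs to AWX's inventory_import management command, an end-to-end invocation through Django's management framework would look roughly like this (id and path are examples):

from django.core.management import call_command

# Equivalent to: awx-manage inventory_import --inventory-id 42 \
#                    --source /tmp/hosts.ini --overwrite
call_command('inventory_import', inventory_id=42, source='/tmp/hosts.ini', overwrite=True)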