Code Example #1
class LaunchTimeConfig(LaunchTimeConfigBase):
    '''
    Common model for all objects that save details of a saved launch config
    WFJT / WJ nodes, schedules, and job launch configs (not all implemented yet)
    '''
    class Meta:
        abstract = True

    # Special case prompting fields, even more special than the other ones
    extra_data = JSONField(blank=True, default=dict)
    survey_passwords = prevent_search(
        JSONField(
            blank=True,
            default=dict,
            editable=False,
        ))
    # Credentials needed for non-unified job / unified JT models
    credentials = models.ManyToManyField('Credential',
                                         related_name='%(class)ss')

    @property
    def extra_vars(self):
        return self.extra_data

    @extra_vars.setter
    def extra_vars(self, extra_vars):
        self.extra_data = extra_vars
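
A note on the field defaults above: JSONField is given the dict callable rather than a literal {}. Django evaluates a literal default once at class-definition time, so every row would share the same mutable object; a callable is invoked per instance. A minimal, runnable sketch of the difference in plain Python:

# Why a callable default is safer than a shared literal:
shared = {}                    # evaluated once; every "row" gets this same dict

def fresh():
    return {}                  # evaluated per call; each "row" gets its own dict

a, b = shared, shared
a['k'] = 'oops'
assert b == {'k': 'oops'}      # mutation through one name is visible via the other

c, d = fresh(), fresh()
c['k'] = 'ok'
assert d == {}                 # independent objects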
Code Example #2
class LaunchTimeConfig(LaunchTimeConfigBase):
    '''
    Common model for all objects that save details of a saved launch config
    WFJT / WJ nodes, schedules, and job launch configs (not all implemented yet)
    '''
    class Meta:
        abstract = True

    # Special case prompting fields, even more special than the other ones
    extra_data = JSONField(
        blank=True,
        default=dict
    )
    survey_passwords = prevent_search(JSONField(
        blank=True,
        default=dict,
        editable=False,
    ))
    # Credentials needed for non-unified job / unified JT models
    credentials = models.ManyToManyField(
        'Credential',
        related_name='%(class)ss'
    )

    @property
    def extra_vars(self):
        return self.extra_data

    @extra_vars.setter
    def extra_vars(self, extra_vars):
        self.extra_data = extra_vars

    def display_extra_vars(self):
        '''
        Hides fields marked as passwords in survey.
        '''
        if hasattr(self, 'survey_passwords') and self.survey_passwords:
            extra_vars = parse_yaml_or_json(self.extra_vars).copy()
            for key, value in self.survey_passwords.items():
                if key in extra_vars:
                    extra_vars[key] = value
            return extra_vars
        else:
            return self.extra_vars

    def display_extra_data(self):
        return self.display_extra_vars()
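
The masking in display_extra_vars() can be exercised in isolation. Below is a standalone sketch of the same substitution; the '$encrypted$' mask string is an assumption based on how AWX replaces survey password answers:

def mask_survey_passwords(extra_vars, survey_passwords):
    # Mirrors display_extra_vars(): swap each surveyed password for its mask.
    shown = dict(extra_vars)
    for key, mask in survey_passwords.items():
        if key in shown:
            shown[key] = mask
    return shown

print(mask_survey_passwords(
    {'user': 'admin', 'my_password': 'hunter2'},
    {'my_password': '$encrypted$'},
))
# -> {'user': 'admin', 'my_password': '$encrypted$'}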
Code Example #3
File: jobs.py Project: srflaxu40/awx
class LaunchTimeConfigBase(BaseModel):
    '''
    Needed as separate class from LaunchTimeConfig because some models
    use `extra_data` and some use `extra_vars`. We cannot change the API,
    so we have to fake it in the model definitions:
     - model defines extra_vars - use this class
     - model needs to use extra_data - use LaunchTimeConfig
    Use this for models which are SurveyMixins and UnifiedJobs or Templates
    '''
    class Meta:
        abstract = True

    # Prompting-related fields that have to be handled as special cases
    inventory = models.ForeignKey(
        'Inventory',
        related_name='%(class)ss',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
        help_text=_('Inventory applied as a prompt, assuming job template prompts for inventory')
    )
    # All standard fields are stored in this dictionary field
    # This is a solution to the nullable CharField problem, specific to prompting
    char_prompts = JSONField(
        blank=True,
        default=dict
    )

    def prompts_dict(self, display=False):
        data = {}
        # Some types may have different prompts, but always subset of JT prompts
        for prompt_name in JobTemplate.get_ask_mapping().keys():
            try:
                field = self._meta.get_field(prompt_name)
            except FieldDoesNotExist:
                field = None
            if isinstance(field, models.ManyToManyField):
                if not self.pk:
                    continue  # unsaved object can't have related many-to-many
                prompt_val = set(getattr(self, prompt_name).all())
                if len(prompt_val) > 0:
                    data[prompt_name] = prompt_val
            elif prompt_name == 'extra_vars':
                if self.extra_vars:
                    if display:
                        data[prompt_name] = self.display_extra_vars()
                    else:
                        data[prompt_name] = self.extra_vars
                    # Depending on model, field type may save and return as string
                    if isinstance(data[prompt_name], str):
                        data[prompt_name] = parse_yaml_or_json(data[prompt_name])
                if self.survey_passwords and not display:
                    data['survey_passwords'] = self.survey_passwords
            else:
                prompt_val = getattr(self, prompt_name)
                if prompt_val is not None:
                    data[prompt_name] = prompt_val
        return data
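
For orientation, the dictionary prompts_dict() returns looks roughly like the sketch below; all values are hypothetical:

# Illustrative prompts_dict() result for a saved launch config:
prompts = {
    'inventory': None,           # an Inventory instance when that prompt is set
    'limit': 'webservers',       # char prompt
    'job_type': 'check',         # char prompt
    'extra_vars': {'x': 1},      # always a dict, parsed from YAML/JSON if needed
    'credentials': set(),        # many-to-many prompt values are gathered as sets
}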
Code Example #4
File: models.py Project: Jorge13santos/170
class Setting(CreatedModifiedModel):

    key = models.CharField(max_length=255)
    value = JSONField(null=True)
    user = prevent_search(
        models.ForeignKey(
            'auth.User',
            related_name='settings',
            default=None,
            null=True,
            editable=False,
            on_delete=models.CASCADE,
        ))

    def __str__(self):
        try:
            json_value = json.dumps(self.value)
        except (TypeError, ValueError):
            # In the rare case the DB value cannot be serialized as JSON.
            json_value = u'<Invalid JSON>'
        if self.user:
            return u'{} ({}) = {}'.format(self.key, self.user, json_value)
        else:
            return u'{} = {}'.format(self.key, json_value)

    def save(self, *args, **kwargs):
        encrypted = settings_registry.is_setting_encrypted(self.key)
        new_instance = not bool(self.pk)
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
        # When first saving to the database, don't store any encrypted field
        # value, but instead save it until after the instance is created.
        # Otherwise, store encrypted value to the database.
        if encrypted:
            if new_instance:
                self._saved_value = self.value
                self.value = ''
            else:
                self.value = encrypt_field(self, 'value')
                if 'value' not in update_fields:
                    update_fields.append('value')
        super(Setting, self).save(*args, **kwargs)
        # After saving a new instance for the first time, set the encrypted
        # field and save again.
        if encrypted and new_instance:
            from awx.main.signals import disable_activity_stream
            with disable_activity_stream():
                self.value = self._saved_value
                self.save(update_fields=['value'])

    @classmethod
    def get_cache_key(cls, key):
        return key

    @classmethod
    def get_cache_id_key(cls, key):
        return '{}_ID'.format(key)
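
A hedged sketch of the two-phase save this model performs for encrypted settings; the setting name is hypothetical, and the key derivation details live in the encrypt_field helper used above:

s = Setting(key='EXAMPLE_SECRET', value='hunter2')  # assume this key is registered as encrypted
s.save()
# 1st save: no pk exists yet, so the real value is stashed and '' is written.
# 2nd save (triggered internally): the stash is encrypted with the new pk
# and written via save(update_fields=['value']).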
Code Example #5
File: notifications.py Project: srflaxu40/awx
class Notification(CreatedModifiedModel):
    '''
    A notification event emitted when a NotificationTemplate is run
    '''

    NOTIFICATION_STATE_CHOICES = [
        ('pending', _('Pending')),
        ('successful', _('Successful')),
        ('failed', _('Failed')),
    ]

    class Meta:
        app_label = 'main'
        ordering = ('pk', )

    notification_template = models.ForeignKey('NotificationTemplate',
                                              related_name='notifications',
                                              on_delete=models.CASCADE,
                                              editable=False)
    status = models.CharField(
        max_length=20,
        choices=NOTIFICATION_STATE_CHOICES,
        default='pending',
        editable=False,
    )
    error = models.TextField(
        blank=True,
        default='',
        editable=False,
    )
    notifications_sent = models.IntegerField(
        default=0,
        editable=False,
    )
    notification_type = models.CharField(
        max_length=32,
        choices=NotificationTemplate.NOTIFICATION_TYPE_CHOICES,
    )
    recipients = models.TextField(
        blank=True,
        default='',
        editable=False,
    )
    subject = models.TextField(
        blank=True,
        default='',
        editable=False,
    )
    body = JSONField(blank=True)

    def get_absolute_url(self, request=None):
        return reverse('api:notification_detail',
                       kwargs={'pk': self.pk},
                       request=request)
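
Illustrative usage only: one Notification row records one delivery attempt for a NotificationTemplate, and it stays 'pending' until the send task updates it. The template object and recipients are hypothetical:

n = Notification.objects.create(
    notification_template=template,                  # an existing NotificationTemplate
    notification_type=template.notification_type,
    recipients='ops@example.com',
    subject='Job #42 succeeded',
)
assert n.status == 'pending'                         # default declared on the model above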
Code Example #6
class InstanceGroup(models.Model, RelatedJobsMixin):
    """A model representing a Queue/Group of AWX Instances."""
    objects = InstanceGroupManager()

    name = models.CharField(max_length=250, unique=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    instances = models.ManyToManyField(
        'Instance',
        related_name='rampart_groups',
        editable=False,
        help_text=_('Instances that are members of this InstanceGroup'),
    )
    controller = models.ForeignKey(
        'InstanceGroup',
        related_name='controlled_groups',
        help_text=_('Instance Group to remotely control this group.'),
        editable=False,
        default=None,
        null=True,
        on_delete=models.CASCADE,  # required on modern Django; matches Example #12
    )
    policy_instance_percentage = models.IntegerField(
        default=0,
        help_text=_("Percentage of Instances to automatically assign to this group"))
    policy_instance_minimum = models.IntegerField(
        default=0,
        help_text=_("Static minimum number of Instances to automatically assign to this group"))
    policy_instance_list = JSONField(
        default=list,
        blank=True,
        help_text=_("List of exact-match Instances that will always be automatically assigned to this group"))

    def get_absolute_url(self, request=None):
        return reverse('api:instance_group_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    @property
    def capacity(self):
        return sum([inst.capacity for inst in self.instances.all()])

    '''
    RelatedJobsMixin
    '''

    def _get_related_jobs(self):
        return UnifiedJob.objects.filter(instance_group=self)

    class Meta:
        app_label = 'main'
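
The capacity property is a plain sum over member instances, so an empty group reports zero. A hedged ORM sketch (the hostname filter is illustrative):

ig = InstanceGroup.objects.create(name='build-nodes')
ig.instances.set(Instance.objects.filter(hostname__startswith='build-'))
total = ig.capacity   # sum of .capacity over the members; 0 when there are none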
Code Example #7
class SurveyJobMixin(models.Model):
    class Meta:
        abstract = True

    survey_passwords = prevent_search(
        JSONField(
            blank=True,
            default=dict,
            editable=False,
        ))

    def display_extra_vars(self):
        '''
        Hides fields marked as passwords in survey.
        '''
        if self.survey_passwords:
            extra_vars = json.loads(self.extra_vars)
            for key, value in self.survey_passwords.items():
                if key in extra_vars:
                    extra_vars[key] = value
            return json.dumps(extra_vars)
        else:
            return self.extra_vars

    def decrypted_extra_vars(self):
        '''
        Decrypts fields marked as passwords in survey.
        '''
        if self.survey_passwords:
            extra_vars = json.loads(self.extra_vars)
            for key in self.survey_passwords:
                value = extra_vars.get(key)
                if value and isinstance(
                        value,
                        six.string_types) and value.startswith('$encrypted$'):
                    extra_vars[key] = decrypt_value(
                        get_encryption_key('value', pk=None), value)
            return json.dumps(extra_vars)
        else:
            return self.extra_vars
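
The two methods above differ only in direction: display_extra_vars() keeps the mask for safe output, while decrypted_extra_vars() reverses it for execution. A hedged sketch with illustrative values; the '$encrypted$' prefix is how encrypted survey answers are marked:

job.extra_vars = '{"token": "$encrypted$..."}'   # illustrative stored value
job.survey_passwords = {'token': '$encrypted$'}
job.display_extra_vars()     # token stays masked as '$encrypted$'
job.decrypted_extra_vars()   # token restored via decrypt_value(get_encryption_key(...))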
Code Example #8
File: mixins.py Project: AlexBaily/awx
class SurveyJobMixin(models.Model):
    class Meta:
        abstract = True

    survey_passwords = prevent_search(
        JSONField(
            blank=True,
            default=dict,
            editable=False,
        ))

    def display_extra_vars(self):
        '''
        Hides fields marked as passwords in survey.
        '''
        if self.survey_passwords:
            extra_vars = json.loads(self.extra_vars)
            for key, value in self.survey_passwords.items():
                if key in extra_vars:
                    extra_vars[key] = value
            return json.dumps(extra_vars)
        else:
            return self.extra_vars
Code Example #9
File: jobs.py Project: rbywater/awx
class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin,
          TaskManagerJobMixin):
    '''
    A job applies a project (with playbook) to an inventory source with a given
    credential.  It represents a single invocation of ansible-playbook with the
    given parameters.
    '''
    class Meta:
        app_label = 'main'
        ordering = ('id', )

    job_template = models.ForeignKey(
        'JobTemplate',
        related_name='jobs',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    hosts = models.ManyToManyField(
        'Host',
        related_name='jobs',
        editable=False,
        through='JobHostSummary',
    )
    artifacts = JSONField(
        blank=True,
        default=dict,
        editable=False,
    )
    scm_revision = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        editable=False,
        verbose_name=_('SCM Revision'),
        help_text=_(
            'The SCM Revision from the Project used for this job, if available'
        ),
    )
    project_update = models.ForeignKey(
        'ProjectUpdate',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
        help_text=_('The SCM Refresh task used to make sure the playbooks were available for the job run'),
    )

    @classmethod
    def _get_parent_field_name(cls):
        return 'job_template'

    @classmethod
    def _get_task_class(cls):
        from awx.main.tasks import RunJob
        return RunJob

    @classmethod
    def supports_isolation(cls):
        return True

    def _global_timeout_setting(self):
        return 'DEFAULT_JOB_TIMEOUT'

    @classmethod
    def _get_unified_job_template_class(cls):
        return JobTemplate

    def get_absolute_url(self, request=None):
        return reverse('api:job_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    def get_ui_url(self):
        return urljoin(settings.TOWER_URL_BASE, "/#/jobs/{}".format(self.pk))

    @property
    def ansible_virtualenv_path(self):
        # The order here enforces precedence (it matters): job template first,
        # then the project, then the project's organization.
        for virtualenv in (
                self.job_template.custom_virtualenv if self.job_template else None,
                self.project.custom_virtualenv,
                self.project.organization.custom_virtualenv):
            if virtualenv:
                return virtualenv
        return settings.ANSIBLE_VENV_PATH

    @property
    def event_class(self):
        return JobEvent

    @property
    def ask_diff_mode_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_diff_mode_on_launch
        return False

    @property
    def ask_variables_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_variables_on_launch
        return False

    @property
    def ask_limit_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_limit_on_launch
        return False

    @property
    def ask_tags_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_tags_on_launch
        return False

    @property
    def ask_skip_tags_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_skip_tags_on_launch
        return False

    @property
    def ask_job_type_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_job_type_on_launch
        return False

    @property
    def ask_verbosity_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_verbosity_on_launch
        return False

    @property
    def ask_inventory_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_inventory_on_launch
        return False

    @property
    def ask_credential_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_credential_on_launch
        return False

    def get_passwords_needed_to_start(self):
        return self.passwords_needed_to_start

    def _get_hosts(self, **kwargs):
        Host = JobHostSummary._meta.get_field('host').related_model
        kwargs['job_host_summaries__job__pk'] = self.pk
        return Host.objects.filter(**kwargs)

    def retry_qs(self, status):
        '''
        Returns Host queryset that will be used to produce the `limit`
        field in a retry on a subset of hosts
        '''
        kwargs = {}
        if status == 'all':
            pass
        elif status == 'failed':
            # Special case for parity with Ansible .retry files
            kwargs['job_host_summaries__failed'] = True
        elif status in ['ok', 'changed', 'unreachable']:
            if status == 'unreachable':
                status_field = 'dark'
            else:
                status_field = status
            kwargs['job_host_summaries__{}__gt'.format(status_field)] = 0
        else:
            raise ParseError(
                _('{status_value} is not a valid status option.').format(
                    status_value=status))
        return self._get_hosts(**kwargs)

    @property
    def task_impact(self):
        # NOTE: We sorta have to assume the host count matches and that forks default to 5
        from awx.main.models.inventory import Host
        if self.launch_type == 'callback':
            count_hosts = 1
        else:
            count_hosts = Host.objects.filter(
                inventory__jobs__pk=self.pk).count()
        return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1

    @property
    def successful_hosts(self):
        return self._get_hosts(job_host_summaries__ok__gt=0)

    @property
    def failed_hosts(self):
        return self._get_hosts(job_host_summaries__failures__gt=0)

    @property
    def changed_hosts(self):
        return self._get_hosts(job_host_summaries__changed__gt=0)

    @property
    def dark_hosts(self):
        return self._get_hosts(job_host_summaries__dark__gt=0)

    @property
    def unreachable_hosts(self):
        return self.dark_hosts

    @property
    def skipped_hosts(self):
        return self._get_hosts(job_host_summaries__skipped__gt=0)

    @property
    def processed_hosts(self):
        return self._get_hosts(job_host_summaries__processed__gt=0)

    def notification_data(self, block=5):
        data = super(Job, self).notification_data()
        all_hosts = {}
        # NOTE: Probably related to job event slowness, remove at some point -matburt
        summaries = self.job_host_summaries.all()
        while block > 0 and not len(summaries):
            time.sleep(1)
            block -= 1
            # Re-query: len() evaluates and caches the queryset, so it must be
            # rebuilt each pass or the loop would never see new summaries.
            summaries = self.job_host_summaries.all()
        for h in summaries:
            all_hosts[h.host_name] = dict(failed=h.failed,
                                          changed=h.changed,
                                          dark=h.dark,
                                          failures=h.failures,
                                          ok=h.ok,
                                          processed=h.processed,
                                          skipped=h.skipped)
        data.update(
            dict(inventory=self.inventory.name if self.inventory else None,
                 project=self.project.name if self.project else None,
                 playbook=self.playbook,
                 credential=getattr(self.get_deprecated_credential('ssh'),
                                    'name', None),
                 limit=self.limit,
                 extra_vars=self.display_extra_vars(),
                 hosts=all_hosts))
        return data

    def _resources_sufficient_for_launch(self):
        return not (self.inventory_id is None or self.project_id is None)

    def display_artifacts(self):
        '''
        Hides artifacts if they are marked as no_log type artifacts.
        '''
        artifacts = self.artifacts
        if artifacts.get('_ansible_no_log', False):
            return "$hidden due to Ansible no_log flag$"
        return artifacts

    @property
    def preferred_instance_groups(self):
        if self.project is not None and self.project.organization is not None:
            organization_groups = [
                x for x in self.project.organization.instance_groups.all()
            ]
        else:
            organization_groups = []
        if self.inventory is not None:
            inventory_groups = [
                x for x in self.inventory.instance_groups.all()
            ]
        else:
            inventory_groups = []
        if self.job_template is not None:
            template_groups = [
                x for x in self.job_template.instance_groups.all()
            ]
        else:
            template_groups = []
        selected_groups = template_groups + inventory_groups + organization_groups
        if not selected_groups:
            return self.global_instance_groups
        return selected_groups

    def awx_meta_vars(self):
        r = super(Job, self).awx_meta_vars()
        if self.project:
            for name in ('awx', 'tower'):
                r['{}_project_revision'.format(
                    name)] = self.project.scm_revision
        if self.job_template:
            for name in ('awx', 'tower'):
                r['{}_job_template_id'.format(name)] = self.job_template.pk
                r['{}_job_template_name'.format(name)] = self.job_template.name
        return r

    '''
    JobNotificationMixin
    '''

    def get_notification_templates(self):
        if not self.job_template:
            return NotificationTemplate.objects.none()
        return self.job_template.notification_templates

    def get_notification_friendly_name(self):
        return "Job"

    @property
    def memcached_fact_key(self):
        return '{}'.format(self.inventory.id)

    def memcached_fact_host_key(self, host_name):
        return '{}-{}'.format(self.inventory.id,
                              base64.b64encode(host_name.encode('utf-8')))

    def memcached_fact_modified_key(self, host_name):
        return '{}-{}-modified'.format(
            self.inventory.id, base64.b64encode(host_name.encode('utf-8')))

    def _get_inventory_hosts(self, only=('name', 'ansible_facts', 'modified')):
        return self.inventory.hosts.only(*only)

    def _get_memcache_connection(self):
        return memcache.Client([settings.CACHES['default']['LOCATION']],
                               debug=0)

    def start_job_fact_cache(self):
        if not self.inventory:
            return

        cache = self._get_memcache_connection()

        host_names = []

        for host in self._get_inventory_hosts():
            host_key = self.memcached_fact_host_key(host.name)
            modified_key = self.memcached_fact_modified_key(host.name)

            if cache.get(modified_key) is None:
                if host.ansible_facts_modified:
                    host_modified = host.ansible_facts_modified.replace(
                        tzinfo=tzutc()).isoformat()
                else:
                    host_modified = datetime.datetime.now(tzutc()).isoformat()
                cache.set(host_key, json.dumps(host.ansible_facts))
                cache.set(modified_key, host_modified)

            host_names.append(host.name)

        cache.set(self.memcached_fact_key, host_names)

    def finish_job_fact_cache(self):
        if not self.inventory:
            return

        cache = self._get_memcache_connection()

        hosts = self._get_inventory_hosts()
        for host in hosts:
            host_key = self.memcached_fact_host_key(host.name)
            modified_key = self.memcached_fact_modified_key(host.name)

            modified = cache.get(modified_key)
            if modified is None:
                cache.delete(host_key)
                continue

            # Save facts if cache is newer than DB
            modified = parser.parse(modified, tzinfos=[tzutc()])
            if not host.ansible_facts_modified or modified > host.ansible_facts_modified:
                ansible_facts = cache.get(host_key)
                try:
                    ansible_facts = json.loads(ansible_facts)
                except Exception:
                    ansible_facts = None

                if ansible_facts is None:
                    cache.delete(host_key)
                    continue
                host.ansible_facts = ansible_facts
                host.ansible_facts_modified = modified
                if 'insights' in ansible_facts and 'system_id' in ansible_facts['insights']:
                    host.insights_system_id = ansible_facts['insights']['system_id']
                host.save()
                system_tracking_logger.info(
                    'New fact for inventory {} host {}'.format(
                        smart_str(host.inventory.name), smart_str(host.name)),
                    extra=dict(inventory_id=host.inventory.id,
                               host_name=host.name,
                               ansible_facts=host.ansible_facts,
                               ansible_facts_modified=host.ansible_facts_modified.isoformat(),
                               job_id=self.id))
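
The fact-cache pair above brackets a playbook run; a hedged sketch of the round trip:

job.start_job_fact_cache()    # seed memcached with each host's facts + modified stamp
# ... ansible-playbook runs; the fact cache plugin refreshes entries and stamps ...
job.finish_job_fact_cache()   # write back only hosts whose cache stamp beats the DB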
Code Example #10
class UnifiedJob(PolymorphicModel, PasswordFieldsModel,
                 CommonModelNameNotUnique, UnifiedJobTypeStringMixin,
                 TaskManagerUnifiedJobMixin):
    '''
    Concrete base class for unified job run by the task engine.
    '''

    STATUS_CHOICES = UnifiedJobTemplate.JOB_STATUS_CHOICES

    LAUNCH_TYPE_CHOICES = [
        ('manual', _('Manual')),  # Job was started manually by a user.
        ('relaunch', _('Relaunch')),  # Job was started via relaunch.
        ('callback', _('Callback')),  # Job was started via host callback.
        ('scheduled', _('Scheduled')),  # Job was started from a schedule.
        ('dependency',
         _('Dependency')),  # Job was started as a dependency of another job.
        ('workflow', _('Workflow')),  # Job was started from a workflow job.
        ('sync', _('Sync')),  # Job was started from a project sync.
        ('scm', _('SCM Update'))  # Job was created as an Inventory SCM sync.
    ]

    PASSWORD_FIELDS = ('start_args', )

    # NOTE: Working around a django-polymorphic issue: https://github.com/django-polymorphic/django-polymorphic/issues/229
    base_manager_name = 'base_objects'

    class Meta:
        app_label = 'main'

    old_pk = models.PositiveIntegerField(
        null=True,
        default=None,
        editable=False,
    )
    unified_job_template = models.ForeignKey(
        'UnifiedJobTemplate',
        null=True,  # Some jobs can be run without a template.
        default=None,
        editable=False,
        related_name='%(class)s_unified_jobs',
        on_delete=models.SET_NULL,
    )
    launch_type = models.CharField(
        max_length=20,
        choices=LAUNCH_TYPE_CHOICES,
        default='manual',
        editable=False,
    )
    schedule = models.ForeignKey(  # Which schedule entry was responsible for starting this job.
        'Schedule',
        null=True,
        default=None,
        editable=False,
        on_delete=models.SET_NULL,
    )
    dependent_jobs = models.ManyToManyField(
        'self',
        editable=False,
        related_name='%(class)s_blocked_jobs+',
    )
    execution_node = models.TextField(
        blank=True,
        default='',
        editable=False,
        help_text=_("The node the job executed on."),
    )
    notifications = models.ManyToManyField(
        'Notification',
        editable=False,
        related_name='%(class)s_notifications',
    )
    cancel_flag = models.BooleanField(
        blank=True,
        default=False,
        editable=False,
    )
    status = models.CharField(
        max_length=20,
        choices=STATUS_CHOICES,
        default='new',
        editable=False,
    )
    failed = models.BooleanField(
        default=False,
        editable=False,
    )
    started = models.DateTimeField(
        null=True,
        default=None,
        editable=False,
        help_text=_("The date and time the job was queued for starting."),
    )
    finished = models.DateTimeField(
        null=True,
        default=None,
        editable=False,
        help_text=_("The date and time the job finished execution."),
    )
    elapsed = models.DecimalField(
        max_digits=12,
        decimal_places=3,
        editable=False,
        help_text=_("Elapsed time in seconds that the job ran."),
    )
    job_args = prevent_search(
        models.TextField(
            blank=True,
            default='',
            editable=False,
        ))
    job_cwd = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        editable=False,
    )
    job_env = prevent_search(
        JSONField(
            blank=True,
            default=dict,
            editable=False,
        ))
    job_explanation = models.TextField(
        blank=True,
        default='',
        editable=False,
        help_text=_("A status field to indicate the state of the job if it wasn't able to run and capture stdout"),
    )
    start_args = prevent_search(
        models.TextField(
            blank=True,
            default='',
            editable=False,
        ))
    result_stdout_text = models.TextField(
        blank=True,
        default='',
        editable=False,
    )
    result_stdout_file = models.TextField(  # FilePathField?
        blank=True,
        default='',
        editable=False,
    )
    result_traceback = models.TextField(
        blank=True,
        default='',
        editable=False,
    )
    celery_task_id = models.CharField(
        max_length=100,
        blank=True,
        default='',
        editable=False,
    )
    labels = models.ManyToManyField("Label",
                                    blank=True,
                                    related_name='%(class)s_labels')
    instance_group = models.ForeignKey(
        'InstanceGroup',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
        help_text=_('The Rampart/Instance group the job was run under'),
    )
    credentials = models.ManyToManyField(
        'Credential',
        related_name='%(class)ss',
    )

    def get_absolute_url(self, request=None):
        real_instance = self.get_real_instance()
        if real_instance != self:
            return real_instance.get_absolute_url(request=request)
        else:
            return ''

    def get_ui_url(self):
        real_instance = self.get_real_instance()
        if real_instance != self:
            return real_instance.get_ui_url()
        else:
            return ''

    @classmethod
    def _get_task_class(cls):
        raise NotImplementedError  # Implement in subclasses.

    @classmethod
    def supports_isolation(cls):
        return False

    @classmethod
    def _get_parent_field_name(cls):
        return 'unified_job_template'  # Override in subclasses.

    @classmethod
    def _get_unified_job_template_class(cls):
        '''
        Return subclass of UnifiedJobTemplate that applies to this unified job.
        '''
        raise NotImplementedError  # Implement in subclass.

    def _global_timeout_setting(self):
        "Override in child classes, None value indicates this is not configurable"
        return None

    def _resources_sufficient_for_launch(self):
        return True

    def __unicode__(self):
        return u'%s-%s-%s' % (self.created, self.id, self.status)

    @property
    def log_format(self):
        return '{} {} ({})'.format(get_type_for_model(type(self)), self.id,
                                   self.status)

    def _get_parent_instance(self):
        return getattr(self, self._get_parent_field_name(), None)

    def _update_parent_instance_no_save(self,
                                        parent_instance,
                                        update_fields=None):
        # A fresh list per call; a mutable default argument would accumulate
        # field names across invocations.
        if update_fields is None:
            update_fields = []

        def parent_instance_set(key, val):
            setattr(parent_instance, key, val)
            if key not in update_fields:
                update_fields.append(key)

        if parent_instance:
            if self.status in ('pending', 'waiting', 'running'):
                if parent_instance.current_job != self:
                    parent_instance_set('current_job', self)
                # Update parent with all the 'good' states of its child
                if parent_instance.status != self.status:
                    parent_instance_set('status', self.status)
            elif self.status in ('successful', 'failed', 'error', 'canceled'):
                if parent_instance.current_job == self:
                    parent_instance_set('current_job', None)
                parent_instance_set('last_job', self)
                parent_instance_set('last_job_failed', self.failed)

        return update_fields

    def _update_parent_instance(self):
        parent_instance = self._get_parent_instance()
        if parent_instance:
            update_fields = self._update_parent_instance_no_save(
                parent_instance)
            parent_instance.save(update_fields=update_fields)

    def save(self, *args, **kwargs):
        """Save the job, with current status, to the database.
        Ensure that all data is consistent before doing so.
        """
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])

        # Get status before save...
        status_before = self.status or 'new'

        # If this job already exists in the database, retrieve a copy of
        # the job in its prior state.
        if self.pk:
            self_before = self.__class__.objects.get(pk=self.pk)
            if self_before.status != self.status:
                status_before = self_before.status

        # Sanity check: Is this a failure? Ensure that the failure value
        # matches the status.
        failed = bool(self.status in ('failed', 'error', 'canceled'))
        if self.failed != failed:
            self.failed = failed
            if 'failed' not in update_fields:
                update_fields.append('failed')

        # Sanity check: Has the job just started? If so, mark down its start
        # time.
        if self.status == 'running' and not self.started:
            self.started = now()
            if 'started' not in update_fields:
                update_fields.append('started')

        # Sanity check: Has the job just completed? If so, mark down its
        # completion time, and record its output to the database.
        if self.status in ('successful', 'failed', 'error',
                           'canceled') and not self.finished:
            # Record the `finished` time.
            self.finished = now()
            if 'finished' not in update_fields:
                update_fields.append('finished')

        # If we have a start and finished time, and haven't already calculated
        # out the time that elapsed, do so.
        if self.started and self.finished and not self.elapsed:
            td = self.finished - self.started
            elapsed = (td.microseconds +
                       (td.seconds + td.days * 24 * 3600) * 10**6) / (10**6 *
                                                                      1.0)
        else:
            elapsed = 0.0
        if self.elapsed != elapsed:
            self.elapsed = str(elapsed)
            if 'elapsed' not in update_fields:
                update_fields.append('elapsed')

        # Ensure that the job template information is current.
        if self.unified_job_template != self._get_parent_instance():
            self.unified_job_template = self._get_parent_instance()
            if 'unified_job_template' not in update_fields:
                update_fields.append('unified_job_template')

        # Okay; we're done. Perform the actual save.
        result = super(UnifiedJob, self).save(*args, **kwargs)

        # If status changed, update the parent instance.
        if self.status != status_before:
            self._update_parent_instance()

        # Done.
        return result

    def delete(self):
        if self.result_stdout_file != "":
            try:
                os.remove(self.result_stdout_file)
            except Exception:
                pass
        super(UnifiedJob, self).delete()

    def copy_unified_job(self, limit=None):
        '''
        Returns saved object, including related fields.
        Create a copy of this unified job for the purpose of relaunch
        '''
        unified_job_class = self.__class__
        unified_jt_class = self._get_unified_job_template_class()
        parent_field_name = unified_job_class._get_parent_field_name()

        fields = unified_jt_class._get_unified_job_field_names() + [
            parent_field_name
        ]
        unified_job = copy_model_by_class(self, unified_job_class, fields, {})
        unified_job.launch_type = 'relaunch'
        if limit:
            unified_job.limit = limit
        unified_job.save()

        # Labels are copied here
        copy_m2m_relationships(self, unified_job, fields)
        return unified_job

    def result_stdout_raw_handle(self, attempt=0):
        """Return a file-like object containing the standard out of the
        job's result.
        """
        msg = {
            'pending': 'Waiting for results...',
            'missing': 'stdout capture is missing',
        }
        if self.result_stdout_text:
            return StringIO(self.result_stdout_text)
        else:
            if not os.path.exists(self.result_stdout_file) or os.stat(
                    self.result_stdout_file).st_size < 1:
                return StringIO(msg['missing' if self.finished else 'pending'])

            # There is a potential timing issue here, because another
            # process may be deleting the stdout file after it is written
            # to the database.
            #
            # Therefore, if we get an IOError (which generally means the
            # file does not exist), reload info from the database and
            # try again.
            try:
                return codecs.open(self.result_stdout_file,
                                   "r",
                                   encoding='utf-8')
            except IOError:
                if attempt < 3:
                    self.result_stdout_text = type(self).objects.get(
                        id=self.id).result_stdout_text
                    return self.result_stdout_raw_handle(attempt=attempt + 1)
                else:
                    return StringIO(
                        msg['missing' if self.finished else 'pending'])

    def _escape_ascii(self, content):
        # Remove ANSI escape sequences used to embed event data.
        content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '',
                         content)
        # Remove ANSI color escape sequences.
        content = re.sub(r'\x1b[^m]*m', '', content)
        return content

    def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False):
        content = self.result_stdout_raw_handle().read()
        if redact_sensitive:
            content = UriCleaner.remove_sensitive(content)
        if escape_ascii:
            content = self._escape_ascii(content)
        return content

    @property
    def result_stdout_raw(self):
        return self._result_stdout_raw()

    @property
    def result_stdout(self):
        return self._result_stdout_raw(escape_ascii=True)

    @property
    def result_stdout_size(self):
        try:
            return os.stat(self.result_stdout_file).st_size
        except Exception:
            return len(self.result_stdout)

    def _result_stdout_raw_limited(self,
                                   start_line=0,
                                   end_line=None,
                                   redact_sensitive=True,
                                   escape_ascii=False):
        return_buffer = u""
        if end_line is not None:
            end_line = int(end_line)
        stdout_lines = self.result_stdout_raw_handle().readlines()
        absolute_end = len(stdout_lines)
        for line in stdout_lines[int(start_line):end_line]:
            return_buffer += line
        if int(start_line) < 0:
            start_actual = len(stdout_lines) + int(start_line)
            end_actual = len(stdout_lines)
        else:
            start_actual = int(start_line)
            if end_line is not None:
                end_actual = min(int(end_line), len(stdout_lines))
            else:
                end_actual = len(stdout_lines)

        if redact_sensitive:
            return_buffer = UriCleaner.remove_sensitive(return_buffer)
        if escape_ascii:
            return_buffer = self._escape_ascii(return_buffer)

        return return_buffer, start_actual, end_actual, absolute_end

    def result_stdout_raw_limited(self,
                                  start_line=0,
                                  end_line=None,
                                  redact_sensitive=False):
        return self._result_stdout_raw_limited(start_line, end_line,
                                               redact_sensitive)

    def result_stdout_limited(self,
                              start_line=0,
                              end_line=None,
                              redact_sensitive=False):
        return self._result_stdout_raw_limited(start_line,
                                               end_line,
                                               redact_sensitive,
                                               escape_ascii=True)

    @property
    def spawned_by_workflow(self):
        return self.launch_type == 'workflow'

    @property
    def workflow_job_id(self):
        if self.spawned_by_workflow:
            try:
                return self.unified_job_node.workflow_job.pk
            except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
                pass
        return None

    @property
    def workflow_node_id(self):
        if self.spawned_by_workflow:
            try:
                return self.unified_job_node.pk
            except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
                pass
        return None

    @property
    def celery_task(self):
        try:
            if self.celery_task_id:
                return TaskResult.objects.get(task_id=self.celery_task_id)
        except TaskResult.DoesNotExist:
            pass

    def get_passwords_needed_to_start(self):
        return []

    def handle_extra_data(self, extra_data):
        if hasattr(self, 'extra_vars') and extra_data:
            extra_data_dict = {}
            try:
                extra_data_dict = parse_yaml_or_json(extra_data,
                                                     silent_failure=False)
            except Exception as e:
                logger.warning("Exception deserializing extra vars: " + str(e))
            evars = self.extra_vars_dict
            evars.update(extra_data_dict)
            self.update_fields(extra_vars=json.dumps(evars))

    @property
    def can_start(self):
        return bool(self.status in ('new', 'waiting'))

    @property
    def task_impact(self):
        raise NotImplementedError  # Implement in subclass.

    def websocket_emit_data(self):
        ''' Return extra data that should be included when submitting data to the browser over the websocket connection '''
        websocket_data = dict()
        if self.spawned_by_workflow:
            websocket_data.update(
                dict(workflow_job_id=self.workflow_job_id,
                     workflow_node_id=self.workflow_node_id))
        return websocket_data

    def _websocket_emit_status(self, status):
        try:
            status_data = dict(unified_job_id=self.id, status=status)
            if status == 'waiting':
                if self.instance_group:
                    status_data[
                        'instance_group_name'] = self.instance_group.name
                else:
                    status_data['instance_group_name'] = None
            status_data.update(self.websocket_emit_data())
            status_data['group_name'] = 'jobs'
            emit_channel_notification('jobs-status_changed', status_data)

            if self.spawned_by_workflow:
                status_data['group_name'] = "workflow_events"
                emit_channel_notification(
                    'workflow_events-' + str(self.workflow_job_id),
                    status_data)
        except IOError:  # includes socket errors
            logger.exception(
                '%s failed to emit channel msg about status change',
                self.log_format)

    def websocket_emit_status(self, status):
        connection.on_commit(lambda: self._websocket_emit_status(status))

    def notification_data(self):
        return dict(id=self.id,
                    name=self.name,
                    url=self.get_ui_url(),
                    created_by=smart_text(self.created_by),
                    started=self.started.isoformat()
                    if self.started is not None else None,
                    finished=self.finished.isoformat()
                    if self.finished is not None else None,
                    status=self.status,
                    traceback=self.result_traceback)

    def pre_start(self, **kwargs):
        if not self.can_start:
            self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (
                self._meta.verbose_name, self.status, str(('new', 'waiting')))
            self.save(update_fields=['job_explanation'])
            return (False, None)

        needed = self.get_passwords_needed_to_start()
        try:
            start_args = json.loads(decrypt_field(self, 'start_args'))
        except Exception:
            start_args = None

        if start_args in (None, ''):
            start_args = kwargs

        opts = dict([(field, start_args.get(field, '')) for field in needed])

        if not all(opts.values()):
            missing_fields = ', '.join([k for k, v in opts.items() if not v])
            self.job_explanation = u'Missing needed fields: %s.' % missing_fields
            self.save(update_fields=['job_explanation'])
            return (False, None)

        if 'extra_vars' in kwargs:
            self.handle_extra_data(kwargs['extra_vars'])

        return (True, opts)

    def start_celery_task(self, opts, error_callback, success_callback, queue):
        kwargs = {
            'link_error': error_callback,
            'link': success_callback,
            'queue': None,
            'task_id': None,
        }
        if not self.celery_task_id:
            raise RuntimeError("Expected celery_task_id to be set on model.")
        kwargs['task_id'] = self.celery_task_id
        task_class = self._get_task_class()
        from awx.main.models.ha import InstanceGroup
        ig = InstanceGroup.objects.get(name=queue)
        args = [self.pk]
        if ig.controller_id:
            if self.supports_isolation():  # case of jobs and ad hoc commands
                isolated_instance = ig.instances.order_by('-capacity').first()
                args.append(isolated_instance.hostname)
            else:  # proj & inv updates, system jobs run on controller
                queue = ig.controller.name
        kwargs['queue'] = queue
        task_class().apply_async(args, opts, **kwargs)

    def start(self, error_callback, success_callback, **kwargs):
        '''
        Start the task running via Celery.
        '''
        (res, opts) = self.pre_start(**kwargs)
        if res:
            self.start_celery_task(opts, error_callback, success_callback)
        return res

    def signal_start(self, **kwargs):
        """Notify the task runner system to begin work on this task."""

        # Sanity check: Are we able to start the job? If not, do not attempt
        # to do so.
        if not self.can_start:
            return False

        # Get any passwords or other data that are prerequisites to running
        # the job.
        needed = self.get_passwords_needed_to_start()
        opts = dict([(field, kwargs.get(field, '')) for field in needed])
        if not all(opts.values()):
            return False
        if 'extra_vars' in kwargs:
            self.handle_extra_data(kwargs['extra_vars'])

        # Sanity check: If we are running unit tests, then run synchronously.
        if getattr(settings, 'CELERY_UNIT_TEST', False):
            return self.start(None, None, **kwargs)

        # Save the pending status, and inform the SocketIO listener.
        self.update_fields(start_args=json.dumps(kwargs), status='pending')
        self.websocket_emit_status("pending")

        from awx.main.scheduler.tasks import run_job_launch
        connection.on_commit(lambda: run_job_launch.delay(self.id))

        # Each type of unified job has a different Task class; get the
        # appropriate one.
        # task_type = get_type_for_model(self)

        # Actually tell the task runner to run this task.
        # FIXME: This will deadlock the task runner
        #from awx.main.tasks import notify_task_runner
        #notify_task_runner.delay({'id': self.id, 'metadata': kwargs,
        #                          'task_type': task_type})

        # Done!
        return True

    @property
    def can_cancel(self):
        return bool(self.status in CAN_CANCEL)

    def _force_cancel(self):
        # Update the status to 'canceled' if we can detect that the job
        # really isn't running (i.e. celery has crashed or forcefully
        # killed the worker).
        task_statuses = ('STARTED', 'SUCCESS', 'FAILED', 'RETRY', 'REVOKED')
        try:
            taskmeta = self.celery_task
            if not taskmeta or taskmeta.status not in task_statuses:
                return
            from celery import current_app
            i = current_app.control.inspect()
            for v in (i.active() or {}).values():
                if taskmeta.task_id in [x['id'] for x in v]:
                    return
            for v in (i.reserved() or {}).values():
                if taskmeta.task_id in [x['id'] for x in v]:
                    return
            for v in (i.revoked() or {}).values():
                if taskmeta.task_id in [x['id'] for x in v]:
                    return
            for v in (i.scheduled() or {}).values():
                if taskmeta.task_id in [x['id'] for x in v]:
                    return
            instance = self.__class__.objects.get(pk=self.pk)
            if instance.can_cancel:
                instance.status = 'canceled'
                update_fields = ['status']
                if not instance.job_explanation:
                    instance.job_explanation = 'Forced cancel'
                    update_fields.append('job_explanation')
                instance.save(update_fields=update_fields)
                self.websocket_emit_status("canceled")
        except Exception:  # FIXME: Log this exception!
            if settings.DEBUG:
                raise

    def _build_job_explanation(self):
        if not self.job_explanation:
            return 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \
                   (self.model_to_str(), self.name, self.id)
        return None

    def cancel(self, job_explanation=None, is_chain=False):
        if self.can_cancel:
            if not is_chain:
                # Use an explicit loop; map() is lazy on Python 3, so the
                # original map(lambda ...) form would never run the cancels.
                for x in self.get_jobs_fail_chain():
                    x.cancel(job_explanation=self._build_job_explanation(),
                             is_chain=True)

            if not self.cancel_flag:
                self.cancel_flag = True
                cancel_fields = ['cancel_flag']
                if self.status in ('pending', 'waiting', 'new'):
                    self.status = 'canceled'
                    cancel_fields.append('status')
                if job_explanation is not None:
                    self.job_explanation = job_explanation
                    cancel_fields.append('job_explanation')
                self.save(update_fields=cancel_fields)
                self.websocket_emit_status("canceled")
            if settings.CELERY_BROKER_URL.startswith('amqp://'):
                self._force_cancel()
        return self.cancel_flag

    @property
    def preferred_instance_groups(self):
        '''
        Return the Instance/Rampart Groups preferred by this unified job's template
        '''
        if not self.unified_job_template:
            return []
        template_groups = [
            x for x in self.unified_job_template.instance_groups.all()
        ]
        return template_groups

    @property
    def global_instance_groups(self):
        from awx.main.models.ha import InstanceGroup
        default_instance_group = InstanceGroup.objects.filter(name='tower')
        if default_instance_group.exists():
            return [default_instance_group.first()]
        return []
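
A hedged sketch of the launch path through the methods above; create_unified_job() as the entry point is an assumption about the template-side API:

job = job_template.create_unified_job()    # assumed UnifiedJobTemplate helper
if job.signal_start(ssh_password='...'):   # passwords per get_passwords_needed_to_start()
    # signal_start() stored start_args encrypted, set status='pending',
    # emitted the websocket event, and queued run_job_launch for the scheduler.
    pass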
Code Example #11
class WorkflowJobNode(WorkflowNodeBase):
    job = models.OneToOneField(
        'UnifiedJob',
        related_name='unified_job_node',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    workflow_job = models.ForeignKey(
        'WorkflowJob',
        related_name='workflow_job_nodes',
        blank=True,
        null=True,
        default=None,
        on_delete=models.CASCADE,
    )
    ancestor_artifacts = JSONField(
        blank=True,
        default=dict,
        editable=False,
    )

    def get_absolute_url(self, request=None):
        return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)

    def get_job_kwargs(self):
        '''
        In advance of creating a new unified job as part of a workflow,
        this method builds the attributes to use
        It alters the node by saving its updated version of
        ancestor_artifacts, making it available to subsequent nodes.
        '''
        # reject/accept prompted fields
        data = {}
        ujt_obj = self.unified_job_template
        if ujt_obj is not None:
            accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**self.prompts_dict())
            if errors:
                logger.info(_('Bad launch configuration starting template {template_pk} as part of '
                              'workflow {workflow_pk}. Errors:\n{error_text}').format(
                                  template_pk=ujt_obj.pk,
                                  workflow_pk=self.pk,
                                  error_text=errors))
            data.update(accepted_fields)  # missing fields are handled in the scheduler
        # build ancestor artifacts, save them to node model for later
        aa_dict = {}
        for parent_node in self.get_parent_nodes():
            aa_dict.update(parent_node.ancestor_artifacts)
            if parent_node.job and hasattr(parent_node.job, 'artifacts'):
                aa_dict.update(parent_node.job.artifacts)
        if aa_dict:
            self.ancestor_artifacts = aa_dict
            self.save(update_fields=['ancestor_artifacts'])
        # process password list
        password_dict = {}
        if '_ansible_no_log' in aa_dict:
            for key in aa_dict:
                if key != '_ansible_no_log':
                    password_dict[key] = REPLACE_STR
        if self.workflow_job.survey_passwords:
            password_dict.update(self.workflow_job.survey_passwords)
        if self.survey_passwords:
            password_dict.update(self.survey_passwords)
        if password_dict:
            data['survey_passwords'] = password_dict
        # process extra_vars
        extra_vars = data.get('extra_vars', {})
        if aa_dict:
            functional_aa_dict = copy(aa_dict)
            functional_aa_dict.pop('_ansible_no_log', None)
            extra_vars.update(functional_aa_dict)
        # Workflow Job extra_vars higher precedence than ancestor artifacts
        if self.workflow_job and self.workflow_job.extra_vars:
            extra_vars.update(self.workflow_job.extra_vars_dict)
        if extra_vars:
            data['extra_vars'] = extra_vars
        # ensure that unified jobs created by WorkflowJobs are marked
        data['_eager_fields'] = {'launch_type': 'workflow'}
        return data
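get_job_kwargs layers several dictionaries in a fixed precedence order:
prompted fields first, then ancestor artifacts, then the workflow job's own
extra_vars on top. A minimal sketch of that layering with plain dicts
(build_extra_vars and the sample values are illustrative, not part of AWX):

def build_extra_vars(prompted, ancestor_artifacts, workflow_extra_vars):
    extra_vars = dict(prompted.get('extra_vars', {}))
    artifacts = dict(ancestor_artifacts)
    artifacts.pop('_ansible_no_log', None)  # marker key is never passed on
    extra_vars.update(artifacts)            # artifacts override prompts
    extra_vars.update(workflow_extra_vars)  # workflow vars win overall
    return extra_vars

merged = build_extra_vars(
    {'extra_vars': {'region': 'us-east-1'}},
    {'_ansible_no_log': True, 'token': 'abc123'},
    {'region': 'eu-west-1'},
)
assert merged == {'region': 'eu-west-1', 'token': 'abc123'}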
Code example #12
File: ha.py Project: sky-joker/awx-arm64arch
class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
    """A model representing a Queue/Group of AWX Instances."""
    objects = InstanceGroupManager()

    name = models.CharField(max_length=250, unique=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    instances = models.ManyToManyField(
        'Instance',
        related_name='rampart_groups',
        editable=False,
        help_text=_('Instances that are members of this InstanceGroup'),
    )
    controller = models.ForeignKey(
        'InstanceGroup',
        related_name='controlled_groups',
        help_text=_('Instance Group to remotely control this group.'),
        editable=False,
        default=None,
        null=True,
        on_delete=models.CASCADE)
    policy_instance_percentage = models.IntegerField(
        default=0,
        help_text=_(
            "Percentage of Instances to automatically assign to this group"))
    policy_instance_minimum = models.IntegerField(
        default=0,
        help_text=_("Static minimum number of Instances to automatically "
                    "assign to this group"))
    policy_instance_list = JSONField(
        default=list,
        blank=True,
        help_text=_("List of exact-match Instances that will always be "
                    "automatically assigned to this group"))

    POLICY_FIELDS = frozenset(
        ('policy_instance_list', 'policy_instance_minimum',
         'policy_instance_percentage', 'controller'))

    def get_absolute_url(self, request=None):
        return reverse('api:instance_group_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    @property
    def capacity(self):
        return sum([inst.capacity for inst in self.instances.all()])

    @property
    def jobs_running(self):
        return UnifiedJob.objects.filter(status__in=('running', 'waiting'),
                                         instance_group=self).count()

    @property
    def jobs_total(self):
        return UnifiedJob.objects.filter(instance_group=self).count()

    @property
    def is_controller(self):
        return self.controlled_groups.exists()

    @property
    def is_isolated(self):
        return bool(self.controller)

    '''
    RelatedJobsMixin
    '''

    def _get_related_jobs(self):
        return UnifiedJob.objects.filter(instance_group=self)

    class Meta:
        app_label = 'main'

    def fit_task_to_most_remaining_capacity_instance(self, task):
        instance_most_capacity = None
        for i in self.instances.filter(capacity__gt=0,
                                       enabled=True).order_by('hostname'):
            if i.remaining_capacity >= task.task_impact and \
                    (instance_most_capacity is None or
                     i.remaining_capacity > instance_most_capacity.remaining_capacity):
                instance_most_capacity = i
        return instance_most_capacity

    def find_largest_idle_instance(self):
        largest_instance = None
        for i in self.instances.filter(capacity__gt=0,
                                       enabled=True).order_by('hostname'):
            if i.jobs_running == 0:
                if largest_instance is None:
                    largest_instance = i
                elif i.capacity > largest_instance.capacity:
                    largest_instance = i
        return largest_instance

    def choose_online_controller_node(self):
        return random.choice(
            list(
                self.controller.instances.filter(
                    capacity__gt=0, enabled=True).values_list('hostname',
                                                              flat=True)))
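The two scheduling helpers above scan instances in hostname order and pick
either the instance with the most remaining capacity that still fits the
task, or the largest instance that is currently idle. A minimal sketch of the
best-fit selection over plain objects (FakeInstance is an illustrative
stand-in for the Instance model, not part of AWX):

from dataclasses import dataclass

@dataclass
class FakeInstance:
    hostname: str
    remaining_capacity: int

def best_fit(instances, task_impact):
    best = None
    for inst in sorted(instances, key=lambda i: i.hostname):
        # keep the largest remaining capacity that can still hold the task
        if inst.remaining_capacity >= task_impact and (
                best is None or
                inst.remaining_capacity > best.remaining_capacity):
            best = inst
    return best

nodes = [FakeInstance('a', 3), FakeInstance('b', 10), FakeInstance('c', 7)]
assert best_fit(nodes, 5).hostname == 'b'
assert best_fit(nodes, 20) is None  # nothing fits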
Code example #13
class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin):
    """
    A project represents a playbook git repo that can access a set of inventories
    """

    SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
    FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'instance_groups', 'credentials']
    FIELDS_TO_DISCARD_AT_COPY = ['local_path']
    FIELDS_TRIGGER_UPDATE = frozenset(['scm_url', 'scm_branch', 'scm_type', 'scm_refspec'])

    class Meta:
        app_label = 'main'
        ordering = ('id',)

    default_environment = models.ForeignKey(
        'ExecutionEnvironment',
        null=True,
        blank=True,
        default=None,
        on_delete=polymorphic.SET_NULL,
        related_name='+',
        help_text=_('The default execution environment for jobs run using this project.'),
    )
    scm_update_on_launch = models.BooleanField(
        default=False,
        help_text=_('Update the project when a job is launched that uses the project.'),
    )
    scm_update_cache_timeout = models.PositiveIntegerField(
        default=0,
        blank=True,
        help_text=_('The number of seconds after the last project update ran that a new ' 'project update will be launched as a job dependency.'),
    )
    allow_override = models.BooleanField(
        default=False,
        help_text=_('Allow changing the SCM branch or revision in a job template ' 'that uses this project.'),
    )

    scm_revision = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        editable=False,
        verbose_name=_('SCM Revision'),
        help_text=_('The last revision fetched by a project update'),
    )

    playbook_files = JSONField(
        blank=True,
        default=list,
        editable=False,
        verbose_name=_('Playbook Files'),
        help_text=_('List of playbooks found in the project'),
    )

    inventory_files = JSONField(
        blank=True,
        default=list,
        editable=False,
        verbose_name=_('Inventory Files'),
        help_text=_('Suggested list of content that could be Ansible inventory in the project'),
    )

    admin_role = ImplicitRoleField(
        parent_role=[
            'organization.project_admin_role',
            'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
        ]
    )

    use_role = ImplicitRoleField(
        parent_role='admin_role',
    )

    update_role = ImplicitRoleField(
        parent_role='admin_role',
    )

    read_role = ImplicitRoleField(
        parent_role=[
            'organization.auditor_role',
            'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
            'use_role',
            'update_role',
        ]
    )

    @classmethod
    def _get_unified_job_class(cls):
        return ProjectUpdate

    @classmethod
    def _get_unified_job_field_names(cls):
        return set(f.name for f in ProjectOptions._meta.fields) | set(['name', 'description', 'organization'])

    def clean_organization(self):
        if self.pk:
            old_org_id = getattr(self, '_prior_values_store', {}).get('organization_id', None)
            if self.organization_id != old_org_id and self.jobtemplates.exists():
                raise ValidationError({'organization': _('Organization cannot be changed when in use by job templates.')})
        return self.organization

    def save(self, *args, **kwargs):
        new_instance = not bool(self.pk)
        pre_save_vals = getattr(self, '_prior_values_store', {})
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
        skip_update = bool(kwargs.pop('skip_update', False))
        # Create auto-generated local path if project uses SCM.
        if self.pk and self.scm_type and not self.local_path.startswith('_'):
            slug_name = slugify(str(self.name)).replace(u'-', u'_')
            self.local_path = u'_%d__%s' % (int(self.pk), slug_name)
            if 'local_path' not in update_fields:
                update_fields.append('local_path')
        # Do the actual save.
        super(Project, self).save(*args, **kwargs)
        if new_instance:
            update_fields = []
            # Generate local_path for SCM after initial save (so we have a PK).
            if self.scm_type and not self.local_path.startswith('_'):
                update_fields.append('local_path')
            if update_fields:
                from awx.main.signals import disable_activity_stream

                with disable_activity_stream():
                    self.save(update_fields=update_fields)
        # If we just created a new project with SCM, start the initial update.
        # also update if certain fields have changed
        relevant_change = any(pre_save_vals.get(fd_name, None) != self._prior_values_store.get(fd_name, None) for fd_name in self.FIELDS_TRIGGER_UPDATE)
        if (relevant_change or new_instance) and (not skip_update) and self.scm_type:
            self.update()

    def _get_current_status(self):
        if self.scm_type:
            if self.current_job and self.current_job.status:
                return self.current_job.status
            elif not self.last_job:
                return 'never updated'
            # inherit the child job status on failure
            elif self.last_job_failed:
                return self.last_job.status
            # Return the successful status
            else:
                return self.last_job.status
        elif not self.get_project_path():
            return 'missing'
        else:
            return 'ok'

    def _get_last_job_run(self):
        if self.scm_type and self.last_job:
            return self.last_job.finished
        else:
            project_path = self.get_project_path()
            if project_path:
                try:
                    mtime = os.path.getmtime(smart_str(project_path))
                    dt = datetime.datetime.fromtimestamp(mtime)
                    return make_aware(dt, get_default_timezone())
                except os.error:
                    pass

    def _can_update(self):
        return bool(self.scm_type)

    def create_project_update(self, **kwargs):
        return self.create_unified_job(**kwargs)

    @property
    def cache_timeout_blocked(self):
        if not self.last_job_run:
            return False
        if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) > now():
            return True
        return False

    @property
    def needs_update_on_launch(self):
        if self.scm_type and self.scm_update_on_launch:
            if not self.last_job_run:
                return True
            if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) <= now():
                return True
        return False

    @property
    def cache_id(self):
        return str(self.last_job_id)

    @property
    def notification_templates(self):
        base_notification_templates = NotificationTemplate.objects
        error_notification_templates = list(base_notification_templates.filter(unifiedjobtemplate_notification_templates_for_errors=self))
        started_notification_templates = list(base_notification_templates.filter(unifiedjobtemplate_notification_templates_for_started=self))
        success_notification_templates = list(base_notification_templates.filter(unifiedjobtemplate_notification_templates_for_success=self))
        # Get Organization NotificationTemplates
        if self.organization is not None:
            error_notification_templates = set(
                error_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_errors=self.organization))
            )
            started_notification_templates = set(
                started_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_started=self.organization))
            )
            success_notification_templates = set(
                success_notification_templates + list(base_notification_templates.filter(organization_notification_templates_for_success=self.organization))
            )
        return dict(error=list(error_notification_templates), started=list(started_notification_templates), success=list(success_notification_templates))

    def get_absolute_url(self, request=None):
        return reverse('api:project_detail', kwargs={'pk': self.pk}, request=request)

    '''
    RelatedJobsMixin
    '''

    def _get_related_jobs(self):
        return UnifiedJob.objects.non_polymorphic().filter(models.Q(job__project=self) | models.Q(projectupdate__project=self))

    def delete(self, *args, **kwargs):
        paths_to_delete = (self.get_project_path(check_if_exists=False), self.get_cache_path())
        r = super(Project, self).delete(*args, **kwargs)
        for path_to_delete in paths_to_delete:
            if self.scm_type and path_to_delete:  # non-manual, concrete path
                from awx.main.tasks import delete_project_files

                delete_project_files.delay(path_to_delete)
        return r
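cache_timeout_blocked and needs_update_on_launch are two sides of the same
timestamp check: an update is suppressed while last_job_run plus
scm_update_cache_timeout is still in the future. A minimal sketch of that
arithmetic (update_needed is an illustrative helper, not part of AWX):

import datetime

def update_needed(last_job_run, cache_timeout_seconds, current_time):
    # True when no update has run yet, or the cached one has expired
    if last_job_run is None:
        return True
    expiry = last_job_run + datetime.timedelta(seconds=cache_timeout_seconds)
    return expiry <= current_time

now_ = datetime.datetime(2020, 1, 1, 12, 0)
ran = datetime.datetime(2020, 1, 1, 11, 59)
assert update_needed(ran, 30, now_)       # 30s window expired long ago
assert not update_needed(ran, 300, now_)  # still inside the 5-minute window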
Code example #14
class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin,
          TaskManagerJobMixin, CustomVirtualEnvMixin):
    '''
    A job applies a project (with playbook) to an inventory source with a given
    credential.  It represents a single invocation of ansible-playbook with the
    given parameters.
    '''
    class Meta:
        app_label = 'main'
        ordering = ('id', )

    job_template = models.ForeignKey(
        'JobTemplate',
        related_name='jobs',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    hosts = models.ManyToManyField(
        'Host',
        related_name='jobs',
        editable=False,
        through='JobHostSummary',
    )
    artifacts = JSONField(
        blank=True,
        default=dict,
        editable=False,
    )
    scm_revision = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        editable=False,
        verbose_name=_('SCM Revision'),
        help_text=_(
            'The SCM Revision from the Project used for this job, if available'
        ),
    )
    project_update = models.ForeignKey(
        'ProjectUpdate',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
        help_text=_('The SCM Refresh task used to make sure the playbooks '
                    'were available for the job run'),
    )
    job_slice_number = models.PositiveIntegerField(
        blank=True,
        default=0,
        help_text=_(
            "If part of a sliced job, the ID of the inventory slice operated on. "
            "If not part of sliced job, parameter is not used."),
    )
    job_slice_count = models.PositiveIntegerField(
        blank=True,
        default=1,
        help_text=_(
            "If ran as part of sliced jobs, the total number of slices. "
            "If 1, job is not part of a sliced job."),
    )

    def _get_parent_field_name(self):
        return 'job_template'

    @classmethod
    def _get_task_class(cls):
        from awx.main.tasks import RunJob
        return RunJob

    @classmethod
    def supports_isolation(cls):
        return True

    def _global_timeout_setting(self):
        return 'DEFAULT_JOB_TIMEOUT'

    @classmethod
    def _get_unified_job_template_class(cls):
        return JobTemplate

    def get_absolute_url(self, request=None):
        return reverse('api:job_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    def get_ui_url(self):
        return urljoin(settings.TOWER_URL_BASE,
                       "/#/jobs/playbook/{}".format(self.pk))

    @property
    def ansible_virtualenv_path(self):
        # the order here enforces precedence (it matters)
        for virtualenv in (self.job_template.custom_virtualenv
                           if self.job_template else None,
                           self.project.custom_virtualenv,
                           self.project.organization.custom_virtualenv
                           if self.project.organization else None):
            if virtualenv:
                return virtualenv
        return settings.ANSIBLE_VENV_PATH

    @property
    def event_class(self):
        return JobEvent

    def copy_unified_job(self, **new_prompts):
        # Needed for job slice relaunch consistency; do not re-spawn the
        # workflow job, and target the same slice as the original job
        new_prompts['_prevent_slicing'] = True
        new_prompts.setdefault('_eager_fields', {})
        new_prompts['_eager_fields'][
            'job_slice_number'] = self.job_slice_number
        new_prompts['_eager_fields']['job_slice_count'] = self.job_slice_count
        return super(Job, self).copy_unified_job(**new_prompts)

    @property
    def ask_diff_mode_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_diff_mode_on_launch
        return False

    @property
    def ask_variables_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_variables_on_launch
        return False

    @property
    def ask_limit_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_limit_on_launch
        return False

    @property
    def ask_tags_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_tags_on_launch
        return False

    @property
    def ask_skip_tags_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_skip_tags_on_launch
        return False

    @property
    def ask_job_type_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_job_type_on_launch
        return False

    @property
    def ask_verbosity_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_verbosity_on_launch
        return False

    @property
    def ask_inventory_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_inventory_on_launch
        return False

    @property
    def ask_credential_on_launch(self):
        if self.job_template is not None:
            return self.job_template.ask_credential_on_launch
        return False

    def get_passwords_needed_to_start(self):
        return self.passwords_needed_to_start

    def _get_hosts(self, **kwargs):
        Host = JobHostSummary._meta.get_field('host').related_model
        kwargs['job_host_summaries__job__pk'] = self.pk
        return Host.objects.filter(**kwargs)

    def retry_qs(self, status):
        '''
        Returns Host queryset that will be used to produce the `limit`
        field in a retry on a subset of hosts
        '''
        kwargs = {}
        if status == 'all':
            pass
        elif status == 'failed':
            # Special case for parity with Ansible .retry files
            kwargs['job_host_summaries__failed'] = True
        elif status in ['ok', 'changed', 'unreachable']:
            if status == 'unreachable':
                status_field = 'dark'
            else:
                status_field = status
            kwargs['job_host_summaries__{}__gt'.format(status_field)] = 0
        else:
            raise ParseError(
                _('{status_value} is not a valid status option.').format(
                    status_value=status))
        return self._get_hosts(**kwargs)

    @property
    def task_impact(self):
        # NOTE: We sorta have to assume the host count matches and that forks default to 5
        from awx.main.models.inventory import Host
        if self.launch_type == 'callback':
            count_hosts = 2
        else:
            count_hosts = Host.objects.filter(
                inventory__jobs__pk=self.pk).count()
            if self.job_slice_count > 1:
                # Integer division intentional
                count_hosts = (count_hosts + self.job_slice_count -
                               self.job_slice_number) // self.job_slice_count
        return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1

    @property
    def successful_hosts(self):
        return self._get_hosts(job_host_summaries__ok__gt=0)

    @property
    def failed_hosts(self):
        return self._get_hosts(job_host_summaries__failures__gt=0)

    @property
    def changed_hosts(self):
        return self._get_hosts(job_host_summaries__changed__gt=0)

    @property
    def dark_hosts(self):
        return self._get_hosts(job_host_summaries__dark__gt=0)

    @property
    def unreachable_hosts(self):
        return self.dark_hosts

    @property
    def skipped_hosts(self):
        return self._get_hosts(job_host_summaries__skipped__gt=0)

    @property
    def processed_hosts(self):
        return self._get_hosts(job_host_summaries__processed__gt=0)

    def notification_data(self, block=5):
        data = super(Job, self).notification_data()
        all_hosts = {}
        # NOTE: Probably related to job event slowness, remove at some point -matburt
        if block:
            summaries = self.job_host_summaries.all()
            while block > 0 and not len(summaries):
                time.sleep(1)
                block -= 1
                # re-query; the queryset evaluated above is cached
                summaries = self.job_host_summaries.all()
        else:
            summaries = self.job_host_summaries.all()
        for h in summaries:
            all_hosts[h.host_name] = dict(failed=h.failed,
                                          changed=h.changed,
                                          dark=h.dark,
                                          failures=h.failures,
                                          ok=h.ok,
                                          processed=h.processed,
                                          skipped=h.skipped)
        data.update(
            dict(inventory=self.inventory.name if self.inventory else None,
                 project=self.project.name if self.project else None,
                 playbook=self.playbook,
                 credential=getattr(self.get_deprecated_credential('ssh'),
                                    'name', None),
                 limit=self.limit,
                 extra_vars=self.display_extra_vars(),
                 hosts=all_hosts))
        return data

    def _resources_sufficient_for_launch(self):
        return not (self.inventory_id is None or self.project_id is None)

    def display_artifacts(self):
        '''
        Hides artifacts if they are marked as no_log type artifacts.
        '''
        artifacts = self.artifacts
        if artifacts.get('_ansible_no_log', False):
            return "$hidden due to Ansible no_log flag$"
        return artifacts

    @property
    def preferred_instance_groups(self):
        if self.project is not None and self.project.organization is not None:
            organization_groups = [
                x for x in self.project.organization.instance_groups.all()
            ]
        else:
            organization_groups = []
        if self.inventory is not None:
            inventory_groups = [
                x for x in self.inventory.instance_groups.all()
            ]
        else:
            inventory_groups = []
        if self.job_template is not None:
            template_groups = [
                x for x in self.job_template.instance_groups.all()
            ]
        else:
            template_groups = []
        selected_groups = template_groups + inventory_groups + organization_groups
        if not selected_groups:
            return self.global_instance_groups
        return selected_groups

    def awx_meta_vars(self):
        r = super(Job, self).awx_meta_vars()
        if self.project:
            for name in ('awx', 'tower'):
                r['{}_project_revision'.format(
                    name)] = self.project.scm_revision
        if self.job_template:
            for name in ('awx', 'tower'):
                r['{}_job_template_id'.format(name)] = self.job_template.pk
                r['{}_job_template_name'.format(name)] = self.job_template.name
        return r

    '''
    JobNotificationMixin
    '''

    def get_notification_templates(self):
        if not self.job_template:
            return NotificationTemplate.objects.none()
        return self.job_template.notification_templates

    def get_notification_friendly_name(self):
        return "Job"

    def _get_inventory_hosts(self,
                             only=('name', 'ansible_facts',
                                   'ansible_facts_modified', 'modified',
                                   'inventory_id')):
        if not self.inventory:
            return []
        return self.inventory.hosts.only(*only)

    def start_job_fact_cache(self,
                             destination,
                             modification_times,
                             timeout=None):
        destination = os.path.join(destination, 'facts')
        os.makedirs(destination, mode=0o700)
        hosts = self._get_inventory_hosts()
        if timeout is None:
            timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
        if timeout > 0:
            # exclude hosts with fact data older than `settings.ANSIBLE_FACT_CACHE_TIMEOUT` seconds
            timeout = now() - datetime.timedelta(seconds=timeout)
            hosts = hosts.filter(ansible_facts_modified__gte=timeout)
        for host in hosts:
            filepath = os.sep.join(map(str, [destination, host.name]))
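            # refuse host names that would resolve outside the facts directory (path traversal guard)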
            if not os.path.realpath(filepath).startswith(destination):
                system_tracking_logger.error(
                    'facts for host {} could not be cached'.format(
                        smart_str(host.name)))
                continue
            try:
                with codecs.open(filepath, 'w', encoding='utf-8') as f:
                    os.chmod(f.name, 0o600)
                    json.dump(host.ansible_facts, f)
            except IOError:
                system_tracking_logger.error(
                    'facts for host {} could not be cached'.format(
                        smart_str(host.name)))
                continue
            # make note of the time we wrote the file so we can check if it changed later
            modification_times[filepath] = os.path.getmtime(filepath)

    def finish_job_fact_cache(self, destination, modification_times):
        destination = os.path.join(destination, 'facts')
        for host in self._get_inventory_hosts():
            filepath = os.sep.join(map(str, [destination, host.name]))
            if not os.path.realpath(filepath).startswith(destination):
                system_tracking_logger.error(
                    'facts for host {} could not be cached'.format(
                        smart_str(host.name)))
                continue
            if os.path.exists(filepath):
                # If the file changed since we wrote it pre-playbook run...
                modified = os.path.getmtime(filepath)
                if modified > modification_times.get(filepath, 0):
                    with codecs.open(filepath, 'r', encoding='utf-8') as f:
                        try:
                            ansible_facts = json.load(f)
                        except ValueError:
                            continue
                        host.ansible_facts = ansible_facts
                        host.ansible_facts_modified = now()
                        if 'insights' in ansible_facts and 'system_id' in ansible_facts[
                                'insights']:
                            host.insights_system_id = ansible_facts[
                                'insights']['system_id']
                        host.save()
                        system_tracking_logger.info(
                            'New fact for inventory {} host {}'.format(
                                smart_str(host.inventory.name),
                                smart_str(host.name)),
                            extra=dict(inventory_id=host.inventory.id,
                                       host_name=host.name,
                                       ansible_facts=host.ansible_facts,
                                       ansible_facts_modified=host.
                                       ansible_facts_modified.isoformat(),
                                       job_id=self.id))
            else:
                # if the file goes missing, ansible removed it (likely via clear_facts)
                host.ansible_facts = {}
                host.ansible_facts_modified = now()
                system_tracking_logger.info(
                    'Facts cleared for inventory {} host {}'.format(
                        smart_str(host.inventory.name), smart_str(host.name)))
                host.save()
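The task_impact property above estimates a job's capacity cost: the host
count (split across slices for sliced jobs), capped by forks (defaulting to
5), plus one for the control process. A minimal worked sketch of the same
arithmetic with plain integers (the values are illustrative):

def task_impact(count_hosts, forks, job_slice_count=1, job_slice_number=1):
    if job_slice_count > 1:
        # integer division intentional, mirroring the slice split above
        count_hosts = (count_hosts + job_slice_count -
                       job_slice_number) // job_slice_count
    return min(count_hosts, 5 if forks == 0 else forks) + 1

assert task_impact(100, forks=0) == 6   # capped at the default of 5 forks
assert task_impact(100, forks=20) == 21
assert task_impact(10, forks=0, job_slice_count=3, job_slice_number=1) == 5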
Code example #15
File: mixins.py Project: cwdrunner/awx
class SurveyJobTemplateMixin(models.Model):
    class Meta:
        abstract = True

    survey_enabled = models.BooleanField(default=False)
    survey_spec = prevent_search(JSONField(
        blank=True,
        default=dict,
    ))
    ask_variables_on_launch = AskForField(blank=True,
                                          default=False,
                                          allows_field='extra_vars')

    def survey_password_variables(self):
        vars = []
        if self.survey_enabled and 'spec' in self.survey_spec:
            # Get variables that are type password
            for survey_element in self.survey_spec['spec']:
                if survey_element['type'] == 'password':
                    vars.append(survey_element['variable'])
        return vars

    @property
    def variables_needed_to_start(self):
        vars = []
        if self.survey_enabled and 'spec' in self.survey_spec:
            for survey_element in self.survey_spec['spec']:
                if survey_element['required']:
                    vars.append(survey_element['variable'])
        return vars

    def _update_unified_job_kwargs(self, create_kwargs, kwargs):
        '''
        Combine extra_vars with variable precedence order:
          JT extra_vars -> JT survey defaults -> runtime extra_vars

        :param create_kwargs: key-worded arguments to be updated and later used for creating unified job.
        :type create_kwargs: dict
        :param kwargs: request parameters used to override unified job template fields with runtime values.
        :type kwargs: dict
        :return: modified create_kwargs.
        :rtype: dict
        '''
        # Job Template extra_vars
        extra_vars = self.extra_vars_dict

        survey_defaults = {}

        # transform to dict
        if 'extra_vars' in kwargs:
            runtime_extra_vars = kwargs['extra_vars']
            runtime_extra_vars = parse_yaml_or_json(runtime_extra_vars)
        else:
            runtime_extra_vars = {}

        # Overwrite job template extra vars with survey default vars
        if self.survey_enabled and 'spec' in self.survey_spec:
            for survey_element in self.survey_spec.get("spec", []):
                default = survey_element.get('default')
                variable_key = survey_element.get('variable')

                if survey_element.get('type') == 'password':
                    if variable_key in runtime_extra_vars:
                        kw_value = runtime_extra_vars[variable_key]
                        if kw_value == '$encrypted$':
                            runtime_extra_vars.pop(variable_key)

                if default is not None:
                    decrypted_default = default
                    if (survey_element['type'] == "password"
                            and isinstance(decrypted_default, str)
                            and decrypted_default.startswith('$encrypted$')):
                        decrypted_default = decrypt_value(
                            get_encryption_key('value', pk=None),
                            decrypted_default)
                    errors = self._survey_element_validation(
                        survey_element, {variable_key: decrypted_default})
                    if not errors:
                        survey_defaults[variable_key] = default
        extra_vars.update(survey_defaults)

        # Overwrite job template extra vars with explicit job extra vars
        # and add on job extra vars
        extra_vars.update(runtime_extra_vars)
        create_kwargs['extra_vars'] = json.dumps(extra_vars)
        return create_kwargs

    def _survey_element_validation(self,
                                   survey_element,
                                   data,
                                   validate_required=True):
        # Don't apply validation to the `$encrypted$` placeholder; the decrypted
        # default (if any) will be validated against instead
        errors = []

        if (survey_element['type'] == "password"):
            password_value = data.get(survey_element['variable'])
            if (isinstance(password_value, str)
                    and password_value == '$encrypted$'):
                if survey_element.get(
                        'default') is None and survey_element['required']:
                    if validate_required:
                        errors.append("'%s' value missing" %
                                      survey_element['variable'])
                return errors

        if survey_element['variable'] not in data and survey_element[
                'required']:
            if validate_required:
                errors.append("'%s' value missing" %
                              survey_element['variable'])
        elif survey_element['type'] in ["textarea", "text", "password"]:
            if survey_element['variable'] in data:
                if not isinstance(data[survey_element['variable']], str):
                    errors.append(
                        "Value %s for '%s' expected to be a string." %
                        (data[survey_element['variable']],
                         survey_element['variable']))
                    return errors

                if 'min' in survey_element and survey_element['min'] not in [
                        "", None
                ] and len(data[survey_element['variable']]) < int(
                        survey_element['min']):
                    errors.append(
                        "'%s' value %s is too small (length is %s must be at least %s)."
                        % (survey_element['variable'],
                           data[survey_element['variable']],
                           len(data[survey_element['variable']]),
                           survey_element['min']))
                if 'max' in survey_element and survey_element['max'] not in [
                        "", None
                ] and len(data[survey_element['variable']]) > int(
                        survey_element['max']):
                    errors.append(
                        "'%s' value %s is too large (must be no more than %s)."
                        % (survey_element['variable'],
                           data[survey_element['variable']],
                           survey_element['max']))

        elif survey_element['type'] == 'integer':
            if survey_element['variable'] in data:
                if type(data[survey_element['variable']]) != int:
                    errors.append(
                        "Value %s for '%s' expected to be an integer." %
                        (data[survey_element['variable']],
                         survey_element['variable']))
                    return errors
                if 'min' in survey_element and survey_element['min'] not in ["", None] and survey_element['variable'] in data and \
                   data[survey_element['variable']] < int(survey_element['min']):
                    errors.append(
                        "'%s' value %s is too small (must be at least %s)." %
                        (survey_element['variable'],
                         data[survey_element['variable']],
                         survey_element['min']))
                if 'max' in survey_element and survey_element['max'] not in ["", None] and survey_element['variable'] in data and \
                   data[survey_element['variable']] > int(survey_element['max']):
                    errors.append(
                        "'%s' value %s is too large (must be no more than %s)."
                        % (survey_element['variable'],
                           data[survey_element['variable']],
                           survey_element['max']))
        elif survey_element['type'] == 'float':
            if survey_element['variable'] in data:
                if type(data[survey_element['variable']]) not in (float, int):
                    errors.append(
                        "Value %s for '%s' expected to be a numeric type." %
                        (data[survey_element['variable']],
                         survey_element['variable']))
                    return errors
                if 'min' in survey_element and survey_element['min'] not in [
                        "", None
                ] and data[survey_element['variable']] < float(
                        survey_element['min']):
                    errors.append(
                        "'%s' value %s is too small (must be at least %s)." %
                        (survey_element['variable'],
                         data[survey_element['variable']],
                         survey_element['min']))
                if 'max' in survey_element and survey_element['max'] not in [
                        "", None
                ] and data[survey_element['variable']] > float(
                        survey_element['max']):
                    errors.append(
                        "'%s' value %s is too large (must be no more than %s)."
                        % (survey_element['variable'],
                           data[survey_element['variable']],
                           survey_element['max']))
        elif survey_element['type'] == 'multiselect':
            if survey_element['variable'] in data:
                if type(data[survey_element['variable']]) != list:
                    errors.append("'%s' value is expected to be a list." %
                                  survey_element['variable'])
                else:
                    choice_list = copy(survey_element['choices'])
                    if isinstance(choice_list, str):
                        choice_list = [
                            choice for choice in choice_list.splitlines()
                            if choice.strip() != ''
                        ]
                    for val in data[survey_element['variable']]:
                        if val not in choice_list:
                            errors.append(
                                "Value %s for '%s' expected to be one of %s." %
                                (val, survey_element['variable'], choice_list))
        elif survey_element['type'] == 'multiplechoice':
            choice_list = copy(survey_element['choices'])
            if isinstance(choice_list, str):
                choice_list = [
                    choice for choice in choice_list.splitlines()
                    if choice.strip() != ''
                ]
            if survey_element['variable'] in data:
                if data[survey_element['variable']] not in choice_list:
                    errors.append(
                        "Value %s for '%s' expected to be one of %s." %
                        (data[survey_element['variable']],
                         survey_element['variable'], choice_list))
        return errors

    def _accept_or_ignore_variables(self,
                                    data,
                                    errors=None,
                                    _exclude_errors=(),
                                    extra_passwords=None):
        survey_is_enabled = (self.survey_enabled and self.survey_spec)
        extra_vars = data.copy()
        if errors is None:
            errors = {}
        rejected = {}
        accepted = {}

        if survey_is_enabled:
            # Check for data violation of survey rules
            survey_errors = []
            for survey_element in self.survey_spec.get("spec", []):
                key = survey_element.get('variable', None)
                value = data.get(key, None)
                validate_required = 'required' not in _exclude_errors
                if extra_passwords and key in extra_passwords and is_encrypted(
                        value):
                    element_errors = self._survey_element_validation(
                        survey_element, {
                            key:
                            decrypt_value(get_encryption_key('value', pk=None),
                                          value)
                        },
                        validate_required=validate_required)
                else:
                    element_errors = self._survey_element_validation(
                        survey_element,
                        data,
                        validate_required=validate_required)

                if element_errors:
                    survey_errors += element_errors
                    if key is not None and key in extra_vars:
                        rejected[key] = extra_vars.pop(key)
                elif key in extra_vars:
                    accepted[key] = extra_vars.pop(key)
            if survey_errors:
                errors['variables_needed_to_start'] = survey_errors

        if self.ask_variables_on_launch:
            # We can accept all variables
            accepted.update(extra_vars)
            extra_vars = {}

        if extra_vars:
            # Prune the prompted variables for those identical to template
            tmp_extra_vars = self.extra_vars_dict
            for key in (set(tmp_extra_vars.keys()) & set(extra_vars.keys())):
                if tmp_extra_vars[key] == extra_vars[key]:
                    extra_vars.pop(key)

        if extra_vars:
            # Leftover extra_vars, keys provided that are not allowed
            rejected.update(extra_vars)
            # ignored variables does not block manual launch
            if 'prompts' not in _exclude_errors:
                errors['extra_vars'] = [
                    _('Variables {list_of_keys} are not allowed on launch. Check the Prompt on Launch setting '
                      + 'on the {model_name} to include Extra Variables.').
                    format(list_of_keys=', '.join(
                        [str(key) for key in extra_vars.keys()]),
                           model_name=self._meta.verbose_name.title())
                ]

        return (accepted, rejected, errors)

    @staticmethod
    def pivot_spec(spec):
        '''
        Utility method that will return a dictionary keyed off variable names
        '''
        pivoted = {}
        for element_data in spec.get('spec', []):
            if 'variable' in element_data:
                pivoted[element_data['variable']] = element_data
        return pivoted

    def survey_variable_validation(self, data):
        errors = []
        if not self.survey_enabled:
            return errors
        if 'name' not in self.survey_spec:
            errors.append("'name' missing from survey spec.")
        if 'description' not in self.survey_spec:
            errors.append("'description' missing from survey spec.")
        for survey_element in self.survey_spec.get("spec", []):
            errors += self._survey_element_validation(survey_element, data)
        return errors

    def display_survey_spec(self):
        '''
        Hide encrypted default passwords in survey specs
        '''
        survey_spec = deepcopy(self.survey_spec) if self.survey_spec else {}
        for field in survey_spec.get('spec', []):
            if field.get('type') == 'password':
                if 'default' in field and field['default']:
                    field['default'] = '$encrypted$'
        return survey_spec
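_update_unified_job_kwargs above implements the documented precedence chain:
template extra_vars are overwritten by validated survey defaults, which are
in turn overwritten by runtime extra_vars. A minimal sketch of that chain
with plain dicts (combine_extra_vars is illustrative; validation and
password decryption are omitted):

def combine_extra_vars(jt_extra_vars, survey_defaults, runtime_extra_vars):
    extra_vars = dict(jt_extra_vars)
    extra_vars.update(survey_defaults)     # survey defaults beat the template
    extra_vars.update(runtime_extra_vars)  # runtime values beat everything
    return extra_vars

result = combine_extra_vars(
    {'env': 'dev', 'retries': 1},
    {'env': 'staging'},
    {'retries': 3},
)
assert result == {'env': 'staging', 'retries': 3}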
Code example #16
class NotificationTemplate(CommonModelNameNotUnique):

    NOTIFICATION_TYPES = [('email', _('Email'), CustomEmailBackend),
                          ('slack', _('Slack'), SlackBackend),
                          ('twilio', _('Twilio'), TwilioBackend),
                          ('pagerduty', _('Pagerduty'), PagerDutyBackend),
                          ('hipchat', _('HipChat'), HipChatBackend),
                          ('webhook', _('Webhook'), WebhookBackend),
                          ('mattermost', _('Mattermost'), MattermostBackend),
                          ('irc', _('IRC'), IrcBackend)]
    NOTIFICATION_TYPE_CHOICES = [(x[0], x[1]) for x in NOTIFICATION_TYPES]
    CLASS_FOR_NOTIFICATION_TYPE = dict([(x[0], x[2])
                                        for x in NOTIFICATION_TYPES])

    class Meta:
        app_label = 'main'
        unique_together = ('organization', 'name')

    organization = models.ForeignKey(
        'Organization',
        blank=False,
        null=True,
        on_delete=models.CASCADE,
        related_name='notification_templates',
    )

    notification_type = models.CharField(
        max_length=32,
        choices=NOTIFICATION_TYPE_CHOICES,
    )

    notification_configuration = JSONField(blank=False)

    def get_absolute_url(self, request=None):
        return reverse('api:notification_template_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    @property
    def notification_class(self):
        return self.CLASS_FOR_NOTIFICATION_TYPE[self.notification_type]

    def save(self, *args, **kwargs):
        new_instance = not bool(self.pk)
        update_fields = kwargs.get('update_fields', [])
        for field in filter(
                lambda x: self.notification_class.init_parameters[x]['type'] ==
                "password", self.notification_class.init_parameters):
            if self.notification_configuration[field].startswith(
                    "$encrypted$"):
                continue
            if new_instance:
                value = self.notification_configuration[field]
                setattr(self, '_saved_{}_{}'.format("config", field), value)
                self.notification_configuration[field] = ''
            else:
                encrypted = encrypt_field(self,
                                          'notification_configuration',
                                          subfield=field,
                                          skip_utf8=True)
                self.notification_configuration[field] = encrypted
                if 'notification_configuration' not in update_fields:
                    update_fields.append('notification_configuration')
        super(NotificationTemplate, self).save(*args, **kwargs)
        if new_instance:
            update_fields = []
            for field in filter(
                    lambda x: self.notification_class.init_parameters[x][
                        'type'] == "password",
                    self.notification_class.init_parameters):
                saved_value = getattr(self,
                                      '_saved_{}_{}'.format("config",
                                                            field), '')
                self.notification_configuration[field] = saved_value
                if 'notification_configuration' not in update_fields:
                    update_fields.append('notification_configuration')
            self.save(update_fields=update_fields)

    @property
    def recipients(self):
        return self.notification_configuration[
            self.notification_class.recipient_parameter]

    def generate_notification(self, subject, message):
        notification = Notification(notification_template=self,
                                    notification_type=self.notification_type,
                                    recipients=smart_str(self.recipients),
                                    subject=subject,
                                    body=message)
        notification.save()
        return notification

    def send(self, subject, body):
        for field in filter(
                lambda x: self.notification_class.init_parameters[x]['type'] ==
                "password", self.notification_class.init_parameters):
            self.notification_configuration[field] = decrypt_field(
                self, 'notification_configuration', subfield=field)
        recipients = self.notification_configuration.pop(
            self.notification_class.recipient_parameter)
        if not isinstance(recipients, list):
            recipients = [recipients]
        sender = self.notification_configuration.pop(
            self.notification_class.sender_parameter, None)
        backend_obj = self.notification_class(
            **self.notification_configuration)
        notification_obj = EmailMessage(subject, backend_obj.format_body(body),
                                        sender, recipients)
        with set_environ(**settings.AWX_TASK_ENV):
            return backend_obj.send_messages([notification_obj])

    def display_notification_configuration(self):
        field_val = self.notification_configuration.copy()
        for field in self.notification_class.init_parameters:
            if field in field_val and force_text(
                    field_val[field]).startswith('$encrypted$'):
                field_val[field] = '$encrypted$'
        return field_val
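display_notification_configuration above returns a copy of the configuration
with any value already stored in encrypted form collapsed to a placeholder.
A minimal sketch of that masking over plain dicts (mask_encrypted and the
parameter spec are illustrative, not part of AWX):

def mask_encrypted(configuration, init_parameters):
    shown = dict(configuration)
    for field in init_parameters:
        if field in shown and str(shown[field]).startswith('$encrypted$'):
            shown[field] = '$encrypted$'  # collapse ciphertext to a marker
    return shown

params = {'username': {'type': 'string'}, 'password': {'type': 'password'}}
cfg = {'username': 'bot', 'password': '$encrypted$AESCBC$...'}
assert mask_encrypted(cfg, params)['password'] == '$encrypted$'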
Code example #17
class ActivityStream(models.Model):
    """
    Model used to describe activity stream (audit) events
    """
    class Meta:
        app_label = 'main'
        ordering = ('pk', )

    OPERATION_CHOICES = [
        ('create', _('Entity Created')),
        ('update', _("Entity Updated")),
        ('delete', _("Entity Deleted")),
        ('associate', _("Entity Associated with another Entity")),
        ('disassociate', _("Entity was Disassociated with another Entity")),
    ]

    actor = models.ForeignKey('auth.User',
                              null=True,
                              on_delete=models.SET_NULL,
                              related_name='activity_stream')
    operation = models.CharField(max_length=13, choices=OPERATION_CHOICES)
    timestamp = models.DateTimeField(auto_now_add=True)
    changes = accepts_json(models.TextField(blank=True))
    deleted_actor = JSONField(null=True)
    action_node = models.CharField(
        blank=True,
        default='',
        editable=False,
        max_length=512,
        help_text=_("The cluster node the activity took place on."),
    )

    object_relationship_type = models.TextField(blank=True)
    object1 = models.TextField()
    object2 = models.TextField()

    user = models.ManyToManyField("auth.User", blank=True)
    organization = models.ManyToManyField("Organization", blank=True)
    inventory = models.ManyToManyField("Inventory", blank=True)
    host = models.ManyToManyField("Host", blank=True)
    group = models.ManyToManyField("Group", blank=True)
    inventory_source = models.ManyToManyField("InventorySource", blank=True)
    inventory_update = models.ManyToManyField("InventoryUpdate", blank=True)
    credential = models.ManyToManyField("Credential", blank=True)
    credential_type = models.ManyToManyField("CredentialType", blank=True)
    team = models.ManyToManyField("Team", blank=True)
    project = models.ManyToManyField("Project", blank=True)
    project_update = models.ManyToManyField("ProjectUpdate", blank=True)
    execution_environment = models.ManyToManyField("ExecutionEnvironment",
                                                   blank=True)
    job_template = models.ManyToManyField("JobTemplate", blank=True)
    job = models.ManyToManyField("Job", blank=True)
    workflow_job_template_node = models.ManyToManyField(
        "WorkflowJobTemplateNode", blank=True)
    workflow_job_node = models.ManyToManyField("WorkflowJobNode", blank=True)
    workflow_job_template = models.ManyToManyField("WorkflowJobTemplate",
                                                   blank=True)
    workflow_job = models.ManyToManyField("WorkflowJob", blank=True)
    workflow_approval_template = models.ManyToManyField(
        "WorkflowApprovalTemplate", blank=True)
    workflow_approval = models.ManyToManyField("WorkflowApproval", blank=True)
    unified_job_template = models.ManyToManyField(
        "UnifiedJobTemplate",
        blank=True,
        related_name='activity_stream_as_unified_job_template+')
    unified_job = models.ManyToManyField(
        "UnifiedJob",
        blank=True,
        related_name='activity_stream_as_unified_job+')
    ad_hoc_command = models.ManyToManyField("AdHocCommand", blank=True)
    schedule = models.ManyToManyField("Schedule", blank=True)
    execution_environment = models.ManyToManyField("ExecutionEnvironment",
                                                   blank=True)
    notification_template = models.ManyToManyField("NotificationTemplate",
                                                   blank=True)
    notification = models.ManyToManyField("Notification", blank=True)
    label = models.ManyToManyField("Label", blank=True)
    role = models.ManyToManyField("Role", blank=True)
    instance = models.ManyToManyField("Instance", blank=True)
    instance_group = models.ManyToManyField("InstanceGroup", blank=True)
    o_auth2_application = models.ManyToManyField("OAuth2Application",
                                                 blank=True)
    o_auth2_access_token = models.ManyToManyField("OAuth2AccessToken",
                                                  blank=True)

    setting = JSONField(blank=True)

    def __str__(self):
        operation = self.operation if 'operation' in self.__dict__ else '_delayed_'
        if 'timestamp' in self.__dict__:
            if self.timestamp:
                timestamp = self.timestamp.isoformat()
            else:
                timestamp = self.timestamp
        else:
            timestamp = '_delayed_'
        return u'%s-%s-pk=%s' % (operation, timestamp, self.pk)

    def get_absolute_url(self, request=None):
        return reverse('api:activity_stream_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    def save(self, *args, **kwargs):
        # Store denormalized actor metadata so that we retain it for accounting
        # purposes when the User row is deleted.
        if self.actor:
            self.deleted_actor = {
                'id': self.actor_id,
                'username': smart_str(self.actor.username),
                'first_name': smart_str(self.actor.first_name),
                'last_name': smart_str(self.actor.last_name),
            }
            if 'update_fields' in kwargs and 'deleted_actor' not in kwargs[
                    'update_fields']:
                kwargs['update_fields'].append('deleted_actor')

        hostname_char_limit = self._meta.get_field('action_node').max_length
        self.action_node = settings.CLUSTER_HOST_ID[:hostname_char_limit]

        super(ActivityStream, self).save(*args, **kwargs)
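ActivityStream.save above denormalizes actor details into deleted_actor so
audit rows survive user deletion, taking care to extend update_fields when a
partial save is in flight. A minimal sketch of that update_fields bookkeeping
(touch_field is an illustrative helper, not part of AWX):

def touch_field(kwargs, field_name):
    # when a denormalized field is touched during a partial save, it must be
    # appended to update_fields, or Django will silently skip writing it
    update_fields = kwargs.get('update_fields')
    if update_fields is not None and field_name not in update_fields:
        update_fields.append(field_name)
    return kwargs

kwargs = {'update_fields': ['operation']}
touch_field(kwargs, 'deleted_actor')
assert kwargs['update_fields'] == ['operation', 'deleted_actor']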
Code example #18
File: mixins.py Project: AlexBaily/awx
class SurveyJobTemplateMixin(models.Model):
    class Meta:
        abstract = True

    survey_enabled = models.BooleanField(default=False)
    survey_spec = prevent_search(JSONField(
        blank=True,
        default=dict,
    ))
    ask_variables_on_launch = AskForField(blank=True,
                                          default=False,
                                          allows_field='extra_vars')

    def survey_password_variables(self):
        vars = []
        if self.survey_enabled and 'spec' in self.survey_spec:
            # Get variables that are type password
            for survey_element in self.survey_spec['spec']:
                if survey_element['type'] == 'password':
                    vars.append(survey_element['variable'])
        return vars

    @property
    def variables_needed_to_start(self):
        vars = []
        if self.survey_enabled and 'spec' in self.survey_spec:
            for survey_element in self.survey_spec['spec']:
                if survey_element['required']:
                    vars.append(survey_element['variable'])
        return vars

    def _update_unified_job_kwargs(self, create_kwargs, kwargs):
        '''
        Combine extra_vars with variable precedence order:
          JT extra_vars -> JT survey defaults -> runtime extra_vars

        :param create_kwargs: key-worded arguments to be updated and later used for creating unified job.
        :type create_kwargs: dict
        :param kwargs: request parameters used to override unified job template fields with runtime values.
        :type kwargs: dict
        :return: modified create_kwargs.
        :rtype: dict
        '''
        # Job Template extra_vars
        extra_vars = self.extra_vars_dict

        survey_defaults = {}

        # transform to dict
        if 'extra_vars' in kwargs:
            runtime_extra_vars = kwargs['extra_vars']
            runtime_extra_vars = parse_yaml_or_json(runtime_extra_vars)
        else:
            runtime_extra_vars = {}

        # Overwrite job template extra_vars with survey default vars
        if self.survey_enabled and 'spec' in self.survey_spec:
            for survey_element in self.survey_spec.get("spec", []):
                default = survey_element.get('default')
                variable_key = survey_element.get('variable')

                if survey_element.get('type') == 'password':
                    if variable_key in runtime_extra_vars and default:
                        kw_value = runtime_extra_vars[variable_key]
                        if kw_value.startswith(
                                '$encrypted$') and kw_value != default:
                            runtime_extra_vars[variable_key] = default

                if default is not None:
                    data = {variable_key: default}
                    errors = self._survey_element_validation(
                        survey_element, data)
                    if not errors:
                        survey_defaults[variable_key] = default
        extra_vars.update(survey_defaults)

        # Overwrite job template extra vars with explicit job extra vars
        # and add on job extra vars
        extra_vars.update(runtime_extra_vars)
        create_kwargs['extra_vars'] = json.dumps(extra_vars)
        return create_kwargs

    def _survey_element_validation(self, survey_element, data):
        errors = []
        if survey_element['variable'] not in data and survey_element[
                'required']:
            errors.append("'%s' value missing" % survey_element['variable'])
        elif survey_element['type'] in ["textarea", "text", "password"]:
            if survey_element['variable'] in data:
                if not isinstance(data[survey_element['variable']], str):
                    errors.append(
                        "Value %s for '%s' expected to be a string." %
                        (data[survey_element['variable']],
                         survey_element['variable']))
                    return errors

                if 'min' in survey_element and survey_element['min'] not in [
                        "", None
                ] and len(data[survey_element['variable']]) < int(
                        survey_element['min']):
                    errors.append(
                        "'%s' value %s is too small (length is %s, must be at least %s)."
                        % (survey_element['variable'],
                           data[survey_element['variable']],
                           len(data[survey_element['variable']]),
                           survey_element['min']))
                if 'max' in survey_element and survey_element['max'] not in [
                        "", None
                ] and len(data[survey_element['variable']]) > int(
                        survey_element['max']):
                    errors.append(
                        "'%s' value %s is too large (must be no more than %s)."
                        % (survey_element['variable'],
                           data[survey_element['variable']],
                           survey_element['max']))

        elif survey_element['type'] == 'integer':
            if survey_element['variable'] in data:
                if type(data[survey_element['variable']]) != int:
                    errors.append(
                        "Value %s for '%s' expected to be an integer." %
                        (data[survey_element['variable']],
                         survey_element['variable']))
                    return errors
                if 'min' in survey_element and survey_element['min'] not in ["", None] and survey_element['variable'] in data and \
                   data[survey_element['variable']] < int(survey_element['min']):
                    errors.append(
                        "'%s' value %s is too small (must be at least %s)." %
                        (survey_element['variable'],
                         data[survey_element['variable']],
                         survey_element['min']))
                if 'max' in survey_element and survey_element['max'] not in ["", None] and survey_element['variable'] in data and \
                   data[survey_element['variable']] > int(survey_element['max']):
                    errors.append(
                        "'%s' value %s is too large (must be no more than %s)."
                        % (survey_element['variable'],
                           data[survey_element['variable']],
                           survey_element['max']))
        elif survey_element['type'] == 'float':
            if survey_element['variable'] in data:
                if type(data[survey_element['variable']]) not in (float, int):
                    errors.append(
                        "Value %s for '%s' expected to be a numeric type." %
                        (data[survey_element['variable']],
                         survey_element['variable']))
                    return errors
                if 'min' in survey_element and survey_element['min'] not in [
                        "", None
                ] and data[survey_element['variable']] < float(
                        survey_element['min']):
                    errors.append(
                        "'%s' value %s is too small (must be at least %s)." %
                        (survey_element['variable'],
                         data[survey_element['variable']],
                         survey_element['min']))
                if 'max' in survey_element and survey_element['max'] not in [
                        "", None
                ] and data[survey_element['variable']] > float(
                        survey_element['max']):
                    errors.append(
                        "'%s' value %s is too large (must be no more than %s)."
                        % (survey_element['variable'],
                           data[survey_element['variable']],
                           survey_element['max']))
        elif survey_element['type'] == 'multiselect':
            if survey_element['variable'] in data:
                if type(data[survey_element['variable']]) != list:
                    errors.append("'%s' value is expected to be a list." %
                                  survey_element['variable'])
                else:
                    choice_list = copy(survey_element['choices'])
                    if isinstance(choice_list, str):
                        choice_list = choice_list.split('\n')
                    for val in data[survey_element['variable']]:
                        if val not in choice_list:
                            errors.append(
                                "Value %s for '%s' expected to be one of %s." %
                                (val, survey_element['variable'], choice_list))
        elif survey_element['type'] == 'multiplechoice':
            choice_list = copy(survey_element['choices'])
            if isinstance(choice_list, str):
                choice_list = choice_list.split('\n')
            if survey_element['variable'] in data:
                if data[survey_element['variable']] not in choice_list:
                    errors.append(
                        "Value %s for '%s' expected to be one of %s." %
                        (data[survey_element['variable']],
                         survey_element['variable'], choice_list))
        return errors

    def _accept_or_ignore_variables(self,
                                    data,
                                    errors=None,
                                    _exclude_errors=()):
        survey_is_enabled = (self.survey_enabled and self.survey_spec)
        extra_vars = data.copy()
        if errors is None:
            errors = {}
        rejected = {}
        accepted = {}

        if survey_is_enabled:
            # Check for data violation of survey rules
            survey_errors = []
            for survey_element in self.survey_spec.get("spec", []):
                element_errors = self._survey_element_validation(
                    survey_element, data)
                key = survey_element.get('variable', None)

                if element_errors:
                    survey_errors += element_errors
                    if key is not None and key in extra_vars:
                        rejected[key] = extra_vars.pop(key)
                elif key in extra_vars:
                    accepted[key] = extra_vars.pop(key)
            if survey_errors:
                errors['variables_needed_to_start'] = survey_errors

        if self.ask_variables_on_launch:
            # We can accept all variables
            accepted.update(extra_vars)
            extra_vars = {}

        if extra_vars:
            # Leftover extra_vars, keys provided that are not allowed
            rejected.update(extra_vars)
            # ignored variables do not block manual launch
            if 'prompts' not in _exclude_errors:
                errors['extra_vars'] = [
                    _('Variables {list_of_keys} are not allowed on launch.').
                    format(list_of_keys=', '.join(extra_vars.keys()))
                ]

        return (accepted, rejected, errors)
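
The precedence order that _update_unified_job_kwargs implements can be reproduced without any Django machinery. A minimal sketch with invented variable names (not part of the AWX source):

import json

jt_extra_vars = {'region': 'us-east-1', 'retries': 1}   # Job Template extra_vars
survey_defaults = {'retries': 3}                        # validated survey defaults
runtime_extra_vars = {'region': 'eu-west-1'}            # supplied at launch time

# Same merge order as _update_unified_job_kwargs:
# JT extra_vars -> survey defaults -> runtime extra_vars
extra_vars = dict(jt_extra_vars)
extra_vars.update(survey_defaults)
extra_vars.update(runtime_extra_vars)

print(json.dumps(extra_vars))   # {"region": "eu-west-1", "retries": 3}
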
Code example #19
File: jobs.py  Project: rbywater/awx
class LaunchTimeConfig(BaseModel):
    '''
    Common model for all objects that save details of a saved launch config
    WFJT / WJ nodes, schedules, and job launch configs (not all implemented yet)
    '''
    class Meta:
        abstract = True

    # Prompting-related fields that have to be handled as special cases
    credentials = models.ManyToManyField('Credential',
                                         related_name='%(class)ss')
    inventory = models.ForeignKey(
        'Inventory',
        related_name='%(class)ss',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    extra_data = JSONField(blank=True, default=dict)
    survey_passwords = prevent_search(
        JSONField(
            blank=True,
            default=dict,
            editable=False,
        ))
    # All standard fields are stored in this dictionary field
    # This is a solution to the nullable CharField problem, specific to prompting
    char_prompts = JSONField(blank=True, default=dict)

    def prompts_dict(self, display=False):
        data = {}
        for prompt_name in JobTemplate.get_ask_mapping().keys():
            try:
                field = self._meta.get_field(prompt_name)
            except FieldDoesNotExist:
                field = None
            if isinstance(field, models.ManyToManyField):
                if not self.pk:
                    continue  # unsaved object can't have related many-to-many
                prompt_val = set(getattr(self, prompt_name).all())
                if len(prompt_val) > 0:
                    data[prompt_name] = prompt_val
            elif prompt_name == 'extra_vars':
                if self.extra_data:
                    if display:
                        data[prompt_name] = self.display_extra_data()
                    else:
                        data[prompt_name] = self.extra_data
                if self.survey_passwords and not display:
                    data['survey_passwords'] = self.survey_passwords
            else:
                prompt_val = getattr(self, prompt_name)
                if prompt_val is not None:
                    data[prompt_name] = prompt_val
        return data

    def display_extra_data(self):
        '''
        Hides fields marked as passwords in survey.
        '''
        if self.survey_passwords:
            extra_data = parse_yaml_or_json(self.extra_data).copy()
            for key, value in self.survey_passwords.items():
                if key in extra_data:
                    extra_data[key] = value
            return extra_data
        else:
            return self.extra_data

    @property
    def _credential(self):
        '''
        Only used for workflow nodes to support backward compatibility.
        '''
        try:
            return [
                cred for cred in self.credentials.all()
                if cred.credential_type.kind == 'ssh'
            ][0]
        except IndexError:
            return None

    @property
    def credential(self):
        '''
        Returns an integer so it can be used as IntegerField in serializer
        '''
        cred = self._credential
        if cred is not None:
            return cred.pk
        else:
            return None
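
The masking done by display_extra_data amounts to overlaying the stored survey_passwords mapping (whose values are typically the '$encrypted$' placeholder) onto a copy of extra_data. A stand-alone sketch with invented data:

extra_data = {'user': 'admin', 'vault_pw': 'hunter2'}
survey_passwords = {'vault_pw': '$encrypted$'}   # variable name -> display value

display = dict(extra_data)                       # copy; never mutate the original
for key, value in survey_passwords.items():
    if key in display:
        display[key] = value

print(display)   # {'user': 'admin', 'vault_pw': '$encrypted$'}
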
Code example #20
File: notifications.py  Project: srflaxu40/awx
class NotificationTemplate(CommonModelNameNotUnique):

    NOTIFICATION_TYPES = [('email', _('Email'), CustomEmailBackend),
                          ('slack', _('Slack'), SlackBackend),
                          ('twilio', _('Twilio'), TwilioBackend),
                          ('pagerduty', _('Pagerduty'), PagerDutyBackend),
                          ('grafana', _('Grafana'), GrafanaBackend),
                          ('hipchat', _('HipChat'), HipChatBackend),
                          ('webhook', _('Webhook'), WebhookBackend),
                          ('mattermost', _('Mattermost'), MattermostBackend),
                          ('rocketchat', _('Rocket.Chat'), RocketChatBackend),
                          ('irc', _('IRC'), IrcBackend)]
    NOTIFICATION_TYPE_CHOICES = sorted([(x[0], x[1])
                                        for x in NOTIFICATION_TYPES])
    CLASS_FOR_NOTIFICATION_TYPE = dict([(x[0], x[2])
                                        for x in NOTIFICATION_TYPES])

    class Meta:
        app_label = 'main'
        unique_together = ('organization', 'name')
        ordering = ("name", )

    organization = models.ForeignKey(
        'Organization',
        blank=False,
        null=True,
        on_delete=models.CASCADE,
        related_name='notification_templates',
    )

    notification_type = models.CharField(
        max_length=32,
        choices=NOTIFICATION_TYPE_CHOICES,
    )

    notification_configuration = prevent_search(JSONField(blank=False))

    def default_messages():
        return {
            'started': None,
            'success': None,
            'error': None,
            'workflow_approval': None
        }

    messages = JSONField(
        null=True,
        blank=True,
        default=default_messages,
        help_text=_('Optional custom messages for notification template.'))

    def has_message(self, condition):
        potential_template = self.messages.get(condition, {})
        if potential_template == {}:
            return False
        if potential_template.get('message', {}) == {}:
            return False
        return True

    def get_message(self, condition):
        return self.messages.get(condition, {})

    def get_absolute_url(self, request=None):
        return reverse('api:notification_template_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    @property
    def notification_class(self):
        return self.CLASS_FOR_NOTIFICATION_TYPE[self.notification_type]

    def save(self, *args, **kwargs):
        new_instance = not bool(self.pk)
        update_fields = kwargs.get('update_fields', [])

        # preserve existing notification messages if not overwritten by new messages
        if not new_instance:
            old_nt = NotificationTemplate.objects.get(pk=self.id)
            old_messages = old_nt.messages
            new_messages = self.messages

            def merge_messages(local_old_messages, local_new_messages,
                               local_event):
                if local_new_messages.get(local_event,
                                          {}) and local_old_messages.get(
                                              local_event, {}):
                    local_old_event_msgs = local_old_messages[local_event]
                    local_new_event_msgs = local_new_messages[local_event]
                    for msg_type in ['message', 'body']:
                        if msg_type not in local_new_event_msgs and local_old_event_msgs.get(
                                msg_type, None):
                            local_new_event_msgs[
                                msg_type] = local_old_event_msgs[msg_type]

            if old_messages is not None and new_messages is not None:
                for event in ('started', 'success', 'error',
                              'workflow_approval'):
                    if not new_messages.get(event, {}) and old_messages.get(
                            event, {}):
                        new_messages[event] = old_messages[event]
                        continue

                    if event == 'workflow_approval' and old_messages.get(
                            'workflow_approval', None):
                        new_messages.setdefault('workflow_approval', {})
                        for subevent in ('running', 'approved', 'timed_out',
                                         'denied'):
                            old_wfa_messages = old_messages[
                                'workflow_approval']
                            new_wfa_messages = new_messages[
                                'workflow_approval']
                            if not new_wfa_messages.get(
                                    subevent, {}) and old_wfa_messages.get(
                                        subevent, {}):
                                new_wfa_messages[subevent] = old_wfa_messages[
                                    subevent]
                                continue
                            if old_wfa_messages:
                                merge_messages(old_wfa_messages,
                                               new_wfa_messages, subevent)
                    else:
                        merge_messages(old_messages, new_messages, event)
                    new_messages.setdefault(event, None)

        for field in filter(
                lambda x: self.notification_class.init_parameters[x]['type'] ==
                "password", self.notification_class.init_parameters):
            if self.notification_configuration[field].startswith(
                    "$encrypted$"):
                continue
            if new_instance:
                value = self.notification_configuration[field]
                setattr(self, '_saved_{}_{}'.format("config", field), value)
                self.notification_configuration[field] = ''
            else:
                encrypted = encrypt_field(self,
                                          'notification_configuration',
                                          subfield=field)
                self.notification_configuration[field] = encrypted
                if 'notification_configuration' not in update_fields:
                    update_fields.append('notification_configuration')
        super(NotificationTemplate, self).save(*args, **kwargs)
        if new_instance:
            update_fields = []
            for field in filter(
                    lambda x: self.notification_class.init_parameters[x][
                        'type'] == "password",
                    self.notification_class.init_parameters):
                saved_value = getattr(self,
                                      '_saved_{}_{}'.format("config",
                                                            field), '')
                self.notification_configuration[field] = saved_value
                if 'notification_configuration' not in update_fields:
                    update_fields.append('notification_configuration')
            self.save(update_fields=update_fields)

    @property
    def recipients(self):
        return self.notification_configuration[
            self.notification_class.recipient_parameter]

    def generate_notification(self, msg, body):
        notification = Notification(notification_template=self,
                                    notification_type=self.notification_type,
                                    recipients=smart_str(self.recipients),
                                    subject=msg,
                                    body=body)
        notification.save()
        return notification

    def send(self, subject, body):
        for field in filter(
                lambda x: self.notification_class.init_parameters[x]['type'] ==
                "password", self.notification_class.init_parameters):
            if field in self.notification_configuration:
                self.notification_configuration[field] = decrypt_field(
                    self, 'notification_configuration', subfield=field)
        recipients = self.notification_configuration.pop(
            self.notification_class.recipient_parameter)
        if not isinstance(recipients, list):
            recipients = [recipients]
        sender = self.notification_configuration.pop(
            self.notification_class.sender_parameter, None)
        notification_configuration = deepcopy(self.notification_configuration)
        for field, params in self.notification_class.init_parameters.items():
            if field not in notification_configuration:
                if 'default' in params:
                    notification_configuration[field] = params['default']
        backend_obj = self.notification_class(**notification_configuration)
        notification_obj = EmailMessage(subject, backend_obj.format_body(body),
                                        sender, recipients)
        with set_environ(**settings.AWX_TASK_ENV):
            return backend_obj.send_messages([notification_obj])

    def display_notification_configuration(self):
        field_val = self.notification_configuration.copy()
        for field in self.notification_class.init_parameters:
            if field in field_val and force_text(
                    field_val[field]).startswith('$encrypted$'):
                field_val[field] = '$encrypted$'
        return field_val
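
The merge performed in save() keeps any previously customised message parts that an update omits. A simplified, plain-dict version of the merge_messages() rule, using hypothetical event data:

old_messages = {'success': {'message': 'Job {{ job.name }} finished',
                            'body': 'Custom body'}}
new_messages = {'success': {'message': 'New subject'}}   # body omitted on update

for event, old_msgs in old_messages.items():
    new_msgs = new_messages.setdefault(event, {})
    for msg_type in ('message', 'body'):
        # Only fill in what the update left out, as merge_messages() does.
        if msg_type not in new_msgs and old_msgs.get(msg_type):
            new_msgs[msg_type] = old_msgs[msg_type]

print(new_messages)
# {'success': {'message': 'New subject', 'body': 'Custom body'}}
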
Code example #21
File: workflow.py  Project: wtcross/awx
class WorkflowJobNode(WorkflowNodeBase):
    job = models.OneToOneField(
        'UnifiedJob',
        related_name='unified_job_node',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    workflow_job = models.ForeignKey(
        'WorkflowJob',
        related_name='workflow_job_nodes',
        blank=True,
        null=True,
        default=None,
        on_delete=models.CASCADE,
    )
    ancestor_artifacts = JSONField(
        blank=True,
        default=dict,
        editable=False,
    )

    def get_absolute_url(self, request=None):
        return reverse('api:workflow_job_node_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    def get_job_kwargs(self):
        '''
        In advance of creating a new unified job as part of a workflow,
        this method builds the attributes to use. It alters the node by
        saving its updated version of ancestor_artifacts, making it
        available to subsequent nodes.
        '''
        # reject/accept prompted fields
        data = {}
        ujt_obj = self.unified_job_template
        if ujt_obj is not None:
            # MERGE note: move this to prompts_dict method on node when merging
            # with the workflow inventory branch
            prompts_data = self.prompts_dict()
            if isinstance(ujt_obj, WorkflowJobTemplate):
                if self.workflow_job.extra_vars:
                    prompts_data.setdefault('extra_vars', {})
                    prompts_data['extra_vars'].update(
                        self.workflow_job.extra_vars_dict)
            accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(
                **prompts_data)
            if errors:
                logger.info(
                    _('Bad launch configuration starting template {template_pk} as part of '
                      'workflow {workflow_pk}. Errors:\n{error_text}').format(
                          template_pk=ujt_obj.pk,
                          workflow_pk=self.pk,
                          error_text=errors))
            data.update(
                accepted_fields)  # missing fields are handled in the scheduler
            try:
                # config saved on the workflow job itself
                wj_config = self.workflow_job.launch_config
            except ObjectDoesNotExist:
                wj_config = None
            if wj_config:
                accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(
                    **wj_config.prompts_dict())
                accepted_fields.pop(
                    'extra_vars',
                    None)  # merge handled with other extra_vars later
                data.update(accepted_fields)
        # build ancestor artifacts, save them to node model for later
        aa_dict = {}
        is_root_node = True
        for parent_node in self.get_parent_nodes():
            is_root_node = False
            aa_dict.update(parent_node.ancestor_artifacts)
            if parent_node.job and hasattr(parent_node.job, 'artifacts'):
                aa_dict.update(parent_node.job.artifacts)
        if aa_dict and not is_root_node:
            self.ancestor_artifacts = aa_dict
            self.save(update_fields=['ancestor_artifacts'])
        # process password list
        password_dict = {}
        if '_ansible_no_log' in aa_dict:
            for key in aa_dict:
                if key != '_ansible_no_log':
                    password_dict[key] = REPLACE_STR
        if self.workflow_job.survey_passwords:
            password_dict.update(self.workflow_job.survey_passwords)
        if self.survey_passwords:
            password_dict.update(self.survey_passwords)
        if password_dict:
            data['survey_passwords'] = password_dict
        # process extra_vars
        extra_vars = data.get('extra_vars', {})
        if aa_dict:
            functional_aa_dict = copy(aa_dict)
            functional_aa_dict.pop('_ansible_no_log', None)
            extra_vars.update(functional_aa_dict)
        # Workflow Job extra_vars higher precedence than ancestor artifacts
        if ujt_obj and isinstance(ujt_obj, JobTemplate):
            if self.workflow_job and self.workflow_job.extra_vars:
                extra_vars.update(self.workflow_job.extra_vars_dict)
        if extra_vars:
            data['extra_vars'] = extra_vars
        # ensure that unified jobs created by WorkflowJobs are marked
        data['_eager_fields'] = {'launch_type': 'workflow'}
        # Extra processing in the case that this is a slice job
        if 'job_slice' in self.ancestor_artifacts and is_root_node:
            data['_eager_fields']['allow_simultaneous'] = True
            data['_eager_fields'][
                'job_slice_number'] = self.ancestor_artifacts['job_slice']
            data['_eager_fields'][
                'job_slice_count'] = self.workflow_job.workflow_job_nodes.count(
                )
            data['_prevent_slicing'] = True
        return data
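
When a parent job in the workflow set _ansible_no_log, every artifact key it produced is masked in the child's survey_passwords. That masking step in isolation (REPLACE_STR is awx's placeholder constant, shown here as a literal):

REPLACE_STR = '$encrypted$'   # stand-in for the imported REPLACE_STR constant

aa_dict = {'_ansible_no_log': True, 'db_password': 'secret', 'build_id': 42}

password_dict = {}
if '_ansible_no_log' in aa_dict:
    for key in aa_dict:
        if key != '_ansible_no_log':
            password_dict[key] = REPLACE_STR

print(password_dict)
# {'db_password': '$encrypted$', 'build_id': '$encrypted$'}
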
Code example #22
File: events.py  Project: tiagodread/awx
class BaseCommandEvent(CreatedModifiedModel):
    """
    An event/message logged from a command for each host.
    """

    VALID_KEYS = [
        'event_data', 'created', 'counter', 'uuid', 'stdout', 'start_line',
        'end_line', 'verbosity'
    ]

    class Meta:
        abstract = True

    event_data = JSONField(
        blank=True,
        default=dict,
    )
    uuid = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    counter = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    stdout = models.TextField(
        default='',
        editable=False,
    )
    verbosity = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    start_line = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    end_line = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    created = models.DateTimeField(
        null=True,
        default=None,
        editable=False,
    )
    modified = models.DateTimeField(
        default=None,
        editable=False,
        db_index=True,
    )

    def __str__(self):
        return u'%s @ %s' % (self.get_event_display(),
                             self.created.isoformat())

    @classmethod
    def create_from_data(cls, **kwargs):
        #
        # ⚠️  D-D-D-DANGER ZONE ⚠️
        # This function is called by the callback receiver *once* for *every
        # event* emitted by Ansible as a playbook runs.  That means that
        # changes to this function are _very_ susceptible to introducing
        # performance regressions (which the user will experience as "my
        # playbook stdout takes too long to show up"), *especially* code which
        # might invoke additional database queries per event.
        #
        # Proceed with caution!
        #
        # Convert the datetime for the event's creation
        # appropriately, and include a time zone for it.
        #
        # In the event of any issue, throw it out, and Django will just save
        # the current time.
        try:
            if not isinstance(kwargs['created'], datetime.datetime):
                kwargs['created'] = parse_datetime(kwargs['created'])
            if not kwargs['created'].tzinfo:
                kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
        except (KeyError, ValueError):
            kwargs.pop('created', None)

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
        kwargs.pop('workflow_job_id', None)
        event = cls(**kwargs)
        event._update_from_event_data()
        return event

    def get_event_display(self):
        """
        Needed for __str__
        """
        return self.event

    def get_event_display2(self):
        return self.get_event_display()

    def get_host_status_counts(self):
        return create_host_status_counts(getattr(self, 'event_data', {}))

    def _update_from_event_data(self):
        pass
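
create_from_data accepts 'created' as either a datetime or an ISO 8601 string and guarantees the stored value carries a timezone. The same normalisation extracted into a stdlib-only helper (a sketch, not AWX code):

import datetime

def ensure_aware(created):
    # Parse a string timestamp if needed, then force UTC when naive,
    # mirroring the normalisation in create_from_data.
    if not isinstance(created, datetime.datetime):
        created = datetime.datetime.fromisoformat(created)
    if created.tzinfo is None:
        created = created.replace(tzinfo=datetime.timezone.utc)
    return created

print(ensure_aware('2021-06-01T09:30:00'))   # 2021-06-01 09:30:00+00:00
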
Code example #23
class LaunchTimeConfigBase(BaseModel):
    '''
    Needed as separate class from LaunchTimeConfig because some models
    use `extra_data` and some use `extra_vars`. We cannot change the API,
    so we force fake it in the model definitions
     - model defines extra_vars - use this class
     - model needs to use extra data - use LaunchTimeConfig
    Use this for models which are SurveyMixins and UnifiedJobs or Templates
    '''
    class Meta:
        abstract = True

    # Prompting-related fields that have to be handled as special cases
    inventory = models.ForeignKey(
        'Inventory',
        related_name='%(class)ss',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    # All standard fields are stored in this dictionary field
    # This is a solution to the nullable CharField problem, specific to prompting
    char_prompts = JSONField(blank=True, default=dict)

    def prompts_dict(self, display=False):
        data = {}
        # Some types may have different prompts, but always subset of JT prompts
        for prompt_name in JobTemplate.get_ask_mapping().keys():
            try:
                field = self._meta.get_field(prompt_name)
            except FieldDoesNotExist:
                field = None
            if isinstance(field, models.ManyToManyField):
                if not self.pk:
                    continue  # unsaved object can't have related many-to-many
                prompt_val = set(getattr(self, prompt_name).all())
                if len(prompt_val) > 0:
                    data[prompt_name] = prompt_val
            elif prompt_name == 'extra_vars':
                if self.extra_vars:
                    if display:
                        data[prompt_name] = self.display_extra_vars()
                    else:
                        data[prompt_name] = self.extra_vars
                if self.survey_passwords and not display:
                    data['survey_passwords'] = self.survey_passwords
            else:
                prompt_val = getattr(self, prompt_name)
                if prompt_val is not None:
                    data[prompt_name] = prompt_val
        return data

    def display_extra_vars(self):
        '''
        Hides fields marked as passwords in survey.
        '''
        if self.survey_passwords:
            extra_vars = parse_yaml_or_json(self.extra_vars).copy()
            for key, value in self.survey_passwords.items():
                if key in extra_vars:
                    extra_vars[key] = value
            return extra_vars
        else:
            return self.extra_vars

    def display_extra_data(self):
        return self.display_extra_vars()

    @property
    def _credential(self):
        '''
        Only used for workflow nodes to support backward compatibility.
        '''
        try:
            return [
                cred for cred in self.credentials.all()
                if cred.credential_type.kind == 'ssh'
            ][0]
        except IndexError:
            return None

    @property
    def credential(self):
        '''
        Returns an integer so it can be used as IntegerField in serializer
        '''
        cred = self._credential
        if cred is not None:
            return cred.pk
        else:
            return None
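
char_prompts exists so that "prompt never provided" can be told apart from "prompt explicitly set to an empty value", which nullable CharFields make awkward. A sketch of the resulting lookup pattern, with invented prompt names (this helper is not in the AWX source):

char_prompts = {'job_type': 'check'}   # 'limit' was never prompted for

def prompt_value(prompts, name, template_default):
    # Key absence means "fall back to the template", even when the stored
    # value itself is falsy (e.g. an empty limit string).
    if name in prompts:
        return prompts[name]
    return template_default

print(prompt_value(char_prompts, 'job_type', 'run'))   # 'check'
print(prompt_value(char_prompts, 'limit', 'all'))      # 'all'
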
Code example #24
class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
    """A model representing a Queue/Group of AWX Instances."""

    objects = InstanceGroupManager()

    name = models.CharField(max_length=250, unique=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    instances = models.ManyToManyField(
        'Instance',
        related_name='rampart_groups',
        editable=False,
        help_text=_('Instances that are members of this InstanceGroup'),
    )
    is_container_group = models.BooleanField(default=False)
    credential = models.ForeignKey(
        'Credential',
        related_name='%(class)ss',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    pod_spec_override = prevent_search(
        models.TextField(
            blank=True,
            default='',
        ))
    policy_instance_percentage = models.IntegerField(
        default=0,
        help_text=_(
            "Percentage of Instances to automatically assign to this group"))
    policy_instance_minimum = models.IntegerField(
        default=0,
        help_text=_("Static minimum number of Instances to automatically "
                    "assign to this group"))
    policy_instance_list = JSONField(
        default=list,
        blank=True,
        help_text=_("List of exact-match Instances that will always be "
                    "automatically assigned to this group"))

    POLICY_FIELDS = frozenset(
        ('policy_instance_list', 'policy_instance_minimum',
         'policy_instance_percentage'))

    def get_absolute_url(self, request=None):
        return reverse('api:instance_group_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    @property
    def capacity(self):
        return sum(inst.capacity for inst in self.instances.all())

    @property
    def jobs_running(self):
        return UnifiedJob.objects.filter(status__in=('running', 'waiting'),
                                         instance_group=self).count()

    @property
    def jobs_total(self):
        return UnifiedJob.objects.filter(instance_group=self).count()

    '''
    RelatedJobsMixin
    '''

    def _get_related_jobs(self):
        return UnifiedJob.objects.filter(instance_group=self)

    class Meta:
        app_label = 'main'

    @staticmethod
    def fit_task_to_most_remaining_capacity_instance(task, instances):
        instance_most_capacity = None
        for i in instances:
            if i.node_type not in (task.capacity_type, 'hybrid'):
                continue
            if i.remaining_capacity >= task.task_impact and (
                    instance_most_capacity is None or i.remaining_capacity >
                    instance_most_capacity.remaining_capacity):
                instance_most_capacity = i
        return instance_most_capacity

    @staticmethod
    def find_largest_idle_instance(instances, capacity_type='execution'):
        largest_instance = None
        for i in instances:
            if i.node_type not in (capacity_type, 'hybrid'):
                continue
            if i.jobs_running == 0:
                if largest_instance is None:
                    largest_instance = i
                elif i.capacity > largest_instance.capacity:
                    largest_instance = i
        return largest_instance

    def set_default_policy_fields(self):
        self.policy_instance_list = []
        self.policy_instance_minimum = 0
        self.policy_instance_percentage = 0
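
fit_task_to_most_remaining_capacity_instance only reads node_type and remaining_capacity from instances, and capacity_type/task_impact from the task, so it can be exercised with stand-in objects (assuming InstanceGroup is importable in a configured Django environment; the fakes below are invented):

from dataclasses import dataclass

@dataclass
class FakeInstance:
    node_type: str
    remaining_capacity: int

@dataclass
class FakeTask:
    capacity_type: str = 'execution'
    task_impact: int = 10

instances = [FakeInstance('control', 100),   # wrong node type, skipped
             FakeInstance('execution', 15),
             FakeInstance('hybrid', 40)]     # hybrid always qualifies

best = InstanceGroup.fit_task_to_most_remaining_capacity_instance(
    FakeTask(), instances)
print(best)   # FakeInstance(node_type='hybrid', remaining_capacity=40)
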
Code example #25
File: events.py  Project: sky-joker/awx-arm64arch
class BasePlaybookEvent(CreatedModifiedModel):
    '''
    An event/message logged from a playbook callback for each host.
    '''

    VALID_KEYS = [
        'event', 'event_data', 'playbook', 'play', 'role', 'task', 'created',
        'counter', 'uuid', 'stdout', 'parent_uuid', 'start_line', 'end_line',
        'verbosity'
    ]

    class Meta:
        abstract = True

    # Playbook events will be structured to form the following hierarchy:
    # - playbook_on_start (once for each playbook file)
    #   - playbook_on_vars_prompt (for each play, but before play starts, we
    #     currently don't handle responding to these prompts)
    #   - playbook_on_play_start (once for each play)
    #     - playbook_on_import_for_host (not logged, not used for v2)
    #     - playbook_on_not_import_for_host (not logged, not used for v2)
    #     - playbook_on_no_hosts_matched
    #     - playbook_on_no_hosts_remaining
    #     - playbook_on_include (only v2 - only used for handlers?)
    #     - playbook_on_setup (not used for v2)
    #       - runner_on*
    #     - playbook_on_task_start (once for each task within a play)
    #       - runner_on_failed
    #       - runner_on_ok
    #       - runner_on_error (not used for v2)
    #       - runner_on_skipped
    #       - runner_on_unreachable
    #       - runner_on_no_hosts (not used for v2)
    #       - runner_on_async_poll (not used for v2)
    #       - runner_on_async_ok (not used for v2)
    #       - runner_on_async_failed (not used for v2)
    #       - runner_on_file_diff (v2 event is v2_on_file_diff)
    #       - runner_item_on_ok (v2 only)
    #       - runner_item_on_failed (v2 only)
    #       - runner_item_on_skipped (v2 only)
    #       - runner_retry (v2 only)
    #     - playbook_on_notify (once for each notification from the play, not used for v2)
    #   - playbook_on_stats

    EVENT_TYPES = [
        # (level, event, verbose name, failed)
        (3, 'runner_on_failed', _('Host Failed'), True),
        (3, 'runner_on_ok', _('Host OK'), False),
        (3, 'runner_on_error', _('Host Failure'), True),
        (3, 'runner_on_skipped', _('Host Skipped'), False),
        (3, 'runner_on_unreachable', _('Host Unreachable'), True),
        (3, 'runner_on_no_hosts', _('No Hosts Remaining'), False),
        (3, 'runner_on_async_poll', _('Host Polling'), False),
        (3, 'runner_on_async_ok', _('Host Async OK'), False),
        (3, 'runner_on_async_failed', _('Host Async Failure'), True),
        (3, 'runner_item_on_ok', _('Item OK'), False),
        (3, 'runner_item_on_failed', _('Item Failed'), True),
        (3, 'runner_item_on_skipped', _('Item Skipped'), False),
        (3, 'runner_retry', _('Host Retry'), False),
        # Tower does not yet support --diff mode.
        (3, 'runner_on_file_diff', _('File Difference'), False),
        (0, 'playbook_on_start', _('Playbook Started'), False),
        (2, 'playbook_on_notify', _('Running Handlers'), False),
        (2, 'playbook_on_include', _('Including File'), False),
        (2, 'playbook_on_no_hosts_matched', _('No Hosts Matched'), False),
        (2, 'playbook_on_no_hosts_remaining', _('No Hosts Remaining'), False),
        (2, 'playbook_on_task_start', _('Task Started'), False),
        # Tower does not yet support vars_prompt (and will probably hang :)
        (1, 'playbook_on_vars_prompt', _('Variables Prompted'), False),
        (2, 'playbook_on_setup', _('Gathering Facts'), False),
        (2, 'playbook_on_import_for_host', _('internal: on Import for Host'),
         False),
        (2, 'playbook_on_not_import_for_host',
         _('internal: on Not Import for Host'), False),
        (1, 'playbook_on_play_start', _('Play Started'), False),
        (1, 'playbook_on_stats', _('Playbook Complete'), False),

        # Additional event types for captured stdout not directly related to
        # playbook or runner events.
        (0, 'debug', _('Debug'), False),
        (0, 'verbose', _('Verbose'), False),
        (0, 'deprecated', _('Deprecated'), False),
        (0, 'warning', _('Warning'), False),
        (0, 'system_warning', _('System Warning'), False),
        (0, 'error', _('Error'), True),
    ]
    FAILED_EVENTS = [x[1] for x in EVENT_TYPES if x[3]]
    EVENT_CHOICES = [(x[1], x[2]) for x in EVENT_TYPES]
    LEVEL_FOR_EVENT = dict([(x[1], x[0]) for x in EVENT_TYPES])

    event = models.CharField(
        max_length=100,
        choices=EVENT_CHOICES,
    )
    event_data = JSONField(
        blank=True,
        default=dict,
    )
    failed = models.BooleanField(
        default=False,
        editable=False,
    )
    changed = models.BooleanField(
        default=False,
        editable=False,
    )
    uuid = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    playbook = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    play = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    role = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    task = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    counter = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    stdout = models.TextField(
        default='',
        editable=False,
    )
    verbosity = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    start_line = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    end_line = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    created = models.DateTimeField(
        null=True,
        default=None,
        editable=False,
        db_index=True,
    )

    @property
    def event_level(self):
        return self.LEVEL_FOR_EVENT.get(self.event, 0)

    def get_host_status_counts(self):
        return create_host_status_counts(getattr(self, 'event_data', {}))

    def get_event_display2(self):
        msg = self.get_event_display()
        if self.event == 'playbook_on_play_start':
            if self.play:
                msg = "%s (%s)" % (msg, self.play)
        elif self.event == 'playbook_on_task_start':
            if self.task:
                if self.event_data.get('is_conditional', False):
                    msg = 'Handler Notified'
                if self.role:
                    msg = '%s (%s | %s)' % (msg, self.role, self.task)
                else:
                    msg = "%s (%s)" % (msg, self.task)

        # Change display for runner events triggered by async polling.  Some of
        # these events may not show in most cases, due to filtering them out
        # of the job event queryset returned to the user.
        res = self.event_data.get('res', {})
        # Fix for existing records before we had added the workaround on save
        # to change async_ok to async_failed.
        if self.event == 'runner_on_async_ok':
            try:
                if res.get('failed', False) or res.get('rc', 0) != 0:
                    msg = 'Host Async Failed'
            except (AttributeError, TypeError):
                pass
        # Runner events with ansible_job_id are part of async starting/polling.
        if self.event in ('runner_on_ok', 'runner_on_failed'):
            try:
                module_name = res['invocation']['module_name']
                job_id = res['ansible_job_id']
            except (TypeError, KeyError, AttributeError):
                module_name = None
                job_id = None
            if module_name and job_id:
                if module_name == 'async_status':
                    msg = 'Host Async Checking'
                else:
                    msg = 'Host Async Started'
        # Handle both 1.2 on_failed and 1.3+ on_async_failed events when an
        # async task times out.
        if self.event in ('runner_on_failed', 'runner_on_async_failed'):
            try:
                if res['msg'] == 'timed out':
                    msg = 'Host Async Timeout'
            except (TypeError, KeyError, AttributeError):
                pass
        return msg

    def _update_from_event_data(self):
        # Update event model fields from event data.
        updated_fields = set()
        event_data = self.event_data
        res = event_data.get('res', None)
        if self.event in self.FAILED_EVENTS and not event_data.get(
                'ignore_errors', False):
            self.failed = True
            updated_fields.add('failed')
        if isinstance(res, dict):
            if res.get('changed', False):
                self.changed = True
                updated_fields.add('changed')
        if self.event == 'playbook_on_stats':
            try:
                failures_dict = event_data.get('failures', {})
                dark_dict = event_data.get('dark', {})
                self.failed = bool(
                    sum(failures_dict.values()) + sum(dark_dict.values()))
                updated_fields.add('failed')
                changed_dict = event_data.get('changed', {})
                self.changed = bool(sum(changed_dict.values()))
                updated_fields.add('changed')
            except (AttributeError, TypeError):
                pass
        for field in ('playbook', 'play', 'task', 'role'):
            value = force_text(event_data.get(field, '')).strip()
            if value != getattr(self, field):
                setattr(self, field, value)
                updated_fields.add(field)
        return updated_fields

    @classmethod
    def create_from_data(cls, **kwargs):
        pk = None
        for key in ('job_id', 'project_update_id'):
            if key in kwargs:
                pk = key
        if pk is None:
            # payload must contain either a job_id or a project_update_id
            return

        # Convert the datetime for the job event's creation appropriately,
        # and include a time zone for it.
        #
        # In the event of any issue, throw it out, and Django will just save
        # the current time.
        try:
            if not isinstance(kwargs['created'], datetime.datetime):
                kwargs['created'] = parse_datetime(kwargs['created'])
            if not kwargs['created'].tzinfo:
                kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
        except (KeyError, ValueError):
            kwargs.pop('created', None)

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
        job_event = cls.objects.create(**kwargs)
        analytics_logger.info(
            'Event data saved.',
            extra=dict(python_objects=dict(job_event=job_event)))
        return job_event

    @property
    def job_verbosity(self):
        return 0

    def save(self, *args, **kwargs):
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
        # Update model fields and related objects unless we're only updating
        # failed/changed flags triggered from a child event.
        from_parent_update = kwargs.pop('from_parent_update', False)
        if not from_parent_update:
            # Update model fields from event data.
            updated_fields = self._update_from_event_data()
            for field in updated_fields:
                if field not in update_fields:
                    update_fields.append(field)

            # Update host related field from host_name.
            if hasattr(self, 'job') and not self.host_id and self.host_name:
                if self.job.inventory.kind == 'smart':
                    # optimization to avoid calling inventory.hosts, which
                    # can take a long time to run under some circumstances
                    from awx.main.models.inventory import SmartInventoryMembership
                    membership = SmartInventoryMembership.objects.filter(
                        inventory=self.job.inventory,
                        host__name=self.host_name).first()
                    if membership:
                        host_id = membership.host_id
                    else:
                        host_id = None
                else:
                    host_qs = self.job.inventory.hosts.filter(
                        name=self.host_name)
                    host_id = host_qs.only('id').values_list(
                        'id', flat=True).first()
                if host_id != self.host_id:
                    self.host_id = host_id
                    if 'host_id' not in update_fields:
                        update_fields.append('host_id')
        super(BasePlaybookEvent, self).save(*args, **kwargs)

        # Update related objects after this event is saved.
        if hasattr(self, 'job') and not from_parent_update:
            if getattr(settings, 'CAPTURE_JOB_EVENT_HOSTS', False):
                self._update_hosts()
            if self.parent_uuid:
                kwargs = {}
                if self.changed is True:
                    kwargs['changed'] = True
                if self.failed is True:
                    kwargs['failed'] = True
                if kwargs:
                    JobEvent.objects.filter(
                        job_id=self.job_id,
                        uuid=self.parent_uuid).update(**kwargs)

            if self.event == 'playbook_on_stats':
                hostnames = self._hostnames()
                self._update_host_summary_from_stats(hostnames)
                try:
                    self.job.inventory.update_computed_fields()
                except DatabaseError:
                    logger.exception(
                        'Computed fields database error saving event {}'.
                        format(self.pk))
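
For the final playbook_on_stats event, the failed and changed flags are derived purely from the per-host tallies in event_data. The arithmetic in isolation, with made-up hosts:

event_data = {
    'failures': {'web1': 0, 'web2': 2},   # hosts with failed tasks
    'dark':     {'db1': 1},               # unreachable hosts
    'changed':  {'web1': 0, 'web2': 0},
}

failed = bool(sum(event_data.get('failures', {}).values()) +
              sum(event_data.get('dark', {}).values()))
changed = bool(sum(event_data.get('changed', {}).values()))

print(failed, changed)   # True False
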
Code example #26
File: activity_stream.py  Project: w3bservice/awx
class ActivityStream(models.Model):
    '''
    Model used to describe activity stream (audit) events
    '''
    class Meta:
        app_label = 'main'
        ordering = ('pk', )

    OPERATION_CHOICES = [
        ('create', _('Entity Created')), ('update', _("Entity Updated")),
        ('delete', _("Entity Deleted")),
        ('associate', _("Entity Associated with another Entity")),
        ('disassociate', _("Entity was Disassociated with another Entity"))
    ]

    actor = models.ForeignKey('auth.User',
                              null=True,
                              on_delete=models.SET_NULL,
                              related_name='activity_stream')
    operation = models.CharField(max_length=13, choices=OPERATION_CHOICES)
    timestamp = models.DateTimeField(auto_now_add=True)
    changes = models.TextField(blank=True)

    object_relationship_type = models.TextField(blank=True)
    object1 = models.TextField()
    object2 = models.TextField()

    user = models.ManyToManyField("auth.User", blank=True)
    organization = models.ManyToManyField("Organization", blank=True)
    inventory = models.ManyToManyField("Inventory", blank=True)
    host = models.ManyToManyField("Host", blank=True)
    group = models.ManyToManyField("Group", blank=True)
    inventory_source = models.ManyToManyField("InventorySource", blank=True)
    inventory_update = models.ManyToManyField("InventoryUpdate", blank=True)
    credential = models.ManyToManyField("Credential", blank=True)
    credential_type = models.ManyToManyField("CredentialType", blank=True)
    team = models.ManyToManyField("Team", blank=True)
    project = models.ManyToManyField("Project", blank=True)
    project_update = models.ManyToManyField("ProjectUpdate", blank=True)
    job_template = models.ManyToManyField("JobTemplate", blank=True)
    job = models.ManyToManyField("Job", blank=True)
    workflow_job_template_node = models.ManyToManyField(
        "WorkflowJobTemplateNode", blank=True)
    workflow_job_node = models.ManyToManyField("WorkflowJobNode", blank=True)
    workflow_job_template = models.ManyToManyField("WorkflowJobTemplate",
                                                   blank=True)
    workflow_job = models.ManyToManyField("WorkflowJob", blank=True)
    unified_job_template = models.ManyToManyField(
        "UnifiedJobTemplate",
        blank=True,
        related_name='activity_stream_as_unified_job_template+')
    unified_job = models.ManyToManyField(
        "UnifiedJob",
        blank=True,
        related_name='activity_stream_as_unified_job+')
    ad_hoc_command = models.ManyToManyField("AdHocCommand", blank=True)
    schedule = models.ManyToManyField("Schedule", blank=True)
    custom_inventory_script = models.ManyToManyField("CustomInventoryScript",
                                                     blank=True)
    notification_template = models.ManyToManyField("NotificationTemplate",
                                                   blank=True)
    notification = models.ManyToManyField("Notification", blank=True)
    label = models.ManyToManyField("Label", blank=True)
    role = models.ManyToManyField("Role", blank=True)
    instance_group = models.ManyToManyField("InstanceGroup", blank=True)

    setting = JSONField(blank=True)

    def get_absolute_url(self, request=None):
        return reverse('api:activity_stream_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    def save(self, *args, **kwargs):
        # For compatibility with Django 1.4.x, attempt to handle any calls to
        # save that pass update_fields.
        try:
            super(ActivityStream, self).save(*args, **kwargs)
        except TypeError:
            if 'update_fields' not in kwargs:
                raise
            kwargs.pop('update_fields')
            super(ActivityStream, self).save(*args, **kwargs)
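
This save() override is a compatibility shim: on Django versions whose Model.save() does not accept update_fields, the TypeError is caught and the call retried without that kwarg. The retry pattern extracted into a generic sketch (the helper name is invented):

def call_dropping_update_fields(func, *args, **kwargs):
    # Retry without 'update_fields' when the callee is too old to accept it;
    # any other TypeError is re-raised unchanged.
    try:
        return func(*args, **kwargs)
    except TypeError:
        if 'update_fields' not in kwargs:
            raise
        kwargs.pop('update_fields')
        return func(*args, **kwargs)
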
Code example #27
File: events.py  Project: tiagodread/awx
class BasePlaybookEvent(CreatedModifiedModel):
    """
    An event/message logged from a playbook callback for each host.
    """

    VALID_KEYS = [
        'event',
        'event_data',
        'playbook',
        'play',
        'role',
        'task',
        'created',
        'counter',
        'uuid',
        'stdout',
        'parent_uuid',
        'start_line',
        'end_line',
        'host_id',
        'host_name',
        'verbosity',
    ]

    class Meta:
        abstract = True

    # Playbook events will be structured to form the following hierarchy:
    # - playbook_on_start (once for each playbook file)
    #   - playbook_on_vars_prompt (for each play, but before play starts, we
    #     currently don't handle responding to these prompts)
    #   - playbook_on_play_start (once for each play)
    #     - playbook_on_import_for_host (not logged, not used for v2)
    #     - playbook_on_not_import_for_host (not logged, not used for v2)
    #     - playbook_on_no_hosts_matched
    #     - playbook_on_no_hosts_remaining
    #     - playbook_on_include (only v2 - only used for handlers?)
    #     - playbook_on_setup (not used for v2)
    #       - runner_on*
    #     - playbook_on_task_start (once for each task within a play)
    #       - runner_on_failed
    #       - runner_on_start
    #       - runner_on_ok
    #       - runner_on_error (not used for v2)
    #       - runner_on_skipped
    #       - runner_on_unreachable
    #       - runner_on_no_hosts (not used for v2)
    #       - runner_on_async_poll (not used for v2)
    #       - runner_on_async_ok (not used for v2)
    #       - runner_on_async_failed (not used for v2)
    #       - runner_on_file_diff (v2 event is v2_on_file_diff)
    #       - runner_item_on_ok (v2 only)
    #       - runner_item_on_failed (v2 only)
    #       - runner_item_on_skipped (v2 only)
    #       - runner_retry (v2 only)
    #     - playbook_on_notify (once for each notification from the play, not used for v2)
    #   - playbook_on_stats
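    #
    # Note (an assumed reading of the fields below): each event carries the
    # uuid of its enclosing event in parent_uuid, which is how this nested
    # hierarchy is reconstructed from the flat stream of callback events.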

    EVENT_TYPES = [
        # (level, event, verbose name, failed)
        (3, 'runner_on_failed', _('Host Failed'), True),
        (3, 'runner_on_start', _('Host Started'), False),
        (3, 'runner_on_ok', _('Host OK'), False),
        (3, 'runner_on_error', _('Host Failure'), True),
        (3, 'runner_on_skipped', _('Host Skipped'), False),
        (3, 'runner_on_unreachable', _('Host Unreachable'), True),
        (3, 'runner_on_no_hosts', _('No Hosts Remaining'), False),
        (3, 'runner_on_async_poll', _('Host Polling'), False),
        (3, 'runner_on_async_ok', _('Host Async OK'), False),
        (3, 'runner_on_async_failed', _('Host Async Failure'), True),
        (3, 'runner_item_on_ok', _('Item OK'), False),
        (3, 'runner_item_on_failed', _('Item Failed'), True),
        (3, 'runner_item_on_skipped', _('Item Skipped'), False),
        (3, 'runner_retry', _('Host Retry'), False),
        # Tower does not yet support --diff mode.
        (3, 'runner_on_file_diff', _('File Difference'), False),
        (0, 'playbook_on_start', _('Playbook Started'), False),
        (2, 'playbook_on_notify', _('Running Handlers'), False),
        (2, 'playbook_on_include', _('Including File'), False),
        (2, 'playbook_on_no_hosts_matched', _('No Hosts Matched'), False),
        (2, 'playbook_on_no_hosts_remaining', _('No Hosts Remaining'), False),
        (2, 'playbook_on_task_start', _('Task Started'), False),
        # Tower does not yet support vars_prompt (and will probably hang :)
        (1, 'playbook_on_vars_prompt', _('Variables Prompted'), False),
        (2, 'playbook_on_setup', _('Gathering Facts'), False),
        (2, 'playbook_on_import_for_host', _('internal: on Import for Host'),
         False),
        (2, 'playbook_on_not_import_for_host',
         _('internal: on Not Import for Host'), False),
        (1, 'playbook_on_play_start', _('Play Started'), False),
        (1, 'playbook_on_stats', _('Playbook Complete'), False),
        # Additional event types for captured stdout not directly related to
        # playbook or runner events.
        (0, 'debug', _('Debug'), False),
        (0, 'verbose', _('Verbose'), False),
        (0, 'deprecated', _('Deprecated'), False),
        (0, 'warning', _('Warning'), False),
        (0, 'system_warning', _('System Warning'), False),
        (0, 'error', _('Error'), True),
    ]
    FAILED_EVENTS = [x[1] for x in EVENT_TYPES if x[3]]
    EVENT_CHOICES = [(x[1], x[2]) for x in EVENT_TYPES]
    LEVEL_FOR_EVENT = dict([(x[1], x[0]) for x in EVENT_TYPES])
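
    # For illustration, the derived constants above resolve to values such as:
    #   FAILED_EVENTS -> ['runner_on_failed', 'runner_on_error',
    #                     'runner_on_unreachable', 'runner_on_async_failed',
    #                     'runner_item_on_failed', 'error']
    #   EVENT_CHOICES -> [('runner_on_failed', _('Host Failed')), ...]
    #   LEVEL_FOR_EVENT['playbook_on_play_start'] -> 1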

    event = models.CharField(
        max_length=100,
        choices=EVENT_CHOICES,
    )
    event_data = JSONField(
        blank=True,
        default=dict,
    )
    failed = models.BooleanField(
        default=False,
        editable=False,
    )
    changed = models.BooleanField(
        default=False,
        editable=False,
    )
    uuid = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    playbook = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    play = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    role = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    task = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    counter = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    stdout = models.TextField(
        default='',
        editable=False,
    )
    verbosity = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    start_line = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    end_line = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    created = models.DateTimeField(
        null=True,
        default=None,
        editable=False,
    )
    modified = models.DateTimeField(
        default=None,
        editable=False,
        db_index=True,
    )

    @property
    def event_level(self):
        return self.LEVEL_FOR_EVENT.get(self.event, 0)

    def get_host_status_counts(self):
        return create_host_status_counts(getattr(self, 'event_data', {}))

    def get_event_display2(self):
        msg = self.get_event_display()
        if self.event == 'playbook_on_play_start':
            if self.play:
                msg = "%s (%s)" % (msg, self.play)
        elif self.event == 'playbook_on_task_start':
            if self.task:
                if self.event_data.get('is_conditional', False):
                    msg = 'Handler Notified'
                if self.role:
                    msg = '%s (%s | %s)' % (msg, self.role, self.task)
                else:
                    msg = "%s (%s)" % (msg, self.task)

        # Change display for runner events triggered by async polling.  Some of
        # these events may not show in most cases, due to filtering them out
        # of the job event queryset returned to the user.
        res = self.event_data.get('res', {})
        # Fix for existing records before we had added the workaround on save
        # to change async_ok to async_failed.
        if self.event == 'runner_on_async_ok':
            try:
                if res.get('failed', False) or res.get('rc', 0) != 0:
                    msg = 'Host Async Failed'
            except (AttributeError, TypeError):
                pass
        # Runner events with ansible_job_id are part of async starting/polling.
        if self.event in ('runner_on_ok', 'runner_on_failed'):
            try:
                module_name = res['invocation']['module_name']
                job_id = res['ansible_job_id']
            except (TypeError, KeyError, AttributeError):
                module_name = None
                job_id = None
            if module_name and job_id:
                if module_name == 'async_status':
                    msg = 'Host Async Checking'
                else:
                    msg = 'Host Async Started'
        # Handle both 1.2 on_failed and 1.3+ on_async_failed events when an
        # async task times out.
        if self.event in ('runner_on_failed', 'runner_on_async_failed'):
            try:
                if res['msg'] == 'timed out':
                    msg = 'Host Async Timeout'
            except (TypeError, KeyError, AttributeError):
                pass
        return msg

    def _update_from_event_data(self):
        # Update event model fields from event data.
        event_data = self.event_data
        res = event_data.get('res', None)
        if self.event in self.FAILED_EVENTS and not event_data.get(
                'ignore_errors', False):
            self.failed = True
        if isinstance(res, dict):
            if res.get('changed', False):
                self.changed = True
        if self.event == 'playbook_on_stats':
            try:
                failures_dict = event_data.get('failures', {})
                dark_dict = event_data.get('dark', {})
                self.failed = bool(
                    sum(failures_dict.values()) + sum(dark_dict.values()))
                changed_dict = event_data.get('changed', {})
                self.changed = bool(sum(changed_dict.values()))
            except (AttributeError, TypeError):
                pass
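
            # Illustration (assumed payload shape): for playbook_on_stats,
            # event_data typically looks like
            #   {'failures': {'host1': 1}, 'dark': {}, 'changed': {'host1': 3}}
            # which makes failed True (1 + 0 > 0) and changed True (3 > 0).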

            if isinstance(self, JobEvent):
                try:
                    job = self.job
                except ObjectDoesNotExist:
                    job = None
                if job:
                    hostnames = self._hostnames()
                    self._update_host_summary_from_stats(set(hostnames))
                    if job.inventory:
                        try:
                            job.inventory.update_computed_fields()
                        except DatabaseError:
                            logger.exception(
                                'Computed fields database error saving event {}'
                                .format(self.pk))

                    # find parent links and propagate changed=True and failed=True
                    changed = (
                        job.get_event_queryset().filter(changed=True).exclude(
                            parent_uuid=None).only('parent_uuid').values_list(
                                'parent_uuid', flat=True).distinct())  # noqa
                    failed = (
                        job.get_event_queryset().filter(failed=True).exclude(
                            parent_uuid=None).only('parent_uuid').values_list(
                                'parent_uuid', flat=True).distinct())  # noqa

                    job.get_event_queryset().filter(uuid__in=changed).update(
                        changed=True)
                    job.get_event_queryset().filter(uuid__in=failed).update(
                        failed=True)

                    # send success/failure notifications when we've finished handling the playbook_on_stats event
                    from awx.main.tasks.system import handle_success_and_failure_notifications  # circular import

                    def _send_notifications():
                        handle_success_and_failure_notifications.apply_async(
                            [job.id])

                    connection.on_commit(_send_notifications)

        for field in ('playbook', 'play', 'task', 'role'):
            value = force_text(event_data.get(field, '')).strip()
            if value != getattr(self, field):
                setattr(self, field, value)
        if settings.LOG_AGGREGATOR_ENABLED:
            analytics_logger.info(
                'Event data saved.',
                extra=dict(python_objects=dict(job_event=self)))

    @classmethod
    def create_from_data(cls, **kwargs):
        #
        # ⚠️  D-D-D-DANGER ZONE ⚠️
        # This function is called by the callback receiver *once* for *every
        # event* emitted by Ansible as a playbook runs.  That means that
        # changes to this function are _very_ susceptible to introducing
        # performance regressions (which the user will experience as "my
        # playbook stdout takes too long to show up"), *especially* code which
        # might invoke additional database queries per event.
        #
        # Proceed with caution!
        #
        pk = None
        for key in ('job_id', 'project_update_id'):
            if key in kwargs:
                pk = key
        if pk is None:
            # payload must contain either a job_id or a project_update_id
            return

        # Convert the datetime for the job event's creation appropriately,
        # and include a time zone for it.
        #
        # In the event of any issue, throw it out, and Django will just save
        # the current time.
        try:
            if not isinstance(kwargs['created'], datetime.datetime):
                kwargs['created'] = parse_datetime(kwargs['created'])
            if not kwargs['created'].tzinfo:
                kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
        except (KeyError, ValueError):
            kwargs.pop('created', None)

        # same as above, for job_created
        # TODO: if this approach, identical to above, works, can convert to for loop
        try:
            if not isinstance(kwargs['job_created'], datetime.datetime):
                kwargs['job_created'] = parse_datetime(kwargs['job_created'])
            if not kwargs['job_created'].tzinfo:
                kwargs['job_created'] = kwargs['job_created'].replace(
                    tzinfo=utc)
        except (KeyError, ValueError):
            kwargs.pop('job_created', None)

        host_map = kwargs.pop('host_map', {})

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
        workflow_job_id = kwargs.pop('workflow_job_id', None)
        event = cls(**kwargs)
        if workflow_job_id:
            setattr(event, 'workflow_job_id', workflow_job_id)
        # shouldn't job_created _always_ be present?
        # if it's not, how could we save the event to the db?
        job_created = kwargs.pop('job_created', None)
        if job_created:
            setattr(event, 'job_created', job_created)
        setattr(event, 'host_map', host_map)
        event._update_from_event_data()
        return event

    @property
    def job_verbosity(self):
        return 0
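
A hypothetical usage sketch (not from the AWX source): one callback payload
flowing through create_from_data above. The keys mirror VALID_KEYS, and
JobEvent stands in for the concrete per-job subclass of BasePlaybookEvent:

payload = {
    'job_id': 42,               # either job_id or project_update_id is required
    'event': 'runner_on_ok',
    'event_data': {'res': {'changed': True}},
    'created': '2020-01-01T00:00:00Z',  # normalized to an aware datetime
    'counter': 7,
    'uuid': 'abcd-1234',
    'stdout': 'ok: [host1]',
    'start_line': 10,
    'end_line': 11,
}
event = JobEvent.create_from_data(**payload)
# event.changed is now True via _update_from_event_data(); the caller is
# responsible for saving, since this version does not hit the database.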
Code example #28
File: events.py Project: sky-joker/awx-arm64arch
class BaseCommandEvent(CreatedModifiedModel):
    '''
    An event/message logged from a command for each host.
    '''

    VALID_KEYS = [
        'event_data', 'created', 'counter', 'uuid', 'stdout', 'start_line',
        'end_line', 'verbosity'
    ]

    class Meta:
        abstract = True

    event_data = JSONField(
        blank=True,
        default=dict,
    )
    uuid = models.CharField(
        max_length=1024,
        default='',
        editable=False,
    )
    counter = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    stdout = models.TextField(
        default='',
        editable=False,
    )
    verbosity = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    start_line = models.PositiveIntegerField(
        default=0,
        editable=False,
    )
    end_line = models.PositiveIntegerField(
        default=0,
        editable=False,
    )

    def __str__(self):
        return u'%s @ %s' % (self.get_event_display(),
                             self.created.isoformat())

    @classmethod
    def create_from_data(cls, **kwargs):
        # Convert the datetime for the event's creation
        # appropriately, and include a time zone for it.
        #
        # In the event of any issue, throw it out, and Django will just save
        # the current time.
        try:
            if not isinstance(kwargs['created'], datetime.datetime):
                kwargs['created'] = parse_datetime(kwargs['created'])
            if not kwargs['created'].tzinfo:
                kwargs['created'] = kwargs['created'].replace(tzinfo=utc)
        except (KeyError, ValueError):
            kwargs.pop('created', None)

        sanitize_event_keys(kwargs, cls.VALID_KEYS)
        return cls.objects.create(**kwargs)

    def get_event_display(self):
        '''
        Needed for __str__
        '''
        return self.event

    def get_host_status_counts(self):
        return create_host_status_counts(getattr(self, 'event_data', {}))
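
A minimal standalone sketch of the timezone normalization that both
create_from_data implementations above share (django.utils.timezone.utc is
assumed to be available, as in the Django version these models target):

import datetime

from django.utils.dateparse import parse_datetime
from django.utils.timezone import utc

created = '2020-01-01T00:00:00'
if not isinstance(created, datetime.datetime):
    created = parse_datetime(created)      # None if the string is malformed
if created and not created.tzinfo:
    created = created.replace(tzinfo=utc)  # force an aware datetime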
Code example #29
File: workflow.py Project: lp4775/awx
class WorkflowJobNode(WorkflowNodeBase):
    job = models.OneToOneField(
        'UnifiedJob',
        related_name='unified_job_node',
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )
    workflow_job = models.ForeignKey(
        'WorkflowJob',
        related_name='workflow_job_nodes',
        blank=True,
        null=True,
        default=None,
        on_delete=models.CASCADE,
    )
    ancestor_artifacts = JSONField(
        blank=True,
        default=dict,
        editable=False,
    )
    do_not_run = models.BooleanField(
        default=False,
        help_text=
        _("Indicates that a job will not be created when True. Workflow runtime "
          "semantics will mark this True if the node is in a path that will "
          "decidedly not be run. A value of False means the node may or may not run."
          ),
    )
    identifier = models.CharField(
        max_length=512,
        blank=True,  # blank denotes pre-migration job nodes
        help_text=
        _('An identifier corresponding to the workflow job template node that this node was created from.'
          ),
    )

    class Meta:
        app_label = 'main'
        indexes = [
            models.Index(fields=["identifier", "workflow_job"]),
            models.Index(fields=['identifier']),
        ]

    @property
    def event_processing_finished(self):
        return True

    def get_absolute_url(self, request=None):
        return reverse('api:workflow_job_node_detail',
                       kwargs={'pk': self.pk},
                       request=request)

    def prompts_dict(self, *args, **kwargs):
        r = super(WorkflowJobNode, self).prompts_dict(*args, **kwargs)
        # Explanation - WFJT extra_vars still break the pattern, so they are
        # not put through prompts processing, but inventory and other fields
        # are only accepted if the JT prompts for them, so they go through
        # this mechanism
        if self.workflow_job:
            if self.workflow_job.inventory_id:
                # workflow job inventory takes precedence
                r['inventory'] = self.workflow_job.inventory
            if self.workflow_job.char_prompts:
                r.update(self.workflow_job.char_prompts)
        return r
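
    # Illustration (assumed values): if the workflow job was launched with an
    # inventory and char_prompts={'limit': 'webservers'}, every node's
    # prompts_dict() returns that inventory and limit, so workflow-level
    # prompts override what the node itself stored.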

    def get_job_kwargs(self):
        """
        In advance of creating a new unified job as part of a workflow,
        this method builds the attributes to use.
        It also alters the node by saving its updated version of
        ancestor_artifacts, making it available to subsequent nodes.
        """
        # reject/accept prompted fields
        data = {}
        ujt_obj = self.unified_job_template
        if ujt_obj is not None:
            # MERGE note: move this to prompts_dict method on node when merging
            # with the workflow inventory branch
            prompts_data = self.prompts_dict()
            if isinstance(ujt_obj, WorkflowJobTemplate):
                if self.workflow_job.extra_vars:
                    prompts_data.setdefault('extra_vars', {})
                    prompts_data['extra_vars'].update(
                        self.workflow_job.extra_vars_dict)
            accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(
                **prompts_data)
            if errors:
                logger.info(
                    _('Bad launch configuration starting template {template_pk} as part of '
                      'workflow {workflow_pk}. Errors:\n{error_text}').format(
                          template_pk=ujt_obj.pk,
                          workflow_pk=self.pk,
                          error_text=errors))
            data.update(
                accepted_fields)  # missing fields are handled in the scheduler
            try:
                # config saved on the workflow job itself
                wj_config = self.workflow_job.launch_config
            except ObjectDoesNotExist:
                wj_config = None
            if wj_config:
                accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(
                    **wj_config.prompts_dict())
                accepted_fields.pop(
                    'extra_vars',
                    None)  # merge handled with other extra_vars later
                data.update(accepted_fields)
        # build ancestor artifacts, save them to node model for later
        aa_dict = {}
        is_root_node = True
        for parent_node in self.get_parent_nodes():
            is_root_node = False
            aa_dict.update(parent_node.ancestor_artifacts)
            if parent_node.job and hasattr(parent_node.job, 'artifacts'):
                aa_dict.update(parent_node.job.artifacts)
        if aa_dict and not is_root_node:
            self.ancestor_artifacts = aa_dict
            self.save(update_fields=['ancestor_artifacts'])
        # process password list
        password_dict = {}
        if '_ansible_no_log' in aa_dict:
            for key in aa_dict:
                if key != '_ansible_no_log':
                    password_dict[key] = REPLACE_STR
        if self.workflow_job.survey_passwords:
            password_dict.update(self.workflow_job.survey_passwords)
        if self.survey_passwords:
            password_dict.update(self.survey_passwords)
        if password_dict:
            data['survey_passwords'] = password_dict
        # process extra_vars
        extra_vars = data.get('extra_vars', {})
        if ujt_obj and isinstance(ujt_obj, (JobTemplate, WorkflowJobTemplate)):
            if aa_dict:
                functional_aa_dict = copy(aa_dict)
                functional_aa_dict.pop('_ansible_no_log', None)
                extra_vars.update(functional_aa_dict)
        if ujt_obj and isinstance(ujt_obj, JobTemplate):
            # Workflow Job extra_vars higher precedence than ancestor artifacts
            if self.workflow_job and self.workflow_job.extra_vars:
                extra_vars.update(self.workflow_job.extra_vars_dict)
        if extra_vars:
            data['extra_vars'] = extra_vars
        # ensure that unified jobs created by WorkflowJobs are marked
        data['_eager_fields'] = {'launch_type': 'workflow'}
        if self.workflow_job and self.workflow_job.created_by:
            data['_eager_fields']['created_by'] = self.workflow_job.created_by
        # Extra processing in the case that this is a slice job
        if 'job_slice' in self.ancestor_artifacts and is_root_node:
            data['_eager_fields']['allow_simultaneous'] = True
            data['_eager_fields'][
                'job_slice_number'] = self.ancestor_artifacts['job_slice']
            data['_eager_fields'][
                'job_slice_count'] = self.workflow_job.workflow_job_nodes.count(
                )
            data['_prevent_slicing'] = True
        return data
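
A toy illustration (assumed values, not AWX code) of the extra_vars precedence
that get_job_kwargs implements above - ancestor artifacts are merged first,
and the workflow job's own extra_vars win on conflict:

ancestor_artifacts = {'region': 'us-east', 'registered_result': 1}
workflow_extra_vars = {'region': 'eu-west'}

extra_vars = {}
extra_vars.update(ancestor_artifacts)    # artifacts applied first
extra_vars.update(workflow_extra_vars)   # workflow job vars override
assert extra_vars == {'region': 'eu-west', 'registered_result': 1}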
Code example #30
class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin,
              CustomVirtualEnvMixin):
    '''
    A project represents a playbook git repo that can access a set of inventories
    '''

    SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
    FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'instance_groups', 'credentials']
    FIELDS_TO_DISCARD_AT_COPY = ['local_path']

    class Meta:
        app_label = 'main'
        ordering = ('id', )

    organization = models.ForeignKey(
        'Organization',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        related_name='projects',
    )
    scm_delete_on_next_update = models.BooleanField(
        default=False,
        editable=False,
    )
    scm_update_on_launch = models.BooleanField(
        default=False,
        help_text=_(
            'Update the project when a job is launched that uses the project.'
        ),
    )
    scm_update_cache_timeout = models.PositiveIntegerField(
        default=0,
        blank=True,
        help_text=_(
            'The number of seconds after the last project update ran that a new '
            'project update will be launched as a job dependency.'),
    )

    scm_revision = models.CharField(
        max_length=1024,
        blank=True,
        default='',
        editable=False,
        verbose_name=_('SCM Revision'),
        help_text=_('The last revision fetched by a project update'),
    )

    playbook_files = JSONField(
        blank=True,
        default=list,
        editable=False,
        verbose_name=_('Playbook Files'),
        help_text=_('List of playbooks found in the project'),
    )

    inventory_files = JSONField(
        blank=True,
        default=list,
        editable=False,
        verbose_name=_('Inventory Files'),
        help_text=
        _('Suggested list of content that could be Ansible inventory in the project'
          ),
    )

    admin_role = ImplicitRoleField(parent_role=[
        'organization.project_admin_role',
        'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
    ])

    use_role = ImplicitRoleField(parent_role='admin_role')

    update_role = ImplicitRoleField(parent_role='admin_role')

    read_role = ImplicitRoleField(parent_role=[
        'organization.auditor_role',
        'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
        'use_role',
        'update_role',
    ])

    @classmethod
    def _get_unified_job_class(cls):
        return ProjectUpdate

    @classmethod
    def _get_unified_job_field_names(cls):
        return set(f.name for f in ProjectOptions._meta.fields) | set(
            ['name', 'description', 'schedule'])

    def save(self, *args, **kwargs):
        new_instance = not bool(self.pk)
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
        skip_update = bool(kwargs.pop('skip_update', False))
        # Check if scm_type or scm_url changes.
        if self.pk:
            project_before = self.__class__.objects.get(pk=self.pk)
            if project_before.scm_type != self.scm_type or project_before.scm_url != self.scm_url:
                self.scm_delete_on_next_update = True
                if 'scm_delete_on_next_update' not in update_fields:
                    update_fields.append('scm_delete_on_next_update')
        # Create auto-generated local path if project uses SCM.
        if self.pk and self.scm_type and not self.local_path.startswith('_'):
            slug_name = slugify(six.text_type(self.name)).replace(u'-', u'_')
            self.local_path = u'_%d__%s' % (int(self.pk), slug_name)
            if 'local_path' not in update_fields:
                update_fields.append('local_path')
        # Do the actual save.
        super(Project, self).save(*args, **kwargs)
        if new_instance:
            update_fields = []
            # Generate local_path for SCM after initial save (so we have a PK).
            if self.scm_type and not self.local_path.startswith('_'):
                update_fields.append('local_path')
            if update_fields:
                from awx.main.signals import disable_activity_stream
                with disable_activity_stream():
                    self.save(update_fields=update_fields)
        # If we just created a new project with SCM, start the initial update.
        if new_instance and self.scm_type and not skip_update:
            self.update()
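
    # Illustration (assumed example values): a project named "My Project"
    # with pk 7 is assigned local_path u'_7__my_project' by the slugify
    # logic above on a subsequent save.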

    def _get_current_status(self):
        if self.scm_type:
            if self.current_job and self.current_job.status:
                return self.current_job.status
            elif not self.last_job:
                return 'never updated'
            # inherit the child job status on failure
            elif self.last_job_failed:
                return self.last_job.status
            # Return the successful status
            else:
                return self.last_job.status
        elif not self.get_project_path():
            return 'missing'
        else:
            return 'ok'

    def _get_last_job_run(self):
        if self.scm_type and self.last_job:
            return self.last_job.finished
        else:
            project_path = self.get_project_path()
            if project_path:
                try:
                    mtime = os.path.getmtime(smart_str(project_path))
                    dt = datetime.datetime.fromtimestamp(mtime)
                    return make_aware(dt, get_default_timezone())
                except os.error:
                    pass

    def _can_update(self):
        return bool(self.scm_type)

    def _update_unified_job_kwargs(self, create_kwargs, kwargs):
        '''
        :param create_kwargs: key-worded arguments to be updated and later used for creating unified job.
        :type create_kwargs: dict
        :param kwargs: request parameters used to override unified job template fields with runtime values.
        :type kwargs: dict
        :return: modified create_kwargs.
        :rtype: dict
        '''
        if self.scm_delete_on_next_update:
            create_kwargs['scm_delete_on_update'] = True
        return create_kwargs

    def create_project_update(self, **kwargs):
        return self.create_unified_job(**kwargs)

    @property
    def cache_timeout_blocked(self):
        if not self.last_job_run:
            return False
        if (self.last_job_run + datetime.timedelta(
                seconds=self.scm_update_cache_timeout)) > now():
            return True
        return False
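
    # Illustration (assumed: scm_type is set and scm_update_on_launch is
    # True): with scm_update_cache_timeout=300 and a last run 120 seconds
    # ago, last_job_run + 300s is still in the future, so
    # cache_timeout_blocked is True and needs_update_on_launch (below) is
    # False - the cached update is considered fresh.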

    @property
    def needs_update_on_launch(self):
        if self.scm_type and self.scm_update_on_launch:
            if not self.last_job_run:
                return True
            if (self.last_job_run + datetime.timedelta(
                    seconds=self.scm_update_cache_timeout)) <= now():
                return True
        return False

    @property
    def notification_templates(self):
        base_notification_templates = NotificationTemplate.objects
        error_notification_templates = list(
            base_notification_templates.filter(
                unifiedjobtemplate_notification_templates_for_errors=self))
        success_notification_templates = list(
            base_notification_templates.filter(
                unifiedjobtemplate_notification_templates_for_success=self))
        any_notification_templates = list(
            base_notification_templates.filter(
                unifiedjobtemplate_notification_templates_for_any=self))
        # Get Organization NotificationTemplates
        if self.organization is not None:
            error_notification_templates = set(
                error_notification_templates + list(
                    base_notification_templates.filter(
                        organization_notification_templates_for_errors=self.
                        organization)))
            success_notification_templates = set(
                success_notification_templates + list(
                    base_notification_templates.filter(
                        organization_notification_templates_for_success=self.
                        organization)))
            any_notification_templates = set(any_notification_templates + list(
                base_notification_templates.filter(
                    organization_notification_templates_for_any=self.
                    organization)))
        return dict(error=list(error_notification_templates),
                    success=list(success_notification_templates),
                    any=list(any_notification_templates))

    def get_absolute_url(self, request=None):
        return reverse('api:project_detail',
                       kwargs={'pk': self.pk},
                       request=request)