def test_credential_migration(old_enc, new_enc, value):
    with mock.patch('awx.main.models.credential.encrypt_field', encrypt_field):
        cred_type = ssh()
        cred_type.save()

        cred = Credential.objects.create(credential_type=cred_type,
                                         inputs=dict(password=value))

    assert cred.password.startswith(old_enc)

    _credentials(apps)
    cred.refresh_from_db()

    assert cred.password.startswith(new_enc)
    assert decrypt_field(cred, 'password') == value

    # Running the migration a second time is here for its side effect: it
    # raises an exception if already-migrated AESCBC values are not properly
    # skipped, and it ensures our `startswith` calls don't have typos.
    _credentials(apps)
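The `old_enc`, `new_enc`, and `value` arguments come from a pytest parametrization that this snippet does not show. A minimal sketch of what it might look like (the exact prefixes and sample values are assumptions, not the project's actual parameters):

import pytest

# Hypothetical parametrization: each case pairs the prefix written by the old
# encrypt_field with the prefix expected after the migration, plus a sample
# plaintext to round-trip.
@pytest.mark.parametrize('old_enc, new_enc, value', [
    ('$encrypted$AES$', '$encrypted$AESCBC$', 'secret'),
    ('$encrypted$UTF8$AES$', '$encrypted$UTF8$AESCBC$', 'Iñtërnâtiônàlizætiøn'),
])
def test_credential_migration(old_enc, new_enc, value):
    ...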
Example #2
 def send(self, subject, body):
     for field in filter(lambda x: self.notification_class.init_parameters[x]['type'] == "password",
                         self.notification_class.init_parameters):
         if field in self.notification_configuration:
             self.notification_configuration[field] = decrypt_field(self,
                                                                    'notification_configuration',
                                                                    subfield=field)
     recipients = self.notification_configuration.pop(self.notification_class.recipient_parameter)
     if not isinstance(recipients, list):
         recipients = [recipients]
     sender = self.notification_configuration.pop(self.notification_class.sender_parameter, None)
     notification_configuration = deepcopy(self.notification_configuration)
     for field, params in self.notification_class.init_parameters.items():
         if field not in notification_configuration:
             if 'default' in params:
                 notification_configuration[field] = params['default']
     backend_obj = self.notification_class(**notification_configuration)
     notification_obj = EmailMessage(subject, backend_obj.format_body(body), sender, recipients)
     with set_environ(**settings.AWX_TASK_ENV):
         return backend_obj.send_messages([notification_obj])
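For secrets stored inside a dict-valued column, `decrypt_field` accepts a `subfield` argument naming the key to decrypt, which is how the loop above handles password-typed notification parameters. A minimal sketch of the two call shapes (the objects are illustrative):

# Decrypt a whole encrypted attribute:
password = decrypt_field(credential, 'password')
# Decrypt a single key inside a dict-valued attribute:
token = decrypt_field(notification_template, 'notification_configuration',
                      subfield='token')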
Example #3
def test_credential_update_with_prior(organization_factory,
                                      credentialtype_ssh):
    org = organization_factory('test').organization
    cred = Credential(credential_type=credentialtype_ssh,
                      name="Bob's Credential",
                      inputs={'password': 'testing123'},
                      organization=org)
    cred.save()

    assert Credential.objects.count() == 1
    cred = Credential.objects.all()[:1].get()
    cred.inputs['username'] = 'joe'
    # the $encrypted$ sentinel tells save() to keep the previously stored secret
    cred.inputs['password'] = '$encrypted$'
    cred.save()

    assert Credential.objects.count() == 1
    cred = Credential.objects.all()[:1].get()
    assert cred.inputs['username'] == 'joe'
    assert cred.inputs['password'].startswith('$encrypted$')
    assert decrypt_field(cred, 'password') == 'testing123'
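The update step works because of the `$encrypted$` sentinel: when a caller writes `$encrypted$` back into a secret input, the save path keeps the previously stored ciphertext instead of encrypting the literal sentinel, which is why the decrypted password still matches the original `testing123`.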
Example #4
def test_vmware_create_ok(post, organization, admin):
    params = {
        'credential_type': 1,
        'name': 'Best credential ever',
        'inputs': {
            'host': 'some_host',
            'username': 'some_username',
            'password': 'some_password'
        }
    }
    vmware = CredentialType.defaults['vmware']()
    vmware.save()
    params['organization'] = organization.id
    response = post(reverse('api:credential_list'), params, admin)
    assert response.status_code == 201

    assert Credential.objects.count() == 1
    cred = Credential.objects.all()[:1].get()
    assert cred.inputs['host'] == 'some_host'
    assert cred.inputs['username'] == 'some_username'
    assert decrypt_field(cred, 'password') == 'some_password'
Example #5
def test_gce_create_ok(post, organization, admin):
    params = {
        'credential_type': 1,
        'name': 'Best credential ever',
        'inputs': {
            'username': 'some_username',
            'project': 'some_project',
            'ssh_key_data': EXAMPLE_PRIVATE_KEY,
        },
    }
    gce = CredentialType.defaults['gce']()
    gce.save()
    params['organization'] = organization.id
    response = post(reverse('api:credential_list'), params, admin)
    assert response.status_code == 201

    assert Credential.objects.count() == 1
    cred = Credential.objects.all()[:1].get()
    assert cred.inputs['username'] == 'some_username'
    assert cred.inputs['project'] == 'some_project'
    assert decrypt_field(cred, 'ssh_key_data') == EXAMPLE_PRIVATE_KEY
Example #6
    def generate_dependencies(self, undeped_tasks):
        created_dependencies = []
        for task in undeped_tasks:
            task.log_lifecycle("acknowledged")
            dependencies = []
            if type(task) is not Job:
                continue
            # TODO: Can remove task.project None check after scan-job-default-playbook is removed
            if task.project is not None and task.project.scm_update_on_launch is True:
                latest_project_update = self.get_latest_project_update(task)
                if self.should_update_related_project(task, latest_project_update):
                    project_task = self.create_project_update(task)
                    created_dependencies.append(project_task)
                    dependencies.append(project_task)
                else:
                    dependencies.append(latest_project_update)

            # Inventory created 2 seconds behind job
            try:
                start_args = json.loads(decrypt_field(task, field_name="start_args"))
            except ValueError:
                start_args = dict()
            for inventory_source in [invsrc for invsrc in self.all_inventory_sources if invsrc.inventory == task.inventory]:
                if "inventory_sources_already_updated" in start_args and inventory_source.id in start_args['inventory_sources_already_updated']:
                    continue
                if not inventory_source.update_on_launch:
                    continue
                latest_inventory_update = self.get_latest_inventory_update(inventory_source)
                if self.should_update_inventory_source(task, latest_inventory_update):
                    inventory_task = self.create_inventory_update(task, inventory_source)
                    created_dependencies.append(inventory_task)
                    dependencies.append(inventory_task)
                else:
                    dependencies.append(latest_inventory_update)

            if len(dependencies) > 0:
                self.capture_chain_failure_dependencies(task, dependencies)

        UnifiedJob.objects.filter(pk__in=[task.pk for task in undeped_tasks]).update(dependencies_processed=True)
        return created_dependencies
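The decrypted `start_args` payload may carry a list of inventory source ids that were already refreshed at launch time; a sketch of the shape the loop above expects (the ids are illustrative):

# Illustrative decrypted start_args; matching inventory sources are skipped.
start_args = {
    'inventory_sources_already_updated': [42, 43],
}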
Example #7
def test_notification_template_migration():
    # Doesn't get tagged as UTF8 because the internal save call explicitly sets skip_utf8=True
    with mock.patch('awx.main.models.notifications.encrypt_field',
                    encrypt_field):
        nt = NotificationTemplate.objects.create(
            notification_type='slack',
            notification_configuration=dict(token='test'))

    assert nt.notification_configuration['token'].startswith('$encrypted$AES$')

    _notification_templates(apps)
    nt.refresh_from_db()

    assert nt.notification_configuration['token'].startswith(
        '$encrypted$AESCBC$')
    assert decrypt_field(nt, 'notification_configuration',
                         subfield='token') == 'test'

    # Running the migration a second time is here for its side effect: it
    # raises an exception if already-migrated AESCBC values are not properly
    # skipped, and it ensures our `startswith` calls don't have typos.
    _notification_templates(apps)
Example #8
def test_secret_encryption_on_create(get, post, organization, admin, credentialtype_ssh):
    params = {
        'credential_type': 1,
        'inputs': {
            'username': 'joe',
            'password': 'secret',
        },
        'name': 'Best credential ever',
        'organization': organization.id,
    }
    response = post(reverse('api:credential_list'), params, admin)
    assert response.status_code == 201

    response = get(reverse('api:credential_list'), admin)
    assert response.status_code == 200
    assert response.data['count'] == 1
    cred = response.data['results'][0]
    assert cred['inputs']['username'] == 'joe'
    assert cred['inputs']['password'] == '$encrypted$'

    cred = Credential.objects.all()[:1].get()
    assert cred.inputs['password'].startswith('$encrypted$UTF8$AES')
    assert decrypt_field(cred, 'password') == 'secret'
Example #9
    def get_input(self, field_name, **kwargs):
        """
        Get an injectable and decrypted value for an input field.

        Retrieves the value for a given credential input field name. Return
        values for secret input fields are decrypted. If the credential doesn't
        have an input value defined for the given field name, an AttributeError
        is raised unless a default value is provided.

        :param field_name: The name of the input field (str).
        :param default: An optional default value to return if the input is
                        not defined.
        """
        if field_name in self.credential_type.secret_fields:
            try:
                return decrypt_field(self, field_name)
            except AttributeError:
                if 'default' in kwargs:
                    return kwargs['default']
                raise  # re-raise the original AttributeError
        if field_name in self.inputs:
            return self.inputs[field_name]
        if 'default' in kwargs:
            return kwargs['default']
        raise AttributeError(field_name)
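A usage sketch for `get_input` (the `cred` object and field names are illustrative):

# Secret fields come back decrypted; passing a default suppresses the
# AttributeError for inputs the credential does not define.
password = cred.get_input('password')
become_password = cred.get_input('become_password', default='')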
Example #10
 def _get_local(self, name, validate=True):
     self._preload_cache()
     cache_key = Setting.get_cache_key(name)
     try:
         cache_value = self.cache.get(cache_key, default=empty)
     except ValueError:
         cache_value = empty
     if cache_value == SETTING_CACHE_NOTSET:
         value = empty
     elif cache_value == SETTING_CACHE_NONE:
         value = None
     elif cache_value == SETTING_CACHE_EMPTY_LIST:
         value = []
     elif cache_value == SETTING_CACHE_EMPTY_DICT:
         value = {}
     else:
         value = cache_value
     field = self.registry.get_setting_field(name)
     if value is empty:
         setting = None
         setting_id = None
         # this value is read-only, however we *do* want to fetch its value from the database
         if not field.read_only or name == 'INSTALL_UUID':
             setting = Setting.objects.filter(
                 key=name, user__isnull=True).order_by('pk').first()
         if setting:
             if getattr(field, 'encrypted', False):
                 value = decrypt_field(setting, 'value')
                 setting_id = setting.id
             else:
                 value = setting.value
         else:
             value = SETTING_CACHE_NOTSET
             if SETTING_CACHE_DEFAULTS:
                 try:
                     value = field.get_default()
                     if getattr(field, 'encrypted', False):
                         setting_id = SETTING_CACHE_NONE
                 except SkipField:
                     pass
         # If None implies not set, convert when reading the value.
         if value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
             value = SETTING_CACHE_NOTSET
         if cache_value != value:
             if setting_id:
                 logger.debug('Saving id in cache for encrypted setting %s',
                              cache_key)
                 self.cache.cache.set(Setting.get_cache_id_key(cache_key),
                                      setting_id)
             self.cache.set(cache_key,
                            get_cache_value(value),
                            timeout=SETTING_CACHE_TIMEOUT)
     if value == SETTING_CACHE_NOTSET and not SETTING_CACHE_DEFAULTS:
         try:
             value = field.get_default()
         except SkipField:
             pass
     if value not in (empty, SETTING_CACHE_NOTSET):
         try:
             if field.read_only:
                 internal_value = field.to_internal_value(value)
                 field.run_validators(internal_value)
                 return internal_value
             else:
                 if validate:
                     return field.run_validation(value)
                 else:
                     return value
         except Exception:
             logger.warning(
                 'The current value "%r" for setting "%s" is invalid.',
                 value,
                 name,
                 exc_info=True)
     return empty
Example #11
 def _preload_cache(self):
     # Ensure we're only modifying local preload timeout from one thread.
     with self._awx_conf_preload_lock:
         # If local preload timeout has not expired, skip preloading.
         if self._awx_conf_preload_expires and self._awx_conf_preload_expires > time.time():
             return
         # Otherwise update local preload timeout.
         self.__dict__['_awx_conf_preload_expires'] = time.time() + SETTING_CACHE_TIMEOUT
         # Check for any settings that have been defined in Python files and
         # make those read-only to avoid overriding in the database.
         if not self._awx_conf_init_readonly:
             defaults_snapshot = self._get_default('DEFAULTS_SNAPSHOT')
             for key in get_writeable_settings(self.registry):
                 init_default = defaults_snapshot.get(key, None)
                 try:
                     file_default = self._get_default(key)
                 except AttributeError:
                     file_default = None
                 if file_default != init_default and file_default is not None:
                     logger.debug('Setting %s has been marked read-only!',
                                  key)
                     self.registry._registry[key]['read_only'] = True
                     self.registry._registry[key]['defined_in_file'] = True
             self.__dict__['_awx_conf_init_readonly'] = True
     # If local preload timer has expired, check to see if another process
     # has already preloaded the cache and skip preloading if so.
     if self.cache.get('_awx_conf_preload_expires',
                       default=empty) is not empty:
         return
     # Initialize all database-configurable settings with a marker value to
     # indicate, straight from the cache, that a setting is not configured,
     # without requiring a database lookup.
     settings_to_cache = get_settings_to_cache(self.registry)
     setting_ids = {}
     # Load all settings defined in the database.
     for setting in Setting.objects.filter(
             key__in=settings_to_cache.keys(),
             user__isnull=True).order_by('pk'):
         if settings_to_cache[setting.key] != SETTING_CACHE_NOTSET:
             continue
         if self.registry.is_setting_encrypted(setting.key):
             setting_ids[setting.key] = setting.id
             value = decrypt_field(setting, 'value')
         else:
             value = setting.value
         settings_to_cache[setting.key] = get_cache_value(value)
     # Load field default value for any settings not found in the database.
     if SETTING_CACHE_DEFAULTS:
         for key, value in settings_to_cache.items():
             if value != SETTING_CACHE_NOTSET:
                 continue
             field = self.registry.get_setting_field(key)
             try:
                 settings_to_cache[key] = get_cache_value(
                     field.get_default())
                 if self.registry.is_setting_encrypted(key):
                     # No database pk, so None will be passed to encryption algorithm
                     setting_ids[key] = SETTING_CACHE_NOTSET
             except SkipField:
                 pass
     # Generate a cache key for each setting and store them all at once.
     settings_to_cache = {Setting.get_cache_key(k): v
                          for k, v in settings_to_cache.items()}
     for k, id_val in setting_ids.items():
         logger.debug('Saving id in cache for encrypted setting %s, %s',
                      Setting.get_cache_id_key(k), id_val)
         self.cache.cache.set(Setting.get_cache_id_key(k), id_val)
     settings_to_cache[
         '_awx_conf_preload_expires'] = self._awx_conf_preload_expires
     self.cache.set_many(settings_to_cache, timeout=SETTING_CACHE_TIMEOUT)
Example #12
    def spawn_workflow_graph_jobs(self):
        result = []
        for workflow_job in self.all_tasks:
            if self.timed_out():
                logger.warning(
                    "Workflow manager has reached time out while processing running workflows, exiting loop early"
                )
                ScheduleWorkflowManager().schedule()
                # Do not process any more workflow jobs. Stop here.
                # Maybe we should schedule another WorkflowManager run
                break
            dag = WorkflowDAG(workflow_job)
            status_changed = False
            if workflow_job.cancel_flag:
                workflow_job.workflow_nodes.filter(
                    do_not_run=False, job__isnull=True).update(do_not_run=True)
                logger.debug(
                    'Canceling spawned jobs of %s due to cancel flag.',
                    workflow_job.log_format)
                cancel_finished = dag.cancel_node_jobs()
                if cancel_finished:
                    logger.info(
                        'Marking %s as canceled, all spawned jobs have concluded.',
                        workflow_job.log_format)
                    workflow_job.status = 'canceled'
                    workflow_job.start_args = ''  # blank field to remove encrypted passwords
                    workflow_job.save(update_fields=['status', 'start_args'])
                    status_changed = True
            else:
                workflow_nodes = dag.mark_dnr_nodes()
                WorkflowJobNode.objects.bulk_update(workflow_nodes,
                                                    ['do_not_run'])
                # If workflow is now done, we do special things to mark it as done.
                is_done = dag.is_workflow_done()
                if is_done:
                    has_failed, reason = dag.has_workflow_failed()
                    logger.debug('Marking %s as %s.', workflow_job.log_format,
                                 'failed' if has_failed else 'successful')
                    result.append(workflow_job.id)
                    new_status = 'failed' if has_failed else 'successful'
                    logger.debug("Transitioning {} to {} status.".format(
                        workflow_job.log_format, new_status))
                    update_fields = ['status', 'start_args']
                    workflow_job.status = new_status
                    if reason:
                        logger.info(
                            f'Workflow job {workflow_job.id} failed due to reason: {reason}'
                        )
                        workflow_job.job_explanation = gettext_noop(
                            "No error handling paths found, marking workflow as failed"
                        )
                        update_fields.append('job_explanation')
                    workflow_job.start_args = ''  # blank field to remove encrypted passwords
                    workflow_job.save(update_fields=update_fields)
                    status_changed = True

            if status_changed:
                if workflow_job.spawned_by_workflow:
                    ScheduleWorkflowManager().schedule()
                workflow_job.websocket_emit_status(workflow_job.status)
                # Operations whose queries rely on modifications made during the atomic scheduling session
                workflow_job.send_notification_templates(
                    'succeeded' if workflow_job.status ==
                    'successful' else 'failed')

            if workflow_job.status == 'running':
                spawn_nodes = dag.bfs_nodes_to_run()
                if spawn_nodes:
                    logger.debug('Spawning jobs for %s',
                                 workflow_job.log_format)
                else:
                    logger.debug('No nodes to spawn for %s',
                                 workflow_job.log_format)
                for spawn_node in spawn_nodes:
                    if spawn_node.unified_job_template is None:
                        continue
                    kv = spawn_node.get_job_kwargs()
                    job = spawn_node.unified_job_template.create_unified_job(
                        **kv)
                    spawn_node.job = job
                    spawn_node.save()
                    logger.debug('Spawned %s in %s for node %s',
                                 job.log_format, workflow_job.log_format,
                                 spawn_node.pk)
                    can_start = True
                    if isinstance(spawn_node.unified_job_template,
                                  WorkflowJobTemplate):
                        workflow_ancestors = job.get_ancestor_workflows()
                        if spawn_node.unified_job_template in set(
                                workflow_ancestors):
                            can_start = False
                            logger.info(
                                'Refusing to start recursive workflow-in-workflow id={}, wfjt={}, ancestors={}'
                                .format(job.id,
                                        spawn_node.unified_job_template.pk,
                                        [wa.pk for wa in workflow_ancestors]))
                            display_list = [spawn_node.unified_job_template
                                            ] + workflow_ancestors
                            job.job_explanation = gettext_noop(
                                "Workflow Job spawned from workflow could not start because it "
                                "would result in recursion (spawn order, most recent first: {})"
                            ).format(', '.join('<{}>'.format(tmp)
                                               for tmp in display_list))
                        else:
                            logger.debug(
                                'Starting workflow-in-workflow id={}, wfjt={}, ancestors={}'
                                .format(job.id,
                                        spawn_node.unified_job_template.pk,
                                        [wa.pk for wa in workflow_ancestors]))
                    if not job._resources_sufficient_for_launch():
                        can_start = False
                        job.job_explanation = gettext_noop(
                            "Job spawned from workflow could not start because it was missing a related resource such as project or inventory"
                        )
                    if can_start:
                        if workflow_job.start_args:
                            start_args = json.loads(
                                decrypt_field(workflow_job, 'start_args'))
                        else:
                            start_args = {}
                        can_start = job.signal_start(**start_args)
                        if not can_start:
                            job.job_explanation = gettext_noop(
                                "Job spawned from workflow could not start because it was not in the right state or required manual credentials"
                            )
                    if not can_start:
                        job.status = 'failed'
                        job.save(update_fields=['status', 'job_explanation'])
                        job.websocket_emit_status('failed')

                    # TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
                    # emit_websocket_notification('/socket.io/jobs', '', dict(id=))

        return result
Example #13
    def validate(self, value, model_instance):
        # decrypt secret values so we can validate their contents (i.e.,
        # ssh_key_data format)

        if not isinstance(value, dict):
            return super(CredentialInputField,
                         self).validate(value, model_instance)

        # Backwards compatibility: in prior versions, submitting `null` for a
        # credential field value was treated as an empty string
        for unset in [
                key for key, v in model_instance.inputs.items() if not v
        ]:
            default_value = model_instance.credential_type.default_for_field(
                unset)
            if default_value is not None:
                model_instance.inputs[unset] = default_value

        decrypted_values = {}
        for k, v in value.items():
            if all([
                    k in model_instance.credential_type.secret_fields,
                    v != '$encrypted$', model_instance.pk
            ]):
                if not isinstance(getattr(model_instance, k),
                                  six.string_types):
                    raise django_exceptions.ValidationError(
                        _('secret values must be of type string, not {}').
                        format(type(v).__name__),
                        code='invalid',
                        params={'value': v},
                    )
                decrypted_values[k] = utils.decrypt_field(model_instance, k)
            else:
                decrypted_values[k] = v

        super(JSONSchemaField, self).validate(decrypted_values, model_instance)
        errors = {}
        for error in Draft4Validator(
                self.schema(model_instance),
                format_checker=self.format_checker).iter_errors(
                    decrypted_values):
            if error.validator == 'pattern' and 'error' in error.schema:
                error.message = six.text_type(
                    error.schema['error']).format(instance=error.instance)
            if error.validator == 'dependencies':
                # replace the default error messaging w/ a better i18n string
                # I wish there was a better way to determine the parameters of
                # this validation failure, but the exception jsonschema raises
                # doesn't include them as attributes (just a hard-coded error
                # string)
                match = re.search(
                    # 'foo' is a dependency of 'bar'
                    r"'"  # apostrophe
                    r"([^']+)"  # one or more non-apostrophes (first group)
                    r"'[\w ]+'"  # closing quote, words/spaces, opening quote
                    r"([^']+)",  # one or more non-apostrophes (second group)
                    error.message,
                )
                if match:
                    label, extraneous = match.groups()
                    if error.schema['properties'].get(label):
                        label = error.schema['properties'][label]['label']
                    errors[extraneous] = [
                        _('cannot be set unless "%s" is set') % label
                    ]
                    continue
            if 'id' not in error.schema:
                # If the error is not for a specific field, it's specific to
                # `inputs` in general
                raise django_exceptions.ValidationError(
                    error.message,
                    code='invalid',
                    params={'value': value},
                )
            errors[error.schema['id']] = [error.message]

        inputs = model_instance.credential_type.inputs
        for field in inputs.get('required', []):
            if not value.get(field, None):
                errors[field] = [
                    _('required for %s') %
                    (model_instance.credential_type.name)
                ]

        # `ssh_key_unlock` requirements are very specific and can't be
        # represented without complicated JSON schema
        if (model_instance.credential_type.managed_by_tower is True
                and 'ssh_key_unlock'
                in model_instance.credential_type.defined_fields):

            # in order to properly test the necessity of `ssh_key_unlock`, we
            # need to know the real value of `ssh_key_data`; for a payload like:
            # {
            #   'ssh_key_data': '$encrypted$',
            #   'ssh_key_unlock': 'do-you-need-me?',
            # }
            # ...we have to fetch the actual key value from the database
            if model_instance.pk and model_instance.ssh_key_data == '$encrypted$':
                model_instance.ssh_key_data = model_instance.__class__.objects.get(
                    pk=model_instance.pk).ssh_key_data

            if model_instance.has_encrypted_ssh_key_data and not value.get(
                    'ssh_key_unlock'):
                errors['ssh_key_unlock'] = [
                    _('must be set when SSH key is encrypted.')
                ]
            if all([
                    model_instance.ssh_key_data,
                    value.get('ssh_key_unlock'),
                    not model_instance.has_encrypted_ssh_key_data
            ]):
                errors['ssh_key_unlock'] = [
                    _('should not be set when SSH key is not encrypted.')
                ]

        if errors:
            raise serializers.ValidationError({'inputs': errors})
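The regular expression above pulls the two quoted field names out of jsonschema's hard-coded message, for example:

import re

msg = "'foo' is a dependency of 'bar'"
label, extraneous = re.search(r"'([^']+)'[\w ]+'([^']+)", msg).groups()
# label == 'foo', extraneous == 'bar'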
Example #14
 def spawn_workflow_graph_jobs(self, workflow_jobs):
     for workflow_job in workflow_jobs:
         if workflow_job.cancel_flag:
             logger.debug(
                 'Not spawning jobs for %s because it is pending cancellation.',
                 workflow_job.log_format)
             continue
         dag = WorkflowDAG(workflow_job)
         spawn_nodes = dag.bfs_nodes_to_run()
         if spawn_nodes:
             logger.debug('Spawning jobs for %s', workflow_job.log_format)
         else:
             logger.debug('No nodes to spawn for %s',
                          workflow_job.log_format)
         for spawn_node in spawn_nodes:
             if spawn_node.unified_job_template is None:
                 continue
             kv = spawn_node.get_job_kwargs()
             job = spawn_node.unified_job_template.create_unified_job(**kv)
             spawn_node.job = job
             spawn_node.save()
             logger.debug('Spawned %s in %s for node %s', job.log_format,
                          workflow_job.log_format, spawn_node.pk)
             can_start = True
             if isinstance(spawn_node.unified_job_template,
                           WorkflowJobTemplate):
                 workflow_ancestors = job.get_ancestor_workflows()
                 if spawn_node.unified_job_template in set(
                         workflow_ancestors):
                     can_start = False
                     logger.info(
                         'Refusing to start recursive workflow-in-workflow id={}, wfjt={}, ancestors={}'
                         .format(job.id, spawn_node.unified_job_template.pk,
                                 [wa.pk for wa in workflow_ancestors]))
                     display_list = [spawn_node.unified_job_template
                                     ] + workflow_ancestors
                     job.job_explanation = gettext_noop(
                         "Workflow Job spawned from workflow could not start because it "
                         "would result in recursion (spawn order, most recent first: {})"
                     ).format(', '.join(
                         ['<{}>'.format(tmp) for tmp in display_list]))
                 else:
                     logger.debug(
                         'Starting workflow-in-workflow id={}, wfjt={}, ancestors={}'
                         .format(job.id, spawn_node.unified_job_template.pk,
                                 [wa.pk for wa in workflow_ancestors]))
             if not job._resources_sufficient_for_launch():
                 can_start = False
                 job.job_explanation = gettext_noop(
                     "Job spawned from workflow could not start because it "
                     "was missing a related resource such as project or inventory"
                 )
             if can_start:
                 if workflow_job.start_args:
                     start_args = json.loads(
                         decrypt_field(workflow_job, 'start_args'))
                 else:
                     start_args = {}
                 can_start = job.signal_start(**start_args)
                 if not can_start:
                     job.job_explanation = gettext_noop(
                         "Job spawned from workflow could not start because it "
                         "was not in the right state or required manual credentials"
                     )
             if not can_start:
                 job.status = 'failed'
                 job.save(update_fields=['status', 'job_explanation'])
                 job.websocket_emit_status('failed')
Example #15
def aws(cred, env, private_data_dir):
    env['AWS_ACCESS_KEY_ID'] = cred.username
    env['AWS_SECRET_ACCESS_KEY'] = decrypt_field(cred, 'password')
    if len(cred.security_token) > 0:
        env['AWS_SECURITY_TOKEN'] = decrypt_field(cred, 'security_token')
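A usage sketch for the injector above (the `cred` object is illustrative):

env = {}
aws(cred, env, private_data_dir)
# env now holds AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and, when the
# credential carries a security token, AWS_SECURITY_TOKEN in decrypted form.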
Example #16
def vmware(cred, env, private_data_dir):
    env['VMWARE_USER'] = cred.username
    env['VMWARE_PASSWORD'] = decrypt_field(cred, 'password')
    env['VMWARE_HOST'] = cred.host
    env['VMWARE_VALIDATE_CERTS'] = str(settings.VMWARE_VALIDATE_CERTS)
Example #17
    def inject_credential(self, credential, env, safe_env, args, safe_args,
                          private_data_dir):
        """
        Inject credential data into the environment variables and arguments
        passed to `ansible-playbook`

        :param credential:       a :class:`awx.main.models.Credential` instance
        :param env:              a dictionary of environment variables used in
                                 the `ansible-playbook` call.  This method adds
                                 additional environment variables based on
                                 custom `env` injectors defined on this
                                 CredentialType.
        :param safe_env:         a dictionary of environment variables stored
                                 in the database for the job run
                                 (`UnifiedJob.job_env`); secret values should
                                 be stripped
        :param args:             a list of arguments passed to
                                 `ansible-playbook` in the style of
                                 `subprocess.call(args)`.  This method appends
                                 additional arguments based on custom
                                 `extra_vars` injectors defined on this
                                 CredentialType.
        :param safe_args:        a list of arguments stored in the database for
                                 the job run (`UnifiedJob.job_args`); secret
                                 values should be stripped
        :param private_data_dir: a temporary directory to store files generated
                                 by `file` injectors (like config files or key
                                 files)
        """
        if not self.injectors:
            return

        class TowerNamespace:
            filename = None

        tower_namespace = TowerNamespace()

        # maintain a normal namespace for building the ansible-playbook arguments (env and args)
        namespace = {'tower': tower_namespace}

        # maintain a sanitized namespace for building the DB-stored arguments (safe_env and safe_args)
        safe_namespace = {'tower': tower_namespace}

        # build a normal namespace with secret values decrypted (for
        # ansible-playbook) and a safe namespace with secret values hidden (for
        # DB storage)
        for field_name, value in credential.inputs.items():

            if type(value) is bool:
                # boolean values can't be secret/encrypted
                safe_namespace[field_name] = namespace[field_name] = value
                continue

            if field_name in self.secret_fields:
                value = decrypt_field(credential, field_name)
                safe_namespace[field_name] = '**********'
            elif len(value):
                safe_namespace[field_name] = value
            if len(value):
                namespace[field_name] = value

        file_tmpl = self.injectors.get('file', {}).get('template')
        if file_tmpl is not None:
            # If a file template is provided, render the file and update the
            # special `tower` template namespace so the filename can be
            # referenced in other injectors
            data = Template(file_tmpl).render(**namespace)
            _, path = tempfile.mkstemp(dir=private_data_dir)
            with open(path, 'w') as f:
                f.write(data)
            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
            namespace['tower'].filename = path

        for env_var, tmpl in self.injectors.get('env', {}).items():
            if env_var.startswith('ANSIBLE_') or env_var in self.ENV_BLACKLIST:
                continue
            env[env_var] = Template(tmpl).render(**namespace)
            safe_env[env_var] = Template(tmpl).render(**safe_namespace)

        extra_vars = {}
        safe_extra_vars = {}
        for var_name, tmpl in self.injectors.get('extra_vars', {}).items():
            extra_vars[var_name] = Template(tmpl).render(**namespace)
            safe_extra_vars[var_name] = Template(tmpl).render(**safe_namespace)

        if extra_vars:
            args.extend(['-e', json.dumps(extra_vars)])

        if safe_extra_vars:
            safe_args.extend(['-e', json.dumps(safe_extra_vars)])
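The `injectors` dictionary consumed above is defined on the CredentialType. A hedged sketch of a custom definition this version of the method could process (the field and variable names are assumptions):

injectors = {
    'file': {
        # rendered into a temp file; its path becomes tower.filename
        'template': '[secrets]\napi_token={{ api_token }}\n',
    },
    'env': {
        # env templates may reference the rendered file via the tower namespace
        'MY_APP_CONFIG_PATH': '{{ tower.filename }}',
    },
    'extra_vars': {
        'api_user': '{{ username }}',
    },
}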
Example #18
    def inject_credential(self, credential, env, safe_env, args, safe_args,
                          private_data_dir):
        """
        Inject credential data into the environment variables and arguments
        passed to `ansible-playbook`

        :param credential:       a :class:`awx.main.models.Credential` instance
        :param env:              a dictionary of environment variables used in
                                 the `ansible-playbook` call.  This method adds
                                 additional environment variables based on
                                 custom `env` injectors defined on this
                                 CredentialType.
        :param safe_env:         a dictionary of environment variables stored
                                 in the database for the job run
                                 (`UnifiedJob.job_env`); secret values should
                                 be stripped
        :param args:             a list of arguments passed to
                                 `ansible-playbook` in the style of
                                 `subprocess.call(args)`.  This method appends
                                 additional arguments based on custom
                                 `extra_vars` injectors defined on this
                                 CredentialType.
        :param safe_args:        a list of arguments stored in the database for
                                 the job run (`UnifiedJob.job_args`); secret
                                 values should be stripped
        :param private_data_dir: a temporary directory to store files generated
                                 by `file` injectors (like config files or key
                                 files)
        """
        if not self.injectors:
            if self.managed_by_tower and credential.kind in dir(
                    builtin_injectors):
                injected_env = {}
                getattr(builtin_injectors, credential.kind)(credential,
                                                            injected_env)
                env.update(injected_env)
                safe_env.update(build_safe_env(injected_env))
            return

        class TowerNamespace:
            pass

        tower_namespace = TowerNamespace()

        # maintain a normal namespace for building the ansible-playbook arguments (env and args)
        namespace = {'tower': tower_namespace}

        # maintain a sanitized namespace for building the DB-stored arguments (safe_env and safe_args)
        safe_namespace = {'tower': tower_namespace}

        # build a normal namespace with secret values decrypted (for
        # ansible-playbook) and a safe namespace with secret values hidden (for
        # DB storage)
        for field_name, value in credential.inputs.items():

            if type(value) is bool:
                # boolean values can't be secret/encrypted
                safe_namespace[field_name] = namespace[field_name] = value
                continue

            if field_name in self.secret_fields:
                value = decrypt_field(credential, field_name)
                safe_namespace[field_name] = '**********'
            elif len(value):
                safe_namespace[field_name] = value
            if len(value):
                namespace[field_name] = value

        file_tmpls = self.injectors.get('file', {})
        # If any file templates are provided, render the files and update the
        # special `tower` template namespace so the filename can be
        # referenced in other injectors
        for file_label, file_tmpl in file_tmpls.items():
            data = Template(file_tmpl).render(**namespace)
            _, path = tempfile.mkstemp(dir=private_data_dir)
            with open(path, 'w') as f:
                f.write(data)
            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)

            # determine if filename indicates single file or many
            if file_label.find('.') == -1:
                tower_namespace.filename = path
            else:
                if not hasattr(tower_namespace, 'filename'):
                    tower_namespace.filename = TowerNamespace()
                file_label = file_label.split('.')[1]
                setattr(tower_namespace.filename, file_label, path)

        for env_var, tmpl in self.injectors.get('env', {}).items():
            if env_var.startswith('ANSIBLE_') or env_var in self.ENV_BLACKLIST:
                continue
            env[env_var] = Template(tmpl).render(**namespace)
            safe_env[env_var] = Template(tmpl).render(**safe_namespace)

        extra_vars = {}
        safe_extra_vars = {}
        for var_name, tmpl in self.injectors.get('extra_vars', {}).items():
            extra_vars[var_name] = Template(tmpl).render(**namespace)
            safe_extra_vars[var_name] = Template(tmpl).render(**safe_namespace)

        def build_extra_vars_file(vars, private_dir):
            handle, path = tempfile.mkstemp(dir=private_dir)
            f = os.fdopen(handle, 'w')
            f.write(json.dumps(vars))
            f.close()
            os.chmod(path, stat.S_IRUSR)
            return path

        if extra_vars:
            path = build_extra_vars_file(extra_vars, private_data_dir)
            args.extend(['-e', '@%s' % path])

        if safe_extra_vars:
            path = build_extra_vars_file(safe_extra_vars, private_data_dir)
            safe_args.extend(['-e', '@%s' % path])
Example #19
    def inject_credential(self, credential, env, safe_env, args, safe_args,
                          private_data_dir):
        """
        Inject credential data into the environment variables and arguments
        passed to `ansible-playbook`

        :param credential:       a :class:`awx.main.models.Credential` instance
        :param env:              a dictionary of environment variables used in
                                 the `ansible-playbook` call.  This method adds
                                 additional environment variables based on
                                 custom `env` injectors defined on this
                                 CredentialType.
        :param safe_env:         a dictionary of environment variables stored
                                 in the database for the job run
                                 (`UnifiedJob.job_env`); secret values should
                                 be stripped
        :param args:             a list of arguments passed to
                                 `ansible-playbook` in the style of
                                 `subprocess.call(args)`.  This method appends
                                 additional arguments based on custom
                                 `extra_vars` injectors defined on this
                                 CredentialType.
        :param safe_args:        a list of arguments stored in the database for
                                 the job run (`UnifiedJob.job_args`); secret
                                 values should be stripped
        :param private_data_dir: a temporary directory to store files generated
                                 by `file` injectors (like config files or key
                                 files)
        """
        if not self.injectors:
            if self.managed_by_tower and credential.kind in dir(
                    builtin_injectors):
                injected_env = {}
                getattr(builtin_injectors,
                        credential.kind)(credential, injected_env,
                                         private_data_dir)
                env.update(injected_env)
                safe_env.update(build_safe_env(injected_env))
            return

        class TowerNamespace:
            pass

        tower_namespace = TowerNamespace()

        # maintain a normal namespace for building the ansible-playbook arguments (env and args)
        namespace = {'tower': tower_namespace}

        # maintain a sanitized namespace for building the DB-stored arguments (safe_env and safe_args)
        safe_namespace = {'tower': tower_namespace}

        # build a normal namespace with secret values decrypted (for
        # ansible-playbook) and a safe namespace with secret values hidden (for
        # DB storage)
        for field_name, value in credential.inputs.items():

            if type(value) is bool:
                # boolean values can't be secret/encrypted
                safe_namespace[field_name] = namespace[field_name] = value
                continue

            if field_name in self.secret_fields:
                value = decrypt_field(credential, field_name)
                safe_namespace[field_name] = '**********'
            elif len(value):
                safe_namespace[field_name] = value
            if len(value):
                namespace[field_name] = value

        # default missing boolean fields to False
        for field in self.inputs.get('fields', []):
            if field['type'] == 'boolean' and field['id'] not in credential.inputs:
                namespace[field['id']] = safe_namespace[field['id']] = False

        file_tmpls = self.injectors.get('file', {})
        # If any file templates are provided, render the files and update the
        # special `tower` template namespace so the filename can be
        # referenced in other injectors
        for file_label, file_tmpl in file_tmpls.items():
            data = Template(file_tmpl).render(**namespace)
            _, path = tempfile.mkstemp(dir=private_data_dir)
            with open(path, 'w') as f:
                f.write(data)  # render() returns text; no encode needed for a text-mode file
            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)

            # determine if filename indicates single file or many
            if file_label.find('.') == -1:
                tower_namespace.filename = path
            else:
                if not hasattr(tower_namespace, 'filename'):
                    tower_namespace.filename = TowerNamespace()
                file_label = file_label.split('.')[1]
                setattr(tower_namespace.filename, file_label, path)

        injector_field = self._meta.get_field('injectors')
        for env_var, tmpl in self.injectors.get('env', {}).items():
            try:
                injector_field.validate_env_var_allowed(env_var)
            except ValidationError as e:
                logger.error(
                    six.text_type(
                        'Ignoring prohibited env var {}, reason: {}').format(
                            env_var, e))
                continue
            env[env_var] = Template(tmpl).render(**namespace)
            safe_env[env_var] = Template(tmpl).render(**safe_namespace)

        if 'INVENTORY_UPDATE_ID' not in env:
            # awx-manage inventory_update does not support extra_vars via -e
            extra_vars = {}
            for var_name, tmpl in self.injectors.get('extra_vars', {}).items():
                extra_vars[var_name] = Template(tmpl).render(**namespace)

            def build_extra_vars_file(vars, private_dir):
                handle, path = tempfile.mkstemp(dir=private_dir)
                f = os.fdopen(handle, 'w')
                f.write(safe_dump(vars))
                f.close()
                os.chmod(path, stat.S_IRUSR)
                return path

            path = build_extra_vars_file(extra_vars, private_data_dir)
            if extra_vars:
                args.extend(['-e', '@%s' % path])
                safe_args.extend(['-e', '@%s' % path])