Example #1
0
def create_roles(apps, schema_editor):
    '''
    Trigger implicit role creation for every pre-existing resource.

    Role objects are normally created by the post_save hook of each
    resource, so re-saving every object here guarantees those hooks have
    fired for all rows already in the database before the rest of the
    migration runs.

    This gets run after migrate_users, which does role creation for users a
    little differently.
    '''

    role_bearing_models = (
        'Organization',
        'Team',
        'Inventory',
        'Project',
        'Credential',
        'CustomInventoryScript',
        'JobTemplate',
    )

    # Defer ancestor-list rebuilding until all saves are done.
    with batch_role_ancestor_rebuilding():
        for model_name in role_bearing_models:
            model = apps.get_model('main', model_name)
            for obj in model.objects.iterator():
                obj.save()
Example #2
0
    def _post_save(self, instance, created, *args, **kwargs):
        """
        Post-save hook (migration-registry variant): create any Role rows
        that are missing for the instance's implicit role fields, then bring
        role parentage in line with the instance's current relationships.
        """
        Role_ = utils.get_current_apps().get_model('main', 'Role')
        ContentType_ = utils.get_current_apps().get_model('contenttypes', 'ContentType')
        ct_id = ContentType_.objects.get_for_model(instance).id

        # Re-fetch through the migration app registry so we operate on the
        # historical model state rather than the live model class.
        Model = utils.get_current_apps().get_model('main', instance.__class__.__name__)
        latest_instance = Model.objects.get(pk=instance.pk)

        with batch_role_ancestor_rebuilding():
            # One new Role per implicit role field that is still unset.
            missing_roles = [
                Role_(role_field=field.name, content_type_id=ct_id, object_id=latest_instance.id)
                for field in getattr(latest_instance.__class__, '__implicit_role_fields')
                if getattr(latest_instance, field.name, None) is None
            ]

            if missing_roles:
                Role_.objects.bulk_create(missing_roles)
                updates, role_ids = {}, []
                for role in Role_.objects.filter(content_type_id=ct_id, object_id=latest_instance.id):
                    setattr(latest_instance, role.role_field, role)
                    updates[role.role_field] = role.id
                    role_ids.append(role.id)
                # Persist the new FK values without re-triggering save signals.
                type(latest_instance).objects.filter(pk=latest_instance.pk).update(**updates)
                Role.rebuild_role_ancestor_list(role_ids, [])

            update_role_parentage_for_instance(latest_instance)
            instance.refresh_from_db()
Example #3
0
def create_roles(apps, schema_editor):
    """
    Re-save every role-bearing object so its implicit roles get created.

    Implicit role creation happens in the post_save hook for each of our
    resources; iterating every object of every resource type and calling
    .save() ensures that hook has run system-wide.

    This can be used whenever new roles are introduced in a migration to
    create those roles for pre-existing objects that did not previously
    have them created via signals.
    """

    role_bearing_models = (
        'Organization',
        'Team',
        'Inventory',
        'Project',
        'Credential',
        'JobTemplate',
    )

    # Batch the ancestor-list rebuilds instead of doing one per save.
    with batch_role_ancestor_rebuilding():
        for model_name in role_bearing_models:
            model = apps.get_model('main', model_name)
            for obj in model.objects.iterator():
                obj.save()
Example #4
0
File: fields.py  Project: zeitounator/awx
    def _post_save(self, instance, created, *args, **kwargs):
        """
        Post-save hook for a model carrying implicit role fields.

        First creates any Role rows that are missing for the instance's
        implicit role fields, then reconciles each role's parent links with
        the parents currently implied by the instance's relationships.
        All ancestor-list rebuilding is deferred to the end of the
        surrounding batching context.
        """
        # Resolve models via the current (possibly migration-time) app registry.
        Role_ = utils.get_current_apps().get_model('main', 'Role')
        ContentType_ = utils.get_current_apps().get_model(
            'contenttypes', 'ContentType')
        ct_id = ContentType_.objects.get_for_model(instance).id
        with batch_role_ancestor_rebuilding():
            # Create any missing role objects
            missing_roles = []
            # getattr with the literal string avoids class-private name mangling.
            for implicit_role_field in getattr(instance.__class__,
                                               '__implicit_role_fields'):
                cur_role = getattr(instance, implicit_role_field.name, None)
                if cur_role is None:
                    missing_roles.append(
                        Role_(role_field=implicit_role_field.name,
                              content_type_id=ct_id,
                              object_id=instance.id))
            if len(missing_roles) > 0:
                Role_.objects.bulk_create(missing_roles)
                updates = {}
                role_ids = []
                # bulk_create may not populate PKs on all backends, so re-query
                # to attach the freshly created roles back onto the instance.
                for role in Role_.objects.filter(content_type_id=ct_id,
                                                 object_id=instance.id):
                    setattr(instance, role.role_field, role)
                    updates[role.role_field] = role.id
                    role_ids.append(role.id)
                # .update() persists the FK ids without re-firing save signals.
                type(instance).objects.filter(pk=instance.pk).update(**updates)
                Role.rebuild_role_ancestor_list(role_ids, [])

            # Update parentage if necessary
            for implicit_role_field in getattr(instance.__class__,
                                               '__implicit_role_fields'):
                cur_role = getattr(instance, implicit_role_field.name)
                # implicit_parents is stored as a JSON-encoded list of role ids.
                original_parents = set(json.loads(cur_role.implicit_parents))
                new_parents = implicit_role_field._resolve_parent_roles(
                    instance)
                # Apply only the delta: drop stale parents, add new ones.
                cur_role.parents.remove(*list(original_parents - new_parents))
                cur_role.parents.add(*list(new_parents - original_parents))
                new_parents_list = list(new_parents)
                # Sorted so the JSON form is canonical and comparable.
                new_parents_list.sort()
                new_parents_json = json.dumps(new_parents_list)
                if cur_role.implicit_parents != new_parents_json:
                    cur_role.implicit_parents = new_parents_json
                    cur_role.save()
Example #5
0
def test_rbac_batch_rebuilding(rando, organization):
    """Role ancestry is only rebuilt when the batching context exits."""
    with batch_role_ancestor_rebuilding():
        organization.admin_role.members.add(rando)
        created_inventory = organization.inventories.create(name='test-inventory')
        # Inside the context the ancestor lists are stale, so membership
        # inherited through the organization is not yet visible.
        assert rando not in created_inventory.admin_role
    # Leaving the context triggers the deferred rebuild.
    assert rando in created_inventory.admin_role
Example #6
0
    def perform_update(self, options, data, inventory_update):
        """Shared method for both awx-manage CLI updates and inventory updates
        from the tasks system.

        This saves the inventory data to the database, calling load_into_database
        but also wraps that method in a host of options processing.

        :param options: dict-like object of import options (read via .get):
            overwrite, overwrite_vars, enabled_var, enabled_value,
            group_filter, host_filter, exclude_empty_groups, instance_id_var.
        :param data: parsed inventory structure fed to dict_to_mem_data.
        :param inventory_update: the InventoryUpdate driving this run; its
            related inventory and inventory_source are cached on self.
        :raises CommandError: if group_filter or host_filter is not a valid regex.
        :raises PermissionDenied: re-raised after license or per-org host-limit
            failures have been recorded on the update.
        """
        # outside of normal options, these are needed as part of programatic interface
        self.inventory = inventory_update.inventory
        self.inventory_source = inventory_update.inventory_source
        self.inventory_update = inventory_update

        # the update options, could be parser object or dict
        self.overwrite = bool(options.get('overwrite', False))
        self.overwrite_vars = bool(options.get('overwrite_vars', False))
        self.enabled_var = options.get('enabled_var', None)
        self.enabled_value = options.get('enabled_value', None)
        # Filters default to a match-everything pattern when absent/empty.
        self.group_filter = options.get('group_filter', None) or r'^.+$'
        self.host_filter = options.get('host_filter', None) or r'^.+$'
        self.exclude_empty_groups = bool(options.get('exclude_empty_groups', False))
        self.instance_id_var = options.get('instance_id_var', None)

        # Fail fast with a user-facing error on malformed filter regexes.
        try:
            self.group_filter_re = re.compile(self.group_filter)
        except re.error:
            raise CommandError('invalid regular expression for --group-filter')
        try:
            self.host_filter_re = re.compile(self.host_filter)
        except re.error:
            raise CommandError('invalid regular expression for --host-filter')

        begin = time.time()

        # Since perform_update can be invoked either through the awx-manage CLI
        # or from the task system, we need to create a new lock at this level
        # (even though inventory_import.Command.handle -- which calls
        # perform_update -- has its own lock, inventory_ID_import)
        with advisory_lock('inventory_{}_perform_update'.format(self.inventory.id)):

            # Record the failure on the update before propagating it.
            try:
                self.check_license()
            except PermissionDenied as e:
                self.mark_license_failure(save=True)
                raise e

            try:
                # Check the per-org host limits
                self.check_org_host_limit()
            except PermissionDenied as e:
                self.mark_org_limits_failure(save=True)
                raise e

            if settings.SQL_DEBUG:
                queries_before = len(connection.queries)

            # Update inventory update for this command line invocation.
            with ignore_inventory_computed_fields():
                # TODO: move this to before perform_update
                iu = self.inventory_update
                if iu.status != 'running':
                    with transaction.atomic():
                        self.inventory_update.status = 'running'
                        self.inventory_update.save()

            logger.info('Processing JSON output...')
            # Build the in-memory inventory tree from the raw data.
            inventory = MemInventory(group_filter_re=self.group_filter_re, host_filter_re=self.host_filter_re)
            inventory = dict_to_mem_data(data, inventory=inventory)

            logger.info('Loaded %d groups, %d hosts', len(inventory.all_group.all_groups), len(inventory.all_group.all_hosts))

            if self.exclude_empty_groups:
                inventory.delete_empty_groups()

            self.all_group = inventory.all_group

            if settings.DEBUG:
                # depending on inventory source, this output can be
                # *exceedingly* verbose - crawling a deeply nested
                # inventory/group data structure and printing metadata about
                # each host and its memberships
                #
                # it's easy for this scale of data to overwhelm pexpect,
                # (and it's likely only useful for purposes of debugging the
                # actual inventory import code), so only print it if we have to:
                # https://github.com/ansible/ansible-tower/issues/7414#issuecomment-321615104
                self.all_group.debug_tree()

            with batch_role_ancestor_rebuilding():
                # If using with transaction.atomic() with try ... catch,
                # with transaction.atomic() must be inside the try section of the code as per Django docs
                try:
                    # Ensure that this is managed as an atomic SQL transaction,
                    # and thus properly rolled back if there is an issue.
                    with transaction.atomic():
                        # Merge/overwrite inventory into database.
                        if settings.SQL_DEBUG:
                            logger.warning('loading into database...')
                        with ignore_inventory_computed_fields():
                            if getattr(settings, 'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC', True):
                                self.load_into_database()
                            else:
                                with disable_activity_stream():
                                    self.load_into_database()
                            # queries_before2 is only bound (and only read
                            # below) when SQL_DEBUG is on.
                            if settings.SQL_DEBUG:
                                queries_before2 = len(connection.queries)
                            self.inventory.update_computed_fields()
                        if settings.SQL_DEBUG:
                            logger.warning('update computed fields took %d queries', len(connection.queries) - queries_before2)

                        # Check if the license is valid.
                        # If the license is not valid, a CommandError will be thrown,
                        # and inventory update will be marked as invalid.
                        # with transaction.atomic() will roll back the changes.
                        # license_fail tracks which of the two checks below
                        # raised, so the except clause records the right failure.
                        license_fail = True
                        self.check_license()

                        # Check the per-org host limits
                        license_fail = False
                        self.check_org_host_limit()
                except PermissionDenied as e:
                    if license_fail:
                        self.mark_license_failure(save=True)
                    else:
                        self.mark_org_limits_failure(save=True)
                    raise e

                if settings.SQL_DEBUG:
                    logger.warning('Inventory import completed for %s in %0.1fs', self.inventory_source.name, time.time() - begin)
                else:
                    logger.info('Inventory import completed for %s in %0.1fs', self.inventory_source.name, time.time() - begin)

            # If we're in debug mode, then log the queries and time
            # used to do the operation.
            if settings.SQL_DEBUG:
                queries_this_import = connection.queries[queries_before:]
                sqltime = sum(float(x['time']) for x in queries_this_import)
                logger.warning('Inventory import required %d queries ' 'taking %0.3fs', len(queries_this_import), sqltime)