    def as_sql(self):
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.get_meta()
        result = ['INSERT INTO %s' % qn(opts.db_table)]

        has_fields = bool(self.query.fields)
        fields = self.query.fields if has_fields else [opts.pk]
        result.append('(%s)' % ', '.join(qn(f.column) for f in fields))

        if has_fields:
            params = values = [
                [
                    f.get_db_prep_save(
                        getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True),
                        connection=self.connection,
                    )
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
                    not self.return_id and self.connection.features.has_bulk_insert)

        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in zip(fields, val)]
                for val in values
            ]
            # Oracle Spatial needs to remove some values due to #10888
            params = self.connection.ops.modify_insert_params(placeholders, params)
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            # Skip empty r_fmt to allow subclasses to customize behavior for
            # 3rd party backends. Refs #19096.
            if r_fmt:
                result.append(r_fmt % col)
                params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple(v for val in values for v in val))]
        else:
            return [
                (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
                for p, vals in zip(placeholders, params)
            ]
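
The non-bulk branch ends by pairing each row's placeholder list with its parameter list via zip, producing one (sql, params) tuple per object. A minimal, self-contained sketch of just that final step (the table, columns, and values here are made up for illustration):

result = ['INSERT INTO "tbl"', '("a", "b")']
placeholders = [['%s', '%s'], ['%s', '%s']]
params = [[1, 2], [3, 4]]
statements = [
    (' '.join(result + ['VALUES (%s)' % ', '.join(p)]), vals)
    for p, vals in zip(placeholders, params)
]
# One (sql, params) tuple per row:
assert statements[0] == ('INSERT INTO "tbl" ("a", "b") VALUES (%s, %s)', [1, 2])
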
Example #2
def flatten_result(source):
    """
    Turns the given source sequence into a list of reg-exp possibilities and
    their arguments. Returns a list of strings and a list of argument lists.
    Each of the two lists will be of the same length.
    """
    if source is None:
        return [''], [[]]
    if isinstance(source, Group):
        if source[1] is None:
            params = []
        else:
            params = [source[1]]
        return [source[0]], [params]
    result = ['']
    result_args = [[]]
    pos = last = 0
    for pos, elt in enumerate(source):
        if isinstance(elt, six.string_types):
            continue
        piece = ''.join(source[last:pos])
        if isinstance(elt, Group):
            piece += elt[0]
            param = elt[1]
        else:
            param = None
        last = pos + 1
        for i in range(len(result)):
            result[i] += piece
            if param:
                result_args[i].append(param)
        if isinstance(elt, (Choice, NonCapture)):
            if isinstance(elt, NonCapture):
                elt = [elt]
            inner_result, inner_args = [], []
            for item in elt:
                res, args = flatten_result(item)
                inner_result.extend(res)
                inner_args.extend(args)
            new_result = []
            new_args = []
            for item, args in zip(result, result_args):
                for i_item, i_args in zip(inner_result, inner_args):
                    new_result.append(item + i_item)
                    new_args.append(args[:] + i_args)
            result = new_result
            result_args = new_args
    if pos >= last:
        piece = ''.join(source[last:])
        for i in range(len(result)):
            result[i] += piece
    return result, result_args
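
The Choice/NonCapture branch above performs a cartesian product: every result accumulated so far is combined with every alternative from inside the group, and the two parallel zips keep patterns and argument lists in step. A stripped-down sketch of just that step, with plain strings standing in for pattern pieces:

result, result_args = ['a', 'b'], [[], ['p']]
inner_result, inner_args = ['x', 'y'], [[], ['q']]
new_result, new_args = [], []
for item, args in zip(result, result_args):
    for i_item, i_args in zip(inner_result, inner_args):
        new_result.append(item + i_item)
        new_args.append(args[:] + i_args)
assert new_result == ['ax', 'ay', 'bx', 'by']
assert new_args == [[], ['q'], ['p'], ['p', 'q']]
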
Example #3
def __dictfetchall(cursor):
    "Return all rows from a cursor as a list of dicts."
    desc = cursor.description
    return [
        dict(zip([col[0] for col in desc], row))
        for row in cursor.fetchall()
    ]
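
Usage sketch with a stand-in cursor; FakeCursor is hypothetical, but real DB-API cursors expose the same description and fetchall() members:

class FakeCursor:
    description = (('id',), ('name',))

    def fetchall(self):
        return [(1, 'alpha'), (2, 'beta')]

rows = __dictfetchall(FakeCursor())
assert rows == [{'id': 1, 'name': 'alpha'}, {'id': 2, 'name': 'beta'}]
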
Example #4
    def mget(self, keys):
        if not keys:
            return
        prefixed_keys = [self.add_prefix(key) for key in keys]
        for key, value in zip(keys, self._rd.mget(prefixed_keys)):
            if value:
                yield key, loads(value)
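
A self-contained sketch of the same pattern, assuming loads/dumps come from pickle and with a hypothetical FakeRedis standing in for the real client behind self._rd:

from pickle import dumps, loads

class FakeRedis:
    def __init__(self, data):
        self._data = data

    def mget(self, keys):
        return [self._data.get(k) for k in keys]

store = FakeRedis({'cache:a': dumps(1), 'cache:b': dumps(2)})
keys = ['a', 'missing', 'b']
prefixed = ['cache:%s' % k for k in keys]
# zip pairs each original key with the value fetched for its prefixed
# counterpart; missing keys come back as None and are skipped.
found = [(k, loads(v)) for k, v in zip(keys, store.mget(prefixed)) if v]
assert found == [('a', 1), ('b', 2)]
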
Example #5
def copy_plugins_to(old_plugins, to_placeholder,
                    to_language=None, parent_plugin_id=None, no_signals=False):
    """
    Copies a list of plugins into a placeholder, optionally to another language.
    """
    # TODO: Refactor this and copy_plugins to cleanly separate plugin tree/node
    # copying and remove the need for the mutating parameter old_parent_cache.
    old_parent_cache = {}
    # For subplugin copy, first plugin's parent must be nulled before copying.
    if old_plugins:
        old_plugins[0].parent = old_plugins[0].parent_id = None
    new_plugins = [old.copy_plugin(to_placeholder, to_language or old.language,
                                   old_parent_cache, no_signals)
                   for old in old_plugins]
    if new_plugins and parent_plugin_id:
        from cms.models import CMSPlugin
        new_plugins[0].parent_id = parent_plugin_id
        new_plugins[0].save()
        new_plugins[0].move(CMSPlugin.objects.get(pk=parent_plugin_id), pos='last-child')
        new_plugins[0] = CMSPlugin.objects.get(pk=new_plugins[0].pk)
    plugins_ziplist = list(zip(new_plugins, old_plugins))

    # this magic is needed for advanced plugins like Text Plugins that can have
    # nested plugins and need to update their content based on the new plugins.
    for new_plugin, old_plugin in plugins_ziplist:
        new_instance = new_plugin.get_plugin_instance()[0]
        if new_instance:
            new_instance._no_reorder = True
            new_instance.post_copy(old_plugin, plugins_ziplist)
    return new_plugins
Example #6
    def resolve_columns(self, row, fields=()):
        """
        This routine is necessary so that distances and geometries returned
        from extra selection SQL get resolved appropriately into Python
        objects.
        """
        values = []
        aliases = list(self.query.extra_select)

        # Have to set a starting row number offset that is used for
        # determining the correct starting row index -- needed for
        # doing pagination with Oracle.
        rn_offset = 0
        if self.connection.ops.oracle:
            if self.query.high_mark is not None or self.query.low_mark:
                rn_offset = 1
        index_start = rn_offset + len(aliases)

        # Converting any extra selection values (e.g., geometries and
        # distance objects added by GeoQuerySet methods).
        values = [self.query.convert_values(v,
                               self.query.extra_select_fields.get(a, None),
                               self.connection)
                  for v, a in zip(row[rn_offset:index_start], aliases)]
        if self.connection.ops.oracle or getattr(self.query, 'geo_values', False):
            # We resolve the rest of the columns if we're on Oracle or if
            # the `geo_values` attribute is defined.
            for value, field in zip_longest(row[index_start:], fields):
                values.append(self.query.convert_values(value, field, self.connection))
        else:
            values.extend(row[index_start:])
        return tuple(values)
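
The switch from zip to zip_longest matters here: zip would silently drop trailing row values once fields runs out, while zip_longest pads fields with None so every value still passes through convert_values. A quick Python 3 comparison:

from itertools import zip_longest

row, fields = (1, 2, 3), ('f1',)
assert list(zip(row, fields)) == [(1, 'f1')]
assert list(zip_longest(row, fields)) == [(1, 'f1'), (2, None), (3, None)]
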
Example #7
    def _test_order(self, ids, expected_order):
        newbundlename = 'testbundle-new'

        # need to define our querystring explicitly to enforce ordering
        params = {'form': 'patchlistform',
                  'bundle_name': newbundlename,
                  'action': 'Create',
                  'project': self.project.id,
                  }

        data = urlencode(params) + \
            ''.join(['&patch_id:%d=checked' % i for i in ids])

        response = self.client.post(
            reverse('patch-list', kwargs={
                'project_id': self.project.linkname}),
            data=data,
            content_type='application/x-www-form-urlencoded',
        )

        self.assertContains(response, 'Bundle %s created' % newbundlename)
        self.assertContains(response, 'added to bundle %s' % newbundlename,
                            count=5)

        bundle = Bundle.objects.get(name=newbundlename)

        # BundlePatches should be sorted by .order by default
        bps = BundlePatch.objects.filter(bundle=bundle)

        for (bp, p) in zip(bps, expected_order):
            self.assertEqual(bp.patch.pk, p.pk)

        bundle.delete()
Example #8
    def handle_merge(self, loader, conflicts):
        """
        Handles merging together conflicted migrations interactively,
        if it's safe; otherwise, advises on how to fix it.
        """
        if self.interactive:
            questioner = InteractiveMigrationQuestioner()
        else:
            questioner = MigrationQuestioner(defaults={"ask_merge": True})
        for app_label, migration_names in conflicts.items():
            # Grab out the migrations in question, and work out their
            # common ancestor.
            merge_migrations = []
            for migration_name in migration_names:
                migration = loader.get_migration(app_label, migration_name)
                migration.ancestry = loader.graph.forwards_plan((app_label, migration_name))
                merge_migrations.append(migration)
            all_items_equal = lambda seq: all(item == seq[0] for item in seq[1:])
            merge_migrations_generations = zip(*[m.ancestry for m in merge_migrations])
            common_ancestor_count = sum(
                1 for common_ancestor_generation in takewhile(all_items_equal, merge_migrations_generations)
            )
            if not common_ancestor_count:
                raise ValueError("Could not find common ancestor of %s" % migration_names)
            # Now work out the operations along each divergent branch
            for migration in merge_migrations:
                migration.branch = migration.ancestry[common_ancestor_count:]
                migrations_ops = (
                    loader.get_migration(node_app, node_name).operations for node_app, node_name in migration.branch
                )
                migration.merged_operations = sum(migrations_ops, [])
            # In future, this could use some of the Optimizer code
            # (can_optimize_through) to automatically see if they're
            # mergeable. For now, we always just prompt the user.
            if self.verbosity > 0:
                self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
                for migration in merge_migrations:
                    self.stdout.write(self.style.MIGRATE_LABEL("  Branch %s" % migration.name))
                    for operation in migration.merged_operations:
                        self.stdout.write("    - %s\n" % operation.describe())
            if questioner.ask_merge(app_label):
                # If they still want to merge it, then write out an empty
                # file depending on the migrations needing merging.
                numbers = [MigrationAutodetector.parse_number(migration.name) for migration in merge_migrations]
                try:
                    biggest_number = max([x for x in numbers if x is not None])
                except ValueError:
                    biggest_number = 1
                subclass = type(
                    "Migration",
                    (Migration,),
                    {"dependencies": [(app_label, migration.name) for migration in merge_migrations]},
                )
                new_migration = subclass("%04i_merge" % (biggest_number + 1), app_label)
                writer = MigrationWriter(new_migration)
                with open(writer.path, "wb") as fh:
                    fh.write(writer.as_string())
                if self.verbosity > 0:
                    self.stdout.write("\nCreated new merge migration %s" % writer.path)
Example #9
    def _test_sequence(self, response, test_fn):
        ids = self._extract_patch_ids(response)
        self.assertTrue(bool(ids))
        patches = [Patch.objects.get(id=i) for i in ids]
        pairs = list(zip(patches, patches[1:]))

        for p1, p2 in pairs:
            test_fn(p1, p2)
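
zip(seq, seq[1:]) is the standard pairwise idiom used here: each element is paired with its successor so test_fn can compare neighbours:

nums = [1, 2, 3, 4]
assert list(zip(nums, nums[1:])) == [(1, 2), (2, 3), (3, 4)]
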
Example #10
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        fields = None
        has_aggregate_select = bool(self.query.aggregate_select)
        # Set transaction dirty if we're using SELECT FOR UPDATE to ensure
        # a subsequent commit/rollback is executed, so any database locks
        # are released.
        if self.query.select_for_update and transaction.is_managed(self.using):
            transaction.set_dirty(self.using)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                if has_aggregate_select:
                    loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
                    aggregate_start = len(self.query.extra_select) + len(loaded_fields)
                    aggregate_end = aggregate_start + len(self.query.aggregate_select)
                if resolve_columns:
                    if fields is None:
                        # We only set this up here because
                        # related_select_fields isn't populated until
                        # execute_sql() has been called.

                        # We also include types of fields of related models that
                        # will be included via select_related() for the benefit
                        # of MySQL/MySQLdb when boolean fields are involved
                        # (#15040).

                        # This code duplicates the logic for the order of fields
                        # found in get_columns(). It would be nice to clean this up.
                        if self.query.select_fields:
                            fields = self.query.select_fields
                        else:
                            fields = self.query.model._meta.fields
                        fields = fields + self.query.related_select_fields

                        # If the field was deferred, exclude it from being passed
                        # into `resolve_columns` because it wasn't selected.
                        only_load = self.deferred_to_columns()
                        if only_load:
                            fields = [f for f in fields if f.model._meta.db_table not in only_load or
                                      f.column in only_load[f.model._meta.db_table]]
                        if has_aggregate_select:
                            # pad None in to fields for aggregates
                            fields = fields[:aggregate_start] + [
                                None for x in range(0, aggregate_end - aggregate_start)
                            ] + fields[aggregate_start:]
                    row = self.resolve_columns(row, fields)

                if has_aggregate_select:
                    row = tuple(row[:aggregate_start]) + tuple([
                        self.query.resolve_aggregate(value, aggregate, self.connection)
                        for (alias, aggregate), value
                        in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
                    ]) + tuple(row[aggregate_end:])

                yield row
Example #11
def _salt_cipher_secret(secret):
    """
    Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a
    token by adding a salt and using it to encrypt the secret.
    """
    salt = _get_new_csrf_string()
    chars = CSRF_ALLOWED_CHARS
    pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in salt))
    cipher = ''.join(chars[(x + y) % len(chars)] for x, y in pairs)
    return salt + cipher
Example #12
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        fields = None
        has_aggregate_select = bool(self.query.aggregate_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                if has_aggregate_select:
                    loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
                    aggregate_start = len(self.query.extra_select) + len(loaded_fields)
                    aggregate_end = aggregate_start + len(self.query.aggregate_select)
                if resolve_columns:
                    if fields is None:
                        # We only set this up here because
                        # related_select_cols isn't populated until
                        # execute_sql() has been called.

                        # We also include types of fields of related models that
                        # will be included via select_related() for the benefit
                        # of MySQL/MySQLdb when boolean fields are involved
                        # (#15040).

                        # This code duplicates the logic for the order of fields
                        # found in get_columns(). It would be nice to clean this up.
                        if self.query.select:
                            fields = [f.field for f in self.query.select]
                        else:
                            fields = self.query.get_meta().concrete_fields
                        fields = fields + [f.field for f in self.query.related_select_cols]

                        # If the field was deferred, exclude it from being passed
                        # into `resolve_columns` because it wasn't selected.
                        only_load = self.deferred_to_columns()
                        if only_load:
                            db_table = self.query.get_meta().db_table
                            fields = [f for f in fields if db_table in only_load and
                                      f.column in only_load[db_table]]
                        if has_aggregate_select:
                            # pad None in to fields for aggregates
                            fields = fields[:aggregate_start] + [
                                None for x in range(0, aggregate_end - aggregate_start)
                            ] + fields[aggregate_start:]
                    row = self.resolve_columns(row, fields)

                if has_aggregate_select:
                    row = tuple(row[:aggregate_start]) + tuple(
                        self.query.resolve_aggregate(value, aggregate, self.connection)
                        for (alias, aggregate), value
                        in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
                    ) + tuple(row[aggregate_end:])

                yield row
Example #13
def _unsalt_cipher_token(token):
    """
    Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length
    CSRF_TOKEN_LENGTH, and that its first half is a salt), use it to decrypt
    the second half to produce the original secret.
    """
    salt = token[:CSRF_SECRET_LENGTH]
    token = token[CSRF_SECRET_LENGTH:]
    chars = CSRF_ALLOWED_CHARS
    pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in salt))
    secret = ''.join(chars[x - y] for x, y in pairs)  # Note negative values are ok
    return secret
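
The two CSRF helpers are inverses: the cipher adds salt indices modulo len(chars), and unsalting subtracts them, with negative results wrapping via Python's negative indexing. A round-trip sketch with a small stand-in alphabet and secret length (both are assumptions, not Django's real constants):

import string

ALLOWED = string.ascii_lowercase  # stand-in for CSRF_ALLOWED_CHARS
SECRET_LEN = 8                    # stand-in for CSRF_SECRET_LENGTH

def salt_cipher(secret, salt):
    pairs = zip((ALLOWED.index(x) for x in secret),
                (ALLOWED.index(x) for x in salt))
    return salt + ''.join(ALLOWED[(x + y) % len(ALLOWED)] for x, y in pairs)

def unsalt(token):
    salt, cipher = token[:SECRET_LEN], token[SECRET_LEN:]
    pairs = zip((ALLOWED.index(x) for x in cipher),
                (ALLOWED.index(x) for x in salt))
    return ''.join(ALLOWED[x - y] for x, y in pairs)  # negative index wraps

token = salt_cipher('abcdefgh', 'zzzzzzzz')
assert unsalt(token) == 'abcdefgh'
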
Example #14
    def test_yaml_mapping(self):
        from xapp.models import tables
        from xapp.models import ModelGenerator
        yaml_f = ModelGenerator.yaml_ordered_load(
            open(os.path.join(settings.BASE_DIR, 'model.yaml')),)

        for model_table, yaml_table in zip(tables, yaml_f):
            self.assertEqual(yaml_table.capitalize(), model_table.__name__)
            self.assertEqual(yaml_f[yaml_table]['title'],
                model_table._meta.verbose_name)

            for model_field, yaml_field in zip(
                    [model_field for model_field in model_table._meta.fields
                        if model_field.name != 'id'],
                    yaml_f[yaml_table]['fields']
            ):
                self.assertEqual(model_field.name, yaml_field['id'])
                self.assertEqual(model_field.verbose_name, yaml_field['title'])

                from xapp.models import CustomModelTypes
                self.assertEqual(model_field.__class__.__name__,
                    CustomModelTypes.yaml_types[yaml_field['type']])
Example #15
    def get_inline_formsets(self, request, formsets, inline_instances,
                            obj=None, allow_nested=False):
        inline_admin_formsets = []
        for inline, formset in zip(inline_instances, formsets):
            if not allow_nested and getattr(formset, 'is_nested', False):
                continue
            fieldsets = list(inline.get_fieldsets(request, obj))
            readonly = list(inline.get_readonly_fields(request, obj))
            prepopulated = dict(inline.get_prepopulated_fields(request, obj))
            inline_admin_formset = self.inline_admin_formset_helper_cls(
                inline, formset, fieldsets, prepopulated, readonly,
                model_admin=self, request=request)
            inline_admin_formsets.append(inline_admin_formset)
        return inline_admin_formsets
Example #16
    def compare_item(self, item_rsp, default_reviewer):
        self.assertEqual(default_reviewer.name, item_rsp['name'])
        self.assertEqual(default_reviewer.file_regex, item_rsp['file_regex'])

        users = list(default_reviewer.people.all())

        for user_rsp, user in zip(item_rsp['users'], users):
            self.assertEqual(user_rsp['title'], user.username)

        self.assertEqual(len(item_rsp['users']), len(users))

        groups = list(default_reviewer.groups.all())

        for group_rsp, group in zip(item_rsp['groups'], groups):
            self.assertEqual(group_rsp['title'], group.name)

        self.assertEqual(len(item_rsp['groups']), len(groups))

        repos = list(default_reviewer.repository.all())

        for repo_rsp, repo in zip(item_rsp['repositories'], repos):
            self.assertEqual(repo_rsp['title'], repo.name)

        self.assertEqual(len(item_rsp['repositories']), len(repos))
Example #17
    def as_sql(self):
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.model._meta
        result = ["INSERT INTO %s" % qn(opts.db_table)]

        has_fields = bool(self.query.fields)
        fields = self.query.fields if has_fields else [opts.pk]
        result.append("(%s)" % ", ".join([qn(f.column) for f in fields]))

        if has_fields:
            params = values = [
                [
                    f.get_db_prep_save(
                        getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection
                    )
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        can_bulk = (
            not any(hasattr(field, "get_placeholder") for field in fields)
            and not self.return_id
            and self.connection.features.has_bulk_insert
        )

        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [[self.placeholder(field, v) for field, v in zip(fields, val)] for val in values]
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            result.append(r_fmt % col)
            params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple([v for val in values for v in val]))]
        else:
            return [(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholders, params)]
Example #18
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        resolve_columns = hasattr(self, "resolve_columns")
        fields = None
        has_aggregate_select = bool(self.query.aggregate_select)
        # Set transaction dirty if we're using SELECT FOR UPDATE to ensure
        # a subsequent commit/rollback is executed, so any database locks
        # are released.
        if self.query.select_for_update and transaction.is_managed(self.using):
            transaction.set_dirty(self.using)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                if resolve_columns:
                    if fields is None:
                        # We only set this up here because
                        # related_select_fields isn't populated until
                        # execute_sql() has been called.
                        if self.query.select_fields:
                            fields = self.query.select_fields + self.query.related_select_fields
                        else:
                            fields = self.query.model._meta.fields
                        # If the field was deferred, exclude it from being passed
                        # into `resolve_columns` because it wasn't selected.
                        only_load = self.deferred_to_columns()
                        if only_load:
                            db_table = self.query.model._meta.db_table
                            fields = [f for f in fields if db_table in only_load and f.column in only_load[db_table]]
                    row = self.resolve_columns(row, fields)

                if has_aggregate_select:
                    aggregate_start = len(self.query.extra_select) + len(self.query.select)
                    aggregate_end = aggregate_start + len(self.query.aggregate_select)
                    row = (
                        tuple(row[:aggregate_start])
                        + tuple(
                            [
                                self.query.resolve_aggregate(value, aggregate, self.connection)
                                for (alias, aggregate), value in zip(
                                    self.query.aggregate_select.items(), row[aggregate_start:aggregate_end]
                                )
                            ]
                        )
                        + tuple(row[aggregate_end:])
                    )

                yield row
Example #19
def copy_plugins_to(old_plugins, to_placeholder,
                    to_language=None, parent_plugin_id=None, no_signals=False):
    """
    Copies a list of plugins into a placeholder, optionally to another language.
    """
    # TODO: Refactor this and copy_plugins to cleanly separate plugin tree/node
    # copying and remove the need for the mutating parameter old_parent_cache.
    old_parent_cache = {}
    # For subplugin copy, top-level plugin's parent must be nulled
    # before copying.
    if old_plugins:
        old_parent = old_plugins[0].parent
        for old_plugin in old_plugins:
            if old_plugin.parent == old_parent:
                old_plugin.parent = old_plugin.parent_id = None
    new_plugins = []
    for old in old_plugins:
        new_plugins.append(
            old.copy_plugin(to_placeholder, to_language or old.language,
                            old_parent_cache, no_signals))

    if new_plugins and parent_plugin_id:
        from cms.models import CMSPlugin
        parent_plugin = CMSPlugin.objects.get(pk=parent_plugin_id)
        for idx, plugin in enumerate(new_plugins):
            if plugin.parent_id is None:
                plugin.parent_id = parent_plugin_id
                # Always use update fields to avoid side-effects.
                # In this case "plugin" has invalid values for internal fields
                # like numchild.
                # The invalid value is only in memory because the instance
                # was never updated.
                plugin.save(update_fields=['parent'])
                new_plugins[idx] = plugin.move(parent_plugin, pos="last-child")

    plugins_ziplist = list(zip(new_plugins, old_plugins))

    # this magic is needed for advanced plugins like Text Plugins that can have
    # nested plugins and need to update their content based on the new plugins.
    for new_plugin, old_plugin in plugins_ziplist:
        new_instance = new_plugin.get_plugin_instance()[0]
        if new_instance:
            new_instance._no_reorder = True
            new_instance.post_copy(old_plugin, plugins_ziplist)

    # returns information about originals and copies
    return plugins_ziplist
Example #20
def previous_current_next(items):
    """
    From http://www.wordaligned.org/articles/zippy-triples-served-with-python

    Creates an iterator which returns (previous, current, next) triples,
    with ``None`` filling in when there is no previous or next
    available.
    """
    extend = itertools.chain([None], items, [None])
    prev, cur, nex = itertools.tee(extend, 3)
    try:
        next(cur)
        next(nex)
        next(nex)
    except StopIteration:
        pass
    return zip(prev, cur, nex)
Example #21
def previous_current_next(items):
    """
    From http://www.wordaligned.org/articles/zippy-triples-served-with-python

    Creates an iterator which returns (previous, current, next) triples,
    with ``None`` filling in when there is no previous or next
    available.
    """
    extend = itertools.chain([None], items, [None])
    prev, cur, nex = itertools.tee(extend, 3)
    # Advancing an iterator twice when we know there are two items (the
    # two Nones at the start and at the end) will never fail except if
    # `items` is some funny StopIteration-raising generator. There's no point
    # in swallowing this exception.
    next(cur)
    next(nex)
    next(nex)
    return zip(prev, cur, nex)
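
Usage example (requires itertools and the function above). The three tee'd copies are offset by zero, one, and two advances, and zip stops when the shortest (nex) runs out:

assert list(previous_current_next('abc')) == [
    (None, 'a', 'b'),
    ('a', 'b', 'c'),
    ('b', 'c', None),
]
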
Example #22
def diff_histories(old_history, new_history):
    """Yield the entries in the diff between the old and new histories.

    Args:
        old_history (list of reviewboard.diffviewer.models.diffcommit.
                     DiffCommit):
            The list of commits from a previous
            :py:class:`~reviewboard.diffviewer.models.diffset.DiffSet`.

        new_history (list of reviewboard.diffviewer.models.diffcommit.
                     DiffCommit):
            The list of commits from the new
            :py:class:`~reviewboard.diffviewer.models.diffset.DiffSet`.

    Yields:
        CommitHistoryDiffEntry:
        The history entries.
    """
    i = 0

    # This is not quite the same as ``enumerate(...)`` because if we run out
    # of history, ``i`` will not be incremented.

    for old_commit, new_commit in zip(old_history, new_history):
        if old_commit.commit_id != new_commit.commit_id:
            break

        yield CommitHistoryDiffEntry(
            entry_type=CommitHistoryDiffEntry.COMMIT_UNMODIFIED,
            old_commit=old_commit,
            new_commit=new_commit)

        i += 1

    for old_commit in old_history[i:]:
        yield CommitHistoryDiffEntry(
            entry_type=CommitHistoryDiffEntry.COMMIT_REMOVED,
            old_commit=old_commit)

    for new_commit in new_history[i:]:
        yield CommitHistoryDiffEntry(
            entry_type=CommitHistoryDiffEntry.COMMIT_ADDED,
            new_commit=new_commit)
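
The control flow hinges on zip stopping at the shorter history while i tracks how far the paired walk got: everything past index i in the old history was removed and everything past it in the new history was added. A stripped-down sketch with commit ids as plain strings standing in for DiffCommit objects:

def diff_ids(old_ids, new_ids):
    i = 0
    for old, new in zip(old_ids, new_ids):
        if old != new:
            break
        yield ('unmodified', old)
        i += 1
    for old in old_ids[i:]:
        yield ('removed', old)
    for new in new_ids[i:]:
        yield ('added', new)

assert list(diff_ids(['r1', 'r2'], ['r1', 'r3'])) == [
    ('unmodified', 'r1'), ('removed', 'r2'), ('added', 'r3')]
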
Example #23
    def build_query(self, **filters):
        applicable_filters = None
        query_param = getattr(self.backend, "query_param", None)

        value = filters.pop(query_param, None)
        if value:
            try:
                term, val = chain.from_iterable(zip(self.tokenize(value, self.view.lookup_sep)))
            except ValueError:
                raise ValueError("Cannot convert the '%s' query parameter to a valid boost filter."
                                 % query_param)
            else:
                try:
                    applicable_filters = {"term": term, "boost": float(val)}
                except ValueError:
                    raise ValueError("Cannot convert boost to float value. Make sure to provide a "
                                     "numerical boost value.")

        return applicable_filters
Example #24
    def _final_join_removal(self, cols, alias):
        """
        A helper method for get_distinct and get_ordering. This method
        trims extra, unneeded joins from the tail of the join chain.

        This is very similar to what is done in trim_joins, but we will
        trim LEFT JOINS here. It would be a good idea to consolidate this
        method and query.trim_joins().
        """
        if alias:
            while True:
                join = self.query.alias_map[alias]
                lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols])
                if set(cols) != set(rhs_cols):
                    break

                cols = [lhs_cols[rhs_cols.index(col)] for col in cols]
                self.query.unref_alias(alias)
                alias = join.lhs_alias
        return cols, alias
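
zip(*pairs) is the unzip idiom used above: it turns a list of (lhs, rhs) column pairs into one tuple of lhs columns and one of rhs columns:

join_cols = [('lhs_id', 'rhs_id'), ('lhs_ver', 'rhs_ver')]
lhs_cols, rhs_cols = zip(*join_cols)
assert lhs_cols == ('lhs_id', 'lhs_ver')
assert rhs_cols == ('rhs_id', 'rhs_ver')
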
Example #25
    def context(self):
        if not self.content:
            return ''

        raw = self.content.raw
        if not raw.endswith('\n'):
            raw += '\n'
        start_line = raw.count('\n', 0, self.start)
        end_line = raw.count('\n', 0, self.end)
        ctx_start_line = max(0, start_line - 2)
        ctx_end_line = min(end_line + 3, raw.count('\n'))
        digits = len(text_type(ctx_end_line))
        context_lines = raw.split('\n')[ctx_start_line:ctx_end_line]

        # Highlight the errored portion
        err_page_bits = []
        err_line_count = 1
        for p, c in enumerate(raw):  # pragma: no branch
            if c == '\n':
                err_page_bits.append(c)
                err_line_count += 1
                if err_line_count > ctx_end_line:
                    break
            elif p < self.start or p >= self.end:
                err_page_bits.append(' ')
            else:
                err_page_bits.append('^')
        err_page = ''.join(err_page_bits)
        err_lines = err_page.split('\n')[ctx_start_line:ctx_end_line]

        out = []
        for num, (line, err_line) in enumerate(zip(context_lines, err_lines)):
            lnum = ctx_start_line + num + 1
            out.append(
                text_type(lnum).rjust(digits) + ' ' + line)
            if '^' in err_line:
                out.append('*' * digits + ' ' + err_line)

        return '\n'.join(out)
Example #26
    def context(self):
        if not self.content:
            return ""

        raw = self.content.raw
        if not raw.endswith("\n"):
            raw += "\n"
        start_line = raw.count("\n", 0, self.start)
        end_line = raw.count("\n", 0, self.end)
        ctx_start_line = max(0, start_line - 2)
        ctx_end_line = min(end_line + 3, raw.count("\n"))
        digits = len(text_type(ctx_end_line))
        context_lines = raw.split("\n")[ctx_start_line:ctx_end_line]

        # Highlight the errored portion
        err_page_bits = []
        err_line_count = 1
        for p, c in enumerate(raw):  # pragma: no branch
            if c == "\n":
                err_page_bits.append(c)
                err_line_count += 1
                if err_line_count > ctx_end_line:
                    break
            elif p < self.start or p >= self.end:
                err_page_bits.append(" ")
            else:
                err_page_bits.append("^")
        err_page = "".join(err_page_bits)
        err_lines = err_page.split("\n")[ctx_start_line:ctx_end_line]

        out = []
        for num, (line, err_line) in enumerate(zip(context_lines, err_lines)):
            lnum = ctx_start_line + num + 1
            out.append(text_type(lnum).rjust(digits) + " " + line)
            if "^" in err_line:
                out.append("*" * digits + " " + err_line)

        return "\n".join(out)
Example #27
    def __init__(self, *args, **kwargs):
        signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)

        # Set up the storage for instance state
        self._state = ModelState()

        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work
        # The reason for the kwargs check is that standard iterator passes in by
        # args, and instantiation for iteration is 33% faster.
        args_len = len(args)
        if args_len > len(self._meta.concrete_fields):
            # Daft, but matches old exception sans the err msg.
            raise IndexError("Number of args exceeds number of fields")

        if not kwargs:
            fields_iter = iter(self._meta.concrete_fields)
            # The ordering of the zip calls matter - zip throws StopIteration
            # when an iter throws it. So if the first iter throws it, the second
            # is *not* consumed. We rely on this, so don't change the order
            # without changing the logic.
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
        else:
            # Slower, kwargs-ready version.
            fields_iter = iter(self._meta.fields)
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
                kwargs.pop(field.name, None)
                # Maintain compatibility with existing calls.
                if isinstance(field.rel, ManyToOneRel):
                    kwargs.pop(field.attname, None)

        # Now we're left with the unprocessed fields that *must* come from
        # keywords, or default.

        for field in fields_iter:
            is_related_object = False
            # This slightly odd construct is so that we can access any
            # data-descriptor object (DeferredAttribute) without triggering its
            # __get__ method.
            if (field.attname not in kwargs and
                    (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
                     or field.column is None)):
                # This field will be populated on request.
                continue
            if kwargs:
                if isinstance(field.rel, ForeignObjectRel):
                    try:
                        # Assume object instance was passed in.
                        rel_obj = kwargs.pop(field.name)
                        is_related_object = True
                    except KeyError:
                        try:
                            # Object instance wasn't passed in -- must be an ID.
                            val = kwargs.pop(field.attname)
                        except KeyError:
                            val = field.get_default()
                    else:
                        # Object instance was passed in. Special case: You can
                        # pass in "None" for related objects if it's allowed.
                        if rel_obj is None and field.null:
                            val = None
                else:
                    try:
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        # This is done with an exception rather than the
                        # default argument on pop because we don't want
                        # get_default() to be evaluated, and then not used.
                        # Refs #12057.
                        val = field.get_default()
            else:
                val = field.get_default()

            if is_related_object:
                # If we are passed a related instance, set it using the
                # field.name instead of field.attname (e.g. "user" instead of
                # "user_id") so that the object gets properly cached (and type
                # checked) by the RelatedObjectDescriptor.
                setattr(self, field.name, rel_obj)
            else:
                setattr(self, field.attname, val)

        if kwargs:
            for prop in list(kwargs):
                try:
                    if isinstance(getattr(self.__class__, prop), property):
                        setattr(self, prop, kwargs.pop(prop))
                except AttributeError:
                    pass
            if kwargs:
                raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
        super(Model, self).__init__()
        signals.post_init.send(sender=self.__class__, instance=self)
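
The comment about zip's argument order can be demonstrated directly: zip pulls from its first iterable before its second, so when args runs out first, the fields iterator keeps its remaining items for the later per-field loop:

fields_iter = iter(['a', 'b', 'c'])
args = [1, 2]
assert list(zip(args, fields_iter)) == [(1, 'a'), (2, 'b')]
# 'c' was never consumed, so the follow-up loop still sees it:
assert next(fields_iter) == 'c'
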
Example #28
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
                multi_geom=False, name_field=None, imports=True,
                decimal=False, blank=False, null=False):
    """
    Helper routine for `ogrinspect` that generates GeoDjango models corresponding
    to the given data source.  See the `ogrinspect` docstring for more details.
    """
    # Getting the DataSource
    if isinstance(data_source, six.string_types):
        data_source = DataSource(data_source)
    elif isinstance(data_source, DataSource):
        pass
    else:
        raise TypeError('Data source parameter must be a string or a DataSource object.')

    # Getting the layer corresponding to the layer key and getting
    # a string listing of all OGR fields in the Layer.
    layer = data_source[layer_key]
    ogr_fields = layer.fields

    # Creating lists from the `null`, `blank`, and `decimal`
    # keyword arguments.
    def process_kwarg(kwarg):
        if isinstance(kwarg, (list, tuple)):
            return [s.lower() for s in kwarg]
        elif kwarg:
            return [s.lower() for s in ogr_fields]
        else:
            return []

    null_fields = process_kwarg(null)
    blank_fields = process_kwarg(blank)
    decimal_fields = process_kwarg(decimal)

    # Gets the `null` and `blank` keywords for the given field name.
    def get_kwargs_str(field_name):
        kwlist = []
        if field_name.lower() in null_fields:
            kwlist.append('null=True')
        if field_name.lower() in blank_fields:
            kwlist.append('blank=True')
        if kwlist:
            return ', ' + ', '.join(kwlist)
        else:
            return ''

    # For those wishing to disable the imports.
    if imports:
        yield '# This is an auto-generated Django model module created by ogrinspect.'
        yield 'from django.contrib.gis.db import models'
        yield ''

    yield 'class %s(models.Model):' % model_name

    for field_name, width, precision, field_type in zip(
            ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
        # The model field name.
        mfield = field_name.lower()
        if mfield[-1:] == '_':
            mfield += 'field'

        # Getting the keyword args string.
        kwargs_str = get_kwargs_str(field_name)

        if field_type is OFTReal:
            # By default OFTReals are mapped to `FloatField`, however, they
            # may also be mapped to `DecimalField` if specified in the
            # `decimal` keyword.
            if field_name.lower() in decimal_fields:
                yield '    %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (
                    mfield, width, precision, kwargs_str
                )
            else:
                yield '    %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTInteger:
            yield '    %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTInteger64:
            yield '    %s = models.BigIntegerField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTString:
            yield '    %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
        elif field_type is OFTDate:
            yield '    %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTDateTime:
            yield '    %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTTime:
            yield '    %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
        else:
            raise TypeError('Unknown field type %s in %s' % (field_type, mfield))

    # TODO: Autodetection of multigeometry types (see #7218).
    gtype = layer.geom_type
    if multi_geom:
        gtype.to_multi()
    geom_field = gtype.django

    # Setting up the SRID keyword string.
    if srid is None:
        if layer.srs is None:
            srid_str = 'srid=-1'
        else:
            srid = layer.srs.srid
            if srid is None:
                srid_str = 'srid=-1'
            elif srid == 4326:
                # WGS84 is already the default.
                srid_str = ''
            else:
                srid_str = 'srid=%s' % srid
    else:
        srid_str = 'srid=%s' % srid

    yield '    %s = models.%s(%s)' % (geom_name, geom_field, srid_str)

    if name_field:
        yield ''
        yield '    def __%s__(self): return self.%s' % (
            'str' if six.PY3 else 'unicode', name_field)
Example #29
def normalize(pattern):
    """
    Given a reg-exp pattern, normalizes it to an iterable of forms that
    suffice for reverse matching. This does the following:

    (1) For any repeating sections, keeps the minimum number of occurrences
        permitted (this means zero for optional groups).
    (2) If an optional group includes parameters, include one occurrence of
        that group (along with the zero occurrence case from step (1)).
    (3) Select the first (essentially an arbitrary) element from any character
        class. Select an arbitrary character for any unordered class (e.g. '.'
        or '\w') in the pattern.
    (4) Ignore comments and any of the reg-exp flags that won't change
        what we construct ("iLmsu"). "(?x)" is an error, however.
    (5) Raise an error on all other non-capturing (?...) forms (e.g.
        look-ahead and look-behind matches) and any disjunctive ('|')
        constructs.

    Django's URLs for forward resolving are either all positional arguments or
    all keyword arguments. That is assumed here, as well. Although reverse
    resolving can be done using positional args when keyword args are
    specified, the two cannot be mixed in the same reverse() call.
    """
    # Do a linear scan to work out the special features of this pattern. The
    # idea is that we scan once here and collect all the information we need to
    # make future decisions.
    result = []
    non_capturing_groups = []
    consume_next = True
    pattern_iter = next_char(iter(pattern))
    num_args = 0

    # A "while" loop is used here because later on we need to be able to peek
    # at the next character and possibly go around without consuming another
    # one at the top of the loop.
    try:
        ch, escaped = next(pattern_iter)
    except StopIteration:
        return [('', [])]

    try:
        while True:
            if escaped:
                result.append(ch)
            elif ch == '.':
                # Replace "any character" with an arbitrary representative.
                result.append(".")
            elif ch == '|':
                # FIXME: One day we should do this, but not in 1.0.
                raise NotImplementedError('Awaiting Implementation')
            elif ch == "^":
                pass
            elif ch == '$':
                break
            elif ch == ')':
                # This can only be the end of a non-capturing group, since all
                # other unescaped parentheses are handled by the grouping
                # section later (and the full group is handled there).
                #
                # We regroup everything inside the capturing group so that it
                # can be quantified, if necessary.
                start = non_capturing_groups.pop()
                inner = NonCapture(result[start:])
                result = result[:start] + [inner]
            elif ch == '[':
                # Replace ranges with the first character in the range.
                ch, escaped = next(pattern_iter)
                result.append(ch)
                ch, escaped = next(pattern_iter)
                while escaped or ch != ']':
                    ch, escaped = next(pattern_iter)
            elif ch == '(':
                # Some kind of group.
                ch, escaped = next(pattern_iter)
                if ch != '?' or escaped:
                    # A positional group
                    name = "_%d" % num_args
                    num_args += 1
                    result.append(Group((("%%(%s)s" % name), name)))
                    walk_to_end(ch, pattern_iter)
                else:
                    ch, escaped = next(pattern_iter)
                    if ch in "iLmsu#":
                        # All of these are ignorable. Walk to the end of the
                        # group.
                        walk_to_end(ch, pattern_iter)
                    elif ch == ':':
                        # Non-capturing group
                        non_capturing_groups.append(len(result))
                    elif ch != 'P':
                        # Anything else, other than a named group, is something
                        # we cannot reverse.
                        raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
                    else:
                        ch, escaped = next(pattern_iter)
                        if ch not in ('<', '='):
                            raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
                        # We are in a named capturing group. Extract the name
                        # and then skip to the end.
                        if ch == '<':
                            terminal_char = '>'
                        # We are in a named backreference.
                        else:
                            terminal_char = ')'
                        name = []
                        ch, escaped = next(pattern_iter)
                        while ch != terminal_char:
                            name.append(ch)
                            ch, escaped = next(pattern_iter)
                        param = ''.join(name)
                        # Named backreferences have already consumed the
                        # parenthesis.
                        if terminal_char != ')':
                            result.append(Group((("%%(%s)s" % param), param)))
                            walk_to_end(ch, pattern_iter)
                        else:
                            result.append(Group((("%%(%s)s" % param), None)))
            elif ch in "*?+{":
                # Quantifiers affect the previous item in the result list.
                count, ch = get_quantifier(ch, pattern_iter)
                if ch:
                    # We had to look ahead, but it wasn't needed to compute
                    # the quantifier, so use this character next time around
                    # the main loop.
                    consume_next = False

                if count == 0:
                    if contains(result[-1], Group):
                        # If we are quantifying a capturing group (or
                        # something containing such a group) and the minimum is
                        # zero, we must also handle the case of one occurrence
                        # being present. All the quantifiers (except {0,0},
                        # which we conveniently ignore) that have a 0 minimum
                        # also allow a single occurrence.
                        result[-1] = Choice([None, result[-1]])
                    else:
                        result.pop()
                elif count > 1:
                    result.extend([result[-1]] * (count - 1))
            else:
                # Anything else is a literal.
                result.append(ch)

            if consume_next:
                ch, escaped = next(pattern_iter)
            else:
                consume_next = True
    except StopIteration:
        pass
    except NotImplementedError:
        # A case of using the disjunctive form. No results for you!
        return [('', [])]

    return list(zip(*flatten_result(result)))
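
flatten_result returns two parallel lists (patterns and their argument lists), so zip(*...) over that pair simply re-pairs them element by element:

patterns, arg_lists = ['blog/%(id)s', 'blog/'], [['id'], []]
assert list(zip(*(patterns, arg_lists))) == [
    ('blog/%(id)s', ['id']), ('blog/', [])]
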
Example #30
def _ogrinspect(
    data_source,
    model_name,
    geom_name="geom",
    layer_key=0,
    srid=None,
    multi_geom=False,
    name_field=None,
    imports=True,
    decimal=False,
    blank=False,
    null=False,
):
    """
    Helper routine for `ogrinspect` that generates GeoDjango models corresponding
    to the given data source.  See the `ogrinspect` docstring for more details.
    """
    # Getting the DataSource
    if isinstance(data_source, six.string_types):
        data_source = DataSource(data_source)
    elif isinstance(data_source, DataSource):
        pass
    else:
        raise TypeError("Data source parameter must be a string or a DataSource object.")

    # Getting the layer corresponding to the layer key and getting
    # a string listing of all OGR fields in the Layer.
    layer = data_source[layer_key]
    ogr_fields = layer.fields

    # Creating lists from the `null`, `blank`, and `decimal`
    # keyword arguments.
    def process_kwarg(kwarg):
        if isinstance(kwarg, (list, tuple)):
            return [s.lower() for s in kwarg]
        elif kwarg:
            return [s.lower() for s in ogr_fields]
        else:
            return []

    null_fields = process_kwarg(null)
    blank_fields = process_kwarg(blank)
    decimal_fields = process_kwarg(decimal)

    # Gets the `null` and `blank` keywords for the given field name.
    def get_kwargs_str(field_name):
        kwlist = []
        if field_name.lower() in null_fields:
            kwlist.append("null=True")
        if field_name.lower() in blank_fields:
            kwlist.append("blank=True")
        if kwlist:
            return ", " + ", ".join(kwlist)
        else:
            return ""

    # For those wishing to disable the imports.
    if imports:
        yield "# This is an auto-generated Django model module created by ogrinspect."
        yield "from django.contrib.gis.db import models"
        yield ""

    yield "class %s(models.Model):" % model_name

    for field_name, width, precision, field_type in zip(
        ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types
    ):
        # The model field name.
        mfield = field_name.lower()
        if mfield[-1:] == "_":
            mfield += "field"

        # Getting the keyword args string.
        kwargs_str = get_kwargs_str(field_name)

        if field_type is OFTReal:
            # By default OFTReals are mapped to `FloatField`, however, they
            # may also be mapped to `DecimalField` if specified in the
            # `decimal` keyword.
            if field_name.lower() in decimal_fields:
                yield "    %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)" % (
                    mfield,
                    width,
                    precision,
                    kwargs_str,
                )
            else:
                yield "    %s = models.FloatField(%s)" % (mfield, kwargs_str[2:])
        elif field_type is OFTInteger:
            yield "    %s = models.IntegerField(%s)" % (mfield, kwargs_str[2:])
        elif field_type is OFTString:
            yield "    %s = models.CharField(max_length=%s%s)" % (mfield, width, kwargs_str)
        elif field_type is OFTDate:
            yield "    %s = models.DateField(%s)" % (mfield, kwargs_str[2:])
        elif field_type is OFTDateTime:
            yield "    %s = models.DateTimeField(%s)" % (mfield, kwargs_str[2:])
        elif field_type is OFTTime:
            yield "    %s = models.TimeField(%s)" % (mfield, kwargs_str[2:])
        else:
            raise TypeError("Unknown field type %s in %s" % (field_type, mfield))

    # TODO: Autodetection of multigeometry types (see #7218).
    gtype = layer.geom_type
    if multi_geom:
        gtype.to_multi()
    geom_field = gtype.django

    # Setting up the SRID keyword string.
    if srid is None:
        if layer.srs is None:
            srid_str = "srid=-1"
        else:
            srid = layer.srs.srid
            if srid is None:
                srid_str = "srid=-1"
            elif srid == 4326:
                # WGS84 is already the default.
                srid_str = ""
            else:
                srid_str = "srid=%s" % srid
    else:
        srid_str = "srid=%s" % srid

    yield "    %s = models.%s(%s)" % (geom_name, geom_field, srid_str)
    yield "    objects = models.GeoManager()"

    if name_field:
        yield ""
        yield "    def __%s__(self): return self.%s" % ("str" if six.PY3 else "unicode", name_field)
Example #31
    def get_columns(self, with_aliases=False):
        """
        Return the list of columns to use in the select statement. If no
        columns have been specified, returns all columns relating to fields in
        the model.

        If 'with_aliases' is true, any column names that are duplicated
        (without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.

        This routine is overridden from Query to handle customized selection of
        geometry columns.
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        result = ['(%s) AS %s' % (self.get_extra_select_format(alias) % col[0], qn2(alias))
                  for alias, col in six.iteritems(self.query.extra_select)]
        aliases = set(self.query.extra_select.keys())
        if with_aliases:
            col_aliases = aliases.copy()
        else:
            col_aliases = set()
        if self.query.select:
            only_load = self.deferred_to_columns()
            # This loop customized for GeoQuery.
            for col, field in zip(self.query.select, self.query.select_fields):
                if isinstance(col, (list, tuple)):
                    alias, column = col
                    table = self.query.alias_map[alias].table_name
                    if table in only_load and column not in only_load[table]:
                        continue
                    r = self.get_field_select(field, alias, column)
                    if with_aliases:
                        if col[1] in col_aliases:
                            c_alias = 'Col%d' % len(col_aliases)
                            result.append('%s AS %s' % (r, c_alias))
                            aliases.add(c_alias)
                            col_aliases.add(c_alias)
                        else:
                            result.append('%s AS %s' % (r, qn2(col[1])))
                            aliases.add(r)
                            col_aliases.add(col[1])
                    else:
                        result.append(r)
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    result.append(col.as_sql(qn, self.connection))

                    if hasattr(col, 'alias'):
                        aliases.add(col.alias)
                        col_aliases.add(col.alias)

        elif self.query.default_cols:
            cols, new_aliases = self.get_default_columns(with_aliases,
                    col_aliases)
            result.extend(cols)
            aliases.update(new_aliases)

        max_name_length = self.connection.ops.max_name_length()
        result.extend([
                '%s%s' % (
                    self.get_extra_select_format(alias) % aggregate.as_sql(qn, self.connection),
                    alias is not None
                        and ' AS %s' % qn(truncate_name(alias, max_name_length))
                        or ''
                    )
                for alias, aggregate in self.query.aggregate_select.items()
        ])

        # This loop customized for GeoQuery.
        for (table, col), field in zip(self.query.related_select_cols, self.query.related_select_fields):
            r = self.get_field_select(field, table, col)
            if with_aliases and col in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (r, c_alias))
                aliases.add(c_alias)
                col_aliases.add(c_alias)
            else:
                result.append(r)
                aliases.add(r)
                col_aliases.add(col)

        self._select_aliases = aliases
        return result
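The 'ColN' aliasing in the loops above can be illustrated in isolation. A minimal standalone sketch of the de-duplication idea (not Django API): the first occurrence of a column keeps its name, later duplicates are renamed:

# Standalone sketch of the alias scheme used above.
def alias_duplicate_columns(columns):
    seen, result = set(), []
    for col in columns:
        if col in seen:
            alias = 'Col%d' % len(seen)  # numbered after the names seen so far
            result.append('%s AS %s' % (col, alias))
            seen.add(alias)
        else:
            result.append(col)
            seen.add(col)
    return result

print(alias_duplicate_columns(['id', 'name', 'id']))  # ['id', 'name', 'id AS Col2']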
Example #32
    def get_columns(self, with_aliases=False):
        """
        Return the list of columns to use in the select statement. If no
        columns have been specified, returns all columns relating to fields in
        the model.

        If 'with_aliases' is true, any column names that are duplicated
        (without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.

        This routine is overridden from Query to handle customized selection of
        geometry columns.
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        result = [
            '(%s) AS %s' %
            (self.get_extra_select_format(alias) % col[0], qn2(alias))
            for alias, col in six.iteritems(self.query.extra_select)
        ]
        aliases = set(self.query.extra_select.keys())
        if with_aliases:
            col_aliases = aliases.copy()
        else:
            col_aliases = set()
        if self.query.select:
            only_load = self.deferred_to_columns()
            # This loop customized for GeoQuery.
            for col, field in zip(self.query.select, self.query.select_fields):
                if isinstance(col, (list, tuple)):
                    alias, column = col
                    table = self.query.alias_map[alias].table_name
                    if table in only_load and column not in only_load[table]:
                        continue
                    r = self.get_field_select(field, alias, column)
                    if with_aliases:
                        if col[1] in col_aliases:
                            c_alias = 'Col%d' % len(col_aliases)
                            result.append('%s AS %s' % (r, c_alias))
                            aliases.add(c_alias)
                            col_aliases.add(c_alias)
                        else:
                            result.append('%s AS %s' % (r, qn2(col[1])))
                            aliases.add(r)
                            col_aliases.add(col[1])
                    else:
                        result.append(r)
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    result.append(col.as_sql(qn, self.connection))

                    if hasattr(col, 'alias'):
                        aliases.add(col.alias)
                        col_aliases.add(col.alias)

        elif self.query.default_cols:
            cols, new_aliases = self.get_default_columns(
                with_aliases, col_aliases)
            result.extend(cols)
            aliases.update(new_aliases)

        max_name_length = self.connection.ops.max_name_length()
        result.extend([
            '%s%s' % (
                self.get_extra_select_format(alias) % aggregate.as_sql(qn, self.connection),
                ' AS %s' % qn(truncate_name(alias, max_name_length)) if alias is not None else '',
            )
            for alias, aggregate in self.query.aggregate_select.items()
        ])

        # This loop customized for GeoQuery.
        for (table, col), field in zip(self.query.related_select_cols,
                                       self.query.related_select_fields):
            r = self.get_field_select(field, table, col)
            if with_aliases and col in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (r, c_alias))
                aliases.add(c_alias)
                col_aliases.add(c_alias)
            else:
                result.append(r)
                aliases.add(r)
                col_aliases.add(col)

        self._select_aliases = aliases
        return result
Example #33
    def handle_merge(self, loader, conflicts):
        """
        Handles merging together conflicted migrations interactively,
        if it's safe; otherwise, advises on how to fix it.
        """
        if self.interactive:
            questioner = InteractiveMigrationQuestioner()
        else:
            questioner = MigrationQuestioner(defaults={'ask_merge': True})

        for app_label, migration_names in conflicts.items():
            # Grab out the migrations in question, and work out their
            # common ancestor.
            merge_migrations = []
            for migration_name in migration_names:
                migration = loader.get_migration(app_label, migration_name)
                migration.ancestry = [
                    mig for mig in loader.graph.forwards_plan((app_label,
                                                               migration_name))
                    if mig[0] == migration.app_label
                ]
                merge_migrations.append(migration)

            def all_items_equal(seq):
                return all(item == seq[0] for item in seq[1:])

            merge_migrations_generations = zip(
                *[m.ancestry for m in merge_migrations])
            common_ancestor_count = sum(
                1 for common_ancestor_generation in takewhile(
                    all_items_equal, merge_migrations_generations))
            if not common_ancestor_count:
                raise ValueError("Could not find common ancestor of %s" %
                                 migration_names)
            # Now work out the operations along each divergent branch
            for migration in merge_migrations:
                migration.branch = migration.ancestry[common_ancestor_count:]
                migrations_ops = (loader.get_migration(node_app,
                                                       node_name).operations
                                  for node_app, node_name in migration.branch)
                migration.merged_operations = sum(migrations_ops, [])
            # In future, this could use some of the Optimizer code
            # (can_optimize_through) to automatically see if they're
            # mergeable. For now, we always just prompt the user.
            if self.verbosity > 0:
                self.stdout.write(
                    self.style.MIGRATE_HEADING("Merging %s" % app_label))
                for migration in merge_migrations:
                    self.stdout.write(
                        self.style.MIGRATE_LABEL("  Branch %s" %
                                                 migration.name))
                    for operation in migration.merged_operations:
                        self.stdout.write("    - %s\n" % operation.describe())
            if questioner.ask_merge(app_label):
                # If they still want to merge it, then write out an empty
                # file depending on the migrations needing merging.
                numbers = [
                    MigrationAutodetector.parse_number(migration.name)
                    for migration in merge_migrations
                ]
                try:
                    biggest_number = max(x for x in numbers if x is not None)
                except ValueError:
                    biggest_number = 1
                subclass = type(
                    "Migration", (Migration, ), {
                        "dependencies": [(app_label, migration.name)
                                         for migration in merge_migrations],
                    })
                migration_name = "%04i_%s" % (
                    biggest_number + 1, self.migration_name or
                    ("merge_%s" % get_migration_name_timestamp()))
                new_migration = subclass(migration_name, app_label)
                writer = MigrationWriter(new_migration)

                if not self.dry_run:
                    # Write the merge migrations file to the disk
                    with io.open(writer.path, "w", encoding='utf-8') as fh:
                        fh.write(writer.as_string())
                    if self.verbosity > 0:
                        self.stdout.write("\nCreated new merge migration %s" %
                                          writer.path)
                elif self.verbosity == 3:
                    # Alternatively, makemigrations --merge --dry-run --verbosity 3
                    # will output the merge migrations to stdout rather than saving
                    # the file to the disk.
                    self.stdout.write(
                        self.style.MIGRATE_HEADING(
                            "Full merge migrations file '%s':" %
                            writer.filename) + "\n")
                    self.stdout.write("%s\n" % writer.as_string())
Example #34
    def __init__(self, *args, **kwargs):
        signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)

        # Set up the storage for instance state
        self._state = ModelState()

        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work.
        # The reason for the kwargs check is that the standard iterator passes in by
        # args, and instantiation for iteration is 33% faster.
        args_len = len(args)
        if args_len > len(self._meta.fields):
            # Daft, but matches old exception sans the err msg.
            raise IndexError("Number of args exceeds number of fields")

        fields_iter = iter(self._meta.fields)
        if not kwargs:
            # The ordering of the zip calls matters - zip throws StopIteration
            # when an iter throws it. So if the first iter throws it, the second
            # is *not* consumed. We rely on this, so don't change the order
            # without changing the logic.
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
        else:
            # Slower, kwargs-ready version.
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
                kwargs.pop(field.name, None)
                # Maintain compatibility with existing calls.
                if isinstance(field.rel, ManyToOneRel):
                    kwargs.pop(field.attname, None)

        # Now we're left with the unprocessed fields that *must* come from
        # keywords, or default.

        for field in fields_iter:
            is_related_object = False
            # This slightly odd construct is so that we can access any
            # data-descriptor object (DeferredAttribute) without triggering its
            # __get__ method.
            if (field.attname not in kwargs
                    and isinstance(self.__class__.__dict__.get(field.attname),
                                   DeferredAttribute)):
                # This field will be populated on request.
                continue
            if kwargs:
                if isinstance(field.rel, ManyToOneRel):
                    try:
                        # Assume object instance was passed in.
                        rel_obj = kwargs.pop(field.name)
                        is_related_object = True
                    except KeyError:
                        try:
                            # Object instance wasn't passed in -- must be an ID.
                            val = kwargs.pop(field.attname)
                        except KeyError:
                            val = field.get_default()
                    else:
                        # Object instance was passed in. Special case: You can
                        # pass in "None" for related objects if it's allowed.
                        if rel_obj is None and field.null:
                            val = None
                else:
                    try:
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        # This is done with an exception rather than the
                        # default argument on pop because we don't want
                        # get_default() to be evaluated, and then not used.
                        # Refs #12057.
                        val = field.get_default()
            else:
                val = field.get_default()
            if is_related_object:
                # If we are passed a related instance, set it using the
                # field.name instead of field.attname (e.g. "user" instead of
                # "user_id") so that the object gets properly cached (and type
                # checked) by the RelatedObjectDescriptor.
                setattr(self, field.name, rel_obj)
            else:
                setattr(self, field.attname, val)

        if kwargs:
            for prop in list(kwargs):
                try:
                    if isinstance(getattr(self.__class__, prop), property):
                        setattr(self, prop, kwargs.pop(prop))
                except AttributeError:
                    pass
            if kwargs:
                raise TypeError(
                    "'%s' is an invalid keyword argument for this function" %
                    list(kwargs)[0])
        super(Model, self).__init__()
        signals.post_init.send(sender=self.__class__, instance=self)
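The comment about zip ordering is subtle enough to deserve a demonstration. A minimal sketch of why the unconsumed fields survive for the kwargs/default loop:

# zip() pulls from its first argument before its second, so once args is
# exhausted the fields iterator has given up only as many fields as there
# were positional arguments; the rest remain for the second loop.
fields_iter = iter(['pk', 'name', 'email'])
args = [1]
for val, field in zip(args, fields_iter):
    pass                      # consumes only 'pk'
print(list(fields_iter))      # ['name', 'email']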
Example #35
    def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
            requested=None, restricted=None, nullable=None):
        """
        Fill in the information needed for a select_related query. The current
        depth is measured as the number of connections away from the root model
        (for example, cur_depth=1 means we are looking at models with direct
        connections to the root model).
        """
        if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
            # We've recursed far enough; bail out.
            return

        if not opts:
            opts = self.query.get_meta()
            root_alias = self.query.get_initial_alias()
            self.query.related_select_cols = []
        only_load = self.query.get_loaded_field_names()

        # Setup for the case when only particular related fields should be
        # included in the related selection.
        if requested is None:
            if isinstance(self.query.select_related, dict):
                requested = self.query.select_related
                restricted = True
            else:
                restricted = False

        for f, model in opts.get_fields_with_model():
            # get_fields_with_model() returns None for fields defined locally
            # on the model, so for those fields we fall back to f.model, the
            # field's local model.
            field_model = model or f.model
            if not select_related_descend(f, restricted, requested,
                                          only_load.get(field_model)):
                continue
            table = f.rel.to._meta.db_table
            promote = nullable or f.null
            alias = self.query.join_parent_model(opts, model, root_alias, {})
            join_cols = f.get_joining_columns()
            alias = self.query.join((alias, table, join_cols),
                    outer_if_first=promote, join_field=f)
            columns, aliases = self.get_default_columns(start_alias=alias,
                    opts=f.rel.to._meta, as_pairs=True)
            self.query.related_select_cols.extend(
                SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.concrete_fields))
            if restricted:
                next = requested.get(f.name, {})
            else:
                next = False
            new_nullable = f.null or promote
            self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
                    next, restricted, new_nullable)

        if restricted:
            related_fields = [
                (o.field, o.model)
                for o in opts.get_all_related_objects()
                if o.field.unique
            ]
            for f, model in related_fields:
                if not select_related_descend(f, restricted, requested,
                                              only_load.get(model), reverse=True):
                    continue

                alias = self.query.join_parent_model(opts, f.rel.to, root_alias, {})
                table = model._meta.db_table
                alias = self.query.join(
                    (alias, table, f.get_joining_columns(reverse_join=True)),
                    outer_if_first=True, join_field=f
                )
                from_parent = (opts.model if issubclass(model, opts.model)
                               else None)
                columns, aliases = self.get_default_columns(start_alias=alias,
                    opts=model._meta, as_pairs=True, from_parent=from_parent)
                self.query.related_select_cols.extend(
                    SelectInfo(col, field) for col, field
                    in zip(columns, model._meta.concrete_fields))
                next = requested.get(f.related_query_name(), {})
                # Use True here because we are looking at the _reverse_ side of
                # the relation, which is always nullable.
                new_nullable = True

                self.fill_related_selections(model._meta, table, cur_depth+1,
                    next, restricted, new_nullable)
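For orientation, a hedged sketch of the ORM call that exercises this routine; Book and Author are hypothetical models, not anything from the example:

# Hypothetical models: Book has a ForeignKey to Author. select_related()
# is what causes fill_related_selections() to add the JOINed columns.
book = Book.objects.select_related('author').get(pk=1)
print(book.author.name)  # no extra query; author columns came with the JOIN

# Each '__' hop descends one level of cur_depth in the recursion above.
books = Book.objects.select_related('author__publisher')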
Example #36
    def _create_formsets(self, request, obj, change):
        orig_formsets, orig_inline_instances = super(
            NestedModelAdminMixin, self)._create_formsets(request, obj, change)

        formsets = []
        inline_instances = []
        prefixes = {}

        for formset, inline_instance in zip(orig_formsets,
                                            orig_inline_instances):
            if not hasattr(formset, 'nesting_depth'):
                formset.nesting_depth = 1

            formsets.append(formset)
            inline_instances.append(inline_instance)

            if getattr(inline_instance, 'inlines', []):
                inlines_and_formsets = [
                    (nested, formset)
                    for nested in inline_instance.get_inline_instances(request)
                ]
                i = 0
                while i < len(inlines_and_formsets):
                    nested, formset = inlines_and_formsets[i]
                    i += 1
                    formset_forms = list(formset.forms) + [None]
                    for form in formset_forms:
                        if form is not None:
                            form.parent_formset = formset
                            form_prefix = form.prefix
                            form_obj = form.instance
                        else:
                            form_prefix = formset.add_prefix('empty')
                            form_obj = None
                        InlineFormSet = nested.get_formset(request, form_obj)
                        prefix = '%s-%s' % (form_prefix,
                                            InlineFormSet.get_default_prefix())
                        prefixes[prefix] = prefixes.get(prefix, 0) + 1
                        if prefixes[prefix] != 1:
                            prefix = "%s-%s" % (prefix, prefixes[prefix])

                        formset_params = {
                            'instance': form_obj,
                            'prefix': prefix,
                            'queryset': nested.get_queryset(request),
                        }
                        if request.method == 'POST':
                            formset_params.update({
                                'data': request.POST,
                                'files': request.FILES,
                                'save_as_new': '_saveasnew' in request.POST,
                            })

                        nested_formset = InlineFormSet(**formset_params)
                        # We set `is_nested` to True so that we have a way
                        # to identify this formset as such and skip it if
                        # there is an error in the POST and we have to create
                        # inline admin formsets.
                        nested_formset.is_nested = True
                        nested_formset.nesting_depth = formset.nesting_depth + 1
                        nested_formset.parent_form = form

                        if form is None:
                            obj = formset
                        else:
                            obj = form
                            if request.method == 'POST':
                                formsets.append(nested_formset)
                                inline_instances.append(nested)
                        obj.nested_formsets = getattr(obj, 'nested_formsets',
                                                      None) or []
                        obj.nested_inlines = getattr(obj, 'nested_inlines',
                                                     None) or []
                        obj.nested_formsets.append(nested_formset)
                        obj.nested_inlines.append(nested)

                        if hasattr(nested, 'get_inline_instances'):
                            inlines_and_formsets += [
                                (nested_nested, nested_formset)
                                for nested_nested in
                                nested.get_inline_instances(request)
                            ]
        return formsets, inline_instances
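The prefix bookkeeping above is worth seeing standalone. A minimal sketch of how colliding nested-formset prefixes pick up a numeric suffix (the prefixes below are hypothetical):

# Standalone sketch of the prefix de-duplication above.
prefixes = {}

def unique_prefix(form_prefix, default_prefix):
    prefix = '%s-%s' % (form_prefix, default_prefix)
    prefixes[prefix] = prefixes.get(prefix, 0) + 1
    if prefixes[prefix] != 1:
        prefix = '%s-%s' % (prefix, prefixes[prefix])
    return prefix

print(unique_prefix('book_set-0', 'chapter_set'))  # book_set-0-chapter_set
print(unique_prefix('book_set-0', 'chapter_set'))  # book_set-0-chapter_set-2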
Example #37
    def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
            requested=None, restricted=None, nullable=None):
        """
        Fill in the information needed for a select_related query. The current
        depth is measured as the number of connections away from the root model
        (for example, cur_depth=1 means we are looking at models with direct
        connections to the root model).
        """
        if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
            # We've recursed far enough; bail out.
            return

        if not opts:
            opts = self.query.get_meta()
            root_alias = self.query.get_initial_alias()
            self.query.related_select_cols = []
        only_load = self.query.get_loaded_field_names()

        # Setup for the case when only particular related fields should be
        # included in the related selection.
        if requested is None:
            if isinstance(self.query.select_related, dict):
                requested = self.query.select_related
                restricted = True
            else:
                restricted = False

        for f, model in opts.get_fields_with_model():
            # get_fields_with_model() returns None for fields defined locally
            # on the model, so for those fields we fall back to f.model, the
            # field's local model.
            field_model = model or f.model
            if not select_related_descend(f, restricted, requested,
                                          only_load.get(field_model)):
                continue
            table = f.rel.to._meta.db_table
            promote = nullable or f.null
            alias = self.query.join_parent_model(opts, model, root_alias, {})

            alias = self.query.join((alias, table, f.column,
                    f.rel.get_related_field().column),
                    promote=promote, join_field=f)
            columns, aliases = self.get_default_columns(start_alias=alias,
                    opts=f.rel.to._meta, as_pairs=True)
            self.query.related_select_cols.extend(
                SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.fields))
            if restricted:
                next = requested.get(f.name, {})
            else:
                next = False
            new_nullable = f.null or promote
            self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
                    next, restricted, new_nullable)

        if restricted:
            related_fields = [
                (o.field, o.model)
                for o in opts.get_all_related_objects()
                if o.field.unique
            ]
            for f, model in related_fields:
                if not select_related_descend(f, restricted, requested,
                                              only_load.get(model), reverse=True):
                    continue

                alias = self.query.join_parent_model(opts, f.rel.to, root_alias, {})
                table = model._meta.db_table
                alias = self.query.join(
                    (alias, table, f.rel.get_related_field().column, f.column),
                    promote=True, join_field=f
                )
                from_parent = (opts.model if issubclass(model, opts.model)
                               else None)
                columns, aliases = self.get_default_columns(start_alias=alias,
                    opts=model._meta, as_pairs=True, from_parent=from_parent)
                self.query.related_select_cols.extend(
                    SelectInfo(col, field) for col, field
                    in zip(columns, model._meta.fields))
                next = requested.get(f.related_query_name(), {})
                # Use True here because we are looking at the _reverse_ side of
                # the relation, which is always nullable.
                new_nullable = True

                self.fill_related_selections(model._meta, table, cur_depth+1,
                    next, restricted, new_nullable)
Example #38
    def _test_sequence(self, response, test_fn):
        ids = self._extract_patch_ids(response)
        self.assertTrue(bool(ids))
        patches = [Patch.objects.get(id=i) for i in ids]
        # Run test_fn over each adjacent pair of patches, in order.
        for p1, p2 in zip(patches, patches[1:]):
            test_fn(p1, p2)
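A hedged sketch of a call site; the URL, the Patch 'date' attribute, and the ordering check are assumptions for illustration:

    # Hypothetical test using the helper above: assert that adjacent patches
    # in the listing appear in non-decreasing date order.
    def test_patches_ordered_by_date(self):
        response = self.client.get('/patches/')  # made-up URL
        self._test_sequence(
            response,
            lambda p1, p2: self.assertLessEqual(p1.date, p2.date),
        )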