Code Example #1
File: widgets.py  Project: MattBlack85/django
    def merge(*lists):
        """
        Merge lists while trying to keep the relative order of the elements.
        Warn if the lists have the same elements in a different relative order.

        For static assets it can be important to have them included in the DOM
        in a certain order. In JavaScript you may not be able to reference a
        global or in CSS you might want to override a style.
        """
        dependency_graph = defaultdict(set)
        all_items = OrderedSet()
        for list_ in filter(None, lists):
            head = list_[0]
            # The first items depend on nothing but have to be part of the
            # dependency graph to be included in the result.
            dependency_graph.setdefault(head, set())
            for item in list_:
                all_items.add(item)
                # No self dependencies
                if head != item:
                    dependency_graph[item].add(head)
                head = item
        try:
            return stable_topological_sort(all_items, dependency_graph)
        except CyclicDependencyError:
            warnings.warn(
                'Detected duplicate Media files in an opposite order: {}'.format(
                    ', '.join(repr(l) for l in lists)
                ), MediaOrderConflictWarning,
            )
            return list(all_items)
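
A quick usage sketch of the merge() above, assuming a Django version where it is exposed as the static method Media.merge with this *lists signature and where MediaOrderConflictWarning lives in django.forms.widgets:

import warnings

from django.forms.widgets import Media, MediaOrderConflictWarning

# Shared elements agree on relative order: merged cleanly.
print(Media.merge(['a.js', 'b.js'], ['b.js', 'c.js']))  # ['a.js', 'b.js', 'c.js']

# Shared elements in opposite order: the warning fires and the result
# falls back to first-seen insertion order.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    print(Media.merge(['a.js', 'b.js'], ['b.js', 'a.js']))  # ['a.js', 'b.js']
    assert any(issubclass(w.category, MediaOrderConflictWarning) for w in caught)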
Code Example #2
File: test_datastructures.py  Project: velis74/django
 def test_len(self):
     s = OrderedSet()
     self.assertEqual(len(s), 0)
     s.add(1)
     s.add(2)
     s.add(2)
     self.assertEqual(len(s), 2)
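
The test cases in this listing exercise Django's OrderedSet from django.utils.datastructures. As a point of reference, a minimal dict-backed sketch with the same observable behaviour would look roughly like this (a sketch only; the real class may differ in details, and reversed() over a dict requires Python 3.8+):

class OrderedSet:
    """Minimal sketch: a set that remembers insertion order, backed by a dict."""

    def __init__(self, iterable=None):
        # dict preserves insertion order and drops duplicate keys.
        self.dict = dict.fromkeys(iterable or ())

    def add(self, item):
        self.dict[item] = None

    def remove(self, item):
        del self.dict[item]

    def discard(self, item):
        self.dict.pop(item, None)

    def __iter__(self):
        return iter(self.dict)

    def __reversed__(self):
        return reversed(self.dict)

    def __contains__(self, item):
        return item in self.dict

    def __bool__(self):
        return bool(self.dict)

    def __len__(self):
        return len(self.dict)

    def __repr__(self):
        data = repr(list(self.dict)) if self.dict else ''
        return f'{self.__class__.__name__}({data})'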
Code Example #3
File: export.py  Project: dimagi/commcare-hq
def navigation_event_ids_by_user(user, start_date=None, end_date=None):
    database = NavigationEventAudit.get_db()

    def _date_key(date):
        return [date.year, date.month, date.day]

    startkey = [user]
    if start_date:
        startkey.extend(_date_key(start_date))

    endkey = [user]
    if end_date:
        end = end_date + timedelta(days=1)
        endkey.extend(_date_key(end))
    else:
        endkey.append({})

    ids = OrderedSet()
    results = database.view(
        'auditcare/urlpath_by_user_date',
        startkey=startkey,
        endkey=endkey,
        reduce=False,
        include_docs=False,
    )
    for row in results:
        ids.add(row['id'])
    return ids
Code Example #4
File: options.py  Project: agmond/django
 def get_parent_list(self):
     """
     Return all the ancestors of this model as a list ordered by MRO.
     Useful for determining if something is an ancestor, regardless of lineage.
     """
     result = OrderedSet(self.parents)
     for parent in self.parents:
         for ancestor in parent._meta.get_parent_list():
             result.add(ancestor)
     return list(result)
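
Because OrderedSet ignores items that are added again, a diamond-shaped parent graph contributes each ancestor exactly once while the MRO-like order is preserved. A model-free sketch of the same accumulation pattern (the ancestry mapping and class names are purely illustrative):

from django.utils.datastructures import OrderedSet

ancestry = {
    'Child': ['Left', 'Right'],
    'Left': ['Base'],
    'Right': ['Base'],
    'Base': [],
}

def get_parent_list(name):
    result = OrderedSet(ancestry[name])
    for parent in ancestry[name]:
        for ancestor in get_parent_list(parent):
            result.add(ancestor)
    return list(result)

print(get_parent_list('Child'))  # ['Left', 'Right', 'Base'] -- 'Base' only once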
Code Example #5
def fields_for_document(document, fields=None, exclude=None, widgets=None,
                        formfield_callback=None,
                        field_generator=MongoFormFieldGenerator):
    """
    Returns an ``OrderedSet`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.
    """
    field_list = []
    ignored = []
    if isinstance(field_generator, type):
        field_generator = field_generator()

    sorted_fields = sorted(document._fields.values(),
                           key=lambda field: field.creation_counter)

    for f in sorted_fields:
        if isinstance(f, ObjectIdField):
            continue
        if isinstance(f, ListField) and not (f.field.choices or isinstance(f.field, ReferenceField)):
            continue
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        if widgets and f.name in widgets:
            kwargs = {'widget': widgets[f.name]}
        else:
            kwargs = {}

        if formfield_callback is None:
            form_field = field_generator.generate(f, **kwargs)
        elif not callable(formfield_callback):
            raise TypeError('formfield_callback must be a function or callable')
        else:
            form_field = formfield_callback(f, **kwargs)

        if form_field:
            field_list.append((f.name, form_field))
        else:
            ignored.append(f.name)

    field_dict = OrderedSet(field_list)
    if fields:
        field_dict = OrderedSet(
            [(f, field_dict.get(f)) for f in fields
                if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
        )
    return field_dict
Code Example #6
def reorder_suite(suite, classes, reverse=False):
    """
    Reorder a test suite by test type.

    `classes` is a sequence of types

    All tests of type classes[0] are placed first, then tests of type
    classes[1], etc. Tests with no match in classes are placed last.

    If `reverse` is True, sort tests within classes in opposite order but
    don't reverse test classes.
    """
    class_count = len(classes)
    suite_class = type(suite)
    bins = [OrderedSet() for i in range(class_count + 1)]
    partition_suite_by_type(suite, classes, bins, reverse=reverse)
    reordered_suite = suite_class()
    for i in range(class_count + 1):
        reordered_suite.addTests(bins[i])
    return reordered_suite
Code Example #7
    def python(self, options):
        import code
        # Set up a dictionary to serve as the environment for the shell, so
        # that tab completion works on objects that are imported at runtime.
        imported_objects = {}
        try:  # Try activating rlcompleter, because it's handy.
            import readline
        except ImportError:
            pass
        else:
            # We don't have to wrap the following import in a 'try', because
            # we already know 'readline' was imported successfully.
            import rlcompleter
            readline.set_completer(
                rlcompleter.Completer(imported_objects).complete)
            # Enable tab completion on systems using libedit (e.g. Mac OSX).
            # These lines are copied from Lib/site.py on Python 3.4.
            readline_doc = getattr(readline, '__doc__', '')
            if readline_doc is not None and 'libedit' in readline_doc:
                readline.parse_and_bind("bind ^I rl_complete")
            else:
                readline.parse_and_bind("tab:complete")

        # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
        # conventions and get $PYTHONSTARTUP first then .pythonrc.py.
        if not options['no_startup']:
            for pythonrc in OrderedSet([
                    os.environ.get("PYTHONSTARTUP"),
                    os.path.expanduser('~/.pythonrc.py')
            ]):
                if not pythonrc:
                    continue
                if not os.path.isfile(pythonrc):
                    continue
                try:
                    with open(pythonrc) as handle:
                        exec(compile(handle.read(), pythonrc, 'exec'),
                             imported_objects)
                except NameError:
                    pass
        code.interact(local=imported_objects)
Code Example #8
File: apps.py  Project: bashu/fluentcms-publishing
        def _filter_candidates_by_published_status(candidates):
            from fluentcms_publishing.middleware import is_draft_request_context

            # Filter candidate results by published status, using
            # instance attributes instead of queryset filtering to
            # handle unpublishable and fluentcms publishing-enabled items.
            objs = OrderedSet()  # preserve order & remove dupes
            if is_draft_request_context():
                for candidate in candidates:
                    # Keep candidates that are publishable draft copies, or
                    # that are not publishable (i.e. they don't have the
                    # `is_draft` attribute at all)
                    if getattr(candidate, 'is_draft', True):
                        objs.add(candidate)
                    # Also keep candidates where we have the published copy and
                    # can exchange to get the draft copy with an identical URL
                    elif hasattr(candidate, 'get_draft'):
                        draft_copy = candidate.get_draft()
                        if draft_copy.get_absolute_url() == \
                                candidate.get_absolute_url():
                            objs.add(draft_copy)
            else:
                for candidate in candidates:
                    # Keep candidates that are published, or that are not
                    # publishable (i.e. they don't have the `is_published`
                    # attribute)
                    if getattr(candidate, 'is_published', True):
                        # Skip candidates that are not within any publication
                        # date restrictions
                        if (hasattr(candidate, 'is_within_publication_dates')
                                and
                                not candidate.is_within_publication_dates()):
                            pass
                        else:
                            objs.add(candidate)
            # Convert `OrderedSet` to a list which supports `len`, see
            # https://code.djangoproject.com/ticket/25093
            return list(objs)
Code Example #9
    def iterative_dfs(self, start, forwards=True):
        """
        Iterative depth first search, for finding dependencies.
        """
        visited = deque()
        visited.append(start)
        if forwards:
            stack = deque(sorted(start.parents))
        else:
            stack = deque(sorted(start.children))
        while stack:
            node = stack.popleft()
            visited.appendleft(node)
            if forwards:
                children = sorted(node.parents, reverse=True)
            else:
                children = sorted(node.children, reverse=True)
            # reverse sorting is needed because prepending using deque.extendleft
            # also effectively reverses values
            stack.extendleft(children)

        return list(OrderedSet(visited))
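
The comment on reverse sorting deserves a standalone check: deque.extendleft() prepends one element at a time, so it reverses its input, and handing it a reverse-sorted list therefore leaves the items in sorted order at the front of the deque:

from collections import deque

stack = deque(['sentinel'])
children = ['a', 'b', 'c']

# extendleft() prepends element by element, reversing the input order.
stack.extendleft(children)
assert list(stack) == ['c', 'b', 'a', 'sentinel']

# Reverse-sorting first cancels that out, which is what iterative_dfs() relies on.
stack = deque(['sentinel'])
stack.extendleft(sorted(children, reverse=True))
assert list(stack) == ['a', 'b', 'c', 'sentinel']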
Code Example #10
    def get_plain(self, options):
        # Using normal Python shell
        import code
        imported_objects = self.get_imported_objects(options)
        try:
            # Try activating rlcompleter, because it's handy.
            import readline
        except ImportError:
            pass
        else:
            # We don't have to wrap the following import in a 'try', because
            # we already know 'readline' was imported successfully.
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(imported_objects).complete)
            readline.parse_and_bind("tab:complete")

        use_pythonrc = options['use_pythonrc']
        no_startup = options['no_startup']

        # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
        # conventions and get $PYTHONSTARTUP first then .pythonrc.py.
        if use_pythonrc or not no_startup:
            for pythonrc in OrderedSet([os.environ.get("PYTHONSTARTUP"), os.path.expanduser('~/.pythonrc.py')]):
                if not pythonrc:
                    continue
                if not os.path.isfile(pythonrc):
                    continue
                with open(pythonrc) as handle:
                    pythonrc_code = handle.read()
                # Match the behavior of the cpython shell where an error in
                # PYTHONSTARTUP prints an exception and continues.
                try:
                    exec(compile(pythonrc_code, pythonrc, 'exec'), imported_objects)
                except Exception:
                    traceback.print_exc()

        def run_plain():
            code.interact(local=imported_objects)
        return run_plain
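
In both shell helpers above, the OrderedSet collapses the two startup-file candidates whenever $PYTHONSTARTUP already points at ~/.pythonrc.py, so the same file is never executed twice. A minimal sketch of that deduplication (the environment value set here is hypothetical):

import os

from django.utils.datastructures import OrderedSet

# Hypothetical setup: PYTHONSTARTUP points at the user's ~/.pythonrc.py.
os.environ['PYTHONSTARTUP'] = os.path.expanduser('~/.pythonrc.py')

candidates = OrderedSet([
    os.environ.get('PYTHONSTARTUP'),
    os.path.expanduser('~/.pythonrc.py'),
])
print(list(candidates))  # a single entry, e.g. ['/home/alice/.pythonrc.py']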
Code Example #11
 def _dfs(start, get_children, path):
     # If we already computed this, use that (dynamic programming)
     if (start, get_children) in cache:
         return cache[(start, get_children)]
     # If we've traversed here before, that's a circular dep
     if start in path:
         raise CircularDependencyError(path[path.index(start):] + [start])
     # Build our own results list, starting with us
     results = []
     results.append(start)
     # We need to add to results all the migrations this one depends on
     children = sorted(get_children(start))
     path.append(start)
     for n in children:
         results = _dfs(n, get_children, path) + results
     path.pop()
     # Use OrderedSet to ensure only one instance of each result
     results = list(OrderedSet(results))
     # Populate DP cache
     cache[(start, get_children)] = results
     # Done!
     return results
Code Example #12
    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except KeyError:
            pass

        year = self.year_for_user(key)
        self[key] = value = {
            "year": year,
            "months": [dt.date(year.year, i, 1) for i in range(1, 13)],
            "target_days": year.months,
            "percentage": [Z1 for i in range(12)],
            "available_vacation_days": [Z1 for i in range(12)],
            "absence_vacation": [Z1 for i in range(12)],
            "absence_sickness": [Z1 for i in range(12)],
            "absence_paid": [Z1 for i in range(12)],
            "absence_correction": [Z1 for i in range(12)],
            "vacation_days_correction": [Z1 for i in range(12)],
            "target": [Z1 for i in range(12)],
            "hours": [Z1 for i in range(12)],
            "employments": OrderedSet(),
        }
        return value
Code Example #13
    def _expand_fields(self, fields, aliased_only=False):
        """ Expand '*' in fields to those that will be queried, and optionally alias them to avoid clashes """

        if isinstance(fields, str):
            fields = fields.split(',')
        if not any(f == '*' or '.' in f for f in fields):
            return self._alias_fields(fields)

        fields_to_expand = [self.object_id_field]
        fields_to_expand.extend(r.source_column for r in self.relations)

        for field in fields:
            if field == '*':
                fields_to_expand.extend(f['name'] for f in self.fields)
            elif field.endswith('.*'):
                related_prefix = field[:-1]
                fields_to_expand.extend(f for f in self.related_fields if f.startswith(related_prefix))
            else:
                fields_to_expand.append(field)

        field_format = '{0}' if aliased_only else '{0} AS "{1}"'

        return ', '.join(field_format.format(self._alias_fields(f), f) for f in OrderedSet(fields_to_expand))
Code Example #14
File: utils.py  Project: nhour/nhour
def entry_shortcuts(user, year, week):
    # Couldn't figure out how to do these using the database.
    max_results = 150
    max_shortcuts = 15

    entries_with_ids = RegularEntry.objects \
        .filter(user=user) \
        .exclude(year=year, week=week) \
        .values("system", "project", "task")[:max_results]
    tuple_results = [(entry["system"], entry["project"], entry["task"])
                     for entry in entries_with_ids]
    unique_results = list(OrderedSet(tuple_results))[:max_shortcuts]
    unordered_shortcuts = [
        {"system": System.objects.get(id=e[0]),
         "project": Project.objects.filter(id=e[1]).first(),
         "task": Task.objects.get(id=e[2])}
        for e in unique_results]

    def sorter(k):
        if k["project"]:
            return k["system"].name, k["project"].name, k["task"].name
        return k["system"].name, k["task"].name

    return sorted(unordered_shortcuts, key=sorter)
Code Example #15
    def process_rhs(self, compiler, connection):
        db_rhs = getattr(self.rhs, '_db', None)
        if db_rhs is not None and db_rhs != connection.alias:
            raise ValueError(
                "Subqueries aren't allowed across different databases. Force "
                "the inner query to be evaluated using `list(inner_query)`.")

        if self.rhs_is_direct_value():
            try:
                rhs = OrderedSet(self.rhs)
            except TypeError:  # Unhashable items in self.rhs
                rhs = self.rhs

            if not rhs:
                raise EmptyResultSet

            # rhs should be an iterable; use batch_process_rhs() to
            # prepare/transform those values.
            sqls, sqls_params = self.batch_process_rhs(compiler, connection,
                                                       rhs)
            placeholder = '(' + ', '.join(sqls) + ')'
            return (placeholder, sqls_params)
        else:
            return super().process_rhs(compiler, connection)
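
The try/except around OrderedSet(self.rhs) is a reusable pattern: deduplicate hashable right-hand-side values while preserving their order, and fall back to the raw iterable when some values are unhashable. The pattern in isolation (dedupe_if_hashable is a made-up helper name):

from django.utils.datastructures import OrderedSet

def dedupe_if_hashable(values):
    """Return values deduplicated in order, or unchanged if they are unhashable."""
    try:
        return OrderedSet(values)
    except TypeError:  # e.g. lists or dicts among the values
        return values

print(list(dedupe_if_hashable([3, 1, 3, 2])))  # [3, 1, 2]
print(dedupe_if_hashable([[1], [2]]))          # [[1], [2]] -- returned as-is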
Code Example #16
def manage_event_sponsors(request, event_id):
    event = get_object_or_404(Event, id=event_id)
    if not request.user.profile.can_edit_event(event):
        messages.add_message(
            request,
            messages.WARNING,
            message=_("You can not manage this event's sponsorss."),
        )
        return redirect(event.get_absolute_url())

    team_sponsors = list(event.team.sponsors.all())
    events_sponsors = list(Sponsor.objects.filter(events__team=event.team))

    if request.method == "POST":
        sponsor_form = SponsorForm(request.POST, request.FILES)
        if sponsor_form.is_valid():
            new_sponsor = sponsor_form.save()
            event.sponsors.add(new_sponsor)
            event.team.sponsors.add(new_sponsor)
            messages.add_message(
                request,
                messages.SUCCESS,
                message=_("Your sponsor has been added to this event."),
            )
            return redirect("manage-event-sponsors", event.id)

    else:
        sponsor_form = SponsorForm()
    context = {
        "event": event,
        "sponsors": OrderedSet(events_sponsors + team_sponsors),
        "sponsor_form": sponsor_form,
        "can_edit_event": request.user.profile.can_edit_event(event),
    }
    return render(request, "get_together/events/manage_event_sponsors.html",
                  context)
Code Example #17
File: graph.py  Project: anshp/new
    def dfs(self, start, get_children):
        """
        Dynamic programming based depth first search, for finding dependencies.
        """
        visited = []
        visited.append(start)
        path = [start]
        stack = sorted(get_children(start))
        while stack:
            node = stack.pop(0)

            if node in path:
                raise CircularDependencyError()
            path.append(node)

            visited.insert(0, node)
            children = sorted(get_children(node))

            if not children:
                path = []

            stack = children + stack

        return list(OrderedSet(visited))
Code Example #18
File: integration.py  Project: wangjianweiwei/sentry
    def get_repositories(self, query=None):
        username = self.model.metadata.get("uuid", self.username)
        if not query:
            resp = self.get_client().get_repos(username)
            return [
                {"identifier": repo["full_name"], "name": repo["full_name"]}
                for repo in resp.get("values", [])
            ]

        exact_query = f'name="{query}"'.encode()
        fuzzy_query = f'name~"{query}"'.encode()
        exact_search_resp = self.get_client().search_repositories(username, exact_query)
        fuzzy_search_resp = self.get_client().search_repositories(username, fuzzy_query)

        result = OrderedSet()

        for j in exact_search_resp.get("values", []):
            result.add(j["full_name"])

        for i in fuzzy_search_resp.get("values", []):
            result.add(i["full_name"])

        return [{"identifier": full_name, "name": full_name} for full_name in result]
Code Example #19
    def get_repositories(self, query=None):
        if not query:
            resp = self.get_client().get_repos(self.username)
            return [
                {"identifier": repo["full_name"], "name": repo["full_name"]}
                for repo in resp.get("values", [])
            ]

        exact_query = (u'name="%s"' % (query)).encode("utf-8")
        fuzzy_query = (u'name~"%s"' % (query)).encode("utf-8")

        exact_search_resp = self.get_client().search_repositories(self.username, exact_query)
        fuzzy_search_resp = self.get_client().search_repositories(self.username, fuzzy_query)

        result = OrderedSet()

        for j in exact_search_resp.get("values", []):
            result.add(j["full_name"])

        for i in fuzzy_search_resp.get("values", []):
            result.add(i["full_name"])

        return [{"identifier": full_name, "name": full_name} for full_name in result]
Code Example #20
File: test_datastructures.py  Project: zw3n/django
 def test_bool(self):
     # Refs #23664
     s = OrderedSet()
     self.assertFalse(s)
     s.add(1)
     self.assertTrue(s)
Code Example #21
File: views.py  Project: zhanghtt/mtianyanSearch
 def get(request):
     key_words = request.GET.get('s', '')
     current_type = request.GET.get('s_type', '')
     if current_type == "article":
         return_suggest_list = []
         if key_words:
             s = JobboleBlogIndex.search()
             """fuzzy模糊搜索, fuzziness 编辑距离, prefix_length前面不变化的前缀长度"""
             s = s.suggest('my_suggest',
                           key_words,
                           completion={
                               "field": "suggest",
                               "fuzzy": {
                                   "fuzziness": 2
                               },
                               "size": 10
                           })
             suggestions = s.execute()
             for match in suggestions.suggest.my_suggest[0].options[:10]:
                 source = match._source
                 return_suggest_list.append(source["title"])
         return HttpResponse(json.dumps(return_suggest_list),
                             content_type="application/json")
     elif current_type == "job":
         return_suggest_list = []
         if key_words:
             s = LagouJobIndex.search()
             s = s.suggest('my_suggest',
                           key_words,
                           completion={
                               "field": "suggest",
                               "fuzzy": {
                                   "fuzziness": 2
                               },
                               "size": 10
                           })
             suggestions = s.execute()
             # Deduplicate identical job titles from different companies to improve the user experience.
             name_set = OrderedSet()
             for match in suggestions.suggest.my_suggest[0].options[:10]:
                 source = match._source
                 name_set.add(source["title"])
             for name in name_set:
                 return_suggest_list.append(name)
         return HttpResponse(json.dumps(return_suggest_list),
                             content_type="application/json")
     elif current_type == "question":
         return_suggest_list = []
         if key_words:
             s_question = ZhiHuQuestionIndex.search()
             """fuzzy模糊搜索, fuzziness 编辑距离, prefix_length前面不变化的前缀长度"""
             s_question = s_question.suggest('my_suggest',
                                             key_words,
                                             completion={
                                                 "field": "suggest",
                                                 "fuzzy": {
                                                     "fuzziness": 2
                                                 },
                                                 "size": 10
                                             })
             suggestions_question = s_question.execute()
             for match in suggestions_question.suggest.my_suggest[
                     0].options[:10]:
                 source = match._source
                 return_suggest_list.append(source["title"])
         return HttpResponse(json.dumps(return_suggest_list),
                             content_type="application/json")
Code Example #22
 def headings(self):
     headings = OrderedSet()
     for entry in self.entry_set.all():
         for meta_data in entry.metadata_set.all():
             headings.add(meta_data.key)
     return headings
Code Example #23
File: test_datastructures.py  Project: carlio/django
 def test_bool(self):
     # Refs #23664
     s = OrderedSet()
     self.assertFalse(s)
     s.add(1)
     self.assertTrue(s)
Code Example #24
def ordered_models_to_delete(app_config, connection):
    """Models of the given app to delete.
    @return A tuple (models, loop_error).
            'models' is a list of the models classes to delete ;
                     the order respects the dependencies between the models.
            'loop_error' is a boolean which indicates dependencies loop error.
    """
    from django.db import router
    from django.utils.datastructures import OrderedSet

    from creme.creme_core.utils.dependence_sort import dependence_sort, DependenciesLoopError

    class ModelInfo:
        # def __init__(self, model, dependencies, sql_cmd):
        def __init__(self, model, dependencies):
            self.model = model
            self.dependencies = dependencies
            # self.sql_cmd = sql_cmd

        def __str__(self):
            return 'ModelInfo(model={model}, dependencies={dependencies})'.format(
                model=self.model.__name__,
                dependencies=[d.__name__ for d in self.dependencies],
            )

    models_info = []
    cursor = connection.cursor()

    try:
        table_names = set(connection.introspection.table_names(cursor))
        app_models = OrderedSet(
            router.get_migratable_models(
                app_config,
                connection.alias,
                # include_auto_created=True,
                # NB: the auto created tables are automatically
                #     deleted by schema_editor.delete_model(model)
                include_auto_created=False,
            ))

        for model in app_models:
            meta = model._meta

            if connection.introspection.table_name_converter(
                    meta.db_table) in table_names:
                # dependencies = []
                dependencies = set()  # We use a set to avoid duplicates

                for f in meta.local_fields:
                    # if f.rel:
                    if f.remote_field:
                        # related_model = f.rel.to
                        related_model = f.remote_field.model

                        # if related_model in app_models:
                        # NB: we avoid self-referencing (TODO: improve dependence_sort() ?)
                        if related_model is not model and related_model in app_models:
                            # dependencies.append(related_model)
                            dependencies.add(related_model)

                models_info.append(
                    ModelInfo(
                        model=model,
                        dependencies=dependencies,
                        # sql_cmd=connection.creation.sql_destroy_model(model, [], style)[0],
                    ))
    finally:
        cursor.close()

    dep_error = False
    try:
        models_info = dependence_sort(
            models_info,
            get_key=lambda mi: mi.model,
            get_dependencies=lambda mi: mi.dependencies,
        )
    except DependenciesLoopError:
        dep_error = True
    else:
        models_info.reverse()  # The dependencies must be deleted _after_

    # return [mi.sql_cmd for mi in models_info], dep_error
    return [mi.model for mi in models_info], dep_error
Code Example #25
 def test_init_with_iterable(self):
     s = OrderedSet([1, 2, 3])
     self.assertEqual(list(s.dict.keys()), [1, 2, 3])
Code Example #26
File: introspection.py  Project: zc-andy/DbMgrSystem
 def get_constraints(self, cursor, table_name):
     """
     Retrieve any constraints or keys (unique, pk, fk, check, index) across
     one or more columns.
     """
     constraints = {}
     # Get the actual constraint names and columns
     name_query = """
         SELECT kc.`constraint_name`, kc.`column_name`,
             kc.`referenced_table_name`, kc.`referenced_column_name`
         FROM information_schema.key_column_usage AS kc
         WHERE
             kc.table_schema = DATABASE() AND
             kc.table_name = %s
         ORDER BY kc.`ordinal_position`
     """
     cursor.execute(name_query, [table_name])
     for constraint, column, ref_table, ref_column in cursor.fetchall():
         if constraint not in constraints:
             constraints[constraint] = {
                 'columns': OrderedSet(),
                 'primary_key': False,
                 'unique': False,
                 'index': False,
                 'check': False,
                 'foreign_key':
                 (ref_table, ref_column) if ref_column else None,
             }
         constraints[constraint]['columns'].add(column)
     # Now get the constraint types
     type_query = """
         SELECT c.constraint_name, c.constraint_type
         FROM information_schema.table_constraints AS c
         WHERE
             c.table_schema = DATABASE() AND
             c.table_name = %s
     """
     cursor.execute(type_query, [table_name])
     for constraint, kind in cursor.fetchall():
         if kind.lower() == "primary key":
             constraints[constraint]['primary_key'] = True
             constraints[constraint]['unique'] = True
         elif kind.lower() == "unique":
             constraints[constraint]['unique'] = True
     # Add check constraints.
     if self.connection.features.can_introspect_check_constraints:
         unnamed_constraints_index = 0
         columns = {
             info.name
             for info in self.get_table_description(cursor, table_name)
         }
         if self.connection.mysql_is_mariadb:
             type_query = """
                 SELECT c.constraint_name, c.check_clause
                 FROM information_schema.check_constraints AS c
                 WHERE
                     c.constraint_schema = DATABASE() AND
                     c.table_name = %s
             """
         else:
             type_query = """
                 SELECT cc.constraint_name, cc.check_clause
                 FROM
                     information_schema.check_constraints AS cc,
                     information_schema.table_constraints AS tc
                 WHERE
                     cc.constraint_schema = DATABASE() AND
                     tc.table_schema = cc.constraint_schema AND
                     cc.constraint_name = tc.constraint_name AND
                     tc.constraint_type = 'CHECK' AND
                     tc.table_name = %s
             """
         cursor.execute(type_query, [table_name])
         for constraint, check_clause in cursor.fetchall():
             constraint_columns = self._parse_constraint_columns(
                 check_clause, columns)
             # Ensure uniqueness of unnamed constraints. Unnamed unique
             # and check columns constraints have the same name as
             # a column.
             if set(constraint_columns) == {constraint}:
                 unnamed_constraints_index += 1
                 constraint = '__unnamed_constraint_%s__' % unnamed_constraints_index
             constraints[constraint] = {
                 'columns': constraint_columns,
                 'primary_key': False,
                 'unique': False,
                 'index': False,
                 'check': True,
                 'foreign_key': None,
             }
     # Now add in the indexes
     cursor.execute("SHOW INDEX FROM %s" %
                    self.connection.ops.quote_name(table_name))
     for table, non_unique, index, colseq, column, type_ in [
             x[:5] + (x[10], ) for x in cursor.fetchall()
     ]:
         if index not in constraints:
             constraints[index] = {
                 'columns': OrderedSet(),
                 'primary_key': False,
                 'unique': False,
                 'check': False,
                 'foreign_key': None,
             }
         constraints[index]['index'] = True
         constraints[index][
             'type'] = Index.suffix if type_ == 'BTREE' else type_.lower()
         constraints[index]['columns'].add(column)
     # Convert the sorted sets to lists
     for constraint in constraints.values():
         constraint['columns'] = list(constraint['columns'])
     return constraints
Code Example #27
File: introspection.py  Project: ZekriSara/pfe
 def get_constraints(self, cursor, table_name):
     """
     Retrieve any constraints or keys (unique, pk, fk, check, index) across
     one or more columns.
     """
     constraints = {}
     # Get the actual constraint names and columns
     name_query = """
         SELECT kc.`constraint_name`, kc.`column_name`,
             kc.`referenced_table_name`, kc.`referenced_column_name`,
             c.`constraint_type`
         FROM
             information_schema.key_column_usage AS kc,
             information_schema.table_constraints AS c
         WHERE
             kc.table_schema = DATABASE() AND
             c.table_schema = kc.table_schema AND
             c.constraint_name = kc.constraint_name AND
             c.constraint_type != 'CHECK' AND
             kc.table_name = %s
         ORDER BY kc.`ordinal_position`
     """
     cursor.execute(name_query, [table_name])
     for constraint, column, ref_table, ref_column, kind in cursor.fetchall(
     ):
         if constraint not in constraints:
             constraints[constraint] = {
                 "columns": OrderedSet(),
                 "primary_key": kind == "PRIMARY KEY",
                 "unique": kind in {"PRIMARY KEY", "UNIQUE"},
                 "index": False,
                 "check": False,
                 "foreign_key":
                 (ref_table, ref_column) if ref_column else None,
             }
             if self.connection.features.supports_index_column_ordering:
                 constraints[constraint]["orders"] = []
         constraints[constraint]["columns"].add(column)
     # Add check constraints.
     if self.connection.features.can_introspect_check_constraints:
         unnamed_constraints_index = 0
         columns = {
             info.name
             for info in self.get_table_description(cursor, table_name)
         }
         if self.connection.mysql_is_mariadb:
             type_query = """
                 SELECT c.constraint_name, c.check_clause
                 FROM information_schema.check_constraints AS c
                 WHERE
                     c.constraint_schema = DATABASE() AND
                     c.table_name = %s
             """
         else:
             type_query = """
                 SELECT cc.constraint_name, cc.check_clause
                 FROM
                     information_schema.check_constraints AS cc,
                     information_schema.table_constraints AS tc
                 WHERE
                     cc.constraint_schema = DATABASE() AND
                     tc.table_schema = cc.constraint_schema AND
                     cc.constraint_name = tc.constraint_name AND
                     tc.constraint_type = 'CHECK' AND
                     tc.table_name = %s
             """
         cursor.execute(type_query, [table_name])
         for constraint, check_clause in cursor.fetchall():
             constraint_columns = self._parse_constraint_columns(
                 check_clause, columns)
             # Ensure uniqueness of unnamed constraints. Unnamed unique
             # and check columns constraints have the same name as
             # a column.
             if set(constraint_columns) == {constraint}:
                 unnamed_constraints_index += 1
                 constraint = "__unnamed_constraint_%s__" % unnamed_constraints_index
             constraints[constraint] = {
                 "columns": constraint_columns,
                 "primary_key": False,
                 "unique": False,
                 "index": False,
                 "check": True,
                 "foreign_key": None,
             }
     # Now add in the indexes
     cursor.execute("SHOW INDEX FROM %s" %
                    self.connection.ops.quote_name(table_name))
     for table, non_unique, index, colseq, column, order, type_ in [
             x[:6] + (x[10], ) for x in cursor.fetchall()
     ]:
         if index not in constraints:
             constraints[index] = {
                 "columns": OrderedSet(),
                 "primary_key": False,
                 "unique": not non_unique,
                 "check": False,
                 "foreign_key": None,
             }
             if self.connection.features.supports_index_column_ordering:
                 constraints[index]["orders"] = []
         constraints[index]["index"] = True
         constraints[index]["type"] = (Index.suffix if type_ == "BTREE" else
                                       type_.lower())
         constraints[index]["columns"].add(column)
         if self.connection.features.supports_index_column_ordering:
             constraints[index]["orders"].append("DESC" if order ==
                                                 "D" else "ASC")
     # Convert the sorted sets to lists
     for constraint in constraints.values():
         constraint["columns"] = list(constraint["columns"])
     return constraints
Code Example #28
 def test_discard(self):
     s = OrderedSet()
     self.assertEqual(len(s), 0)
     s.add(1)
     s.discard(2)
     self.assertEqual(len(s), 1)
Code Example #29
File: introspection.py  Project: DavTho1983/UPress2
 def get_constraints(self, cursor, table_name):
     """
     Retrieve any constraints or keys (unique, pk, fk, check, index) across
     one or more columns.
     """
     constraints = {}
     # Get the actual constraint names and columns
     name_query = """
         SELECT kc.`constraint_name`, kc.`column_name`,
             kc.`referenced_table_name`, kc.`referenced_column_name`
         FROM information_schema.key_column_usage AS kc
         WHERE
             kc.table_schema = DATABASE() AND
             kc.table_name = %s
         ORDER BY kc.`ordinal_position`
     """
     cursor.execute(name_query, [table_name])
     for constraint, column, ref_table, ref_column in cursor.fetchall():
         if constraint not in constraints:
             constraints[constraint] = {
                 "columns": OrderedSet(),
                 "primary_key": False,
                 "unique": False,
                 "index": False,
                 "check": False,
                 "foreign_key":
                 (ref_table, ref_column) if ref_column else None,
             }
         constraints[constraint]["columns"].add(column)
     # Now get the constraint types
     type_query = """
         SELECT c.constraint_name, c.constraint_type
         FROM information_schema.table_constraints AS c
         WHERE
             c.table_schema = DATABASE() AND
             c.table_name = %s
     """
     cursor.execute(type_query, [table_name])
     for constraint, kind in cursor.fetchall():
         if kind.lower() == "primary key":
             constraints[constraint]["primary_key"] = True
             constraints[constraint]["unique"] = True
         elif kind.lower() == "unique":
             constraints[constraint]["unique"] = True
     # Now add in the indexes
     cursor.execute("SHOW INDEX FROM %s" %
                    self.connection.ops.quote_name(table_name))
     for table, non_unique, index, colseq, column, type_ in [
             x[:5] + (x[10], ) for x in cursor.fetchall()
     ]:
         if index not in constraints:
             constraints[index] = {
                 "columns": OrderedSet(),
                 "primary_key": False,
                 "unique": False,
                 "check": False,
                 "foreign_key": None,
             }
         constraints[index]["index"] = True
         constraints[index]["type"] = (Index.suffix if type_ == "BTREE" else
                                       type_.lower())
         constraints[index]["columns"].add(column)
     # Convert the sorted sets to lists
     for constraint in constraints.values():
         constraint["columns"] = list(constraint["columns"])
     return constraints
Code Example #30
 def __init__(self):
     self._line_classes = OrderedSet()
Code Example #31
 def test_reversed(self):
     s = reversed(OrderedSet([1, 2, 3]))
     self.assertIsInstance(s, collections.abc.Iterator)
     self.assertEqual(list(s), [3, 2, 1])
Code Example #32
 def test_contains(self):
     s = OrderedSet()
     self.assertEqual(len(s), 0)
     s.add(1)
     self.assertIn(1, s)
Code Example #33
 def test_repr(self):
     self.assertEqual(repr(OrderedSet()), 'OrderedSet()')
     self.assertEqual(repr(OrderedSet([2, 3, 2, 1])), 'OrderedSet([2, 3, 1])')
Code Example #34
 def slots(self):
     return OrderedSet(
         self.slot_set.order_by('weekday',
                                'start').values_list('weekday', 'start'))
Code Example #35
 def get_constraints(self, cursor, table_name):
     """
     Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
     """
     constraints = {}
     # Get the actual constraint names and columns
     name_query = """
         SELECT kc.`constraint_name`, kc.`column_name`,
             kc.`referenced_table_name`, kc.`referenced_column_name`
         FROM information_schema.key_column_usage AS kc
         WHERE
             kc.table_schema = %s AND
             kc.table_name = %s
     """
     cursor.execute(name_query,
                    [self.connection.settings_dict['NAME'], table_name])
     for constraint, column, ref_table, ref_column in cursor.fetchall():
         if constraint not in constraints:
             constraints[constraint] = {
                 'columns': OrderedSet(),
                 'primary_key': False,
                 'unique': False,
                 'index': False,
                 'check': False,
                 'foreign_key':
                 (ref_table, ref_column) if ref_column else None,
             }
         constraints[constraint]['columns'].add(column)
     # Now get the constraint types
     type_query = """
         SELECT c.constraint_name, c.constraint_type
         FROM information_schema.table_constraints AS c
         WHERE
             c.table_schema = %s AND
             c.table_name = %s
     """
     cursor.execute(type_query,
                    [self.connection.settings_dict['NAME'], table_name])
     for constraint, kind in cursor.fetchall():
         if kind.lower() == "primary key":
             constraints[constraint]['primary_key'] = True
             constraints[constraint]['unique'] = True
         elif kind.lower() == "unique":
             constraints[constraint]['unique'] = True
     # Now add in the indexes
     cursor.execute("SHOW INDEX FROM %s" %
                    self.connection.ops.quote_name(table_name))
     for table, non_unique, index, colseq, column in [
             x[:5] for x in cursor.fetchall()
     ]:
         if index not in constraints:
             constraints[index] = {
                 'columns': OrderedSet(),
                 'primary_key': False,
                 'unique': False,
                 'index': True,
                 'check': False,
                 'foreign_key': None,
             }
         constraints[index]['index'] = True
         constraints[index]['columns'].add(column)
     # Convert the sorted sets to lists
     for constraint in constraints.values():
         constraint['columns'] = list(constraint['columns'])
     return constraints
Code Example #36
import re
from typing import List

from django.utils.datastructures import OrderedSet  # assuming Django's OrderedSet, as in the examples above


def unique_hashtags(text: str) -> List[str]:
    hashtags = re.findall(r'#(\w+)', text)
    return list(OrderedSet(hashtags))
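
A quick usage sketch: duplicate hashtags are dropped while the first-seen order is kept.

print(unique_hashtags('#django #python #django #orm'))  # ['django', 'python', 'orm']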