Example 1
0
def test_uniqify():
    """uniqify drops repeated items while keeping first-occurrence order."""
    source = [1, 1, 'foo', 2, 'foo', 'bar', 'baz']

    deduped = utils.uniqify(source)

    assert_list_equal([1, 'foo', 2, 'bar', 'baz'], deduped)
Example 2
0
def test_uniqify():
    """Duplicates are removed; the order of first appearance is preserved."""
    data = [1, 1, 'foo', 2, 'foo', 'bar', 'baz']
    result = utils.uniqify(data)
    assert_list_equal([1, 'foo', 2, 'bar', 'baz'], result)
Example 3
0
    def execute(self, using, optimize=False): #TODO do something with optimize
        """Run the query against the ``using`` connection, yielding matching nodes.

        Indexed conditions are grouped per index and resolved through index
        queries; unindexed conditions are applied as a post-filter on those
        results. When unindexed conditions remain (or nothing is indexed at
        all), the remainder is resolved by a full traversal from the type
        node. Yields each matching node exactly once.
        """
        # first(c) extracts the leading element of a condition tuple -
        # presumably the field (TODO confirm against the Condition definition).
        first = getter(0)
        conditions = uniqify(self.conditions)
        #gather all indexed fields
        #TODO exclude those that can't be evaluated against (eg exact=None, etc)
        indexed = [c for c in conditions if first(c).indexed]
        unindexed = [c for c in conditions if c not in indexed]

        results = {} #TODO this could perhaps be cached - think about it
        filtered_results = set()  # nodes already yielded, to avoid duplicates

        #TODO order by type - check against the global type first, so that if
        #we get an empty result set, we can return none

        # NOTE(review): itertools.groupby only groups *consecutive* items;
        # if `indexed` is not sorted by index this can emit several groups
        # for the same index - confirm whether that is intended.
        for index, group in groupby(indexed, lambda c:c.field.index(using)):
            # AND together the index queries for every condition in the group
            q = None
            for condition in group:
                new_q = q_from_condition(condition)
                if not new_q:
                    break
                if q:
                    q &= new_q
                else:
                    q = new_q
            result_set = set(index.query(q))
            #TODO results is currently worthless
            results[q] = result_set
            #TODO also needs to match at least one type, if any have been provided
            #filter for unindexed conditions, as well
            filtered_result = set(n for n in result_set \
                               if all(matches_condition(n, c) for c in unindexed)\
                                  and n not in filtered_results)
            filtered_results |= filtered_result
            for r in filtered_result:
                yield r

        # Fall back to a full traversal from the type node when some
        # conditions had no index (or none were indexed at all).
        if unindexed or not indexed:
            return_filter = return_filter_from_conditions(unindexed + indexed)
            rel_types = [neo4j.Outgoing.get('<<TYPE>>'),
                         neo4j.Outgoing.get('<<INSTANCE>>')]
            type_node = self.nodetype._type_node(using)
            pages = type_node.traverse(types=rel_types,
                                            returnable=return_filter,
                                            page_size=QUERY_CHUNK_SIZE)
            for result_set in pages:
                # only yield nodes not already produced by the index phase
                filtered_result = set(n for n in result_set \
                                     if n not in filtered_results)
                filtered_results |= filtered_result
                for r in filtered_result:
                    yield r
Example 4
0
    def execute(self, using):
        """Run the query against the ``using`` connection, yielding matching nodes.

        Conditions are partitioned into id lookups, indexed conditions, and
        unindexed conditions. Exact / ``in`` id lookups are resolved directly
        against the node store, indexed conditions go through their indexes,
        and when nothing is indexed the query falls back to a full traversal
        from the type node. Yields each matching node exactly once.

        Raises:
            ValueError: on conflicting exact-id lookups, or when the
                intersection of ``id__in`` lookups is empty.
        """
        conditions = uniqify(self.conditions)

        # TODO exclude those that can't be evaluated against (eg exact=None, etc)
        id_conditions = []
        indexed = []
        unindexed = []

        for c in conditions:
            if getattr(c.field, "id", False):
                id_conditions.append(c)
            elif c.field.indexed:
                indexed.append(c)
            else:
                unindexed.append(c)

        # Group id conditions by operator into real lists.
        # BUGFIX: the previous dict(itertools.groupby(...)) was broken in two
        # ways - groupby needs input pre-sorted by the key, and the group
        # iterators it yields are invalidated as soon as the outer iterator
        # advances, so the stored groups were already exhausted by the time
        # they were read (making the id-lookup paths effectively dead code).
        id_lookups = {}
        for c in id_conditions:
            id_lookups.setdefault(c.operator, []).append(c)

        exact_id_lookups = id_lookups.get(OPERATORS.EXACT, [])
        # if we have an exact lookup, do it and return
        if len(exact_id_lookups) == 1:
            id_val = exact_id_lookups[0].value
            try:
                node = connections[using].nodes[int(id_val)]
                # TODO also check type!!
                if all(matches_condition(node, c) for c in itertools.chain(indexed, unindexed)):
                    yield node
            except Exception:
                # Best-effort: a malformed id or a missing node simply yields
                # no result. (Was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                pass
            return
        elif len(exact_id_lookups) > 1:
            raise ValueError("Conflicting exact id lookups - a node can't have two ids.")

        # if we have any id__in lookups, do the intersection and return
        in_id_lookups = id_lookups.get(OPERATORS.IN, [])
        if in_id_lookups:
            # intersect all of the queried id lists
            id_set = reduce(and_, (set(c.value) for c in in_id_lookups))
            if id_set:
                ext = connections[using].extensions["GremlinPlugin"]
                gremlin_script = "g.v(%s)"
                gremlin_script %= ",".join(str(i) for i in id_set)
                nodes = ext.execute_script(gremlin_script)
                # TODO also check type!!
                for node in nodes:
                    if all(matches_condition(node, c) for c in itertools.chain(indexed, unindexed)):
                        yield node
                return
            else:
                raise ValueError("Conflicting id__in lookups - the intersection" " of the queried id lists is empty.")

        # TODO order by type - check against the global type first, so that if
        # we get an empty result set, we can return none

        results = {}  # TODO this could perhaps be cached - think about it
        filtered_results = set()  # nodes already yielded, to avoid duplicates

        # NOTE(review): itertools.groupby only groups *consecutive* items;
        # if `indexed` is not sorted by index this can emit several groups
        # for the same index - confirm whether that is intended.
        cond_by_ind = itertools.groupby(indexed, lambda c: c.field.index(using))
        for index, group in cond_by_ind:
            # AND together the queries of every condition sharing the index
            q = None
            for condition in group:
                new_q = q_from_condition(condition)
                if not new_q:
                    break
                if q:
                    q &= new_q
                else:
                    q = new_q
            result_set = set(index.query(q))
            # TODO results is currently worthless
            results[q] = result_set
            # TODO also needs to match at least one type, if any have been provided
            # filter for unindexed conditions, as well
            filtered_result = set(
                n for n in result_set if all(matches_condition(n, c) for c in unindexed) and n not in filtered_results
            )
            filtered_results |= filtered_result
            for r in filtered_result:
                yield r

        # Fall back to a full traversal from the type node when no condition
        # was indexed.
        if not indexed:
            return_filter = return_filter_from_conditions(unindexed + indexed)
            rel_types = [neo4j.Outgoing.get("<<TYPE>>"), neo4j.Outgoing.get("<<INSTANCE>>")]
            type_node = self.nodetype._type_node(using)
            pages = type_node.traverse(types=rel_types, returnable=return_filter, page_size=QUERY_CHUNK_SIZE)
            for result_set in pages:
                # only yield nodes not already produced by the index phase
                filtered_result = set(n for n in result_set if n not in filtered_results)
                filtered_results |= filtered_result
                for r in filtered_result:
                    yield r