Example No. 1
    def test_group_by(self):
        gen = flu([1, 1, 1, 2, 2, 2, 2, 3]).zip(range(100)).group_by(lambda x: x[0])
        g1, g2, g3 = gen.map(lambda x: (x[0], x[1].collect())).collect()
        # Standard usage
        assert g1 == (1, [(1, 0), (1, 1), (1, 2)])
        assert g2 == (2, [(2, 3), (2, 4), (2, 5), (2, 6)])
        assert g3 == (3, [(3, 7)])
        # No param usage
        v1 = flu(range(10)).group_by().map(lambda x: (x[0], list(x[1])))
        v2 = flu(range(10)).map(lambda x: (x, [x]))
        assert v1.collect() == v2.collect()
        # Sort
        gen = flu([1, 2, 1, 2]).group_by(lambda x: x, sort=False)
        assert gen.count() == 4
        gen = flu([1, 2, 1, 2]).group_by(lambda x: x, sort=True)
        assert gen.count() == 2

        # Explicit key function (group_by invoked through the class)
        points = [
                {'x': 1, 'y': 0},
                {'x': 4, 'y': 3},
                {'x': 1, 'y': 5}
        ]
        key_func = lambda u: u['x']
        gen = flu.group_by(points, key=key_func, sort=True).collect()
        assert len(gen) == 2
        assert gen[0][0] == 1
        assert gen[1][0] == 4
        assert len(gen[0][1].collect()) == 2
        assert len(gen[1][1].collect()) == 1
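
Worth noting for readers new to `group_by`: it yields `(key, group)` tuples where each group is itself a `flu` instance and, as with `itertools.groupby`, a group should be consumed (e.g. via `.collect()`) before the outer iterator advances. A minimal sketch of the usual pattern:

    from flupy import flu

    # Group words by first letter; each group is materialized before moving on
    pairs = (
        flu(["apple", "avocado", "banana"])
        .group_by(lambda w: w[0])
        .map(lambda g: (g[0], g[1].collect()))
        .collect()
    )
    assert pairs == [('a', ['apple', 'avocado']), ('b', ['banana'])]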
Example No. 2
    def compare_registered_entities(autogen_context, upgrade_ops,
                                    sqla_schemas: List[Optional[str]]):
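        # NOTE: `entities`, `schemas`, and `exclude_schemas` below are free
        # variables; in the original source this function appears to be nested
        # inside a registration helper that supplies them via closure.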
        engine = autogen_context.connection.engine

        # Ensure pg_functions have unique identities (not registered twice)
        for ident, function_group in flu(entities).group_by(key=lambda x: x.identity):
            if len(function_group.collect()) > 1:
                raise DuplicateRegistration(
                    f"PGFunction with identity {ident} was registered multiple times"
                )

        # User registered schemas + automatically registered schemas (from SQLA Metadata)
        observed_schemas: List[str] = []
        if schemas is not None:
            for schema in schemas:
                observed_schemas.append(schema)

        sqla_schemas = [
            schema for schema in sqla_schemas or [] if schema is not None
        ]
        observed_schemas.extend(sqla_schemas)

        for entity in entities:
            observed_schemas.append(entity.schema)

        # Remove excluded schemas
        observed_schemas = [
            x for x in set(observed_schemas)
            if x not in (exclude_schemas or [])
        ]

        with engine.connect() as connection:

            # Check for new or updated entities
            for local_entity in entities:
                maybe_op = local_entity.get_required_migration_op(connection)
                if maybe_op is not None:
                    upgrade_ops.ops.append(maybe_op)

            # Entities grouped by class (e.g. PGFunction, PGView, etc)
            entity_groups = flu(entities).group_by(lambda x: x.__class__, sort=False)

            # Check if anything needs to drop
            for schema in observed_schemas:
                # Entities within the schemas that are live
                for entity_class in ReplaceableEntity.__subclasses__():
                    db_entities = entity_class.from_database(connection, schema=schema)

                    # Check for functions that were deleted locally
                    for db_entity in db_entities:
                        for local_entity in entities:
                            if db_entity.is_equal_identity(
                                    local_entity, connection):
                                break
                        else:
                            # No match was found locally
                            upgrade_ops.ops.append(DropOp(db_entity))
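
Two idioms here deserve a note: grouping by a non-unique key turns duplicate detection into a two-liner, and the `for ... else` on the inner loop runs its `else` block only when no `break` fired, i.e. when no local entity matched. A standalone sketch of the duplicate check, with a hypothetical `Entity` standing in for the real registered classes:

    from collections import namedtuple
    from flupy import flu

    Entity = namedtuple("Entity", "identity")
    entities = [Entity("a"), Entity("b"), Entity("a")]

    for ident, group in flu(entities).group_by(key=lambda x: x.identity):
        if len(group.collect()) > 1:
            print(f"duplicate identity: {ident}")  # prints: duplicate identity: a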
Example No. 3
    def test_zip_longest(self):
        gen = flu(range(3)).zip_longest(range(5))
        assert gen.collect() == [(0, 0), (1, 1), (2, 2), (None, 3), (None, 4)]
        gen = flu(range(3)).zip_longest(range(5), fill_value='a')
        assert gen.collect() == [(0, 0), (1, 1), (2, 2), ('a', 3), ('a', 4)]
        gen = flu(range(3)).zip_longest(range(5), range(4), fill_value='a')
        assert gen.collect() == [(0, 0, 0), (1, 1, 1), (2, 2, 2), ('a', 3, 3), ('a', 4, 'a')]
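
Like `itertools.zip_longest`, the shorter iterables are padded out to the length of the longest one; flupy spells the pad argument `fill_value` (default `None`) rather than itertools' `fillvalue`. For instance:

    from flupy import flu

    # range(2) is exhausted first, so its slot is padded with 0
    assert flu(range(2)).zip_longest(range(4), fill_value=0).collect() == [
        (0, 0), (1, 1), (0, 2), (0, 3)
    ]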
Example No. 4
def main():
    args = parse_args(sys.argv[1:])

    _command = args.command
    _file = args.file
    _import = getattr(args, "import")

    execute_imports(_import)

    if _file:
        _ = flu(read_file(_file)).map(str.rstrip)
    else:
        # Do not raise exception for Broken Pipe
        signal(SIGPIPE, SIG_DFL)
        _ = flu(sys.stdin).map(str.rstrip)

    pipeline = eval(_command)

    if hasattr(pipeline, "__iter__") and not isinstance(pipeline, (str, bytes)):
        for r in pipeline:
            sys.stdout.write(str(r) + "\n")

    elif pipeline is None:
        pass
    else:
        sys.stdout.write(str(pipeline) + "\n")
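
The shape of this entrypoint: the input stream (file or stdin) is bound to the name `_` as a `flu` instance, the user's `--command` string is handed to `eval`, and whatever the expression produces is streamed to stdout (iterables line by line, `None` silently, anything else via `str`). The core trick, stripped of the argparse plumbing, is just:

    from flupy import flu

    _ = flu(["alpha\n", "beta\n"]).map(str.rstrip)
    pipeline = eval("_.map(str.upper)")  # the user-supplied command references _
    assert pipeline.collect() == ["ALPHA", "BETA"]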
Example No. 5
    def test_head(self):
        gen = flu(range(30))
        assert gen.head(n=2) == [0, 1]
        gen = flu(range(30))
        assert gen.head(n=3, container_type=set) == set([0, 1, 2])
        gen = flu(range(3))
        assert gen.head(n=50) == [0, 1, 2]
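
`head` eagerly pulls the first `n` elements into a container (a list by default) and stops as soon as it has them, which is why shorter inputs just yield fewer items. Presumably it is equivalent to `take(n)` followed by `collect()`, which also makes it safe on infinite iterators:

    from itertools import count
    from flupy import flu

    assert flu(count()).head(n=3) == flu(count()).take(3).collect()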
Example No. 6
def test_zip_longest():
    gen = flu(range(3)).zip_longest(range(5))
    assert gen.collect() == [(0, 0), (1, 1), (2, 2), (None, 3), (None, 4)]
    gen = flu(range(3)).zip_longest(range(5), fill_value="a")
    assert gen.collect() == [(0, 0), (1, 1), (2, 2), ("a", 3), ("a", 4)]
    gen = flu(range(3)).zip_longest(range(5), range(4), fill_value="a")
    assert gen.collect() == [(0, 0, 0), (1, 1, 1), (2, 2, 2), ("a", 3, 3), ("a", 4, "a")]
Example No. 7
    def test_tail(self):
        gen = flu(range(30))
        assert gen.tail(n=2) == [28, 29]
        gen = flu(range(30))
        assert gen.tail(n=3, container_type=set) == set([27, 28, 29])
        gen = flu(range(3))
        assert gen.tail(n=50) == [0, 1, 2]
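
`tail`, by contrast, cannot know where the end is without exhausting the input, so it consumes the whole iterable (presumably keeping only a bounded buffer of the trailing `n` items) and never terminates on infinite inputs. For example:

    from flupy import flu

    assert flu(range(10_000)).tail(n=2) == [9998, 9999]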
Example No. 8
def test_last():
    gen = flu(range(3))
    assert gen.last() == 2
    gen = flu([])
    with pytest.raises(IndexError):
        gen.last()
    gen = flu([])
    assert gen.last(default=1) == 1
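
`last(default=...)` follows the same convention as `next(iterator, default)`: exhausting an empty iterable raises unless a fallback is supplied. For instance:

    from flupy import flu

    assert flu("abc").last() == "c"
    assert flu([]).last(default="empty") == "empty"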
Example No. 9
    def test_enumerate(self):
        # Check default
        gen = flu(range(3)).enumerate()
        assert gen.collect() == [(0, 0), (1, 1), (2, 2)]

        # Check start param
        gen = flu(range(3)).enumerate(start=1)
        assert gen.collect() == [(1, 0), (2, 1), (3, 2)]
Example No. 10
    def test_last(self):
        gen = flu(range(3))
        assert gen.last() == 2
        gen = flu([])
        with self.assertRaises(IndexError):
            gen.last()
        gen = flu([])
        assert gen.last(default=1) == 1
Example No. 11
    def test_window(self):
        # Check default
        gen = flu(range(5)).window(n=3)
        assert gen.collect() == [(0, 1, 2), (1, 2, 3), (2, 3, 4)]

        # Check step param
        gen = flu(range(5)).window(n=3, step=3)
        assert gen.collect() == [(0, 1, 2), (3, 4, None)]

        # Check fill_value param
        gen = flu(range(5)).window(n=3, step=3, fill_value='i')
        assert gen.collect() == [(0, 1, 2), (3, 4, 'i')]
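
A point of contrast: `window` pads a final partial window out to length `n` with `fill_value` (default `None`), while `chunk` simply yields a shorter final chunk. Side by side:

    from flupy import flu

    assert flu(range(5)).chunk(3).collect() == [[0, 1, 2], [3, 4]]
    assert flu(range(5)).window(n=3, step=3).collect() == [(0, 1, 2), (3, 4, None)]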
Example No. 12
    def test_tee(self):
        # Default unpacking
        gen1, gen2 = flu(range(100)).tee()
        assert gen1.sum() == gen2.sum()

        # Adjusting the *n* parameter
        gen1, gen2, gen3 = flu(range(100)).tee(3)
        assert gen1.sum() == gen3.sum()

        # Copies advance independently of one another
        gen1, gen2 = flu(range(100)).tee()
        assert next(gen1) == next(gen2)

        # tee() does not break the method chain
        assert flu(range(5)).tee().map(sum).sum() == 20
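
Assuming `itertools.tee` semantics underneath (which the independence test above suggests), the copies share a buffer, so letting one copy run far ahead of the others costs memory proportional to the lag; tee-ing is cheap only when the copies are consumed roughly in step. The last assertion works because `tee()` itself returns a `flu` of the copies, so chaining continues:

    from flupy import flu

    # sum each of the two copies, then collect the per-copy sums
    assert flu(range(5)).tee().map(sum).collect() == [10, 10]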
Example No. 13
    def test_flu(self):
        gen = (
            flu(count())
            .map(lambda x: x**2)
            .filter(lambda x: x % 517 == 0)
            .chunk(5)
            .take(3)
        )
        assert next(gen) == [0, 267289, 1069156, 2405601, 4276624]
Example No. 14
def walk_dirs(path: str = ".") -> Iterable[str]:
    """Yield files recursively starting from *path"""
    def _impl():
        for d, _, _ in os.walk(path):
            yield d

    return flu(_impl())
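
The inner-generator-plus-`flu` wrapper is a tidy way to hand callers the fluent API instead of a bare generator. Usage might look like:

    # e.g. the first few directories under the current path
    first_dirs = walk_dirs(".").take(3).collect()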
Example No. 15
    def test_map_attr(self):
        class Person:
            def __init__(self, age: int) -> None:
                self.age = age

        gen = flu(range(3)).map(lambda x: Person(x)).map_attr('age')
        assert gen.collect() == [0, 1, 2]
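
`map_attr('age')` is attribute-access shorthand in the spirit of `operator.attrgetter`; the two spellings below should agree (reusing a `Person` class like the one above):

    from flupy import flu

    class Person:
        def __init__(self, age: int) -> None:
            self.age = age

    people = [Person(1), Person(2)]
    assert flu(people).map_attr('age').collect() == flu(people).map(lambda p: p.age).collect()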
Example No. 16
def test_unique():
    class NoHash:
        def __init__(self, letter, keyf):
            self.letter = letter
            self.keyf = keyf

    a = NoHash("a", 1)
    b = NoHash("b", 1)
    c = NoHash("c", 2)

    gen = flu([a, b, c]).unique()
    assert gen.collect() == [a, b, c]
    gen = flu([a, b, c]).unique(lambda x: x.letter)
    assert gen.collect() == [a, b, c]
    gen = flu([a, b, c]).unique(lambda x: x.keyf)
    assert gen.collect() == [a, c]
Example No. 17
def test___getitem__():
    assert flu(range(3))[1] == 1
    assert flu(range(3))[1:].collect() == [1, 2]
    assert flu(range(35))[1:2].collect() == [1]
    assert flu(range(35))[1:3].collect() == [1, 2]
    with pytest.raises(IndexError):
        flu([1])[4]
    with pytest.raises((KeyError, TypeError)):
        flu([1])["not an index"]
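
Each assertion above builds a fresh `flu` because indexing consumes the stream. A slice returns another `flu` (hence the trailing `.collect()`), so it stays chainable, while an integer index is terminal and returns the element itself:

    from flupy import flu

    assert flu(range(10))[2:6].map(lambda x: x * 10).collect() == [20, 30, 40, 50]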
Example No. 18
def handle_pipes(x):
    grouped = (
        flu(x)
        .group_by(lambda x: isinstance(x, Pipe), sort=False)
        .filter(lambda x: not x[0])
        .map(lambda x: x[1].collect())
        .map(lambda x: Group(x) if len(x) > 1 else x[0])
        .collect()
    )
    if len(grouped) == 1:
        return grouped[0]
    return Choice(grouped)
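
With `sort=False`, `group_by` groups consecutive runs only, exactly like raw `itertools.groupby`; here that is the point, since `Pipe` tokens act as separators between runs of non-pipe items. A stripped-down version of the same run-splitting:

    from flupy import flu

    runs = (
        flu([1, 1, "|", 2, 2])
        .group_by(lambda t: t == "|", sort=False)
        .filter(lambda g: not g[0])
        .map(lambda g: g[1].collect())
        .collect()
    )
    assert runs == [[1, 1], [2, 2]]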
Example No. 19
def precommit():
    """Secondary entrypoing for pre-commit hook to handle multiple files
    as positional arguments

    For internal use only
    """
    def precommit_parse_args(args: List[str]):
        parser = argparse.ArgumentParser(
            description="flupy: a fluent interface for python",
            formatter_class=argparse.RawTextHelpFormatter)
        parser.add_argument("files", type=str, nargs="+", help="file pathes")
        parser.add_argument("--command",
                            help="command to execute against input")
        parser.add_argument("-i", "--import", nargs="*", default=[])
        return parser.parse_args(args)

    args = precommit_parse_args(sys.argv[1:])

    # Pull values from the parsed args
    _command = args.command
    _files = args.files
    _import = getattr(args, "import")

    execute_imports(_import)

    if _files:
        from pathlib import Path

        _ = flu(_files).map(Path).filter(lambda x: x.is_file())
    else:
        # Do not raise exception for Broken Pipe
        signal(SIGPIPE, SIG_DFL)
        _ = flu(sys.stdin).map(str.rstrip)

    pipeline = eval(_command)

    if hasattr(pipeline, "__iter__") and not isinstance(pipeline, (str, bytes)):
        for r in pipeline:
            sys.stdout.write(str(r) + "\n")

    elif pipeline is None:
        pass
    else:
        sys.stdout.write(str(pipeline) + "\n")
Example No. 20
def _parse(stream: Iterable[Part]) -> Union[List, Base]:  # type: ignore
    out: List = []  # type: ignore

    stream_once = flu(stream)

    for p in stream_once:

        if p.token == Token.R_BRACKET:
            if len(out) > 1:
                maybe_choice = handle_pipes(out)
                if isinstance(maybe_choice, Choice):
                    return Maybe(maybe_choice)
                return Maybe(Group(out))
            return Maybe(out[0])

        elif p.token == Token.R_PAREN:
            return InParens(out)

        elif p.token == Token.R_BRACE:
            # Braces always contain pipes
            return handle_pipes(out)

        elif p.token in (Token.L_BRACE, Token.L_BRACKET, Token.L_PAREN):
            out.append(_parse(stream_once))

        elif p.token == Token.ARG:
            out.append(Argument(p.text))

        elif p.token == Token.DELIMITED_COMMA:
            out[-1] = RepeatComma(out[-1])

        elif p.token == Token.DELIMITED_OR:
            out[-1] = RepeatOr(out[-1])

        elif p.token == Token.DELIMITED_NONE:
            out[-1] = RepeatNone(out[-1])

        elif p.token in (Token.LITERAL, Token.STAR):
            out.append(Literal(p.text))

        elif p.token == Token.PIPE:
            out.append(Pipe(p.text))

        elif p.token == Token.NAME:
            out.append(Name(p.text))

        elif p.token == Token.QUALIFIED_NAME:
            out.append(QualifiedName(p.text))

        elif p.token == Token.UNQUALIFIED_NAME:
            out.append(UnqualifiedName(p.text))

        else:
            # a bare string assert is always truthy; raise explicitly instead
            raise AssertionError("Unhandled Token: {}".format(p.token))

    return out
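
The load-bearing detail is `stream_once = flu(stream)`: it pins one shared iterator, so each recursive `_parse(stream_once)` call consumes from the same position, and when a nested call returns on its closing token the outer loop resumes exactly where the inner one stopped. The same trick in miniature:

    from flupy import flu

    def nest(stream):
        out = []
        for ch in stream:
            if ch == "(":
                out.append(nest(stream))  # same iterator, shared position
            elif ch == ")":
                return out
            else:
                out.append(ch)
        return out

    assert nest(flu("a(b)c")) == ["a", ["b"], "c"]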
Example No. 21
def test_map_attr(benchmark):
    class Person:
        def __init__(self, age: int) -> None:
            self.age = age

    people = flu(range(200)).map(Person).collect()

    @benchmark
    def work():
        flu(people).map_attr("age").collect()
Example No. 22
    def test_side_effect(self):

        class FakeFile:
            def __init__(self):
                self.is_open = False
                self.content = []

            def write(self, text):
                if self.is_open:
                    self.content.append(text)
                else:
                    raise IOError('fake file is not open for writing')

            def open(self):
                self.is_open = True

            def close(self):
                self.is_open = False

        # Test the fake file
        ffile = FakeFile()
        ffile.open()
        ffile.write('should be there')
        ffile.close()
        assert ffile.content[0] == 'should be there'
        with self.assertRaises(IOError):
            ffile.write('should fail')

        # Reset fake file
        ffile = FakeFile()

        with self.assertRaises(IOError):
            flu(range(5)).side_effect(ffile.write).collect()

        gen_result = flu(range(5)).side_effect(ffile.write, before=ffile.open, after=ffile.close) \
                                  .collect()
        assert ffile.is_open is False
        assert ffile.content == [0, 1, 2, 3, 4]
        assert gen_result == [0, 1, 2, 3, 4]
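
`side_effect` passes each element through unchanged (the pipeline still yields the original values, as the final assertion shows), while `before` and `after` run once around the whole iteration, giving setup/teardown semantics for resources like the fake file. A smaller example:

    from flupy import flu

    seen = []
    assert flu(range(3)).side_effect(seen.append).map(lambda x: x * 2).collect() == [0, 2, 4]
    assert seen == [0, 1, 2]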
Example No. 23
    def test_flatten(self):
        nested = [1, [2, (3, [4])], ['rbsd', 'abc'], (7,)]

        # Defaults with depth of 1
        gen = flu(nested).flatten()
        assert [x for x in gen] == [1, 2, (3, [4]), 'rbsd', 'abc', 7]

        # Depth 2
        gen = flu(nested).flatten(depth=2)
        assert [x for x in gen] == [1, 2, 3, [4], 'rbsd', 'abc', 7]

        # Depth 3
        gen = flu(nested).flatten(depth=3)
        assert [x for x in gen] == [1, 2, 3, 4, 'rbsd', 'abc', 7]

        # Depth infinite
        gen = flu(nested).flatten(depth=sys.maxsize)
        assert [x for x in gen] == [1, 2, 3, 4, 'rbsd', 'abc', 7]

        # Depth 2 with tuple base_type
        gen = flu(nested).flatten(depth=2, base_type=tuple)
        assert [x for x in gen] == [1, 2, (3, [4]), 'rbsd', 'abc', (7,)]

        # Depth 2 with iterate strings
        gen = flu(nested).flatten(depth=2, base_type=tuple, iterate_strings=True)
        assert [x for x in gen] == [1, 2, (3, [4]), 'r', 'b', 's', 'd', 'a', 'b', 'c', (7,)]
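
To summarize the knobs: `depth` bounds the recursion, `base_type` marks container types to treat as atoms, and `iterate_strings` opts strings in (they are atomic by default). Passing `sys.maxsize` as the depth is the "flatten completely" idiom. One more combination, fully flattening lists while protecting tuples:

    import sys
    from flupy import flu

    data = [1, [2, [3, (4, 5)]]]
    assert flu(data).flatten(depth=sys.maxsize, base_type=tuple).collect() == [1, 2, 3, (4, 5)]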
Example No. 24
def test_flatten():
    nested = [1, [2, (3, [4])], ["rbsd", "abc"], (7, )]

    # Defaults with depth of 1
    gen = flu(nested).flatten()
    assert [x for x in gen] == [1, 2, (3, [4]), "rbsd", "abc", 7]

    # Depth 2
    gen = flu(nested).flatten(depth=2)
    assert [x for x in gen] == [1, 2, 3, [4], "rbsd", "abc", 7]

    # Depth 3
    gen = flu(nested).flatten(depth=3)
    assert [x for x in gen] == [1, 2, 3, 4, "rbsd", "abc", 7]

    # Depth infinite
    gen = flu(nested).flatten(depth=sys.maxsize)
    assert [x for x in gen] == [1, 2, 3, 4, "rbsd", "abc", 7]

    # Depth 2 with tuple base_type
    gen = flu(nested).flatten(depth=2, base_type=tuple)
    assert [x for x in gen] == [1, 2, (3, [4]), "rbsd", "abc", (7, )]

    # Depth 2 with iterate strings
    gen = flu(nested).flatten(depth=2, base_type=tuple, iterate_strings=True)
    assert [x for x in gen] == [1, 2, (3, [4]), "r", "b", "s", "d", "a", "b", "c", (7, )]
Example No. 25
def test_denormalize():
    content = [
        ["abc", [1, 2, 3]],
    ]
    assert flu(content).denormalize().collect() == [("abc", 1), ("abc", 2), ("abc", 3)]
    assert (flu(content).denormalize(iterate_strings=True).collect()) == [
        ("a", 1),
        ("a", 2),
        ("a", 3),
        ("b", 1),
        ("b", 2),
        ("b", 3),
        ("c", 1),
        ("c", 2),
        ("c", 3),
    ]

    assert (flu([[[1], [1, 2], None]]).denormalize().collect()) == [
        (1, 1, None),
        (1, 2, None),
    ]

    assert (flu([[[1], [1, 2], []]]).denormalize().collect()) == []
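
`denormalize` expands each row into the cartesian product of its fields: a non-iterable field such as `None` is treated as a single atom and repeated into every output row, an empty field collapses the product to nothing, and `iterate_strings=True` makes strings participate character by character (hence the nine rows above). In the simplest case:

    from flupy import flu

    rows = [["id1", [10, 20]]]
    assert flu(rows).denormalize().collect() == [("id1", 10), ("id1", 20)]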
Example No. 26
def walk_files(*pathes: str, abspath=True) -> Iterable[str]:
    """Yield files recursively starting from each location in *pathes"""
    if pathes == ():
        pathes = (".", )

    def _impl():
        for path in pathes:
            for d, _, files in os.walk(path):
                for x in files:
                    rel_path = os.path.join(d, x)
                    if abspath:
                        yield os.path.abspath(rel_path)
                    else:
                        yield rel_path

    return flu(_impl())
Example No. 27
def row_block(field: ASTNode,
              parent_name: typing.Optional[str] = None) -> Alias:
    return_type = field.return_type
    sqla_model = return_type.sqla_model
    core_model = sqla_model.__table__

    block_name = slugify_path(field.path)
    if parent_name is None:
        # If there is no parent, nodeId is mandatory
        pkey_cols = get_primary_key_columns(sqla_model)
        node_id = field.args["nodeId"]
        pkey_clause = [
            col == node_id.values[str(col.name)] for col in pkey_cols
        ]
        join_clause = [True]
    else:
        # If there is a parent no arguments are accepted
        join_clause = to_join_clause(field, parent_name)
        pkey_clause = [True]

    core_model_ref = (
        select(core_model.c).where(and_(*pkey_clause, *join_clause))
    ).alias(block_name)

    select_clause = []
    for subfield in field.fields:

        if subfield.return_type == ID:
            elem = select([to_node_id_sql(sqla_model, core_model_ref)]).label(subfield.alias)
            select_clause.append(elem)
        elif isinstance(subfield.return_type,
                        (ScalarType, CompositeType, EnumType)):
            col_name = field_name_to_column(sqla_model, subfield.name).name
            elem = core_model_ref.c[col_name].label(subfield.alias)
            select_clause.append(elem)
        else:
            elem = build_relationship(subfield, block_name)
            select_clause.append(elem)

    block = (
        select([
            func.jsonb_build_object(
                *flu(select_clause).map(lambda x: (literal_string(x.key), x)).flatten().collect()
            ).label("ret_json")
        ]).select_from(core_model_ref)
    ).alias()

    return block
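
The `map`/`flatten`/`collect` combination builds the alternating `key1, value1, key2, value2, ...` argument list that `jsonb_build_object` expects: map each column to a `(key, column)` pair, then flatten one level to interleave them. In isolation:

    from flupy import flu

    cols = ["a", "b"]
    assert flu(cols).map(lambda c: (c.upper(), c)).flatten().collect() == ["A", "a", "B", "b"]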
Example No. 28
def test_window():
    # Check default
    gen = flu(range(5)).window(n=3)
    assert gen.collect() == [(0, 1, 2), (1, 2, 3), (2, 3, 4)]

    # Check step param
    gen = flu(range(5)).window(n=3, step=3)
    assert gen.collect() == [(0, 1, 2), (3, 4, None)]

    # Check fill_value param
    gen = flu(range(5)).window(n=3, step=3, fill_value="i")
    assert gen.collect() == [(0, 1, 2), (3, 4, "i")]

    assert flu(range(4)).window(n=0).collect() == [tuple()]

    with pytest.raises(ValueError):
        flu(range(5)).window(n=-1).collect()

    with pytest.raises(ValueError):
        flu(range(5)).window(3, step=0).collect()
Example No. 29
    def from_database(cls, sess: Session, schema: str = "%"):
        sql = sql_text("""
        SELECT
            table_schema as schema,
            table_name,
            grantee as role_name,
            privilege_type as grant_option,
            is_grantable,
            column_name
        FROM
            information_schema.role_column_grants rcg
            -- Can't revoke from superusers, so filter out those records
            join pg_roles pr
                on rcg.grantee = pr.rolname
        WHERE
            not pr.rolsuper
            and grantor = CURRENT_USER
            and table_schema like :schema
        """)

        rows = sess.execute(sql, params={"schema": schema}).fetchall()
        grants = []

        grouped = (
            flu(rows).group_by(lambda x: SchemaTableRole(*x[:5]))
            .map(lambda x: (x[0], x[1].map_item(5).collect())).collect()
        )
        for s_t_r, columns in grouped:
            grant = PGGrantTable(
                schema=s_t_r.schema,
                table=s_t_r.table,
                role=s_t_r.role,
                grant=s_t_r.grant,
                with_grant_option=s_t_r.with_grant_option == "YES",
                columns=columns,
            )
            grants.append(grant)
        return grants
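
`map_item(5)` is subscript shorthand, like `operator.itemgetter`: it maps each row to `row[5]`, here peeling the column name off each grant record. For example:

    from flupy import flu

    assert flu([(1, "a"), (2, "b")]).map_item(1).collect() == ["a", "b"]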
Example No. 30
    def test_collect(self):
        assert flu(range(3)).collect() == [0, 1, 2]
        assert flu(range(3)).collect(container_type=tuple) == (0, 1, 2)
        assert flu(range(3)).collect(n=2) == [0, 1]