Example #1
def parse_rules(text: str) -> dict:
    split_or = partial(splitstrip, sep="or")
    split_dash_to_int = compose_left(partial(splitstrip, sep="-"), c_lmap(int))
    split_dashes = partial(lmap, split_dash_to_int)
    return compose_left(
        splitstriplines,
        partial(split_to_dict, sep=":"),
        partial(valmap, split_or),
        partial(valmap, split_dashes),
    )(text)
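The helpers used here (splitstriplines, split_to_dict, and friends) are defined in later examples on this page. A minimal self-contained sketch of the same pipeline, assuming rule lines in the "class: 1-3 or 5-7" style, could look like this:

from functools import partial
from toolz import compose_left, valmap

def splitstrip(s, sep=None):
    # Split, strip each piece, drop empties.
    return [p.strip() for p in s.split(sep) if p.strip()]

text = "class: 1-3 or 5-7\nrow: 6-11 or 33-44"
rules = compose_left(
    str.splitlines,
    partial(map, lambda line: splitstrip(line, sep=":")),
    dict,
    partial(valmap, partial(splitstrip, sep="or")),
    partial(valmap, lambda ranges: [[int(n) for n in splitstrip(r, sep="-")] for r in ranges]),
)(text)
assert rules == {"class": [[1, 3], [5, 7]], "row": [[6, 11], [33, 44]]}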
Example #2
def cli_main():
    data = compose_left(load_input, process_input)("input-17.txt")
    answer = process(testdata)
    assert answer == 112
    pdb.set_trace()
    answer = process(data)
    pdb.set_trace()
    print("Answer one:", answer)
def cli_main():
    data = compose_left(load_input, process_input)("input-18.txt")
    tests()
    answer = process(data)
    assert answer == 1451467526514
    print("Answer one:", answer)
    answer_two = process_two(data)
    assert answer_two == 224973686321527
    print("Answer two:", answer_two)
Example #4
def groupby_many(f, it):
    return toolz.pipe(
        it,
        curried.mapcat(
            toolz.compose_left(
                lambda element: (f(element), [element]),
                functional.star(itertools.product),
            )),
        edges_to_graph,
    )
def add_debug(debug_f: Callable, orig_f: Callable) -> Callable:
    """
    Transforms the function such that output is passed
    to the debug function before being returned as normal.

    add_debug(print, str.upper) would return a function equivalent to:

    def fn(val: str) -> str:
        result = str.upper(val)
        print(result)
        return result
    """
    do_f = partial(do, debug_f)
    return compose_left(orig_f, do_f)
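A usage sketch following the docstring above: the composed function computes the result, hands it to the debug function via toolz.do, and returns it unchanged.

shout = add_debug(print, str.upper)
assert shout("hello") == "HELLO"  # prints "HELLO" before returning it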
Example #6
    def apply_transforms(self):
        for m in self.modalities:
            if len(self.transforms[m]) == 0:
                continue
            fn = compose_left(*self.transforms[m])
            # In-place transformation to save some memory.

            for i in tqdm(
                    range(len(self.data)),
                    desc=f"Applying transforms for {m}",
                    total=len(self.data),
            ):
                self.data[i][m] = fn(self.data[i][m])
        self.transforms = {m: [] for m in self.modalities}

        return self
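Because compose_left applies functions left to right, transforms registered first run first. A quick check of that ordering:

from toolz import compose_left
fn = compose_left(lambda x: x + 1, lambda x: x * 10)
assert fn(2) == 30  # (2 + 1) * 10, not (2 * 10) + 1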
Example #7
    def __init__(
        self,
        flow_from_directory: FlowFromDirectory,
        resize_to: Tuple[int, int],
        resize_interpolation: InterpolationEnum = InterpolationEnum.inter_nearest,
        image_transform_function: Optional[Callable[[np.ndarray], np.ndarray]] = None,
        each_transformed_image_save_function_optional: Optional[
            Callable[[int, int, np.ndarray], None]
        ] = None,
        transform_function_for_all: Optional[Callable[[np.ndarray], np.ndarray]] = None,
        image_data_generator: ImageDataGenerator = ImageDataGenerator(),
    ):
        """
        Manager that loads files from a directory.

        Parameters
        ----------
        flow_from_directory : FlowFromDirectory
            The FlowFromDirectory used to read images from the directory.
        resize_to: Tuple[int, int]
            Size to resize images to, as (height, width).
        resize_interpolation: InterpolationEnum
            Interpolation policy, by default InterpolationEnum.inter_nearest
        image_data_generator : ImageDataGenerator
            ImageDataGenerator, by default ImageDataGenerator()
        image_transform_function : Optional[Callable[[np.ndarray], np.ndarray]], optional
            Transform function applied to each image in a batch. If no function is
            given, images are passed through unchanged., by default None
        each_transformed_image_save_function_optional : Optional[Callable[[int, int, np.ndarray], None]], optional
            Save function taking the sample index, batch number, and image as input, by default None
        transform_function_for_all : Optional[Callable[[np.ndarray], np.ndarray]], optional
            Transform function applied to the batch as a whole, by default None
        """
        self.flow_from_directory: FlowFromDirectory = flow_from_directory
        self.image_data_generator: ImageDataGenerator = image_data_generator
        self.resize_to: Tuple[int, int] = resize_to
        _image_transform_function = lambda img: img
        if image_transform_function is not None:
            _image_transform_function = image_transform_function
        image_transform_function_with_resize = toolz.compose_left(
            lambda img: img_resize(img, resize_to, resize_interpolation),
            _image_transform_function,
        )
        self.image_transform_function = image_transform_function_with_resize
        self.each_transformed_image_save_function_optional = (
            each_transformed_image_save_function_optional
        )
        self.transform_function_for_all = transform_function_for_all
Example #8
def files_in_folder(
    folder_name: str,
    include_hidden_file: bool = False,
    filters: List[Callable[[str], bool]] = [],
) -> List[str]:
    """
    Get files in folder.

    Parameters
    ----------
    folder_name : str
        Folder name
    include_hidden_file : bool, optional
        Whether hidden files (names starting with '.') are included, by default False
    filters : List[Callable[[str], bool]], optional
        Filters to apply to result, by default []

    Returns
    -------
    List[str]
        File list

    Notes
    -----
    .. versionadded:: 0.1.0
    """
    # file only
    filters2: List[Callable[[str], bool]] = filters.copy()
    filters2.append(lambda f: os.path.isfile(folder_name + "/" + f))
    # hidden file filter
    if include_hidden_file is False:
        filters2.append(lambda file_name: not file_name.startswith("."))
    filtered_function: Callable[[List[str]], List[str]] = toolz.compose_left(
        os.listdir,
        curry(common_py.list_filters)(filters2),
        list,
    )
    files: List[str] = filtered_function(folder_name)  # type: ignore

    return files
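A self-contained sketch of the same listdir-then-filter pipeline; list_filters below is a hypothetical stand-in for common_py.list_filters, assumed to keep only the items that pass every predicate.

import os
from toolz import compose_left, curry

@curry
def list_filters(filters, items):
    return [x for x in items if all(f(x) for f in filters)]

list_json = compose_left(os.listdir, list_filters([lambda n: n.endswith(".json")]))
# list_json(".") -> names of ".json" entries in the current directory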
def tests():
    xx = teval_expr("1 + 2")
    assert xx == 3
    yy = teval_expr("2 / 2")
    assert yy == 1
    zz = teval_expr("2 * 3")
    assert zz == 6
    ww = teval_expr("3 - 2")
    assert ww == 1
    xx = teval_expr("1 + 2 + 3")
    yy = teval_expr("(1 + 2 + 3)")
    zz = teval_expr("(1 + 2) + 3")
    assert xx == 6
    assert yy == 6

    x = "((4 * 6 * 3 + 5 * 6 + 9) + 4 * 7 + 2 + 5) + (3 * 6 + 4) + (7 + 8 + 8)"
    pe = prep_expr(x)
    gp = group_parens(pe)
    eval_adv(gp)
    ee = compose_left(prep_expr, group_parens, eval_adv)

    assert ee("1 + (2 * 3) + (4 * (5 + 6))") == 51
    xx = ee("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2")
    assert xx == 23340
    any decorator::

        gft = excepts(
            ValueError,
            get_formatted_time,
            lambda _: None
        )

    """
    def inner_excepts_wrap(fn: Callable) -> Callable:
        return excepts(err, fn, err_func)

    return inner_excepts_wrap


lfilter = compose_left(filter, list)  # lambda f, l: [*filter(f, l)]
lmap = compose_left(map, list)  # lambda f, l: [*map(f, l)]
lpluck = compose_left(pluck, list)  # lambda k, l: [*pluck(k, l)]
c_map = curry(map)
c_lmap = curry(lmap)
is_char_az = partial(lambda y, x: x in y, ascii_lowercase)
is_char_hex = partial(lambda y, x: x in y, hexc)
is_char_az09 = partial(lambda y, x: x in y, ascii_lowercase + ascii_digits)
filter_str = partial(lambda f, s: "".join(filter(f, s)))
filter_az = partial(filter_str, is_char_az)
filter_az09 = partial(filter_str, is_char_az09)
filter_hex = partial(filter_str, is_char_hex)
add_pprint = partial(add_debug, pprint)
add_pprinting = partial(lmap, add_pprint)
lcompact = partial(lfilter, None)
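Quick checks of the list-returning helpers above (assuming the string-module constants such as ascii_lowercase that the original file imports):

assert lmap(int, "123") == [1, 2, 3]
assert lfilter(str.isdigit, "a1b2") == ["1", "2"]
assert lcompact(["a", "", None, "b"]) == ["a", "b"]
assert filter_az("ab3-cd") == "abcd"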
Example #11
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

from datetime import datetime
from distutils.util import strtobool
from multiprocessing import cpu_count
from types import SimpleNamespace

import numpy as np
from toolz import compose_left

from gluonts import json
from gluonts.nursery import glide

parse_bool = compose_left(strtobool, bool)
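strtobool returns 0 or 1, so composing with bool yields a real boolean:

assert parse_bool("yes") is True
assert parse_bool("0") is False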


def parse_attribute(ty, value: str):
    if ty == "numeric":
        return int(value)

    if ty == "string":
        return value

    if ty == "date":
        return datetime.strptime(value, "%Y-%m-%d %H-%M-%S")

    raise AttributeError(ty)


FILENAME3 = "filename3.json"


@dataclass(frozen=True)
class File:
    """File."""

    name: str
    io: _io.TextIOWrapper = None
    chars: int = 0


def write(filename, text):
    """Write file."""
    return pipe(File(filename),
                lambda file: replace(file, io=open(file.name, mode='w')),
                lambda file: replace(file, chars=file.io.write(text)),
                lambda file: do(lambda f: f.io.close(), file))


def show(file):
    """Show file."""
    print(f"Wrote the file `{file.name}` with {file.chars} characters.")


compose_left(write, show)(FILENAME3, TEXT)
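Note that compose_left passes all positional arguments to the first function only; every later function receives just the previous return value. That is why write can take two arguments here while show takes one:

from toolz import compose_left
assert compose_left(lambda a, b: a + b, str)(1, 2) == "3"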

# %%
def parse_line(line):
    raw_signals, raw_value = splitstrip(line, sep=" | ")
    sortjoin = compose_left(sorted, "".join)
    signals = lmap(sortjoin, raw_signals.split())
    value = lmap(sortjoin, raw_value.split())
    return signals, value
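sortjoin canonicalises a segment pattern by sorting its letters, so patterns with the same letters compare equal:

from toolz import compose_left
sortjoin = compose_left(sorted, "".join)
assert sortjoin("cba") == sortjoin("bac") == "abc"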
Example #14
    return s.replace(old, new)


@curry
def str_split(on, s):
    return s.split(on)


@curry
def str_join(sep, s):
    return sep.join(s)


space_replace = str_replace(new=" ")
str_drop = str_replace(new="")
to_title = lambda s: s.title()


nb_display_name = compose_left(space_replace(old="_"), str_drop(old=".html"), to_title)
_category_name = compose_left(space_replace(old="_"), to_title)


def category_name(s):
    if "+" not in s:
        return _category_name(s)
    cats = str_split("+", s)
    head, tail = cats[:-1], cats[-1]
    head_tail = [*head, f"and {tail}"]
    head_tail = str_join(", ", head_tail)
    return _category_name(head_tail)
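A self-contained re-check of the "+"-joined branch (note that str.title also capitalises the "and"):

cats = "data+plots+misc".split("+")
head, tail = cats[:-1], cats[-1]
assert ", ".join([*head, f"and {tail}"]).title() == "Data, Plots, And Misc"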
Example #15
def process_one(points_on: FrozenSet[Tuple]) -> int:
    # We know there's symmetry if all the values for a dimension are the same:
    sd = [i for i, vals in enumerate(zip(*points_on)) if len(set(vals)) == 1]
    cycle_one = partial(cycle, symmetrical_dimensions=tuple(sd))
    return len(compose_left(*([cycle_one] * 6))(points_on))
    difference.

    Once we have them in groups, we extract them into the lists of runs.

    This could be all iterators instead of lists, but I'll make another
    function to do that translation.

    See also consecutive_groups in more_itertools, which was the basis for
    this.
    """
    check = lambda x: x[0] - x[1]
    collate = lambda x: map(itemgetter(1), list(x)[1])
    return map(collate, groupby(enumerate(array), key=check))
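A worked example of the consecutive-runs grouping above: check pairs each value with its index, so equal differences mark a consecutive run.

from itertools import groupby
from operator import itemgetter

check = lambda x: x[0] - x[1]
collate = lambda x: map(itemgetter(1), list(x)[1])
runs = [list(r) for r in map(collate, groupby(enumerate([1, 2, 3, 7, 8]), key=check))]
assert runs == [[1, 2, 3], [7, 8]]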


lfilter = compose_left(filter, list)  # lambda f, l: [*filter(f, l)]
lmap = compose_left(map, list)  # lambda f, l: [*map(f, l)]
lpluck = compose_left(pluck, list)  # lambda k, l: [*pluck(k, l)]
c_map = curry(map)
c_lmap = curry(lmap)
is_char_az = partial(lambda y, x: x in y, ascii_lowercase)
is_char_hex = partial(lambda y, x: x in y, hexc)
is_char_az09 = partial(lambda y, x: x in y, ascii_lowercase + ascii_digits)
filter_str = partial(lambda f, s: "".join(filter(f, s)))
filter_az = partial(filter_str, is_char_az)
filter_az09 = partial(filter_str, is_char_az09)
filter_hex = partial(filter_str, is_char_hex)
add_pprint = partial(add_debug, pprint)
add_pprinting = partial(lmap, add_pprint)
lcompact = partial(lfilter, None)
splitstrip = compose_left(str.split, partial(lmap, str.strip), lcompact)
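splitstrip splits, strips each piece, and drops empties; keyword arguments reach str.split because compose_left forwards them to the first function:

assert splitstrip("  a , b ,, c  ", sep=",") == ["a", "b", "c"]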
Example #17
@toolz.curry
def inside(val, container):
    return val in container


@toolz.curry
def pair_with(f, element):
    return f(element), element


@toolz.curry
def pair_right(f, element):
    return element, f(element)


average = toolz.compose_left(bifurcate(sum, toolz.count),
                             star(operator.truediv))
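bifurcate and star are not shown in this excerpt; a hedged stand-in pair (bifurcate feeding one iterable to several reducers via itertools.tee, star unpacking a tuple into positional arguments) makes average runnable:

import itertools
import operator
import toolz

def bifurcate(*funcs):
    def inner(seq):
        # Duplicate the iterable once per reducer, then apply each reducer.
        return tuple(f(s) for f, s in zip(funcs, itertools.tee(seq, len(funcs))))
    return inner

def star(f):
    return lambda args: f(*args)

average = toolz.compose_left(bifurcate(sum, toolz.count), star(operator.truediv))
assert average(iter([1, 2, 3])) == 2.0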


@toolz.curry
def len_equals(length: int, seq):
    return len(seq) == length


@toolz.curry
def skip(n, seq):
    for i, x in enumerate(seq):
        if i < n:
            continue
        yield x
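skip(n, seq) behaves like itertools.islice(seq, n, None):

import itertools
assert list(skip(2, "abcde")) == list(itertools.islice("abcde", 2, None)) == ["c", "d", "e"]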

Example #18
def before(f1, f2):
    return toolz.compose_left(f1, f2)
def process_two(data):
    proc = compose_left(prep_expr, group_parens, eval_adv)
    return sum([proc(line) for line in data])
Example #20
def process_two(points_on: FrozenSet[Tuple]) -> int:
    points4 = three_to_four(points_on)
    sd = [i for i, vals in enumerate(zip(*points4)) if len(set(vals)) == 1]
    cycle_two = partial(cycle, symmetrical_dimensions=tuple(sd))
    return len(compose_left(*([cycle_two] * 6))(points4))
from functools import partial
from pathlib import Path
from typing import Any, Callable, List, Iterable, Optional, Union
from toolz import (  # type: ignore
    compose_left, sliding_window,
)

lfilter = compose_left(filter, list)  # lambda f, l: [*filter(f, l)]
lcompact = partial(lfilter, None)


def is_sum_in(arr, target):
    # True if two entries of arr (or one entry counted twice) sum to target.
    for i in arr:
        if target - i in arr:
            return True
    return False


def is_sum_in_prior_n(arr, limit):
    for sw in sliding_window(limit + 1, arr):
        opts = sw[:limit]
        targ = sw[limit]
        if not is_sum_in(opts, targ):
            return targ
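A worked example: with a look-back window of 3, the value 30 is the first one that is not the sum of two of the preceding three numbers.

assert is_sum_in_prior_n([1, 2, 3, 5, 30, 31], 3) == 30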


def contig_sum(arr, targ):
    sm = 0
    for i, val in enumerate(arr):
        sm = sm + val
        if sm == targ:
def process_input(input_funcs: List[Callable], text: str) -> Any:
    return compose_left(*input_funcs)(text)
    def qct(self, qcts: int) -> Point:
        # Quarter-circle turn around the origin: deosil (clockwise) positive,
        # widdershins (counter-clockwise) negative.
        procs = [lambda p: Point(p.y, -p.x)] * (qcts % 4)
        return compose_left(*procs)(Point(self.x, self.y))
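A runnable check of the quarter-turn logic, using a throwaway namedtuple since the original Point class is not shown in this excerpt (qcts % 4 == 0 composes zero functions, and toolz.compose_left() with no arguments is the identity):

from collections import namedtuple
from toolz import compose_left

Point = namedtuple("Point", "x y")

def qct(p, qcts):
    procs = [lambda p: Point(p.y, -p.x)] * (qcts % 4)
    return compose_left(*procs)(Point(p.x, p.y))

assert qct(Point(1, 0), 1) == Point(0, -1)   # one deosil quarter turn
assert qct(Point(1, 0), 4) == Point(1, 0)    # full circle is the identity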
from functools import partial
from pathlib import Path
from toolz import compose_left, iterate  # type: ignore


lfilter = compose_left(filter, list)  # lambda f, l: [*filter(f, l)]
lcompact = partial(lfilter, None)
splitstrip = compose_left(str.split, partial(map, str.strip), lcompact)


def proc_line(lines, current, acc, seen):
    seen = seen + [current]
    cmd, amt = splitstrip(lines[current], " ")
    if cmd in ("nop", "acc"):
        nxt = current + 1
    if cmd == "jmp":
        nxt = current + int(amt)
    if cmd == "acc":
        acc = acc + int(amt)
    if nxt in seen:
        return acc
    else:
        return proc_line(lines, nxt, acc, seen)


def proc_line2(lines, current, acc, seen, changed):
    if current >= len(lines):
        return acc

    seen = seen + [current]
    cmd, amt = splitstrip(lines[current], " ")
Example #25
def lazyjuxt(*funcs):
    return toolz.compose_left(apply, curried.map, apply(funcs))
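This relies on a curried apply helper with apply(x)(f) == f(x); under that assumption, lazyjuxt(f, g)(x) is a lazy version of toolz.juxt:

import toolz
from toolz import curried

@toolz.curry
def apply(value, function):
    return function(value)

def lazyjuxt(*funcs):
    # x -> apply(x) -> map(apply(x), ...) -> map(apply(x), funcs), lazily.
    return toolz.compose_left(apply, curried.map, apply(funcs))

assert tuple(lazyjuxt(len, str.upper)("abc")) == (3, "ABC")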
Example #26
    return map(
        toolz.first,
        graph_traverse(source=(source, 0),
                       get_neighbors=get_neighbors_limiting_radius),
    )


edges_to_graph = toolz.compose(
    curried.valmap(toolz.compose(frozenset, curried.map(toolz.second))),
    curried.groupby(toolz.first),
)

graph_to_edges = toolz.compose_left(
    curried.keymap(lambda x: (x, )),
    dict.items,
    curried.mapcat(functional.star(itertools.product)),
)

reverse_graph = toolz.compose_left(
    graph_to_edges, curried.map(toolz.compose_left(reversed, tuple)),
    edges_to_graph)

cliques_to_graph = toolz.compose_left(
    curried.mapcat(lambda clique: itertools.permutations(clique, r=2)),
    edges_to_graph)
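A quick check of edges_to_graph: it groups edges by source node and collects the targets into frozensets.

assert edges_to_graph([(1, 2), (1, 3), (2, 3)]) == {
    1: frozenset({2, 3}),
    2: frozenset({3}),
}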


def get_connectivity_components(graph: Dict) -> Iterable[FrozenSet]:
    """Graph is assumed to undirected, so each edge must appear both ways."""
    nodes_left = frozenset(graph)
Example #27
    difference.

    Once we have them in groups, we extract them into the lists of runs.

    This could be all iterators instead of lists, but I'll make another
    function to do that translation.

    See also consecutive_groups in more_itertools, which was the basis for
    this.
    """
    check = lambda x: x[0] - x[1]
    collate = lambda x: map(itemgetter(1), list(x)[1])
    return map(collate, groupby(enumerate(array), key=check))


lfilter = compose_left(filter, list)  # lambda f, l: [*filter(f, l)]
lcompact = partial(lfilter, None)
lmap = compose_left(map, list)  # lambda f, l: [*map(f, l)]
lpluck = compose_left(pluck, list)  # lambda k, l: [*pluck(k, l)]
lstrip = partial(lmap, str.strip)
splitstrip = compose_left(str.split, lstrip, lcompact)
splitstriplines = compose_left(str.splitlines, lstrip, lcompact)
seq_to_dict = compose_left(lmap, dict)
split_to_dict = lambda s, **kwds: seq_to_dict(partial(splitstrip, **kwds), s)
c_map = curry(map)
c_lmap = curry(lmap)
is_char_az = partial(lambda y, x: x in y, ascii_lowercase)
is_char_hex = partial(lambda y, x: x in y, hexc)
is_char_az09 = partial(lambda y, x: x in y, ascii_lowercase + ascii_digits)
filter_str = partial(lambda f, s: "".join(filter(f, s)))
filter_az = partial(filter_str, is_char_az)
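split_to_dict builds a dict from "key: value" records; combined with splitstriplines it parses simple colon-separated text:

text = "a: 1\nb: 2"
assert split_to_dict(splitstriplines(text), sep=":") == {"a": "1", "b": "2"}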
Example #28
def batches_from_mapper(
    data_mapping: Mapping[str, xr.Dataset],
    variable_names: Sequence[str],
    timesteps_per_batch: int = 1,
    random_seed: int = 0,
    timesteps: Optional[Sequence[str]] = None,
    res: str = "c48",
    needs_grid: bool = True,
    in_memory: bool = False,
) -> loaders.typing.Batches:
    """ The function returns a sequence of datasets that is later
    iterated over in  ..sklearn.train.

    Args:
        data_mapping: Interface to select data for
            given timestep keys.
        variable_names: data variables to select
        timesteps_per_batch (int, optional): Defaults to 1.
        random_seed: Defaults to 0.
        timesteps: List of timesteps to use in training.
        needs_grid: Add grid information into batched datasets. [Warning] requires
            remote GCS access
        in_memory: if True, load data eagerly and keep it in memory
    Raises:
        ValueError: If requested timesteps are not present in the data mapping
        TypeError: If no variable_names are provided to select the final datasets

    Returns:
        Sequence of xarray datasets
    """
    if timesteps and not set(timesteps).issubset(data_mapping.keys()):
        raise ValueError(
            "Timesteps specified in file are not present in data: "
            f"{list(set(timesteps) - set(data_mapping.keys()))}"
        )

    random_state = RandomState(random_seed)
    if len(variable_names) == 0:
        raise TypeError("At least one value must be given for variable_names")

    if timesteps is None:
        timesteps = list(data_mapping.keys())
    num_times = len(timesteps)
    times = _sample(timesteps, num_times, random_state)
    batched_timesteps = list(partition_all(timesteps_per_batch, times))

    # First function goes from mapper + timesteps to xr.dataset
    # Subsequent transforms are all dataset -> dataset
    transforms = [_get_batch(data_mapping, variable_names)]

    if needs_grid:
        transforms += [
            add_grid_info(res),
            add_wind_rotation_info(res),
        ]

    transforms += [add_derived_data(variable_names)]

    batch_func = compose_left(*transforms)

    seq = Map(batch_func, batched_timesteps)
    seq.attrs["times"] = times

    if in_memory:
        out_seq: Batches = tuple(ds.load() for ds in seq)
    else:
        out_seq = seq
    return out_seq
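The transform pipeline above is plain left-to-right function composition; a toy version with dicts standing in for xarray datasets:

from toolz import compose_left

transforms = [lambda d: {**d, "grid": True}, lambda d: {**d, "derived": True}]
batch_func = compose_left(lambda key: {"key": key}, *transforms)
assert batch_func("t0") == {"key": "t0", "grid": True, "derived": True}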
def load_and_process_input(fname: Union[Path, str],
                           input_funcs: UListCall) -> Any:
    processinput = partial(process_input, input_funcs)
    return compose_left(load_input, processinput)(fname)