Example #1
def test_schema_keys(response):
    schema = w.WeatherUndergroundAPIResponse()
    raw_data = json.loads(response)
    data, _ = schema.loads(response)
    # The parsed data should expose the same flattened keys as the raw JSON,
    # once date keys are dropped from both sides.
    assert (set(remove(_date_key, dict_flatten(data).keys()))
            == set(remove(_date_key, dict_flatten(raw_data).keys())))
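A minimal sketch of the comparison above, with stand-in implementations of dict_flatten and _date_key (the project's own helpers are not shown here): flatten nested dicts to dotted keys, drop date-like keys from both sides, and compare the key sets.

from toolz import remove

def dict_flatten(d, prefix=''):
    # Flatten nested dicts into a single dict with dotted keys.
    out = {}
    for k, v in d.items():
        key = prefix + k
        if isinstance(v, dict):
            out.update(dict_flatten(v, key + '.'))
        else:
            out[key] = v
    return out

_date_key = lambda k: 'date' in k  # hypothetical date-key predicate

raw = {'temp': {'c': 21, 'f': 70}, 'obs_date': '2024-01-01'}
parsed = {'temp': {'c': 21.0, 'f': 70.0}, 'parse_date': '2024-01-02'}
assert (set(remove(_date_key, dict_flatten(parsed).keys()))
        == set(remove(_date_key, dict_flatten(raw).keys())))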
Example #2
File: Zte.py Project: sjava/olt
def zhongji(ip='', username='', password=''):
    try:
        result = []
        child = telnet(ip, username, password)
        child.sendline("show lacp internal")
        while True:
            index = child.expect([zte_prompt, zte_pager], timeout=120)
            if index == 0:
                result.append(child.before)
                child.sendline('exit')
                child.close()
                break
            else:
                result.append(child.before)
                child.send(' ')
                continue
    except (pexpect.EOF, pexpect.TIMEOUT):
        return ['fail', None, ip]
    # Drop the echoed command and the trailing prompt, strip backspaces, and
    # keep smartgroup headers plus member-port lines ('selected' also matches
    # 'unselected', which the remove() call filters out).
    rslt = ''.join(result).split('\r\n')[1:-1]
    records = [x.replace('\x08', '').strip()
               for x in rslt if 'Smartgroup' in x or 'selected' in x]
    records = remove(lambda x: 'unselected' in x, records)
    rec1 = [x.split()[0].lower().replace(':', '') for x in records]
    # partitionby groups each smartgroup header with the member lines that
    # follow it; partition(2, ...) pairs them into {smartgroup: ports}.
    rec2 = partition(2, partitionby(lambda x: 'smartgroup' in x, rec1))
    rec3 = {x[0][0]: x[1] for x in rec2}
    return ['success', rec3, ip]
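A minimal sketch of the partitionby/partition pipeline above on hypothetical sample data: toolz.partitionby groups consecutive items by the predicate's value, so each smartgroup header lands in its own group followed by a group of its member ports, and partition(2, ...) pairs them up.

from toolz import partition, partitionby

rec1 = ['smartgroup1', 'gei_1/1', 'gei_1/2', 'smartgroup2', 'gei_1/3']
rec2 = partition(2, partitionby(lambda x: 'smartgroup' in x, rec1))
rec3 = {x[0][0]: x[1] for x in rec2}
print(rec3)
# {'smartgroup1': ('gei_1/1', 'gei_1/2'), 'smartgroup2': ('gei_1/3',)}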
Example #3
    def tokenize(self, message: str, stopwords=()) -> List[str]:
        """
        Tokenize the string passed in.
        If message is not a string or is the empty string, return [].
        """
        if not isinstance(message, str) or message == "":
            print("received bad input...")
            try:
                print(f"message length = {len(message)}")
            except TypeError as e:
                print(e)
            print(f"message is {str(message)}")
            return []
        else:
            tokens: List[str] = []
            # explicitly run all other methods on input
            sents = self.run_pipeline(message)
            # we now have a list of sentences, remove falsey elements
            sents = t.remove(self.empty_sent_re.match, sents)
            # tokenize each sentence and concat resultant arrays
            tokens = list(t.mapcat(nltk.word_tokenize, sents))

            if stopwords:
                tokens = [x for x in tokens if x.lower() not in stopwords]
            return tokens
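The same remove/mapcat pattern in isolation, with assumed stand-ins (str.split instead of nltk.word_tokenize, and no pipeline step): toolz.remove drops elements where the predicate is truthy, here whitespace-only sentences, and toolz.mapcat tokenizes each sentence and flattens the results.

import re
import toolz as t

empty_sent_re = re.compile(r'^\s*$')
sents = ['Hello there.', '   ', 'General Kenobi!']
sents = t.remove(empty_sent_re.match, sents)   # drop whitespace-only sentences
tokens = list(t.mapcat(str.split, sents))      # tokenize and concatenate
print(tokens)  # ['Hello', 'there.', 'General', 'Kenobi!']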
Example #4
    def _actions(seed):
        init = {'values': [], 'state': seed}

        result = reduce(_intermediate, acts, init)

        # Discard the None placeholders left in the accumulated values.
        keep = remove(lambda x: x is None, result['values'])

        return done(keep, result['state'])
def sort_ohm(ohm):
    """Sort an OrderedHierarchicalMapping: top-level items first, then
    nested items ordered by their section path.
    """
    first_level = lambda k: len(k[0].split(ohm.SECTION_SEPARATOR)) == 1
    leveled_items = remove(first_level, ohm.items())
    first_level_items = [item for item in ohm.items() if first_level(item)]
    return first_level_items + sorted(
        leveled_items, key=lambda k: k[0].split(ohm.SECTION_SEPARATOR))
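A hypothetical sketch of the split in sort_ohm above, using a plain dict and '.' as the section separator: remove() yields the nested items, the comprehension keeps the top-level ones, and the nested items are sorted by their key path.

from toolz import remove

items = {'a': 1, 'b.x': 2, 'a.y': 3, 'c': 4}.items()
first_level = lambda kv: len(kv[0].split('.')) == 1
nested = sorted(remove(first_level, items), key=lambda kv: kv[0].split('.'))
top = [kv for kv in items if first_level(kv)]
print(top + nested)  # [('a', 1), ('c', 4), ('a.y', 3), ('b.x', 2)]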
Example #5
    def validate(self):
        normalized_std = list(toolz.remove(lambda x: x.sid == "", self.rules))
        unique_std = len(list(toolz.unique(normalized_std, key=lambda x: x.sid)))
        all_std = len(normalized_std)
        if all_std != unique_std:
            sysexit_with_message(
                "Detected duplicate IDs in standards definition. Please use unique IDs only."
            )
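A self-contained sketch of the duplicate check above, with a hypothetical Rule record in place of the project's rule objects: rules with an empty sid are stripped first, then the count of sid-unique rules is compared against the total.

from collections import namedtuple
import toolz

Rule = namedtuple('Rule', 'sid')
rules = [Rule('101'), Rule(''), Rule('102'), Rule('101')]

normalized = list(toolz.remove(lambda x: x.sid == '', rules))
unique = len(list(toolz.unique(normalized, key=lambda x: x.sid)))
print(len(normalized) == unique)  # False: sid '101' appears twice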
Example #6
def _next_merge(merge_node):
    """
    Gets a node that has only leaf nodes below it. This table and
    the ones below are ready to be merged to make a new leaf node.

    """
    if all(_is_leaf_node(d) for d in _dict_value_to_pairs(merge_node)):
        return merge_node
    else:
        # Recurse into the first non-leaf child; the for/else raises only if
        # remove() yields nothing, which the all() check above precludes.
        for d in toolz.remove(_is_leaf_node, _dict_value_to_pairs(merge_node)):
            return _next_merge(d)
        else:
            raise SimulationError('No node found for next merge.')
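A toy driver for the function above, with hypothetical helpers standing in for the project's: nodes are single-key dicts mapping a name to a dict of children, and a leaf has no children.

import toolz

class SimulationError(Exception):
    pass

def _dict_value_to_pairs(node):
    (_, children), = node.items()
    return [{name: kids} for name, kids in children.items()]

def _is_leaf_node(node):
    (_, children), = node.items()
    return not children

tree = {'root': {'a': {'x': {}, 'y': {}}, 'b': {}}}
print(_next_merge(tree))  # {'a': {'x': {}, 'y': {}}}: all its children are leaves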
Example #7
def columns_in_formula(formula):
    """
    Returns the names of all the columns used in a patsy formula.

    Parameters
    ----------
    formula : str, iterable, or dict
        Any formula construction supported by ``str_model_expression``.

    Returns
    -------
    columns : list of str

    """
    if formula is None:
        return []

    formula = str_model_expression(formula, add_constant=False)
    columns = []

    tokens = map(
        lambda x: x.extra,
        toolz.remove(
            lambda x: x.extra is None,
            _tokens_from_patsy(patsy.parse_formula.parse_formula(formula))))

    for tok in tokens:
        # if there are parentheses in the expression we
        # want to drop them and everything outside
        # and start again from the top
        if '(' in tok:
            start = tok.find('(') + 1
            fin = tok.rfind(')')
            columns.extend(columns_in_formula(tok[start:fin]))
        else:
            for toknum, tokval, _, _, _ in generate_tokens(
                    StringIO(tok).readline):
                if toknum == NAME:
                    columns.append(tokval)

    return list(toolz.unique(columns))
Example #8
def elemwise(op, *args, **kwargs):
    """ Elementwise operation for dask.Sparseframes

    Parameters
    ----------
    op: function
        Function that takes as first parameter the underlying df
    args:
        Contains Dataframes
    kwargs:
        Contains meta.
    """
    meta = kwargs.pop('meta', no_default)

    _name = funcname(op) + '-' + tokenize(op, kwargs, *args)

    # if pd.Series or pd.DataFrame change to dd.DataFrame
    args = _maybe_from_pandas(args)

    # Align DataFrame blocks if divisions are different.
    from .multi import _maybe_align_partitions  # to avoid cyclical import
    args = _maybe_align_partitions(args)

    # extract all dask instances
    dasks = [
        arg for arg in args if isinstance(arg, (SparseFrame, _Frame, Scalar))
    ]
    # extract all dask frames
    dfs = [df for df in dasks if isinstance(df, (_Frame, SparseFrame))]

    # We take divisions from the first dask frame
    divisions = dfs[0].divisions

    _is_broadcastable = partial(is_broadcastable, dfs)
    dfs = list(remove(_is_broadcastable, dfs))
    n = len(divisions) - 1

    other = [(i, arg) for i, arg in enumerate(args)
             if not isinstance(arg, (_Frame, Scalar, SparseFrame))]

    # Get dsks graph tuple keys and adjust the key length of Scalar
    keys = [
        d.__dask_keys__() * n if isinstance(d, Scalar) or _is_broadcastable(d)
        else d.__dask_keys__() for d in dasks
    ]

    if other:
        dsk = {(_name, i): (apply, partial_by_order, list(frs), {
            'function': op,
            'other': other
        })
               for i, frs in enumerate(zip(*keys))}
    else:
        dsk = {(_name, i): (op, ) + frs for i, frs in enumerate(zip(*keys))}
    dsk = merge(dsk, *[d.dask for d in dasks])

    if meta is no_default:
        if len(dfs) >= 2 and len(dasks) != len(dfs):
            # should not occur in current funcs
            msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
            raise NotImplementedError(msg)
        meta = _emulate(op, *args, **kwargs)

    return SparseFrame(dsk, _name, meta, divisions)
Example #9
# -*- encoding: utf-8 -*-
import toolz

print_list = lambda x: print(list(x))

l = list(range(25))


# 1. use toolz.curry instead of functools.partial
@toolz.curry
def not_multiple_of(x, y):
    return y % x != 0


print_list(filter(not_multiple_of(3), l))
print_list(filter(not_multiple_of(7), l))
print('-' * 20, '\n')

# 2. use toolz.remove
print_list(toolz.remove(toolz.complement(not_multiple_of(3)), l))
print_list(toolz.remove(toolz.complement(not_multiple_of(7)), l))
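The two halves of the demo are equivalent: remove(pred, seq) drops the items where pred is true, so remove(complement(pred), seq) keeps exactly what filter(pred, seq) keeps.

# Verify the equivalence on the same data as above.
assert (list(toolz.remove(toolz.complement(not_multiple_of(3)), l))
        == list(filter(not_multiple_of(3), l)))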