def clear_cache(scope=None):
    """
    Clear cached data, optionally restricting the removal to a single scope.

    Parameters
    ----------
    scope : {None, 'step', 'iteration', 'forever'}, optional
        If given, remove only cached values with this scope.
        By default all cached values are removed.

    """
    if not scope:
        _TABLE_CACHE.clear()
        _COLUMN_CACHE.clear()
        _INJECTABLE_CACHE.clear()
        for m in _MEMOIZED.values():
            m.value.clear_cached()
        logger.debug('simulation cache cleared')
    else:
        for d in (_TABLE_CACHE, _COLUMN_CACHE, _INJECTABLE_CACHE):
            items = toolz.valfilter(lambda x: x.scope == scope, d)
            for k in items:
                del d[k]
        for m in toolz.filter(lambda x: x.scope == scope, _MEMOIZED.values()):
            m.value.clear_cached()
        logger.debug('cleared cached values with scope {!r}'.format(scope))
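A minimal usage sketch, assuming the surrounding module exposes these cache dictionaries and stores entries as objects with a .scope attribute (the CacheItem namedtuple below is a hypothetical stand-in for the real entry type):

from collections import namedtuple

# Hypothetical entry shape; toolz.valfilter only needs a .scope attribute.
CacheItem = namedtuple('CacheItem', ['value', 'scope'])

_TABLE_CACHE['households'] = CacheItem(value={'rows': 10}, scope='iteration')
_TABLE_CACHE['zones'] = CacheItem(value={'rows': 5}, scope='forever')

clear_cache(scope='iteration')  # removes only 'households'
clear_cache()                   # removes everything and clears memoized funcs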
Example #3
 def delete(self, artifact_or_id):
     artifact_id = _artifact_id(artifact_or_id)
     cs.ensure_delete(self)
     new_artifacts = list(
         t.filter(lambda a: a.id != artifact_id, self.artifacts))
     if len(new_artifacts) == len(self.artifacts):
         # filtering removed nothing, so the id was not present
         raise KeyError(artifact_id, self)
     else:
         self.artifacts = new_artifacts
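The length comparison is what detects a missing id: filtering never raises, so an unchanged length is the only signal that nothing matched. The same idiom in isolation (the Artifact class here is a made-up stand-in for the real artifact objects):

import toolz as t

class Artifact:
    def __init__(self, id):
        self.id = id

artifacts = [Artifact('a'), Artifact('b')]
kept = list(t.filter(lambda a: a.id != 'c', artifacts))
if len(kept) == len(artifacts):
    print('nothing to delete for id "c"')  # this branch runs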
Example #4
File: hume.py Project: ntdef/hume
import json

from toolz import first, last


def entrypoint(dockerfile):
    "Return the entrypoint, if declared"
    f = dockerfile.split("\n")[::-1]  # reverse the lines; last ENTRYPOINT wins
    try:
        entry_line = first(filter(lambda x: "ENTRYPOINT" in x, f))
    except StopIteration:
        # No ENTRYPOINT line was found
        return None
    else:
        res = last(entry_line.partition("ENTRYPOINT")).strip()
        try:
            # exec form: ENTRYPOINT ["executable", "arg"]
            return json.loads(res)
        except ValueError:
            # shell form: ENTRYPOINT executable arg
            return res.split()
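A quick check of both ENTRYPOINT forms (the Dockerfile strings are made up for illustration):

exec_form = 'FROM alpine\nENTRYPOINT ["python", "app.py"]'
shell_form = 'FROM alpine\nENTRYPOINT python app.py'

print(entrypoint(exec_form))      # ['python', 'app.py']  (parsed as JSON)
print(entrypoint(shell_form))     # ['python', 'app.py']  (fallback .split())
print(entrypoint('FROM alpine'))  # None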
Example #6
def compute_up(t, seq, **kwargs):
    predicate = rrowfunc(t.predicate, t._child)
    return filter(predicate, seq)
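rrowfunc comes from blaze and compiles the symbolic predicate into an ordinary callable over rows; outside blaze the same pattern is just filter with a row function (a standalone analogue, not blaze's actual machinery):

rows = [(1, 'alice'), (2, 'bob'), (3, 'carol')]
predicate = lambda row: row[0] > 1      # stands in for rrowfunc's output
print(list(filter(predicate, rows)))    # [(2, 'bob'), (3, 'carol')]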
Example #7
 def all_block(self) -> List[Block]:
     # materialize so the return value actually matches List[Block]
     return list(t.filter(lambda x: isinstance(x, Block), self.values()))
Example #8
def compact(iterable):
    # renamed the parameter from iter to avoid shadowing the builtin
    return filter(None, iterable)
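filter(None, ...) keeps only truthy items, so compact drops zeros, empty strings, empty containers, and None:

print(list(compact([0, 1, '', 'toolz', None, []])))  # [1, 'toolz']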
Example #9
def compute_up(expr, seq, predicate, **kwargs):
    preds = iter(predicate)
    return filter(lambda _: next(preds), seq)
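Here predicate is a parallel sequence of booleans rather than a function: the lambda ignores each element and instead consumes the mask in lockstep with seq. The trick in isolation:

mask = iter([True, False, True])
print(list(filter(lambda _: next(mask), ['a', 'b', 'c'])))  # ['a', 'c']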
Example #10
def filter_shorter_than(n, tokenset):
    """
    Filters out tokens that have less than 'n' characters.
    """
    return tlz.filter(lambda tkn: len(tkn) >= n, tokenset)
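These token filters return lazy iterators, so results need materializing; usage, assuming import toolz as tlz as in the snippet:

tokens = ['a', 'the', 'toolz', 'filter']
print(list(filter_shorter_than(4, tokens)))  # ['toolz', 'filter']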
Example #11
def filter_stopwords(tokenset):
    """
    Filters out stopwords.
    """
    return tlz.filter(lambda tkn: tkn not in STOPWORDS, tokenset)
Example #12
def compute_up(t, seq, **kwargs):
    predicate = rrowfunc(t.predicate, t._child)
    return filter(predicate, seq)
Example #13
def compute_up(expr, seq, predicate, **kwargs):
    preds = iter(predicate)
    return filter(lambda _: next(preds), seq)
Example #14
def compact(_iter):
    return filter(None, _iter)
Example #15
def sfilter(pred, fn, x):
    """ Yield only those results which satisfy the predicate """
    # use a distinct loop variable instead of rebinding the input x
    for result in filter(pred, fn(x)):
        yield result
Example #17
def connected_mechanisms(order, network):
    """Return an iterable of the connected mechanisms of the given order
    within the given network."""
    return filter(valid_mechanism(order, network.cm),
                  pyphi.utils.powerset(network.node_indices))
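pyphi.utils.powerset enumerates subsets of the node indices; the shape of the computation, with plain itertools and a size predicate standing in for valid_mechanism:

from itertools import chain, combinations

def powerset(indices):
    return chain.from_iterable(
        combinations(indices, r) for r in range(len(indices) + 1))

print(list(filter(lambda s: len(s) == 2, powerset((0, 1, 2)))))
# [(0, 1), (0, 2), (1, 2)]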
Example #18
def filter_stopwords(tokenset):
    """
    Filters out tokens that are stopwords.
    """
    return tlz.filter(not_stopword, tokenset)
Example #19
def filter_whitespace(tokenset):
    """
    Filters out tokens that are only whitespace.
    """
    return tlz.filter(tlz.compose(bool, lambda string: string.strip()), tokenset)
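compose(bool, ...) maps a token to False exactly when it strips to the empty string, so purely-whitespace tokens are dropped while the kept tokens stay untrimmed:

print(list(filter_whitespace(['a', '  ', '\t', 'b\n'])))  # ['a', 'b\n']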
Example #20
def filter_stopwords(tokenset):
    """
    Filters out stopwords.
    """
    return tlz.filter(lambda tkn: tkn not in STOPWORDS, tokenset)
Example #21
 def all_block(self) -> List[Block]:
     # materialize so the return value actually matches List[Block]
     return list(t.filter(lambda x: isinstance(x, Block), self.values()))
Example #22
deps.number_of_edges()
deps.number_of_nodes()
deps.node['skimage']
deps.in_edges('skimage')
nodes = nx.katz_centrality(deps)
central = sorted(deps.nodes(), key=nodes.__getitem__, reverse=True)
central[:10]
central[:20]
central[:40]
central[40:80]
central.index('skimage')
central.index('scipy')
import pickle
stdlib = pickle.load(open('/Users/jni/projects/depsy/data/python_standard_libs.pickle', 'rb'))
central_nonstd = list(tz.filter(lambda x: x not in stdlib, central))
len(central_nonstd)
central_nonstd.index('scipy')
len(central)
central[:5]
nx.is_connected(deps.to_undirected())
len(packages)
deps_sym = deps.to_undirected()
import numpy as np
conncomps = list(nx.connected_component_subgraphs(deps_sym))
giant = conncomps[0]
giant_d = deps.subgraph(giant.nodes())
gpackages = giant_d.nodes()
A = nx.to_scipy_sparse_matrix(giant_d)
A.shape
A.dtype
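This is an interactive session over a package-dependency graph (deps), ranking packages by Katz centrality and extracting the giant connected component. A self-contained toy version of the same pipeline, updated for networkx 2.4+ where connected_component_subgraphs no longer exists (the edges below are invented):

import networkx as nx

deps = nx.DiGraph([('skimage', 'scipy'), ('skimage', 'numpy'),
                   ('scipy', 'numpy'), ('pandas', 'numpy'),
                   ('orphan', 'lonely')])

centrality = nx.katz_centrality(deps)
central = sorted(deps.nodes(), key=centrality.__getitem__, reverse=True)
print(central[:3])  # most central packages first

# Giant component: build the subgraph from connected_components.
giant_nodes = max(nx.connected_components(deps.to_undirected()), key=len)
giant = deps.subgraph(giant_nodes)
A = nx.to_scipy_sparse_array(giant)  # to_scipy_sparse_matrix on older networkx
print(A.shape)  # (4, 4)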
Example #23
def filter_whitespace(tokenset):
    """
    Filters out tokens that are only whitespace.
    """
    return tlz.filter(tlz.compose(bool, str.strip), tokenset)
Example #24
def filter_longer_than(n, tokenset):
    """
    Filters out tokens that have 'n' characters or more.
    """
    return tlz.filter(lambda tkn: len(tkn) < n, tokenset)
Example #26
def compute_up(expr, seq, **kwargs):
    predicate = like_regex_predicate(expr)
    return filter(predicate, seq)
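Outside blaze, a regex predicate for filter is just a compiled pattern's bound search method:

import re

predicate = re.compile(r'^Al').search
print(list(filter(predicate, ['Alice', 'Bob', 'Alan'])))  # ['Alice', 'Alan']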
Example #27
# TODO extract zip of takeout
import operator
import os
import re

import dateparser
import pandas
import toolz

TAKEOUT_DIRECTORY_PATH = '/Users/guy/Downloads/Takeout'
CSV_DIRECTORY = 'Fit/Daily Aggregations'
AVERAGE_WEIGHT_FIELD = 'Average weight (kg)'

csv_dir = os.path.join(TAKEOUT_DIRECTORY_PATH, CSV_DIRECTORY)
files = os.listdir(csv_dir)


def is_daily_aggregation(file_name):
    # escape the dot and anchor the end so stray suffixes don't match
    return re.match(r'\d{4}-\d{2}-\d{2}\.csv$', file_name)


daily_aggregations = toolz.filter(is_daily_aggregation, files)
data = []

for csv_file in daily_aggregations:
    date_part = csv_file.split(".")[0]
    date = dateparser.parse(date_part)
    csv = pandas.read_csv(os.path.join(csv_dir, csv_file))
    weight = csv[
        csv[AVERAGE_WEIGHT_FIELD].notna()][AVERAGE_WEIGHT_FIELD].mean()
    if pandas.notna(weight):
        data.append({"date": date, "weight": weight})

sorted_data = sorted(data, key=operator.itemgetter('date'))
df = pandas.DataFrame(sorted_data)
df.to_csv('output.csv')
print(sorted_data)
Example #28
import numpy as np
import pandas as pd

# X and y are assumed to be defined in earlier cells of the notebook.
y_clf = y.values.ravel() > 0
y_log = np.log(y.values.ravel() + 1)

df_variable = pd.read_excel('data/tap4fun.xlsx', sheet_name='Sheet1')
df_variable = df_variable.iloc[:, range(3)].rename(columns={
    '字段名': 'column',    # field name
    '字段解释': 'desc',    # field description
    '数据时间': 'cycle'    # data period
})
df_variable['cat'] = df_variable.column.str.split('_').apply(lambda x: x[0])

# Feature synthesis
# Select the payment-related factors
from toolz import filter

pay_cols = list(filter(lambda x: 'pay' in x, X.columns))

X['avg_online_minutes_log10'] = np.log10(X['avg_online_minutes'] + 1)

active_cols = list(filter(lambda x: 'avg_online' in x, X.columns))
achievement_cols = ['army_add', 'resource_add', 'acceleration_add']
for col in achievement_cols:
    X[col + '_log'] = np.log(X[col] + 1)

X['achievement'] = np.log(X['army_add'] + X['resource_add'] + 1)
# Per-hour rates: convert minutes to hours before dividing.
X['achievement_per_hour'] = X['achievement'] / (X['avg_online_minutes'] / 60)
X['acceleration_per_hour'] = X['acceleration_add'] / (
    X['avg_online_minutes'] / 60)
X.loc[X.avg_online_minutes == 0, 'achievement_per_hour'] = 0
X.loc[X.avg_online_minutes == 0, 'acceleration_per_hour'] = 0
X['acceleration_per_hour_log'] = np.log(X['acceleration_per_hour'] + 1)
Example #29
def compute_up(t, seq, **kwargs):
    predicate = optimize(t.predicate, seq)
    predicate = rrowfunc(predicate, child(t))
    return filter(predicate, seq)
Example #30
def compute_up_1d(t, seq, **kwargs):
    return toolz.count(filter(None, seq))
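count consumes the iterator, so this is a one-pass truthy-element counter:

import toolz
print(toolz.count(filter(None, [0, 1, '', 'x', None, 2])))  # 3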
Example #32
def filter_shorter_than(n, tokenset):
    """
    Filters out tokens that have less than 'n' characters.
    """
    return tlz.filter(lambda tkn: len(tkn) >= n, tokenset)
Example #33
 def all_inline(self) -> List[Inline]:
     # materialize so the return value actually matches List[Inline]
     return list(t.filter(lambda x: isinstance(x, Inline), self.values()))
Example #35
def compute_up(t, seq, **kwargs):
    predicate = optimize(t.predicate, seq)
    predicate = rrowfunc(predicate, child(t))
    return filter(predicate, seq)
Example #36
def filter_stopwords(stopwords, tokens):
    return tz.filter(lambda t: t not in stopwords, tokens)
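Usage, assuming import toolz as tz as in the snippet:

STOPWORDS = {'the', 'a', 'of'}
print(list(filter_stopwords(STOPWORDS, ['the', 'zen', 'of', 'python'])))
# ['zen', 'python']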
Example #37
def compute_up(expr, seq, **kwargs):
    predicate = like_regex_predicate(expr)
    return filter(predicate, seq)
Example #38
def filter_whitespace(tokenset):
    """
    Filters out tokens that are only whitespace.
    """
    return tlz.filter(tlz.compose(bool, str.strip), tokenset)