def create_range_params(*args, **kw):
    """Parse ``_range`` query parameters into per-field range filters.

    Each ``_range`` arg is a comma-separated triple validated by ``schema``;
    empty/falsy bounds are dropped before the filters are handed to ``func``.
    """
    raw_ranges = request.args.getlist('_range')
    parsed = [schema(raw.split(',')) for raw in raw_ranges]
    range_filters = {}
    for item in parsed:
        bounds = {
            'from': item[1],
            'to': item[2],
        }
        # Keep only truthy bounds, matching the original valfilter(identity, ...)
        range_filters[item[0]] = dicttoolz.valfilter(functoolz.identity, bounds)
    return func(range_filters=range_filters, *args, **kw)
def dig_for_twins(tweets):
    """Returns list of two random twinned tweets, if twins exist"""
    text_to_users = defaultdict(dict)
    for tweet in tweets:
        # Skip retweets and users we have already contacted.
        if 'RT' in tweet.text:
            continue
        if tweet.user.screen_name in CONTACTED_SCREEN_NAMES:
            continue
        text_to_users[tweet.text][tweet.user.screen_name] = (tweet.id, tweet.text)

    # A "twin" is a tweet text posted by exactly two distinct users.
    twins = valfilter(lambda users: len(users) == 2, text_to_users)
    if not twins:
        return None

    chosen_text = random.choice(list(twins.keys()))
    pair = random.sample(list(twins[chosen_text].items()), 2)
    return [Tweet(*flatten(item)) for item in pair]
def _renderDirectory(self, ruleHits, ruleStats, directory, filename):
    """Write one JSON file per rule (its hits) plus an ``index.json`` summary
    into *directory*.

    :param ruleHits: mapping rule -> list of
        ``(entry, hit, filename, origImages, translatedImages)`` tuples
        (tuple shape inferred from the unpacking below — confirm at caller)
    :param ruleStats: mapping rule -> hit count; rules with 0 hits are
        excluded from the index stats
    :param directory: output directory for all generated JSON files
    :param filename: key into ``self.translationURLs``.
        NOTE(review): the comprehension below rebinds ``filename`` per hit,
        shadowing this parameter — the per-hit filename is what actually
        reaches ``translationURLs``; confirm this is intended.
    """
    # Generate output HTML for each rule
    for rule, hits in ruleHits.items():
        # Render hits for individual rule
        outfilePathJSON = os.path.join(directory, rule.machine_name + ".json")
        if len(hits) > 0:
            # Render hits
            # Generate JSON API
            jsonAPI = {
                "timestamp": self.timestamp,
                "downloadTimestamp": self.downloadTimestamp,
                "rule": rule.meta_dict,
                "hits": [valfilter(bool, {"msgstr": entry.msgstr,  # valfilter: remove empty values for smaller JSON
                                          "msgid": entry.msgid,
                                          "tcomment": entry.tcomment,
                                          "hit": hit,
                                          "origImages": origImages,
                                          "translatedImages": translatedImages,
                                          "crowdinLink": "{0}#q={1}".format(self.translationURLs[filename], genCrowdinSearchString(entry))
                                          })
                         for entry, hit, filename, origImages, translatedImages in hits]
            }
            writeJSONToFile(outfilePathJSON, jsonAPI)
        else:
            # Remove file (redirects to 404 file) if there are no exportHitsAsJSON
            if os.path.isfile(outfilePathJSON):
                os.remove(outfilePathJSON)
    # Render file index page (no filelist)
    ruleInfos = [merge(rule.meta_dict, {"num_hits": ruleStats[rule]})
                 for rule in self.rules if ruleStats[rule] > 0]
    ruleInfos.sort(key=lambda o: -o["severity"])  # Invert sort order
    js = {
        "pageTimestamp": self.timestamp,
        "downloadTimestamp": self.downloadTimestamp,
        "stats": ruleInfos,
        # Only files that actually have notices are listed.
        "files": [merge(self.statsByFile[filename], {"filename": filename})
                  for filename in self.files
                  if self.statsByFile[filename]["notices"] > 0]
    }
    writeJSONToFile(os.path.join(directory, "index.json"), js)
def _parse_and_merge_with_context(self, xlref, init_lasso):
    """
    Merges xl-ref parsed-parsed_fields with `init_lasso`, reporting any errors.

    :param Lasso init_lasso: Default values to be overridden by non-nulls.

    :return: a Lasso with any non `None` parsed-fields updated
    """
    assert isinstance(init_lasso.opts, ChainMap), init_lasso

    try:
        parsed_fields = _parse.parse_xlref(xlref)
        # Only non-None parsed fields override the defaults in init_lasso.
        non_null = {key: value
                    for key, value in parsed_fields.items()
                    if value is not None}
        return init_lasso._replace(**non_null)
    except SyntaxError:
        raise
    except Exception as ex:
        msg = "Parsing xl-ref(%r) failed due to: %s"
        log.debug(msg, xlref, ex, exc_info=1)
        raise ValueError(msg % (xlref, ex)) from ex
def test_valfilter(self):
    # valfilter keeps only entries whose *value* satisfies the predicate.
    D, kw = self.D, self.kw
    original = D({1: 2, 2: 3})
    expected = D({1: 2})
    assert valfilter(iseven, original, **kw) == expected
def valfilter(self, f: Callable[[B], bool]) -> 'Map[A, B]':
    """Return a new ``Map`` containing only entries whose value satisfies ``f``."""
    kept = dicttoolz.valfilter(f, self)
    return Map(kept)
def test_valfilter():
    # Only the entry with an even value (1: 2) survives the filter.
    data = {1: 2, 2: 3}
    result = valfilter(iseven, data)
    assert result == {1: 2}
def _renderDirectory(self, ruleHits, ruleStats, directory, filename):
    """Write one JSON file of hits per rule plus an ``index.json`` summary
    into *directory*.

    :param ruleHits: mapping rule -> list of
        ``(entry, hit, filename, origImages, translatedImages)`` tuples
        (tuple shape inferred from the unpacking below — confirm at caller)
    :param ruleStats: mapping rule -> hit count; rules with 0 hits are
        excluded from the index stats
    :param directory: output directory for all generated JSON files
    :param filename: key into ``self.translationURLs``.
        NOTE(review): the comprehension below rebinds ``filename`` per hit,
        shadowing this parameter — the per-hit filename is what actually
        reaches ``translationURLs``; confirm this is intended.
    """
    # Generate output HTML for each rule
    for rule, hits in ruleHits.items():
        # Render hits for individual rule
        outfilePathJSON = os.path.join(directory, rule.machine_name + ".json")
        if len(hits) > 0:
            # Render hits
            # Generate JSON API
            jsonAPI = {
                "timestamp": self.timestamp,
                "downloadTimestamp": self.downloadTimestamp,
                "rule": rule.meta_dict,
                # valfilter: remove empty values for smaller JSON
                "hits": [
                    valfilter(
                        bool, {
                            "msgstr": entry.translated,
                            "msgid": entry.english,
                            "tcomment": entry.note,
                            "hit": hit,
                            "origImages": origImages,
                            "translatedImages": translatedImages,
                            "crowdinLink": "{}#{}".format(self.translationURLs[filename], entry.id)
                        }) for entry, hit, filename, origImages, translatedImages in hits
                ]
            }
            writeJSONToFile(outfilePathJSON, jsonAPI)
        else:
            # Remove file (redirects to 404 file) if there are no exportHitsAsJSON
            if os.path.isfile(outfilePathJSON):
                os.remove(outfilePathJSON)
    # Render file index page (no filelist)
    ruleInfos = [
        merge(rule.meta_dict, {"num_hits": ruleStats[rule]})
        for rule in self.rules if ruleStats[rule] > 0
    ]
    ruleInfos.sort(key=lambda o: -o["severity"])  # Invert sort order
    js = {
        "pageTimestamp": self.timestamp,
        "downloadTimestamp": self.downloadTimestamp,
        "stats": ruleInfos,
        # Only files that actually have notices are listed; statsByFile is
        # keyed by relative path (hence the file_relpath mapping).
        "files": [
            merge(self.statsByFile[filename], {"filename": filename})
            for filename in map(self.file_relpath, self.files)
            if self.statsByFile[filename]["notices"] > 0
        ]
    }
    writeJSONToFile(os.path.join(directory, "index.json"), js)
def attribute_filter_empty(result):
    """Return ``result`` with every ``None``-valued entry removed."""
    def _is_present(value):
        return value is not None

    return valfilter(_is_present, result)
def choose_kwargs(from_, which):
    """Keep entries of ``from_`` whose key is in ``which`` and value is not None."""
    non_null = valfilter(lambda value: value is not None, from_)
    wanted = keyfilter(lambda key: key in which, non_null)
    return wanted
from itertools import filterfalse
from toolz.dicttoolz import keyfilter, valfilter, itemfilter


def is_even(x):
    """Return True when ``x`` is divisible by two."""
    return x % 2 == 0


def both_are_even(x):
    """Return True when both members of a ``(key, value)`` pair are even."""
    k, v = x
    return is_even(k) and is_even(v)


# filterfalse keeps items where the predicate is False (the odd numbers).
print(list(filterfalse(is_even, range(10))))  # [1, 3, 5, 7, 9]
# keyfilter/valfilter/itemfilter filter a dict; listing a dict yields its keys.
print(list(keyfilter(is_even, {1: 2, 2: 3, 3: 4, 4: 5, 5: 6})))  # [2, 4]
print(list(valfilter(is_even, {1: 2, 2: 3, 3: 4, 4: 5, 5: 6})))  # [1, 3, 5]
print(list(itemfilter(both_are_even, {1: 5, 2: 4, 3: 3, 4: 2, 5: 1})))  # [2, 4]
def _yield_configs_and_defaults(self, config, search_terms,
                                merged: bool, ciphered: bool):
    """Yield text lines describing configurable traits and their values.

    :param config: configuration used to instantiate classes when merging
    :param search_terms: optional patterns restricting which trait keys match
    :param merged: when true, report the value read off the instantiated
        object; otherwise report ``repr(trait.default())``
    :param ciphered: when true, keep only traits that are ``crypto.Cipher``
    """
    verbose = self.verbose
    get_classes = (self._classes_inc_parents
                   if verbose else
                   self._classes_with_config_traits)
    all_classes = list(get_classes(self.all_app_configurables()))

    ## Merging needs to visit all hierarchy.
    own_traits = not (verbose or merged)
    search_map = prepare_search_map(all_classes, own_traits)

    if ciphered:
        from . import crypto

        def ciphered_filter(mapval):
            # Returns mapval for Cipher traits, None (implicitly) otherwise,
            # so valfilter below drops non-cipher entries.
            _, trait = mapval
            if isinstance(trait, crypto.Cipher):
                return mapval

        search_map = dtz.valfilter(ciphered_filter, search_map)

    if search_terms:
        matcher = prepare_matcher(search_terms, self.regex)
        search_map = dtz.keyfilter(matcher, search_map)

    items = search_map.items()
    if self.sort:
        items = sorted(items)  # Sort by class-name (traits always sorted).

    classes_configured = {}
    for key, (cls, trait) in items:
        if self.list:
            yield key
            continue
        if not trait:
            ## Not --verbose and class not owning traits.
            continue

        clsname, trtname = key.split('.')

        ## Print own traits only, even when "merge" visits all.
        #
        sup = super(cls, cls)
        if not verbose and getattr(sup, trtname, None) is trait:
            continue

        ## Instanciate classes once, to merge values.
        #
        obj = classes_configured.get(cls)
        if obj is None:
            try:
                ## Exceptional rule for Project-zygote.
                #  TODO: delete when project rule is gone.
                #
                if cls.__name__ == 'Project':
                    # NOTE(review): the new instance is not bound to ``obj``,
                    # which therefore stays None on this branch — confirm
                    # whether ``obj =`` was intended here.
                    cls.new_instance('test', None, config)
                else:
                    obj = cls(config=config)
            except Exception as ex:
                self.log.warning(
                    "Falied initializing class '%s' due to: %r",
                    clsname, ex)

                ## Assign config-values as dummy-object's attributes.
                #  Note: no merging of values now!
                #
                class C:
                    pass
                obj = C()
                obj.__dict__ = dict(config[clsname])
            classes_configured[cls] = obj

        ## Print 1 class-line for all its traits.
        #
        # base_classes = ', '.join(p.__name__ for p in cls.__bases__)
        # NOTE(review): ``base_classes`` is only defined in the commented-out
        # line above, so this yield would raise NameError at runtime —
        # confirm intended source and restore the assignment if needed.
        yield '%s(%s)' % (clsname, base_classes)

        if merged:
            try:
                val = getattr(obj, trtname, '??')
            except trt.TraitError as ex:
                self.log.warning("Cannot merge '%s' due to: %r", trtname, ex)
                val = "<invalid due to: %s>" % ex
        else:
            # Not merging: show the trait's declared default instead.
            val = repr(trait.default())
        yield ' +--%s = %s' % (trtname, val)
def _get_relevant_candidates(self, keywords):
    """Boost each candidate's base score by its keyword-match score and
    drop candidates whose final score is falsy (e.g. zero)."""
    candidates = get_score_candidates(self.raw_html)
    for candidate in candidates:
        bonus = self._calculate_score_by_matching(candidate, keywords)
        candidates[candidate] += bonus
    return dicttoolz.valfilter(bool, candidates)