def getPitches(self, bassPitch, notationString=None, maxPitch=pitch.Pitch('B5')):
    '''
    Takes in a bassPitch, a notationString, and a maxPitch representing the
    highest possible pitch that can be returned. Returns a sorted list of
    pitches which correspond to the pitches of each specific pitch name
    found through getPitchNames that fall between the bassPitch and the
    maxPitch, inclusive of both.

    >>> from music21.figuredBass import realizerScale
    >>> fbScale = realizerScale.FiguredBassScale()

    Root position triad

    >>> [str(p) for p in fbScale.getPitches('C3')]
    ['C3', 'E3', 'G3', 'C4', 'E4', 'G4', 'C5', 'E5', 'G5']

    First inversion triad

    >>> [str(p) for p in fbScale.getPitches('D3', '6')]
    ['D3', 'F3', 'B3', 'D4', 'F4', 'B4', 'D5', 'F5', 'B5']

    Root position seventh chord, showing maxPitch

    >>> fbScale.getPitches(pitch.Pitch('G3'), '7', 'F4')
    [<music21.pitch.Pitch G3>, <music21.pitch.Pitch B3>, <music21.pitch.Pitch D4>, <music21.pitch.Pitch F4>]
    '''
    bassPitch = convertToPitch(bassPitch)
    maxPitch = convertToPitch(maxPitch)
    pitchNames = self.getPitchNames(bassPitch, notationString)
    iter1 = itertools.product(pitchNames, range(maxPitch.octave + 1))
    iter2 = itertools.imap(lambda x: pitch.Pitch(x[0] + str(x[1])), iter1)
    iter3 = itertools.ifilterfalse(lambda samplePitch: bassPitch > samplePitch, iter2)
    iter4 = itertools.ifilterfalse(lambda samplePitch: samplePitch > maxPitch, iter3)
    allPitches = list(iter4)
    allPitches.sort()
    return allPitches

def __init__(self, default_content, sections, source, preview):
    if default_content is None:
        raise ValueError('default_content is required')
    if not (isinstance(default_content, dict) and
            len(list(ifilterfalse(
                lambda __item: isinstance(__item[0], basestring) and
                               isinstance(__item[1], basestring),
                default_content.iteritems()))) == 0):
        raise TypeError(getattr(__builtin__, 'type')(default_content))
    self.__default_content = default_content.copy() if default_content is not None else None

    if sections is None:
        raise ValueError('sections is required')
    if not (isinstance(sections, tuple) and
            len(list(ifilterfalse(lambda _: isinstance(_, basestring),
                                  sections))) == 0):
        raise TypeError(getattr(__builtin__, 'type')(sections))
    self.__sections = sections

    if source is None:
        raise ValueError('source is required')
    if not isinstance(source, basestring):
        raise TypeError(getattr(__builtin__, 'type')(source))
    self.__source = source

    if preview is None:
        raise ValueError('preview is required')
    if not isinstance(preview, basestring):
        raise TypeError(getattr(__builtin__, 'type')(preview))
    self.__preview = preview

def getPitches(self, bassPitch, notationString=None, maxPitch=pitch.Pitch('B5')):
    '''
    Takes in a bassPitch, a notationString, and a maxPitch representing the
    highest possible pitch that can be returned. Returns a sorted list of
    pitches which correspond to the pitches of each specific pitch name
    found through getPitchNames that fall between the bassPitch and the
    maxPitch, inclusive of both.

    >>> from music21.figuredBass import realizerScale
    >>> fbScale = realizerScale.FiguredBassScale()
    >>> fbScale.getPitches('C3')  # Root position triad
    [C3, E3, G3, C4, E4, G4, C5, E5, G5]
    >>> fbScale.getPitches('D3', '6')  # First inversion triad
    [D3, F3, B3, D4, F4, B4, D5, F5, B5]
    >>> fbScale.getPitches(pitch.Pitch('G3'), '7', 'F4')  # Root position seventh chord
    [G3, B3, D4, F4]
    '''
    bassPitch = convertToPitch(bassPitch)
    maxPitch = convertToPitch(maxPitch)
    pitchNames = self.getPitchNames(bassPitch, notationString)
    iter1 = itertools.product(pitchNames, range(maxPitch.octave + 1))
    iter2 = itertools.imap(lambda x: pitch.Pitch(x[0] + str(x[1])), iter1)
    iter3 = itertools.ifilterfalse(lambda samplePitch: bassPitch > samplePitch, iter2)
    iter4 = itertools.ifilterfalse(lambda samplePitch: samplePitch > maxPitch, iter3)
    allPitches = list(iter4)
    allPitches.sort()
    return allPitches

def update(self, *args):
    pygame.sprite.Group.update(self, *args)
    to_kill = []
    for ij, pt in self.points.iteritems():
        pt.num_inbound = len(list(self.find_inbound(ij)))
        pt.num_outbound = len(list(self.find_outbound(ij)))
        if pt.kill_me:
            to_kill.append(ij)
            pt.kill()  # remove from sprite groups
            self.map.map_file.outer[ij[0]][ij[1]].pop('owner')
            ## remove any connections from and to this point ##
            filter = lambda c: c.point_in == pt or c.point_out == pt
            cons_to_rm = ifilter(filter, self.connections)
            print map(lambda c: c.kill(), cons_to_rm)
            self.connections[:] = list(ifilterfalse(filter, self.connections))
            ## remove attack-connections from other grids ##
            for g in self.other_grids:
                filter = lambda c: c.point_out == pt
                cons_to_rm = ifilter(filter, g.connections)
                map(lambda c: c.kill(), cons_to_rm)
                g.connections[:] = list(ifilterfalse(filter, g.connections))
    for ij in to_kill:
        self.points.pop(ij)
        print "killing %s" % (str(ij))
    if len(self.points) == 0:
        self.game_over = True

def get_for_directory(dp, hash_mode="md5", filter_dots=False, filter_func=lambda fp: False):
    r"""
    Returns a hash string for the files below a given directory path.

    :param dp:
        Path to a directory.
    :param hash_mode:
        Can be either one of 'md5', 'sha1', 'sha256' or 'sha512'.
        Defines the algorithm used to generate the resulting hash string.
        Default is 'md5'.
    :param filter_dots:
        If True will filter directories or files beginning with a '.'
        (dot) like '.git'. Default is False.
    :param filter_func:
        A function receiving a path as a single parameter. If it returns
        True the given path will be excluded from the hash calculation.
        Otherwise it will be included.
    """
    hash_func = _HASH_MODE_DICT.get(hash_mode)
    root_dps_fns = os.walk(dp, topdown=True)
    root_dps_fns = itertools.imap(list, root_dps_fns)
    if filter_dots:
        root_dps_fns = itertools.ifilterfalse(_is_dot_root, root_dps_fns)
        root_dps_fns = itertools.imap(_filter_dot_fns, root_dps_fns)
    fps_lists = itertools.imap(_gen_fps, root_dps_fns)
    fps = itertools.chain(*fps_lists)
    fps = itertools.ifilterfalse(filter_func, fps)
    file_handles = itertools.imap(_get_file_handle, fps)
    file_hash_digests = itertools.imap(_get_file_hash_digest, file_handles,
                                       itertools.repeat(hash_func))
    file_hash_digests = sorted(file_hash_digests)
    file_hash_digests = map(_get_utf8_encoded, file_hash_digests)
    hash_ = _get_merged_hash(file_hash_digests, hash_func)
    return hash_.hexdigest()

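# Usage sketch (illustrative, not from the original source): hash a project
# tree with SHA-256 while skipping dot-directories and compiled files. The
# path and the .pyc filter below are hypothetical, and 'sha256' is assumed
# to be a key of _HASH_MODE_DICT.
#
# digest = get_for_directory('/path/to/project', hash_mode='sha256',
#                            filter_dots=True,
#                            filter_func=lambda fp: fp.endswith('.pyc'))
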
def getPitches(pitchNames=['C', 'E', 'G'], bassPitch='C3', maxPitch='C8'):
    '''
    Given a list of pitchNames, a bassPitch, and a maxPitch, returns a
    sorted list of pitches between the two limits (inclusive) which
    correspond to items in pitchNames.

    >>> from music21.figuredBass import segment
    >>> from music21 import pitch
    >>> pitches = segment.getPitches()
    >>> print(', '.join([p.nameWithOctave for p in pitches]))
    C3, E3, G3, C4, E4, G4, C5, E5, G5, C6, E6, G6, C7, E7, G7, C8

    >>> pitches = segment.getPitches(['G', 'B', 'D', 'F'], bassPitch=pitch.Pitch('B2'))
    >>> print(', '.join([p.nameWithOctave for p in pitches]))
    B2, D3, F3, G3, B3, D4, F4, G4, B4, D5, F5, G5, B5, D6, F6, G6, B6, D7, F7, G7, B7

    >>> pitches = segment.getPitches(['F##', 'A#', 'C#'], bassPitch=pitch.Pitch('A#3'))
    >>> print(', '.join([p.nameWithOctave for p in pitches]))
    A#3, C#4, F##4, A#4, C#5, F##5, A#5, C#6, F##6, A#6, C#7, F##7, A#7
    '''
    if isinstance(bassPitch, basestring):
        bassPitch = pitch.Pitch(bassPitch)
    if isinstance(maxPitch, basestring):
        maxPitch = pitch.Pitch(maxPitch)

    iter1 = itertools.product(pitchNames, range(maxPitch.octave + 1))
    iter2 = itertools.imap(lambda x: pitch.Pitch(x[0] + str(x[1])), iter1)
    iter3 = itertools.ifilterfalse(lambda samplePitch: bassPitch > samplePitch, iter2)
    iter4 = itertools.ifilterfalse(lambda samplePitch: samplePitch > maxPitch, iter3)
    allPitches = list(iter4)
    allPitches.sort()
    return allPitches

def found(self, n):
    if n == 0 or not self.num12:
        self.indi = 1
        return
    if len(self.num12) == 1:
        if n in self.num12:
            #self.num12=[]
            self.indi = 1
            return  #destroy
        else:
            return  #destroy
    if n in self.num12:
        self.num12.remove(n)  # return left terms
    else:
        subseq = list(ifilterfalse(lambda x: x > n, self.num12))
        #print subseq
        combcand = []
        iter4 = list(range(2, n))  #;print iter4
        for r in iter4:
            combcand = combcand + list(combinations(subseq, r))  #;print combcand
        candidate = list(ifilterfalse(lambda x: sum(x) != n, combcand))  #;print candidate
        if len(candidate) > 0:
            deleteterms = candidate[0]
            for i in deleteterms:
                self.num12.remove(i)
    return

def possible(n, A):
    if n == 0:
        return A
    if len(A) == 1:
        if n in A:
            return []  # return left terms
        else:
            return A  # return left terms, in this case A itself
    if n in A:
        return list(ifilterfalse(lambda x: x in [n], A))  # return left terms
    else:
        subseq = list(ifilterfalse(lambda x: x > n, A))
        combcand = []
        for r in list(range(2, n)):
            combcand = combcand + list(combinations(subseq, r))
        inttostr = []
        for rs in combcand:
            inttostr.append(rs)
        candidate = list(ifilterfalse(lambda x: sum(x) != n, inttostr))
        # randomly pick a combination; actually, a more reasonable candidate
        # is the first candidate in Lex order
        lenofcand = len(candidate)
        if lenofcand > 0:
            # return the remaining elements after finding a correct
            # combination for number n if we find it, else return A
            return list(ifilterfalse(lambda x: x in candidate[0], A))
        else:
            return A

def _clean_up_author_list(author_list):
    '''Apply translations and blacklist, and get rid of duplicates.
    '''
    # Double check that all names have no leading or trailing whitespace.
    result = map(string.strip, author_list)

    # Remove any blacklisted names.
    result = set(ifilterfalse(_blacklist.__contains__, result))

    # Make sure there are no names in Git without a corresponding translation.
    untranslated = set(ifilterfalse(_translations.keys().__contains__, result))
    if untranslated:
        raise Exception(
            'No translation exists for the following Git author(s): \n' +
            '\n'.join(untranslated) + '\n' +
            'Please edit the translations table accordingly.')

    # Translate all remaining names.
    result = [_translations[a] for a in result]

    # Another check for any blacklisted names, in case we want to remove the
    # translated name.
    result = set(ifilterfalse(_blacklist.__contains__, result))

    # Return the unique list of translated names.
    return sorted(set(result))

def symmetric_difference(self, other):
    """Return the symmetric difference of two sets as a new set.

    (I.e. all elements that are in exactly one of the sets.)
    """
    if not isinstance(other, _set_types):
        other = self.__class__(other)  # preserve the order in other too
    return self.__class__(chain(ifilterfalse(other.__contains__, self),
                                ifilterfalse(self.__contains__, other)))

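# Stand-alone sketch (illustrative) of the same ifilterfalse/chain pattern on
# plain lists; the method above applies it to ordered-set-like objects.
from itertools import chain, ifilterfalse

a, b = [1, 2, 3, 4], [3, 4, 5, 6]
sym = list(chain(ifilterfalse(set(b).__contains__, a),
                 ifilterfalse(set(a).__contains__, b)))
assert sym == [1, 2, 5, 6]  # elements in exactly one of the two lists
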
def test(self):
    wrong = set(itertools.ifilterfalse(self._BUILTIN_NAME_RE.match,
                itertools.ifilterfalse(self._VALID_NAME_RE.match,
                                       dir(constants))))
    wrong -= self._EXCEPTIONS
    self.assertFalse(wrong,
                     msg=("Invalid names exported from constants module: %s" %
                          utils.CommaJoin(sorted(wrong))))

def phase1(self):
    """Compute common names."""
    a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
    b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))
    self.common = map(a.__getitem__, ifilter(b.has_key, a))
    self.left_only = map(a.__getitem__, ifilterfalse(b.has_key, a))
    self.right_only = map(b.__getitem__, ifilterfalse(a.has_key, b))

def repo_files(root_path, skip):
    assert not root_path.endswith('/')
    for parent_path, dir_names, file_names in os.walk(root_path):
        parent_rel_path = parent_path[len(root_path):]
        if parent_rel_path == '':
            dir_names.remove('.mf')
        dir_names[:] = ifilterfalse(skip, dir_names)
        for name in ifilterfalse(skip, file_names):
            yield (parent_rel_path + '/' + name)[1:]

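# Usage sketch (illustrative): list repository files while pruning hidden
# entries. The path is hypothetical, and the root is expected to contain a
# '.mf' directory, as the function above assumes.
#
# for rel_path in repo_files('/path/to/repo', lambda name: name.startswith('.')):
#     print rel_path
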
def get_mail_templates(self, types=None):
    if types is not None:
        if not (isinstance(types, frozenset) and
                len(list(ifilterfalse(
                    lambda _: isinstance(_, yogento.api.models.mail.template.mail_template_type.MailTemplateType),
                    types))) == 0):
            raise TypeError(getattr(__builtin__, 'type')(types))
    get_mail_templates_return_value = self._get_mail_templates(types=types)
    if not (isinstance(get_mail_templates_return_value, frozenset) and
            len(list(ifilterfalse(
                lambda _: isinstance(_, yogento.api.models.mail.template.mail_template.MailTemplate),
                get_mail_templates_return_value))) == 0):
        raise TypeError(getattr(__builtin__, 'type')(get_mail_templates_return_value))
    return get_mail_templates_return_value

def preprocess(wordlist, stopwords=True, digits=True, stem=True):
    """Perform preprocessing on a list of words.

    The various arguments to this function allow one to turn off certain
    preprocessing steps.

    :param bool stopwords: If True, remove stopwords.
    :param bool digits: If True, remove words that start with digits.
    :param bool stem: If True, stem words using a Porter stemmer.
    """
    if stopwords:
        wordlist = it.ifilterfalse(is_stopword, wordlist)
    if digits:
        wordlist = it.ifilterfalse(starts_with_digits, wordlist)
    if stem:
        wordlist = it.imap(stem_word, wordlist)
    return wordlist

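# Illustrative call (assumes is_stopword, starts_with_digits and stem_word
# are defined alongside preprocess; the token list and stems are made up):
#
# tokens = preprocess(['the', '2nd', 'parsers', 'parsing'])
# list(tokens)  # e.g. ['parser', 'pars'], depending on the stemmer
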
def _innerCalc():
    l = len(distanceMatrix)
    for i in ifilterfalse(lambda x: x == node, range(l - 1)):
        for k in ifilterfalse(lambda x: x == node, range(i + 1, l)):
            # print "Inner Calc: {0} : {1}".format(i,k)
            try:
                yield ((distanceMatrix[i][node] + distanceMatrix[k][node]
                        - distanceMatrix[i][k]) / 2)
            except IndexError as e:
                print "Index Error: {0} {1} {2}".format(i, k, node)
                raise e

def test_iterables(self):
    import itertools
    iterables = [
        itertools.chain(),
        itertools.count(),
        itertools.cycle([]),
        itertools.dropwhile(bool, []),
        itertools.groupby([]),
        itertools.ifilter(None, []),
        itertools.ifilterfalse(None, []),
        itertools.imap(None, []),
        itertools.islice([], 0),
        itertools.izip(),
        itertools.repeat(None),
        itertools.starmap(bool, []),
        itertools.takewhile(bool, []),
        itertools.tee([])[0],
        itertools.tee([])[1],
    ]
    for it in iterables:
        assert hasattr(it, '__iter__')
        assert iter(it) is it
        assert hasattr(it, 'next')
        assert callable(it.next)

def entries_without_errors(self, gt_entries):
    return list(itertools.ifilterfalse(lambda x: x.has_key('exceptions'), gt_entries))

# def leave_allowed_account_type_titles(self, gt_entries):  # required resolved acc types
#     ignored_account_type_titles = ['Archive', 'Offline integration']
#     f = lambda gt_entry: gt_entry["account_type_obj"].title in ignored_account_type_titles
#     return list(itertools.ifilterfalse(f, gt_entries))
#
# def leave_not_allowed_account_type_titles(self, gt_entries):  # required resolved acc types
#     ignored_account_type_titles = ['Archive', 'Offline integration']
#     f = lambda gt_entry: gt_entry["account_type_obj"].title in ignored_account_type_titles
#     return list(itertools.ifilter(f, gt_entries))
#
# def leave_allowed_accounts(self, entries_with_allowed_account_types):
#     ignored_account_names = [obj.account.name for obj in IgnoredAccount.objects.all()]  # todo check by account type
#     print ignored_account_names
#     rez = []
#     for gt_entry in entries_with_allowed_account_types:
#         account_name = gt_entry['pfn'].rsplit("/", 1)[1]
#         if account_name in ignored_account_names:
#             print "skipping", account_name
#             continue
#         rez.append(gt_entry)
#     return rez
#
# def leave_not_allowed_accounts(self, entries_with_allowed_account_types):
#     ignored_account_names = [obj.account.name for obj in IgnoredAccount.objects.all()]
#     rez = []
#     for gt_entry in entries_with_allowed_account_types:
#         account_name = gt_entry['pfn'].rsplit("/", 1)[1]
#         if account_name not in ignored_account_names:
#             continue
#         rez.append(gt_entry)
#     return rez

def test_ifilterfalse_wrongargs(self):
    import itertools

    it = itertools.ifilterfalse(0, [1])
    raises(TypeError, it.next)

    raises(TypeError, itertools.ifilterfalse, bool, None)

def __init__(self, fields=None, **kwargs):
    # Some helpful functions
    is_basefield = lambda field: isinstance(field, BaseField)
    is_embeddeddoc = lambda field: isinstance(field, EmbeddedDocumentField)
    is_dictfield = lambda field: isinstance(field, DictField)

    # field instance
    #if isinstance(fields, BaseField):
    if is_basefield(fields):
        #if isinstance(fields, EmbeddedDocumentField):
        if is_embeddeddoc(fields):
            kwargs.setdefault('primary_embedded', fields)
        fields = [fields]
    # something other than a list
    elif not isinstance(fields, list):
        raise InvalidShield('Argument to ListField constructor must be '
                            'a valid field or list of fields')
    # some bad stuff in the list
    elif list(ifilterfalse(is_basefield, fields)):
        raise InvalidShield('Argument to ListField constructor must be '
                            'a valid field or list of valid fields')
    else:
        docs = filter(is_embeddeddoc, fields)
        dicts = filter(is_dictfield, fields)
        if dicts:
            kwargs.setdefault('primary_embedded', None)
        if docs:
            kwargs.setdefault('primary_embedded', docs[0])

    self.fields = fields
    kwargs.setdefault('default', list)
    self.primary_embedded = kwargs.pop('primary_embedded', None)
    super(ListField, self).__init__(**kwargs)

def ignore(context):
    '''.ignore nick!user@host'''
    bot.config.setdefault('IGNORE', [])
    if context.args:
        to_ignore = glob(context.args)
        supersets = list(ifilter(lambda ignored: to_ignore.issub(glob(ignored)),
                                 bot.config['IGNORE']))
        if len(supersets) > 0:
            return 'Not ignoring \x02%s\x02 because it is already matched by \x02%s\x02' % (
                context.args, supersets[0])
        filter = lambda ignored: to_ignore.issuper(glob(ignored))
        removed = list(ifilter(filter, bot.config['IGNORE']))
        bot.config['IGNORE'] = list(ifilterfalse(filter, bot.config['IGNORE']))
        bot.config['IGNORE'].append(context.args)
        save_ignores()
        bot.log(context, ('IGNORE'), '+{0}{1}'.format(
            context.args, (' -' + ' -'.join(removed)) if removed else ''))
        if removed:
            return 'Ignored and removed \x02%d\x02 redundant ignores: \x02%s\x02' % (
                len(removed), '\x02, \x02'.join(removed))
        else:
            return 'Ignored.'
    else:
        return eval.__doc__

def check(status):
    """Check the status of Exhibitor and raise CheckException if error."""
    node_serving = lambda node: node["description"] == "serving"

    # how many nodes are down
    down_nodes = len(list(itertools.ifilterfalse(node_serving, status)))
    max_down = (len(status) - 1) / 2
    if down_nodes:
        if max_down > down_nodes:
            raise WarningException(
                "There are %s down nodes. We can handle a maximum of %s." % (
                    down_nodes, max_down))
        else:
            raise CriticalException("There are %s down nodes." % down_nodes)

    # how many nodes are up
    up_nodes = len(list(itertools.ifilter(node_serving, status)))
    if not up_nodes:
        raise CriticalException("There are no up nodes.")

    # at least one node is leader
    has_leader = any(node["isLeader"] for node in status)
    if not has_leader:
        raise CriticalException("There appears to be no leader.")

def __init__(
    self,
    elements,
    display=None,
    notes=None,
):
    '''
    :type elements: tuple(costume.api.models.closure.closure.Closure)
    :type display: str or None
    :type notes: str or None
    '''
    if elements is None:
        raise ValueError('elements is required')
    if not (isinstance(elements, tuple) and
            len(list(ifilterfalse(
                lambda _: isinstance(_, costume.api.models.closure.closure.Closure),
                elements))) == 0):
        raise TypeError("expected elements to be a tuple(costume.api.models.closure.closure.Closure) but it is a %s" % getattr(__builtin__, 'type')(elements))
    if len(elements) < 1:
        raise ValueError("expected len(elements) to be >= 1, was %d" % len(elements))
    self.__elements = elements

    if display is not None:
        if not isinstance(display, basestring):
            raise TypeError("expected display to be a str but it is a %s" % getattr(__builtin__, 'type')(display))
        if len(display) < 1:
            raise ValueError("expected len(display) to be >= 1, was %d" % len(display))
    self.__display = display

    if notes is not None:
        if not isinstance(notes, basestring):
            raise TypeError("expected notes to be a str but it is a %s" % getattr(__builtin__, 'type')(notes))
        if len(notes) < 1:
            raise ValueError("expected len(notes) to be >= 1, was %d" % len(notes))
    self.__notes = notes

def _all(seq):
    """Returns True if all elements in the iterable are True.
    """
    for _ in itertools.ifilterfalse(bool, seq):
        return False
    return True

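# Quick sanity check for _all (illustrative sketch): it mirrors the builtin
# all(), and ifilterfalse stops at the first falsy element.
if __name__ == '__main__':
    assert _all([1, 2, 3]) is True
    assert _all([1, 0, 3]) is False
    assert _all([]) is True  # vacuously true
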
def unique_everseen(iterable, key=None):
    """
    List unique elements, preserving order. Remember all elements ever seen.

    http://docs.python.org/library/itertools.html#recipes
    """
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    try:
        from itertools import ifilterfalse
    except:
        from itertools import filterfalse

    seen = set()
    seen_add = seen.add
    if key is None:
        try:
            for element in ifilterfalse(seen.__contains__, iterable):
                seen_add(element)
                yield element
        except:
            for element in filterfalse(seen.__contains__, iterable):
                seen_add(element)
                yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element

def compute_testing_installability(self):
    """Computes the installability of packages in testing

    This method computes the installability of all packages in testing
    and caches the result.  This has the advantage of making
    "is_installable" queries very fast for all packages in testing.
    """
    check_inst = self._check_inst
    cbroken = self._cache_broken
    cache_inst = self._cache_inst
    eqv_table = self._eqv_table
    testing = self._testing
    tcopy = [x for x in testing]
    for t in ifilterfalse(cache_inst.__contains__, tcopy):
        if t in cbroken:
            continue
        res = check_inst(t)
        if t in eqv_table:
            eqv = (x for x in eqv_table[t] if x in testing)
            if res:
                cache_inst.update(eqv)
            else:
                eqv_set = frozenset(eqv)
                testing -= eqv_set
                cbroken |= eqv_set

def __init__(self, fields, **kwargs):
    # Some helpful functions
    is_basetype = lambda tipe: isinstance(tipe, BaseType)
    is_model = lambda tipe: isinstance(tipe, ModelType)
    is_dicttype = lambda tipe: isinstance(tipe, DictType)

    # field instance
    if is_basetype(fields):
        if is_model(fields):
            kwargs.setdefault('primary_embedded', fields)
        fields = [fields]
    # something other than a list
    elif not isinstance(fields, list):
        raise TypeException('Argument to ListType constructor must be '
                            'a valid field or list of fields',
                            self.field_name, list)
    # some bad stuff in the list
    elif list(ifilterfalse(is_basetype, fields)):
        raise TypeException('Argument to ListType constructor must be '
                            'a valid field or list of valid fields',
                            self.field_name, list)
    else:
        models = filter(is_model, fields)
        dicts = filter(is_dicttype, fields)
        if dicts:
            kwargs.setdefault('primary_embedded', None)
        if models:
            kwargs.setdefault('primary_embedded', models[0])

    self.fields = fields
    kwargs.setdefault('default', list)
    self.primary_embedded = kwargs.pop('primary_embedded', None)
    super(ListType, self).__init__(**kwargs)

def clean_article_ids(self):
    article_ids = self.cleaned_data["article_ids"].split("\n")
    article_ids = filter(bool, map(unicode.strip, article_ids))

    # Parse all article ids as integer
    try:
        article_ids = map(int, article_ids)
    except ValueError:
        offender = repr(next(ifilterfalse(unicode.isnumeric, article_ids)))
        raise ValidationError("{offender} is not an integer".format(**locals()))

    # Check if they can be chosen
    articlesets = self.cleaned_data["articlesets"]
    distinct_args = ["id"] if db_supports_distinct_on() else []
    all_articles = Article.objects.filter(articlesets_set__in=articlesets).distinct(*distinct_args)
    chosen_articles = Article.objects.filter(id__in=article_ids).distinct(*distinct_args)
    intersection = all_articles & chosen_articles

    if chosen_articles.count() != intersection.count():
        # Find offenders (skipping non-existing, which we can only find when
        # fetching all possible articles)
        existing = all_articles.values_list("id", flat=True)
        offenders = chosen_articles.exclude(id__in=existing).values_list("id", flat=True)
        raise ValidationError(
            ("Articles {offenders} not in chosen articlesets or some non-existent"
             .format(**locals())), code="invalid")

    return article_ids

def rebase(acl, node, base=None, discard_old_mode=False):
    'Rebase given ACL lines on top of ones, generated from mode'
    acl = canonized(acl)

    # ACL base
    if not base and not base == 0:  # get current base, if unspecified
        base = filter(_mode, get(node, mode_filter=True,
                                 acl_type=ACL_TYPE_ACCESS))
    else:  # convert given mode to a canonical base-ACL
        if not isinstance(base, (int, long)):
            try:
                base = mode(base)
            except Error:
                pass
        base = from_mode(int(base))

    # Access ACL
    ext = it.ifilterfalse(_def_get, acl)
    stracl_set(
        '\n'.join(update(ext, base) if discard_old_mode else update(base, ext)),
        node, ACL_TYPE_ACCESS)

    # Default ACL
    if isinstance(node, types.StringTypes) and os.path.isdir(node):
        ext = it.imap(_def_strip, it.ifilter(_def_get, acl))
        stracl_set(
            '\n'.join(update(ext, base) if discard_old_mode else update(base, ext)),
            node, ACL_TYPE_DEFAULT)

def unique_everseen(iterable, key=None):
    # note: the `key` parameter is accepted but not used by this variant
    seen = set()
    seen_add = seen.add
    for element in itertools.ifilterfalse(seen.__contains__, iterable):
        #print element
        seen_add(element)
        yield element

def mean(l, ignore_nan=False, empty=0):
    """
    nanmean compatible with generators.
    """
    l = iter(l)
    if ignore_nan:
        l = ifilterfalse(np.isnan, l)
    try:
        n = 1
        acc = next(l)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    for n, v in enumerate(l, 2):
        acc += v
    if n == 1:
        return acc
    return acc / n

def _get_categories(self, *optional_category):
    # why the auto return? current porttrees don't allow/support
    # categories deeper than one dir.
    if optional_category:
        #raise KeyError
        return ()
    cats = self.hardcoded_categories
    if cats is not None:
        return cats
    try:
        return tuple(imap(intern, ifilterfalse(
            self.false_categories.__contains__,
            (x for x in listdir_dirs(self.base) if x[0:1] != "."))))
    except EnvironmentError as e:
        raise_from(KeyError("failed fetching categories: %s" % str(e)))

def mean(values, ignore_nan=False, empty=0):
    """
    Nanmean compatible with generators.
    """
    values = iter(values)
    if ignore_nan:
        values = ifilterfalse(isnan, values)
    try:
        n = 1
        acc = next(values)
    except StopIteration:
        if empty == "raise":
            raise ValueError("Empty mean")
        return empty
    for n, v in enumerate(values, 2):  # noqa: B007
        acc += v
    if n == 1:
        return acc
    return acc / n

def mean(it, ignore_nan=False, empty=0):
    """
    nanmean compatible with generators.
    """
    it = iter(it)
    if ignore_nan:
        it = ifilterfalse(isnan, it)
    try:
        n = 1
        acc = next(it)
    except StopIteration:
        if empty == "raise":
            raise ValueError("Empty mean")
        return empty
    for n, v in enumerate(it, 2):
        acc += v
    if n == 1:
        return acc
    return acc / n

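# Quick check of the generator-friendly mean above (illustrative sketch;
# relies on the same isnan/ifilterfalse imports the function itself uses):
if __name__ == '__main__':
    assert mean(iter([1.0, 2.0, 3.0])) == 2.0
    assert mean([1.0, float('nan'), 3.0], ignore_nan=True) == 2.0
    assert mean([], empty=0) == 0  # empty input falls back to `empty`
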
# Note: under Python 2 this module needs `from __future__ import print_function`
# for print() and map(print, ...) to work as written.
def installs(requirements):
    print("Installs:")
    map(print, map("- {}".format, requirements))


def install(requirement):
    package, version = requirement.split('==')
    print("Installing '{}:{}'".format(
        package,
        version,
    ))
    pip_success = pip(package, version)
    return pip_success


# `requirements` is assumed to be defined by the surrounding module.
failed_requirements = list(ifilterfalse(install, requirements))
installs(failed_requirements)

def __call__(self, eventContext):
    # Make sure summary and message are populated
    if not eventContext.event.HasField('message') and \
            eventContext.event.HasField('summary'):
        eventContext.event.message = eventContext.event.summary
    elif not eventContext.event.HasField('summary') and \
            eventContext.event.HasField('message'):
        eventContext.event.summary = eventContext.event.message[:255]

    missingFields = ','.join(ifilterfalse(eventContext.event.HasField,
                                          self.REQUIRED_EVENT_FIELDS))
    if missingFields:
        raise DropEvent('Required event fields %s not found' % missingFields,
                        eventContext.event)

    return eventContext

def unique_everseen(iterable, key=None):
    """Return iterator of unique elements ever seen with preserving order.

    From: https://docs.python.org/3/library/itertools.html#itertools-recipes

    Examples
    --------
    >>> from pygimli.utils import unique_everseen
    >>> s1 = 'AAAABBBCCDAABBB'
    >>> s2 = 'ABBCcAD'
    >>> list(unique_everseen(s1))
    ['A', 'B', 'C', 'D']
    >>> list(unique_everseen(s2, key=str.lower))
    ['A', 'B', 'C', 'D']

    See also
    --------
    unique, unique_rows
    """
    try:
        from itertools import ifilterfalse
    except BaseException as _:
        from itertools import filterfalse

    seen = set()
    seen_add = seen.add
    if key is None:
        try:
            for element in ifilterfalse(seen.__contains__, iterable):
                seen_add(element)
                yield element
        except BaseException as _:
            for element in filterfalse(seen.__contains__, iterable):
                seen_add(element)
                yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element

def partition(iterable, pred):
    """
    Split iterable into two partitions based on predicate function

    >>> pred = lambda x: x < 6
    >>> smaller, larger = partition(xrange(10), pred)
    >>> list(smaller)
    [0, 1, 2, 3, 4, 5]
    >>> list(larger)
    [6, 7, 8, 9]

    :param iterable: Any iterable, e.g. list, xrange, ...
    :param pred: Predicate function.
    :return: Partition iterators
    :rtype: Two iterators
    """
    t1, t2 = itt.tee(iterable)
    return itt.ifilter(pred, t1), itt.ifilterfalse(pred, t2)

def parse_migration_file(migration_file_content, table_name=r'.*?'):
    """Takes migration file content, returns columns..."""
    info_mo = re.search(
        r'Schema::create\([\'"]'
        r'(?P<table_name>\w+)[\'"].*?\).*?\{'
        r'(?P<column_info>[\s\S]+?)\}\);',
        migration_file_content)
    info = info_mo.groupdict()
    column_dicts = list(parse_columns(info['column_info']))
    slugless_column_dicts = list(
        ifilterfalse(lambda d_: d_['name'] in ['slug', 'identifier'],
                     column_dicts))
    return {
        'table_name': info['table_name'],
        'columns': column_dicts,
        'slugless_columns': slugless_column_dicts,
        'foreign_key_info': parse_foreign_key_info(info['column_info'],
                                                   table_name)
    }

def wrapper(*args, **kwds):
    # cache key records both positional and keyword args
    key = args
    if kwds:
        key += (kwd_mark,) + tuple(sorted(kwds.items()))
    # get cache entry or compute if not found
    try:
        result = cache[key]
        wrapper.hits += 1
    except KeyError:
        # purge least recently used cache entry
        if len(cache) >= maxsize:
            dkey = queue_popleft()
            refcount[dkey] -= 1
            # remove any duplicate entries in the queue
            while refcount[dkey]:
                dkey = queue_popleft()
                refcount[dkey] -= 1
            if destructor is not None:
                destructor(cache[dkey])
            del cache[dkey], refcount[dkey]
        # record recent use of this key
        queue_append(key)
        refcount[key] += 1
        result = user_function(*args, **kwds)
        cache[key] = result
        wrapper.misses += 1
        # periodically compact the queue by eliminating duplicate keys
        # while preserving order of most recent access
        if len(queue) > maxqueue:
            refcount.clear()
            queue_appendleft(sentinel)
            for key in ifilterfalse(refcount.__contains__,
                                    iter(queue_pop, sentinel)):
                queue_appendleft(key)
                refcount[key] = 1
    return result

def refresh(self):
    NormalSpoke.refresh(self)

    self.disks = getDisks(self.storage.devicetree)
    self.selected_disks = self.data.ignoredisk.onlyuse[:]

    self.ancestors = itertools.chain(*map(self._real_ancestors, self.disks))
    self.ancestors = map(lambda d: d.name, self.ancestors)

    self._store.clear()

    allDisks = []
    multipathDisks = []
    otherDisks = []
    raidDisks = []
    zDisks = []

    # Now add all the non-local disks to the store.  Everything has been set up
    # ahead of time, so there's no need to configure anything.  We first make
    # these lists of disks, then call setup on each individual page.  This is
    # because there could be page-specific setup to do that requires a complete
    # view of all the disks on that page.
    for disk in itertools.ifilterfalse(isLocalDisk, self.disks):
        if self.pages[1].ismember(disk):
            multipathDisks.append(disk)
        elif self.pages[2].ismember(disk):
            otherDisks.append(disk)
        elif self.pages[3].ismember(disk):
            raidDisks.append(disk)
        elif self.pages[4].ismember(disk):
            zDisks.append(disk)

        allDisks.append(disk)

    self.pages[0].setup(self._store, self.selected_disks, allDisks)
    self.pages[1].setup(self._store, self.selected_disks, multipathDisks)
    self.pages[2].setup(self._store, self.selected_disks, otherDisks)
    self.pages[3].setup(self._store, self.selected_disks, raidDisks)
    self.pages[4].setup(self._store, self.selected_disks, zDisks)

    self._update_summary()

def _ref_key(self, key):
    """Record a reference to the argument key."""
    queue = self.queue
    refcount = self.refcount
    queue.append(key)
    refcount[key] = refcount[key] + 1

    # periodically compact the queue by eliminating duplicate keys
    # while preserving order of most recent access.  Note that this
    # is only required when the cache does not exceed its maximum size
    if len(queue) > self.max_queue:
        refcount.clear()
        queue_appendleft = queue.appendleft
        queue_appendleft(self.sentinel)
        for k in ifilterfalse(refcount.__contains__,
                              iter(queue.pop, self.sentinel)):
            queue_appendleft(k)
            refcount[k] = 1

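# Stand-alone sketch of the compaction idiom above (illustrative): popping
# from the right via iter(queue.pop, sentinel) while ifilterfalse keeps only
# the first occurrence seen, i.e. the most recent access of each key.
from collections import deque
from itertools import ifilterfalse

queue, sentinel = deque(['a', 'b', 'a', 'c', 'b', 'a']), object()
refcount = {}
queue.appendleft(sentinel)
for k in ifilterfalse(refcount.__contains__, iter(queue.pop, sentinel)):
    queue.appendleft(k)
    refcount[k] = 1
assert list(queue) == ['c', 'b', 'a']  # unique keys, oldest to most recent
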
def unique(seq, key=None):
    '''
    >>> list(unique('ABBA'))
    ['A', 'B']
    >>> list(unique('ABBAabba'))
    ['A', 'B', 'a', 'b']
    >>> list(unique('ABBAabba', key=str.lower))
    ['A', 'B']
    '''
    seen = set()
    if key:
        for elem in seq:
            if key(elem) not in seen:
                seen.add(key(elem))
                yield elem
    else:
        for elem in itertools.ifilterfalse(seen.__contains__, seq):
            seen.add(elem)
            yield elem

def unique_everseen(iterable, key=None):
    """
    List unique elements, preserving order. Remember all elements ever seen.

    >>> list(unique_everseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D']
    >>> list(unique_everseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'D']
    """
    seen = set()
    seen_add = seen.add
    if key is None:
        for element in ifilterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element

def update_table(con, table, new_data, temp_id_name):
    pk = list(table.primary_key.columns)[0].name
    xs1, xs2 = itertools.tee(iter(new_data))

    def p(x):
        return pk in x and x[pk] is not None

    new_items, changed_items = (list(itertools.ifilterfalse(p, xs1)),
                                list(itertools.ifilter(p, xs2)))
    new_item_ids = set()
    if new_items:
        statement = table.insert()
        for new_item in new_items:
            fixed_item = dict((k, v) for k, v in new_item.iteritems()
                              if k not in (pk, temp_id_name))
            new_key = con.execute(statement, fixed_item).inserted_primary_key
            new_item[pk] = new_key[0]
            new_item_ids.add(new_key[0])
    if changed_items:
        con.execute(table.update().where(table.c[pk] == bindparam('_id')),
                    *[dict(_id=x[pk], **x) for x in changed_items])
    return new_items, new_item_ids

def get_frame(self):
    raw = self._get_raw_frame().strip(self.MARKER_END_LINE)
    try:
        groups = [line.split(" ", 2)
                  for line in raw.split(self.MARKER_END_LINE)]
        frame = dict([(k, v) for k, v, chksum in groups
                      if chksum == self._checksum(k, v)])
        if len(frame) != len(groups):
            logger.info(
                "Discarded fields because of bad checksum: {}".format([
                    f for f in itertools.ifilterfalse(
                        lambda g: g[2] == self._checksum(g[0], g[1]),
                        groups)
                ]))
    except Exception as e:
        logger.error(
            "Caught exception while parsing teleinfo frame: {}".format(e))
        frame = {}
    return frame

def unique_everseen(iterable, key=None):
    """
    http://stackoverflow.com/a/12897501

    "List unique elements, preserving order. Remember all elements ever seen."

    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    """
    seen = set()
    seen_add = seen.add
    if key is None:
        for element in ifilterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element

def unique_everseen(iterable, key=None):
    """Yield unique elements, preserving order.

    >>> list(unique_everseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D']
    >>> list(unique_everseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'D']

    refer to:
    https://github.com/erikrose/more-itertools/blob/master/more_itertools/recipes.py
    """
    seen = set()
    seen_add = seen.add
    if key is None:
        for element in ifilterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element

def dir_statistic(path):
    """Counts all files, subdirectories and their total size under the given
    `path` recursively. Symbolic links to files and directories are ignored.

    :returns: a tuple with directory count, file count and total size.
    :rtype: (int, int, int)
    """
    dir_count = 0
    file_count = 0
    total_size = 0
    for pathname, dirnames, filenames in os.walk(path):
        dir_count += len(dirnames)
        file_count += len(filenames)
        fullnames = (os.path.join(pathname, x)
                     for x in chain(dirnames, filenames))
        total_size += sum(
            imap(os.path.getsize, ifilterfalse(os.path.islink, fullnames)))
    return (dir_count, file_count, total_size)

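# Usage sketch (illustrative): summarize the tree under the current directory.
if __name__ == '__main__':
    dirs, files, size = dir_statistic('.')
    print "%d dirs, %d files, %d bytes" % (dirs, files, size)
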
def dump_context(v, context, mode):
    # note: the original compared with `mode is 1`; identity tests on int
    # literals are unreliable, so compare with == instead.
    if mode == 1:
        for c in context:
            print c
    elif mode == 2:
        s = hex_reverse(v)
        test = frozenset(
            ifilterfalse(lambda t: '0000' == t,
                         (s[i:i + 4] for i in range(0, len(s) - 2, 2))))
        for c in context:
            o = c[-1][0]
            for i in range(0, len(o) - 2, 2):
                if o[i:i + 4] in test:
                    print '**', o[i:i + 4],
                    break
            print c
    else:
        print "Error: dump mode not set!"
    print

def populate_synonyms_for_molecule(self, molecule_set, molecules_syn_dict):
    def _append_to_mol2syn(m2s_dict, molecule):
        """If the molecule has synonyms, create a clean entry in m2s_dict
        with all synonyms for that chembl_id. Returns None if all goes ok,
        or the molecule chembl id if something is wrong."""
        if 'molecule_synonyms' in molecule and molecule['molecule_synonyms']:
            synonyms = []
            for syn in molecule['molecule_synonyms']:
                synonyms.append(syn['synonyms'])
                synonyms.append(syn['molecule_synonym'])
            synonyms = list(set(synonyms))
            m2s_dict[molecule['molecule_chembl_id']] = synonyms
            return None
        else:
            return molecule['molecule_chembl_id']

    if not molecule_set or not len(molecule_set):
        self._logger.warn("No molecules in set")
        return

    data = {'molecules': []}
    for mol_k in molecule_set:
        if self.molecules_dict.has_key(mol_k):
            data['molecules'].append(self.molecules_dict[mol_k])
        else:
            raise ValueError(
                'problem retrieving the molecule info from the local db',
                str(mol_k))

    # if the data is what we expected, process it
    if 'molecules' in data:
        map_f = functools.partial(_append_to_mol2syn, molecules_syn_dict)
        mols_without_syn = \
            list(itertools.ifilterfalse(lambda mol: mol is None,
                                        itertools.imap(map_f, data['molecules'])))
        if mols_without_syn:
            self._logger.debug('molecule list with no synonyms %s',
                               str(mols_without_syn))
    else:
        self._logger.error("there is no 'molecules' key in the structure")
        raise RuntimeError("unexpected chembl API response")

def iter_rows(self, data):
    """
    Get an iterator over rows extracted from the raw data body.

    Args:
        `data` (dict):
            As returned by prepare_data() (especially, its 'raw' item
            contains the raw data body).

    Returns:
        An iterator over data rows (each being an str).

    The default implementation of this method does the right thing for
    most cases.  Typically, this method is used indirectly -- being
    called in parse().
    """
    raw_rows = StringIO(data['raw'])
    if self.skip_first_row:
        next(raw_rows)
    if not self.disable_row_rstrip:
        raw_rows = itertools.imap(str.rstrip, raw_rows)
        if self.skip_blank_rows:
            raw_rows = itertools.ifilter(None, raw_rows)
    else:
        raw_rows = itertools.imap(
            # remove newline characters
            operator.methodcaller('rstrip', '\r\n'),
            raw_rows)
        if self.skip_blank_rows:
            raw_rows = itertools.ifilter(str.rstrip, raw_rows)
    if self.required_row_prefixes is not None:
        raw_rows = itertools.ifilter(
            operator.methodcaller('startswith', self.required_row_prefixes),
            raw_rows)
    if self.ignored_row_prefixes is not None:
        raw_rows = itertools.ifilterfalse(
            operator.methodcaller('startswith', self.ignored_row_prefixes),
            raw_rows)
    return raw_rows

def unique_everseen(iterable, key=None):
    "List unique elements, preserving order. Remember all elements ever seen."
    seen = set()
    seen_add = seen.add
    if key is None:
        try:
            for element in itertools.ifilterfalse(seen.__contains__, iterable):
                seen_add(element)
                yield element
        except:
            for element in iterable:
                if element not in seen:
                    seen_add(element)
                    yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element

def array_nth_index(array, n, inverse_filter=False, list_form=True):
    '''
    Description:
        Takes an input array and outputs a list corresponding to every nth
        value of the input array.
    '''
    try:
        # Filter on the enumerated position rather than array.index(x),
        # which always returns the *first* occurrence and therefore breaks
        # on arrays containing duplicate values.
        if inverse_filter:
            out_object = itertools.ifilter(lambda ix: ix[0] % n,
                                           enumerate(array))
        else:
            out_object = itertools.ifilterfalse(lambda ix: ix[0] % n,
                                                enumerate(array))
        out_object = (x for i, x in out_object)
        if list_form:
            return list(out_object)
        else:
            return out_object
    except:
        return False

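# Example (illustrative): keep every 3rd element, counting from index 0.
if __name__ == '__main__':
    assert array_nth_index(list('abcdefg'), 3) == ['a', 'd', 'g']
    assert array_nth_index(list('abcdefg'), 3, inverse_filter=True) == \
        ['b', 'c', 'e', 'f']
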
def static(instrs, symbol_table=None, libraries=()):
    symbol_table = SymbolTable() if symbol_table is None else symbol_table
    references = {}
    for instr in instrs:
        for o in operns(instr, ()):
            if isinstance(referenced_obj(o, None), object_file.Reference):
                references[referenced_obj(o).name] = o
        yield instr

    for ref_name in ifilterfalse(symbol_table.__contains__,
                                 references.iterkeys()):
        try:
            l = next(ifilter(lambda lib, ref_name=ref_name: ref_name in lib,
                             libraries))
            for instr in static(binaries(l[ref_name], symbol_table),
                                symbol_table, libraries):
                yield instr
        except StopIteration as _:
            raise ValueError('{l} Could not locate symbol {s}'.format(
                l=loc(references[ref_name]), s=ref_name))

def _apply_tag_filters(results, tokens):
    """Consumes and applies tag filters (e.g. "tag:python") to search results.

    Arguments:
        results: List of search result plugins.
        tokens: Remaining search text tokens that have not been consumed.

    Returns:
        (results, tokens): Results that match the given tag, and tokens
        that have not been consumed.
    """
    tag_filter = lambda t: t.startswith('tag:')
    tag_tokens = filter(tag_filter, tokens)
    tokens = list(itertools.ifilterfalse(tag_filter, tokens))
    if tag_tokens:
        required_tags = set(t[len('tag:'):] for t in tag_tokens)
        results = filter(lambda plugin: required_tags <= set(plugin['tags']),
                         results)
    return results, tokens

def _apply_category_filters(results, tokens):
    """Consumes and applies category filters (e.g. "cat:other") to results.

    Arguments:
        results: List of search result plugins.
        tokens: Remaining search text tokens that have not been consumed.

    Returns:
        (results, tokens): Results that match the given category, and
        tokens that have not been consumed.
    """
    category_filter = lambda t: t.startswith('cat:')
    category_tokens = filter(category_filter, tokens)
    tokens = list(itertools.ifilterfalse(category_filter, tokens))
    if category_tokens:
        category_ids = set(t[len('cat:'):] for t in category_tokens)
        results = filter(lambda plugin: plugin['category'] in category_ids,
                         results)
    return results, tokens

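# Combined usage sketch (hypothetical data): each helper consumes its own
# "tag:"/"cat:" tokens and passes the remaining free-text tokens through.
if __name__ == '__main__':
    plugins = [{'name': 'x', 'tags': ['python'], 'category': 'other'}]
    results, tokens = _apply_tag_filters(plugins, ['tag:python', 'editor'])
    results, tokens = _apply_category_filters(results, tokens + ['cat:other'])
    assert tokens == ['editor'] and len(results) == 1
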
def __init__(self, filenames):
    '''
    Constructor, takes in the list of filenames to load, whose workspaces
    will be iterated over.
    '''
    # Validate.
    if not isinstance(filenames, list):
        raise TypeError("Expected a list.")
    if not all([self._is_string(s) for s in filenames]):
        raise TypeError("Expected a list of strings.")
    if len(filenames) < 1:
        raise ValueError("Expected at least one filename.")

    # In the general case, we may or may not have checked for the existence
    # of the files previously, so before we even start iterating throw if
    # any are missing.
    missing_files = list(ifilterfalse(os.path.exists, filenames))
    if len(missing_files) > 0:
        raise ValueError("One or more files are missing: " +
                         str(missing_files))

    self._filenames = filenames
    self._loaded_ws = None

def make_filtered(readpath, idxQualMin, dropNs):
    '''
    Given a fastq file with an index, will filter on low-quality index
    entries, and drop all reads with N. If the file does not have an
    index, only drops Ns. Raises an AssertionError if the index-quality
    filter drops all reads.
    '''
    index = has_index(readpath)
    if idxQualMin and not index:
        print "Specified Non-null index quality minimum, but index for file {0} does not exist.".format(readpath)
    format = 'sff' if readpath.endswith('sff') else 'fastq'
    fq_open = partial(SeqIO.parse, format=format)
    if index and idxQualMin:
        reads, idxreads = fq_open(readpath), fq_open(index)
        intermediate = (r for r, idx in izip(reads, idxreads)
                        if idx_filter(r, idx, idxQualMin))
    else:
        intermediate = fq_open(readpath)
    if dropNs:
        hasNs = lambda rec: 'N' in str(rec.seq).upper()
        return ifilterfalse(hasNs, intermediate)
    else:
        return intermediate

def remove(self, match=None):
    """
    Modify the tree by removing any matching key references from the
    messages tree.

    Example of use:
        >>> msgtree.remove(lambda k: k > 3)
    """
    estack = [self._root]

    # walk the tree to keep only matching keys
    while estack:
        elem = estack.pop()
        if len(elem.children) > 0:
            estack += elem.children.values()
        if elem.keys:  # has some keys
            elem.keys = set(ifilterfalse(match, elem.keys))

    # also remove key(s) from known keys dict
    for key in filter(match, self._keys.keys()):
        del self._keys[key]
