def fqframe(fileh):
    final_schema = Schema({
        'id': str,
        'seq': str,
        'quality': str,
        'qual_ints': check_np_type('int64'),
        'error': check_np_type('float64'),
        'description': str
    })
    #get_object = _id
    index = ['id']
    columns = ('id', 'seq', 'quality', 'description', 'qual_ints', 'error')
    SANGER = True
    get_id = attr('id')
    get_seq = compose(str, attr('seq'))
    get_qual_ints = compose_all(np.array, itemgetter('phred_quality'),
                                attr('_per_letter_annotations'))
    get_description = attr('description')
    get_quality = SeqIO.QualityIO._get_sanger_quality_str
    get_error = compose(error, get_qual_ints)
    #get_error = error_from_ints(get_qual_ints)
    getters = [get_id, get_seq, get_quality, get_description, get_qual_ints,
               get_error]
    assert len(getters) == len(columns)
    metadata = {'filename': fileh.name}
    iterator = get_fastq(fileh)
    get_raw_record = partial(next, iterator)
#    def get_row(record):
#        #record = next(fileh)
##        import sys
##        __module__ = sys.modules[__name__]
##        get_getter = compose(attr, "get_{0}".format)
##        _getters = map(get_getter, columns)
##        self_getters = apply_each(_getters, __module__)  #fzip(_getters, repeat(__module__, clen))
#        results = apply_each(self_getters, record)
#        final_dict = dict(zip(columns, results))
#        final_schema.validate(final_dict)
#        return final_dict
#    def load_fastq():
#        fq = get_fastq(fileh)
#        dicts = map(get_row, fq)
#        return pd.DataFrame(dicts).set_index(index)  #, index=index, columns=columns)
    #return nameddict(
    return {
        'obj_func': get_raw_record,
        'columns': columns,
        'getters': getters,
        'validator': final_schema,
        'dictgetters': None
    }
class Seq(object):
    #SANGER_OFFSET = 33
    SANGER = True
    get_seq_str = compose(str, attr('seq'))
    get_id = attr('id')
    get_qual_ints = compose(itemgetter('phred_quality'),
                            attr('_per_letter_annotations'))
    get_description = attr('description')
    # Sanger (phred+33) encoding adds the offset when turning scores into
    # characters; subtracting it would yield control characters.
    phred_to_char = chr if not SANGER else compose(chr, lambda a: a + 33)
    get_qual_chars = compose(pmap(phred_to_char), get_qual_ints)
    index = 'id'
    _object = _id  # `_id` is expected to be defined at module level
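# For reference, a self-contained sketch of the Sanger quality round-trip the
# getters above rely on (plain Python, no Biopython; these helper names are
# ours, not part of the original module):
SANGER_OFFSET = 33

def encode_phred(q):
    # phred+33: quality 0 encodes as '!', 40 as 'I'
    return chr(q + SANGER_OFFSET)

def decode_phred(c):
    return ord(c) - SANGER_OFFSET

quals = [40, 40, 30, 2]
encoded = ''.join(map(encode_phred, quals))
assert encoded == 'II?#'
assert [decode_phred(c) for c in encoded] == quals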
def run():
    while len(teams['immune_system']) > 0 and len(teams['infection']) > 0:
        fights = {}
        unit_count = total_units()
        for team in teams:
            enemies = (teams['immune_system'] if team == 'infection'
                       else teams['infection'])
            for attacker in sorted(teams[team], reverse=True):
                if attacker.units == 0:
                    continue
                targets = sorted(
                    [(attacker.attack(enemy, simulate=True), enemy)
                     for enemy in enemies
                     if enemy not in fights.values() and enemy.units > 0],
                    reverse=True)
                if len(targets) > 0 and targets[0][0] > 0:
                    fights[attacker] = targets[0][1]
        for attacker in sorted(fights.keys(), key=attr('initiative'),
                               reverse=True):
            if attacker.units > 0:
                attacker.attack(fights[attacker])
        for team in teams:
            to_del = []
            for i, squad in enumerate(teams[team]):
                if squad.units == 0:
                    to_del += [i]
            for i in sorted(to_del, reverse=True):
                del teams[team][i]
        if total_units() == unit_count:
            break
    return len(teams['infection']) == 0
def link_list_view(request):
    links = Link.objects.all().order_by('title')
    files = File.objects.all().order_by('title')
    link_tags = Tag.objects.all().filter(link__isnull=False).distinct()
    file_tags = Tag.objects.all().filter(file__isnull=False).distinct()
    tags_unsorted = list(chain(link_tags, file_tags))
    tags = sorted(set(tags_unsorted), key=attr('title'))
    query = request.GET.get("q")
    if query:
        # Narrow the querysets built above; `tags` is a plain list by this
        # point, so re-query the Tag model for the filtered case.
        files = files.filter(
            Q(title__icontains=query) | Q(text__icontains=query)
            | Q(language__icontains=query)).distinct()
        links = links.filter(
            Q(title__icontains=query) | Q(text__icontains=query)
            | Q(language__icontains=query)).distinct()
        tags = Tag.objects.filter(
            Q(title__icontains=query) | Q(text__icontains=query)
            | Q(language__icontains=query)).distinct()
    return render(request, 'resources/resources_list.html', {
        'links': links,
        'tags': tags,
        'files': files
    })
def find_optimal_position(bots):
    box_size = 1
    while box_size < max(bots, key=attr('x')).x - min(bots, key=attr('x')).x:
        box_size *= 2
    box_center = ORIGIN
    while box_size > 0:
        best, best_count = None, None
        for p in points_to_test(box_center, box_size):
            count = p.reaches(bots, box_size)
            if (best is None
                    or count > best_count
                    or (count == best_count
                        and p.displacement < best.displacement)):
                best, best_count = p, count
        box_center = best
        box_size //= 2
    return box_center.displacement
def fqframe(fileh):
    final_schema = Schema({
        'id': str,
        'seq': str,
        'quality': str,
        'qual_ints': check_np_type('int64'),
        'error': check_np_type('float64'),
        'description': str
    })
    #get_object = _id
    index = ['id']
    columns = ('id', 'seq', 'quality', 'description', 'qual_ints', 'error')
    SANGER = True
    get_id = attr('id')
    get_seq = compose(str, attr('seq'))
    get_qual_ints = compose_all(np.array, itemgetter('phred_quality'),
                                attr('_per_letter_annotations'))
    get_description = attr('description')
    get_quality = SeqIO.QualityIO._get_sanger_quality_str
    get_error = compose(error, get_qual_ints)
    #get_error = error_from_ints(get_qual_ints)
    # Pair each column with its getter directly; the get_* functions are
    # locals of fqframe, so they cannot be looked up on the module object.
    getters = [get_id, get_seq, get_quality, get_description, get_qual_ints,
               get_error]

    def get_row(record):
        results = [getter(record) for getter in getters]
        final_dict = dict(zip(columns, results))
        final_schema.validate(final_dict)
        return final_dict

    def load_fastq():
        fq = get_fastq(fileh)
        dicts = map(get_row, fq)
        return pd.DataFrame(list(dicts)).set_index(index)

    return namedtuple('FastqFrame', ['get_row', 'load_fastq'])(get_row,
                                                               load_fastq)
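# Hypothetical usage of the FastqFrame returned above (the file name is
# assumed for illustration):
# with open('reads.fastq') as fh:
#     ff = fqframe(fh)
#     df = ff.load_fastq()   # one row per read, indexed by 'id'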
def started_at(path_sets, explored):
    # `lazy` must cache the filter: `more` is iterated several times below.
    # Note the recursion has no base case; the caller is expected to stop
    # consuming the generator once the frontier is exhausted.
    more = lazy(filter(lambda p: p.lead_to not in explored,
                       (path.extend(m) for path in path_sets for m in moves)))
    yield from more
    yield from started_at(more,
                          explored.union(set(map(attr("lead_to"), more))))
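# A self-contained sketch of the same frontier-expansion pattern on a toy
# graph (the graph, list-based paths, and the explicit base case are ours;
# the original relies on `lazy`, `moves`, and Path objects instead):
def frontier(paths, explored, graph):
    more = [path + [n] for path in paths for n in graph[path[-1]]
            if n not in explored]
    if not more:
        return
    yield from more
    yield from frontier(more, explored | {p[-1] for p in more}, graph)

graph = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}
for path in frontier([['a']], {'a'}, graph):
    print(path)  # ['a', 'b'], ['a', 'c'], ['a', 'b', 'd'], ['a', 'c', 'd']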
def assertSeqRecordEqual(self, seq1, seq2):
    '''This is necessary because the __eq__ in SeqRecord is weird.'''
    _fields = ['id', 'name', 'description']
    seqstr = compose(str, attr('seq'))
    self.assertEqual(seq1.letter_annotations, seq2.letter_annotations)
    self.assertEqual(seqstr(seq1), seqstr(seq2))
    for field in _fields:
        f1, f2 = getattr(seq1, field), getattr(seq2, field)
        #TODO: include field name in message?
        self.assertEqual(f1, f2)
def calculate_sky_message(particles):
    height = abs(max(particles).y - min(particles).y)
    time = 0
    while height > 10:
        for particle in particles:
            particle.travel()
        height = abs(max(particles).y - min(particles).y)
        time += 1
    ymax, ymin = max(particles).y, min(particles).y
    xmax = max(particles, key=attr('x')).x
    xmin = min(particles, key=attr('x')).x
    try:
        return time, tesseract_parse(particles, ymax, ymin, xmax, xmin)
    except ImportError:
        # Fall back to printing the message when OCR is unavailable.
        for y in range(ymin, ymax + 1):
            l = ''
            for x in range(xmin, xmax + 1):
                l += '█' if (x, y) in particles else ' '
            print(l)
        print()
        return time, None
def main():
    scheme = Schema({
        '<fasta>': os.path.isfile,
        Optional('--gb-file'): Or(os.path.isfile, lambda x: x is None),
        Optional('--tab-file'): Or(os.path.isfile, lambda x: x is None),
        Optional('--gb-id'): Or(str, lambda x: x is None),
    })
    raw_args = docopt(__doc__, version='Version 1.0')
    args = scheme.validate(raw_args)
    fasta = parse_fasta(args['<fasta>'])
    genes, cds = get_genes(args['--gb-id'], args['--gb-file'],
                           args['--tab-file'])
    infos = map(partial(get_gene_degen_overlap_info, genes),
                map(attr('seq'), fasta))
    # need `list` to force evaluation of `print`
    list(map(print, map(pretty_table, infos)))
def adjust_mini(icon_label, value_label, place: int, pos: Vector):
    smaller = min(icon_label, value_label, key=attr('content_width'))
    if smaller is icon_label:
        larger = value_label
    else:
        larger = icon_label
    larger.x = pos.x + place * CardDrawer.PADDING.x
    larger.anchor_x = 'left' if place == 1 else 'right'
    smaller.x = larger.x + place * larger.content_width // 2
    smaller.anchor_x = 'center'
    icon_label.y = pos.y + place * CardDrawer.PADDING.y
    value_label.y = pos.y - (icon_label.document.get_font().ascent * place
                             * (0.9 if place == 1 else 0.8))
def test_flags(self):
    df = self.result
    middle_e = pd.Series([False, False, False, True], dtype=object).values
    outer_e = pd.Series([False, False, True, True], dtype=object).values
    names = [
        "template having multiple segments in sequencing",
        "each segment properly aligned according to the aligner",
        "segment unmapped",
        "next segment in the template unmapped",
    ]
    # inner_actual = df[df['QNAME'] == 'read2'][names]
    # outer_actual1 = df[df['QNAME'] == 'read1'][names]
    # outer_actual3 = df[df['QNAME'] == 'read3'][names]
    # Note: `.ix` was removed in pandas 1.0; use `.loc`/`.iloc` on newer pandas.
    flag8, flag60_1, flag60_2 = map(attr("values"),
                                    map(itemgetter(names),
                                        itemgetter(1, 0, 2)(df.ix)))
    assert_array_equal(middle_e, flag8)
    assert_array_equal(outer_e, flag60_1)
    assert_array_equal(outer_e, flag60_2)
def _scale(entries):
    images = [e.best_post for e in entries]
    posts = []
    if images:
        for im in images:
            im.scale = g.column_width
        sm = sorted(images, key=attr('score'), reverse=True)
        for im in sm[:max(1, int(len(sm) / current_app.config['AIP_PER']))]:
            im.scale = g.gutter + 2 * g.column_width
        for im in images:
            preview_height = int(im.scale * im.height / im.width)
            preview_width = int(im.scale)
            posts.append(Post(
                url=im.post_url,
                preview_url=im.preview_url,
                preview_height=preview_height,
                preview_width=preview_width,
                md5=im.md5
            ))
    return posts
def sigma_scaling_selection(self, population):
    # Materialize fitnesses as an array; a bare `map` cannot be averaged
    # or broadcast against in Python 3.
    fitnesses = np.array(list(map(attr('fitness'), population)))
    mu = np.mean(fitnesses)
    sigma = np.std(fitnesses)
    mu_sum = np.sum((fitnesses - mu) / (2 * sigma) + 1)
    new_population = []
    for i in range(len(population)):
        parents = []
        for _ in range(2):  # roulette-select two parents per offspring
            current_counter = 0
            candidate_random_number = random.random() * mu_sum
            for candidate in population:
                current_counter += 1 + (candidate.fitness - mu) / (2 * sigma)
                if current_counter >= candidate_random_number:
                    parents.append(candidate)
                    break
        new_population.append(parents[0].crossover(parents[1]))
    return new_population
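# A standalone numeric sketch of the sigma-scaling weights used above,
# ExpVal(i) = 1 + (f_i - mean) / (2 * sigma); illustrative values only:
import numpy as np

fitnesses = np.array([1.0, 2.0, 3.0, 4.0])
mu, sigma = fitnesses.mean(), fitnesses.std()
weights = 1 + (fitnesses - mu) / (2 * sigma)
probs = weights / weights.sum()
print(weights)      # below-average individuals keep a small non-zero weight
print(probs.sum())  # 1.0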
def leaderboard():
    scores = models.UserProgress.objects.filter(score__gt=0).order_by("-score")
    transforms = {_("Name"): attr("user.name"), **FN_BASE}
    return {
        "leaderboard": prepare_dataframe(scores, transforms, class_="table")
    }
def nameddict(Name, _dict):
    '''dict to named tuple'''
    names, values = unzip(_dict.items())
    return namedtuple(Name, names)(*values)

ppartial = partial(partial)
apply_to_object = compose(apply, ppartial)  # Python 2 `apply`

kstarcompose2 = lambda f, g: lambda x: f(**g(x))

def kstarcompose(*funcs):
    return reduce(kstarcompose2, funcs)
#kstarcompose = partial(reduce, kstarcompose2)

#use str.endswith((tuple, of, vals))
extension = compose(itemgetter(-1), psplit('.'))
fileext = compose(extension, attr('filename'))

def iter_until_stop(f, *args, **kwargs):
    while True:
        try:
            yield f(*args, **kwargs)
        except StopIteration:
            break

flatten_list = lambda a: a if type(a) != list else a[0]

def split_list(A, idx):
    return A[:idx], A[idx:]
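# Usage sketches for the helpers above (illustrative values only; assumes the
# helpers are in scope, that `unzip` splits pairs into two sequences, and
# that `psplit('.')` splits a string on dots):
p = nameddict('Point', {'x': 1, 'y': 2})
assert (p.x, p.y) == (1, 2)

area = kstarcompose2(lambda w, h: w * h,            # f(**g(x))
                     lambda box: {'w': box[0], 'h': box[1]})
assert area((3, 4)) == 12

nums = iter([1, 2, 3])
assert list(iter_until_stop(next, nums)) == [1, 2, 3]

assert extension('reads.fastq') == 'fastq'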
def ordered(articles):
    return sorted(articles, key=attr('citation_count'), reverse=True)
def issues(infolist):
    return filter(attr('cookies'), infolist)
def dump_path(path):
    p = ", ".join(map(attr("__name__"), path))
    return "{path} ==> {end}".format(path=p, end=lead_to(path))
def __str__(self):
    path = ", ".join(map(attr("__name__"), self))
    return "{path} ==> {end}".format(path=path, end=self.lead_to)
def field_names(self):
    yield from map(attr('name'), fields(self.dataclass))
import functools
import operator
from functools import reduce  # Python 3: `reduce` moved to functools

# `fsum` is assumed to be a compose helper defined earlier in the talk.
fsum(functools.partial(operator.mul, 2))(1, 10)

############################################################
## currying (standard library)
############################################################
from operator import itemgetter
itemgetter(3)([1, 3, 5, 7, 9])

from operator import attrgetter as attr

class Speaker(object):
    def __init__(self, name):
        self.name = "[name] " + name

alexey = Speaker("alexey")
attr("name")(alexey)

from operator import methodcaller
methodcaller("__str__")([1, 2, 3, 4, 5])
methodcaller("keys")(dict(name="alexey", topic='fp'))
values_extractor = methodcaller("values")
values_extractor(dict(name="alexey", topic="fp"))
methodcaller("count", 1)([1, 1, 1, 2, 2])

############################################################
## good function is small function
############################################################
ss = ["UA", "PyCon", "2012"]
reduce(lambda acc, s: acc + len(s), ss, 0)             ## BAD
reduce(lambda l, r: l + r, map(lambda s: len(s), ss))  ## NOT BAD...
reduce(operator.add, map(len, ss))                     ## GOOD
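############################################################
## attrgetter also accepts dotted paths and multiple names
## (the Talk class is ours, for illustration)
############################################################
class Talk(object):
    def __init__(self, speaker, title):
        self.speaker, self.title = speaker, title

talk = Talk(Speaker("alexey"), "FP in Python")
attr("speaker.name")(talk)           # '[name] alexey'
attr("speaker.name", "title")(talk)  # ('[name] alexey', 'FP in Python')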
def seqs_equal(self, fn1, fn2, format):
    open_sorted = compose(partial(sorted, key=compose(str, attr('seq'))),
                          partial(SeqIO.parse, format=format))
    fq1, fq2 = map(open_sorted, [fn1, fn2])
    self.assertFalse(len(fq1) == 0)
    # `list` forces the lazy map so the assertions actually run (Python 3).
    list(map(self.assert_seq_recs_equal, fq1, fq2))
: def _iterator_comparator iterator, value
: return value in iterator
: end

: def select name=None, choices=None, selected=None, **attributes
: """A factory for HTML <select> elements."""
: if isinstance(selected, (tuple, list, Iterator))
: comparator = _iterator_comparator
: else
: comparator = _basic_comparator
: end
<select&{attributes, name=name}>
: for group, options in groupby(options(choices), attr('group'))
: if group
<optgroup&{label=group.label, disabled=group.disabled}>
: end
: for option in options
<option&{value=option.value, disabled=option.disabled, selected=comparator(selected, option.value)}>${option.label}</option>
: end
: if group
</optgroup>
: end
: end
</select>
def main() -> None:
    bots = list(get_data(today, [('func', Nanobot)]))
    strongest = max(bots, key=attr('r'))
    print(f'{today} star 1 = {sum(strongest.in_range(bot) for bot in bots)}')
    print(f'{today} star 2 = {find_optimal_position(bots)}')
@urlpatterns.route("leaderboard/conversations/", staff=True, template="ej_gamification/leaderboard.jinja2") def leaderboard_conversations(): scores = models.ConversationProgress.objects.filter( score__gt=0).order_by("-score") return {"leaderboard": prepare_dataframe(scores, FN_EXT, class_="table")} # # Constants and auxiliary functions # FN_BASE = { _("Score"): attr("score"), _("Votes"): attr("n_final_votes"), _("Comments"): attr("n_approved_comments"), _("Rejected"): attr("n_rejected_comments"), _("pts"): lambda x: x.pts_approved_comments - x.pts_rejected_comments, } FN_EXT = { _("Name"): attr("conversation.title"), _("Author"): attr("conversation.author.name"), **FN_BASE, _("Conversation"): attr("total_conversation_score"), } def prepare_dataframe(data, transforms, **kwargs): fns = transforms.values()
### standard library currying
import operator
from functools import reduce  # Python 3: `reduce` lives in functools

from operator import itemgetter
print(itemgetter(3)([1, 2, 3, 4, 5]))

from operator import attrgetter as attr

class Speaker(object):
    def __init__(self, name):
        self.name = "[name] " + name

alexey = Speaker("Alexey")
print(attr("name")(alexey))

from operator import methodcaller
methodcaller("__str__")([1, 2, 3, 4, 5])
print(methodcaller("keys")(dict(name="Alexey", topic="FP")))
values_extractor = methodcaller("values")
print(values_extractor(dict(name="Alexey", topic="FP")))

# good function is small function
ss = ["UA", "PyCon", "2012"]
reduce(lambda acc, s: acc + len(s), ss, 0)  # bad
reduce(lambda l, r: l + r, map(len, ss))    # not bad
reduce(operator.add, map(len, ss))          # good
def fake_dirs(self):
    return set(chain(*map(attr("parents"), self.fake_files)))
def plused_entries(self):
    from operator import attrgetter as attr
    return [p.entry for p in sorted(self.plused, key=attr('ctime'),
                                    reverse=True)]