Example #1
def extract_output_stats(number_of_trials, aggregate_stats_object, function_list, base_stats):
    output_stats = dict()

    aggregate_stats_object.strip_dirs().sort_stats(-1).print_stats()
    s = aggregate_stats_object.stats

    push_key = [tup for tup in s.keys() if operator.getitem(tup, 2) == function_list[0]][0]
    similarity_key = [tup for tup in s.keys() if operator.getitem(tup, 2) == function_list[1]][0]

    push_value = s[push_key]
    similarity_value = s[similarity_key]

    number_of_nodes = similarity_value[1]/number_of_trials

    output_stats["total_number_of_operations"] = push_value[1]/number_of_trials
    output_stats["total_time"] = push_value[3]/number_of_trials
    output_stats["per_node_number_of_operations"] = output_stats["total_number_of_operations"]/number_of_nodes
    output_stats["per_node_time"] = output_stats["total_time"]/number_of_nodes

    if base_stats is not None:
        output_stats["speedup_number_of_operations"] = percentage_increase(output_stats["total_number_of_operations"],
                                                                           base_stats["total_number_of_operations"])
        output_stats["speedup_time"] = percentage_increase(output_stats["total_time"],
                                                           base_stats["total_time"])

    print(output_stats)

    return output_stats
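A minimal usage sketch for the function above, assuming hypothetical profiled entry points named push and similarity and a stand-in run_single_trial workload (percentage_increase is expected to be defined alongside extract_output_stats):

import cProfile
import pstats

number_of_trials = 10
profiler = cProfile.Profile()
for _ in range(number_of_trials):
    profiler.runcall(run_single_trial)  # hypothetical per-trial workload

# pstats.Stats aggregates the collected profile; extract_output_stats then
# looks the two functions up by name in its .stats mapping
stats = pstats.Stats(profiler)
extract_output_stats(number_of_trials, stats, ["push", "similarity"], base_stats=None)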
Example #2
 def test_getitem(self):
     c = caching_iter(range(20))
     assert c[-1] == 19
     with pytest.raises(IndexError):
         operator.getitem(c, -21)
     with pytest.raises(IndexError):
         operator.getitem(c, 21)
Example #3
def main(arg_list):
    if os.path.exists(arg_list.path):
        for f_name in os.listdir(arg_list.path):
            match = pattern.match(f_name)
            if match:
                file_map[f_name] = parse_filename(match.groups())

        for k, g in _iter.groupby(file_map.values(),
                                  key=lambda i: op.getitem(i, 0)):
            for elem in g:
                version_map[k].append(op.getitem(elem, 1))

        for (k, v) in version_map.items():
            comp_map[k] = sorted(v, reverse=True)[:spec.get(k[0], 1)]

        version_spec = set()

        with open(arg_list.spec_file, mode='w') as f:
            not_build_version = lambda a: len(str(a)) < 3
            for k, v in comp_map.items():
                join_versions = lambda seq: '.'.join(
                    str(el) for el in filter(not_build_version, seq))
                v_str = ','.join(map(join_versions, v))
                version_spec.add('{comp:s} = {versions:s}\n'
                                 .format(**{'comp': k[0], 'versions': v_str}))
            f.writelines(version_spec)

        for (f_name, comp) in file_map.items():
            if not comp[1] in comp_map[comp[0]]:
                target = os.path.join(arg_list.path, f_name)
                sys.stdout.write(str(target).strip() + '\n')
Example #4
def assert_equal_records(a, b):
    """Asserts that two records are equal. Pretty crude for now."""
    assert_equal(a.dtype, b.dtype)
    for f in a.dtype.names:
        (af, bf) = (operator.getitem(a, f), operator.getitem(b, f))
        if not (af is masked) and not (bf is masked):
            assert_equal(operator.getitem(a, f), operator.getitem(b, f))
    return
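A small illustration of the masked-field skip above, assuming assert_equal and masked come from numpy.ma.testutils / numpy.ma as in the module this was taken from:

import numpy.ma as ma

rec = ma.array([(1, 2.0)], mask=[(False, True)],
               dtype=[('x', int), ('y', float)])
# 'y' is masked in both records, so only 'x' is actually compared
assert_equal_records(rec[0], rec[0])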
Example #5
 def __getitem__(self, i):
     if isinstance(i, slice):
         args = [i.start] if i.start is not None else []
         args.append(i.stop)
         if i.step:
             args.append(i.step)
         return self.__class__(getitem(self.notes, *args))
     else:
         return self._item_class(getitem(self.notes, i))
Example #6
def test_column():
    t = TableSymbol('t', '{name: string, amount: int}')
    assert t.columns == ['name', 'amount']

    assert eval(str(t['name'])) == t['name']
    assert str(t['name']) == "t['name']"
    with pytest.raises(ValueError):
        t['name'].project('balance')
    with pytest.raises(ValueError):
        getitem(t, set('balance'))
Example #7
def test_column():
    t = TableSymbol("t", "{name: string, amount: int}")
    assert t.fields == ["name", "amount"]

    assert eval(str(t.name)) == t.name
    assert str(t.name) == "t.name"
    with pytest.raises(AttributeError):
        t.name.balance
    with pytest.raises((NotImplementedError, ValueError)):
        getitem(t, set("balance"))
Example #8
def test_column():
    t = symbol('t', 'var * {name: string, amount: int}')
    assert t.fields == ['name', 'amount']

    assert eval(str(t.name)) == t.name
    assert str(t.name) == "t.name"
    with pytest.raises(AttributeError):
        t.name.balance
    with pytest.raises((NotImplementedError, ValueError)):
        getitem(t, set('balance'))
Example #9
def test_column():
    t = TableSymbol('t', '{name: string, amount: int}')
    assert t.fields == ['name', 'amount']

    assert eval(str(t['name'])) == t['name']
    assert str(t['name']) == "t['name']"
    with pytest.raises(AttributeError):
        t['name'].balance
    with pytest.raises((NotImplementedError, ValueError)):
        getitem(t, set('balance'))
Example #10
def getitem_rec(*args):
    import operator
    if len(args) > 2:
        if isinstance(args[1], slice):
            return [getitem_rec(a, *args[2:])
                    for a in operator.getitem(args[0], args[1])]
        else:
            return getitem_rec(operator.getitem(args[0], args[1]), *args[2:])
    else:
        return operator.getitem(args[0], args[1])
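On a nested list, a plain index drills one level down while a slice maps the remaining indices over every matched element; a hypothetical demo:

matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]

assert getitem_rec(matrix, 1, 2) == 6                 # matrix[1][2]
assert getitem_rec(matrix, slice(0, 2), 0) == [1, 4]  # first column of the first two rows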
Example #11
    def __find_next_task(self):
        self.tasks_lock.acquire()
        # drop tasks that have no scheduled time yet
        items = [x for x in self.tasks.items() if operator.getitem(x, 1).scheduled_time is not None]

        by_time = lambda x: operator.getitem(x, 1).scheduled_time
        items.sort(key=by_time)
        try:
            receipt = items[0][0]
        except IndexError:  # "except Exception, e" is Python 2 syntax
            receipt = None
Example #12
 def __getitem__(self, i):
     if isinstance(i, slice):
         return self.clone(
             operator.getitem(self._value, i)
             )
     else:
         return self._value[i]
Example #13
from itertools import chain, count, tee
from operator import getitem

def iunzip(iterable, n=None):
    """Takes an iterator that yields n-tuples and returns n iterators
    which index those tuples.  This function is the reverse of zip().
    n is the length of the n-tuple and will be autodetected if not
    specified.  If the iterable contains tuples of differing sizes,
    the behavior is undefined.

    >>> a0, b0, c0 = list(range(1, 10)), list(range(21, 30)), list(range(81, 90))
    >>> z = list(zip(a0, b0, c0))
    >>> a, b, c = iunzip(z)
    >>> a, b, c = map(list, (a, b, c))
    >>> assert a == a0 and b == b0 and c == c0
    >>> recombined = list(zip(a, b, c))
    >>> assert recombined == z

    """
    iterable = iter(iterable)  # ensure we're dealing with an iterable
    if n is None:  # check the first element for length
        first = next(iterable)
        n = len(first)
        # now put it back so the iterable is unchanged
        iterable = chain([first], iterable)

    iter_tees = tee(iterable, n)
    selector = lambda index: lambda item: getitem(item, index)
    return [map(selector(i), it) for i, it in zip(count(), iter_tees)]
Example #14
def _compute_rechunk(x, chunks):
    """ Compute the rechunk of *x* to the given *chunks*.
    """
    ndim = x.ndim
    crossed = intersect_chunks(x.chunks, chunks)
    x2 = dict()
    intermediates = dict()
    token = tokenize(x, chunks)
    temp_name = 'rechunk-merge-' + token
    new_index = tuple(product(*(tuple(range(len(n))) for n in chunks)))
    for flat_idx, cross1 in enumerate(crossed):
        new_idx = new_index[flat_idx]
        key = (temp_name,) + new_idx
        cr2 = iter(cross1)
        old_blocks = [[ind for ind, _ in cr] for cr in cross1]
        subdims = [len(set([ss[i] for ss in old_blocks])) for i in range(ndim)]
        rec_cat_arg = np.empty(subdims).tolist()
        inds_in_block = product(*[range(s) for s in subdims])
        for old_block in old_blocks:
            ind_slics = next(cr2)
            old_inds = [[s[0] for s in ind_slics] for i in range(ndim)]
            # list of nd slices
            slic = [[s[1] for s in ind_slics] for i in range(ndim)]
            ind_in_blk = next(inds_in_block)
            temp = rec_cat_arg
            for i in range(ndim - 1):
                temp = getitem(temp, ind_in_blk[i])
            for ind, slc in zip(old_inds, slic):
                name = (('rechunk-split-' + token, ) + tuple(ind) +
                        sum([(s.start, s.stop) for s in slc], ()))
                intermediates[name] = (getitem, (x.name,) + tuple(ind), tuple(slc))
                temp[ind_in_blk[-1]] = name
        x2[key] = (concatenate3, rec_cat_arg)
    x2 = merge(x.dask, x2, intermediates)
    return Array(x2, temp_name, chunks, dtype=x.dtype)
Example #15
	def Reduce1D(self, col, func=operator.add):
		# note: on Python 3 reduce lives in functools (from functools import reduce);
		# the original also pre-assigned numpy.zeros to yi, which was dead code
		cols = lambda c: numpy.array(operator.getitem(c, col))
		yi = reduce(func, map(cols, self.cdfs))

		return (self.x, yi)
Example #16
 def __getitem__(self, *args):
     try:
         return operator.getitem(self.value, *args)
     except Exception as e:
         if len(args) == 1 and args[0] == 0:
             return self.value
         raise e
Example #17
def get_dot_schema(db):
    # Identify the parameters for the database
    engine = create_engine('sqlite:///databases.db')
    Base.metadata.bind = engine  
    DBSession = sessionmaker(bind=engine)
    session = DBSession()
    db_id = db
    db = session.query(Database).filter(Database.id == db_id).first()

    if db is None:
        raise Exception("There are no databases identified by " + str(db_id))
    
    # Get the dot file
    url = str(URL(db.engine, database = db.name))
    engine = create_engine(url)
    meta = MetaData()

    meta.reflect(bind=engine)
    
    tables = set(meta.tables.keys())

    desc = describe(map(lambda x: operator.getitem(meta.tables, x), tables))
    graph_file = to_dot(desc)
    with open('new.dot', 'w') as file:
        file.write(graph_file)
    return graph_file
Example #18
	def __qstep(self, palette, colors, depth):
		colors_n = len(colors)
		if depth == self.n:
			#print "FINISHING", colors
			r, g, b = zip(*colors)
			# integer division keeps the averaged channels as ints on Python 3
			r, g, b = sum(r) // colors_n, sum(g) // colors_n, sum(b) // colors_n
			palette.append((r, g, b))
			return
		r1, g1, b1 = 255, 255, 255
		r2, g2, b2 = 0, 0, 0
		for r, g, b in colors:
			r1, g1, b1 = min(r, r1), min(g, g1), min(b, b1)
			r2, g2, b2 = max(r, r2), max(g, g2), max(b, b2)

		dr, dg, db = r2 - r1, g2 - g1, b2 - b1
		#print "ranges: ", dr, dg, db
		dm = max(dr, dg, db)
		if dr == dm:
			plane = 0
		elif dg == dm:
			plane = 1
		else:
			plane = 2

		#print "sorting by plane", plane
		colors = sorted(colors, key = lambda x: getitem(x, plane), reverse = True)

		depth += 1
		median = colors_n // 2  # must be an int to slice the color list below
		next_1 = colors[: median]
		next_2 = colors[median:]
		self.__qstep(palette, next_1, depth)
		self.__qstep(palette, next_2, depth)
Example #19
def main(separator='\t'):
    # input comes from STDIN (standard input)
    data = read_mapper_output(sys.stdin, separator=separator)
    
    # groupby groups multiple user_id sessions pairs by user_id,
    # and creates an iterator that returns consecutive keys and their group:
    #   current_key - string containing the concatenated key (user_id, session_id)
    #   group - iterator yielding all items
    for current_key, group in groupby(data, itemgetter(0)):
        try:
            key_list = current_key.split(',')
            user_id = key_list[0]
            session_id = key_list[1]
            
            # Loop through the items in group and extract created_at and action type
            for i in group:
                value_list = getitem(i, 1).split(',')
                created_at = value_list[0]
                action = value_list[1]
                movie_id = value_list[2]
                # print out the sessionised output
                print('%s\t%s\t%s\t%s\t%s' % (user_id, created_at, action, movie_id, session_id))
                
        except ValueError:
            # count was not a number, so silently discard this item
            pass
Example #20
def f_word_slice(text, start, stop=0, by_spaces=False):
    """
    Extracts a substring spanning from start up to but not-including stop
    """
    text = val_to_string(text)
    start = val_to_integer(start)
    stop = val_to_integer(stop)
    by_spaces = val_to_boolean(by_spaces)

    if start == 0:
        raise ValueError("Start word cannot be zero")
    elif start > 0:
        start -= 1  # convert to a zero-based offset

    if stop == 0:  # zero is treated as no end
        stop = None
    elif stop > 0:
        stop -= 1  # convert to a zero-based offset

    words = get_words(text, by_spaces)

    selection = operator.getitem(words, slice(start, stop))

    # re-combine selected words with a single space
    return ' '.join(selection)
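A hedged demo of the 1-based, stop-exclusive semantics, assuming get_words tokenizes on whitespace (and punctuation when by_spaces is false), as the helper names suggest:

f_word_slice("the quick brown fox", 2, 4)  # -> "quick brown" (words 2..3)
f_word_slice("the quick brown fox", -2)    # -> "brown fox" (stop=0 means "to the end")
f_word_slice("the quick brown fox", 0)     # ValueError: Start word cannot be zero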
Example #21
 def __getitem__(self, key):
     for container in self.containers:
         try:
             return getitem(container, key)
         except KeyError:
             continue
     raise KeyError(key)
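This is essentially the lookup rule of collections.ChainMap: the first container holding the key wins. A standard-library sketch of the same behavior:

from collections import ChainMap

chained = ChainMap({'a': 1}, {'a': 2, 'b': 3})
assert chained['a'] == 1  # first container wins
assert chained['b'] == 3  # later containers are consulted on a miss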
Example #22
        def encode(cls, iterable, result):
            '''Given an `iterable` string, send each character in a printable form to `result`.'''

            # construct a transformer that writes characters to result
            escape = internal.utils.character.escape(result); next(escape)

            # send the key prefix
            result.send(cls.prefix)

            # now we can actually process the string
            for ch in iterable:

                # first check if character has an existing key mapping
                if operator.contains(cls.mappings, ch):
                    for ch in operator.getitem(cls.mappings, ch):
                        result.send(ch)

                # otherwise pass it to the regular escape function
                else:
                    escape.send(ch)

                continue

            # submit the suffix and we're good
            result.send(cls.suffix)
            return
Example #23
def run_services_testkit_jobs(host, port, testkit_cfg_fofn, nworkers=1,
                              ignore_test_failures=False, time_out=1800,
                              sleep_time=2, import_only=False):
    testkit_cfgs = testkit_cfg_fofn_to_files(testkit_cfg_fofn)
    nworkers = min(len(testkit_cfgs), nworkers)
    results = []
    started_at = time.time()
    misc_opts = [
        "--host", host,
        "--port", str(port),
        "--timeout", str(time_out),
        "--sleep", str(sleep_time)
    ]
    if ignore_test_failures:
        misc_opts.append("--ignore-test-failures")
    if import_only:
        misc_opts.append("--import-only")
    misc_opts = " ".join(misc_opts)
    if nworkers == 1:
        log.info("Running in serial mode.")
        for testkit_cfg in testkit_cfgs:
            bcfg, rcode, stdout, stderr, job_run_time = _run_testkit_cfg(
                testkit_cfg, misc_opts=misc_opts)
            d = dict(x=testkit_cfg, r=rcode, s=int(job_run_time),
                     m=job_run_time / 60.0)
            log.info("Completed running {x}. exit code {r} in {s} sec ({m:.2f} min).".format(**d))
            results.append((testkit_cfg, rcode, stdout, stderr, job_run_time))
    else:
        _results = []
        # XXX to avoid connection errors when submitting a job to services,
        # the calls to pbtestkit-service-runner run staggered with a sleep
        # time of Constants.SLEEP_TIME between each job.  This allows us to
        # start many more near-simultaneous pbsmrtpipe jobs than would
        # otherwise be the case.
        pool = multiprocessing.Pool(nworkers)
        for i_cfg, testkit_cfg in enumerate(testkit_cfgs):
            sleep_time = (i_cfg % nworkers) * Constants.SLEEP_TIME
            __run_testkit_cfg = functools.partial(_run_testkit_cfg,
                misc_opts=misc_opts, sleep_time=sleep_time)
            log.debug("Running {c} with sleep time {t}".format(
                      c=testkit_cfg, t=sleep_time))
            _results.append(pool.apply_async(__run_testkit_cfg, (testkit_cfg,)))
        pool.close()
        pool.join()
        results = [r.get() for r in _results]
            #results = pool.map_async(__run_testkit_cfg, testkit_cfgs).get(9999999)
    log.info("Results:")
    for bcfg, rcode, _, _, job_run_time in results:
        d = dict(r=rcode, j=bcfg, s=int(job_run_time), m=job_run_time / 60.0)
        log.info("exit code {r} in {s} sec ({m:.2f} min). job {j}".format(**d))
    njobs = len(results)
    rcodes = [operator.getitem(r, 1) for r in results]
    nfailed = len([r for r in rcodes if r != 0])
    run_time = time.time() - started_at
    d = dict(n=njobs, x=nfailed, s=int(run_time), m=run_time / 60.0)
    msg = "Completed {n} jobs in {s} sec ({m:.2f} min) {x} failed.".format(**d)
    log.info(msg)
    # should this propagate the rcodes from siv_butler calls?
    return 0 if nfailed == 0 else -1
Example #24
def rechunk(x, chunks):
    """
    Convert blocks in dask array x for new chunks.

    >>> import dask.array as da
    >>> a = np.random.uniform(0, 1, 7**4).reshape((7,) * 4)
    >>> x = da.from_array(a, chunks=((2, 3, 2),)*4)
    >>> x.chunks
    ((2, 3, 2), (2, 3, 2), (2, 3, 2), (2, 3, 2))

    >>> y = rechunk(x, chunks=((2, 4, 1), (4, 2, 1), (4, 3), (7,)))
    >>> y.chunks
    ((2, 4, 1), (4, 2, 1), (4, 3), (7,))

    chunks also accept dict arguments mapping axis to blockshape

    >>> y = rechunk(x, chunks={1: 2})  # rechunk axis 1 with blockshape 2

    Parameters
    ----------

    x:   dask array
    chunks:  the new block dimensions to create
    """
    if isinstance(chunks, dict):
        if not chunks or isinstance(next(iter(chunks.values())), int):
            chunks = blockshape_dict_to_tuple(x.chunks, chunks)
        else:
            chunks = blockdims_dict_to_tuple(x.chunks, chunks)
    chunks = normalize_chunks(chunks, x.shape)
    if not len(chunks) == x.ndim or tuple(map(sum, chunks)) != x.shape:
        raise ValueError("Provided chunks are not consistent with shape")

    crossed = intersect_chunks(x.chunks, chunks)
    x2 = dict()
    temp_name = next(rechunk_names)
    new_index = tuple(product(*(tuple(range(len(n))) for n in chunks)))
    for flat_idx, cross1 in enumerate(crossed):
        new_idx = new_index[flat_idx]
        key = (temp_name,) + new_idx
        cr2 = iter(cross1)
        old_blocks = tuple(tuple(ind for ind, _ in cr) for cr in cross1)
        subdims = tuple(len(set(ss[i] for ss in old_blocks)) for i in range(x.ndim))
        rec_cat_arg = np.empty(subdims).tolist()
        inds_in_block = product(*(range(s) for s in subdims))
        for old_block in old_blocks:
            ind_slics = next(cr2)
            old_inds = tuple(tuple(s[0] for s in ind_slics) for i in range(x.ndim))
            # list of nd slices
            slic = tuple(tuple(s[1] for s in ind_slics) for i in range(x.ndim))
            ind_in_blk = next(inds_in_block)
            temp = rec_cat_arg
            for i in range(x.ndim - 1):
                temp = getitem(temp, ind_in_blk[i])
            for ind, slc in zip(old_inds, slic):
                temp[ind_in_blk[-1]] = (getitem, (x.name,) + ind, slc)
        x2[key] = (concatenate3, rec_cat_arg)
    x2 = merge(x.dask, x2)
    return Array(x2, temp_name, chunks, dtype=x.dtype)
Example #25
import collections.abc

def _is_valid_result(result):
    test_funcs = [
            # operator.isMappingType existed only on Python 2; an isinstance
            # check against collections.abc.Mapping is the closest
            # Python 3 equivalent
            lambda x: isinstance(x, collections.abc.Mapping),
            lambda x: operator.getitem(x, 'config'),
            # could do pickle.dumps, but that would be a bit compute heavy
            ]
    bools = [test_func(result) for test_func in test_funcs]
    return all(bools)
Example #26
def combos(toks):
    '''
    generates all possible orderings of 'toks'
    NOTE: You should be using itertools if possible
    '''
    if len(toks) == 1:
        yield toks
    else:
        for i in range(len(toks)):  # xrange is Python 2 only
            temp = operator.getitem(toks, 0)
            operator.setitem(toks, 0, operator.getitem(toks, i))
            operator.setitem(toks, i, temp)
            for rest in combos(toks[1:]):
                yield [operator.getitem(toks, 0)] + rest
            temp = operator.getitem(toks, 0)
            operator.setitem(toks, 0, operator.getitem(toks, i))
            operator.setitem(toks, i, temp)
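On a three-element list this yields all 3! orderings; the second swap restores toks so later iterations see the original order. A small check (using the range fix above):

assert sorted(combos([1, 2, 3])) == [
    [1, 2, 3], [1, 3, 2],
    [2, 1, 3], [2, 3, 1],
    [3, 1, 2], [3, 2, 1],
]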
Example #27
 def test_indexing(self):
     buff = [[0, 0, 0],
             [0, 0, 1],
             [0, 0, 19]]
     ca = CA(buff, seeds_rules, self.empty_cell)
     self.assertEqual(1, ca[2, 3])
     self.assertEqual(19, ca[3, 3])
     self.assertRaises(IndexError, lambda i: getitem(ca, i), (19, 0))
Example #28
def PrintDOTNodes(stats):
	# list() is needed on Python 3, where dict.values() is a lazy view
	averages = operator.getitem(list(stats.values()), 0)
	avg_max = max(averages)
	for k, v in stats.items():
		average = v[0]
		std = v[1]
		print("\t%s [ height=%f, width=%f, fontsize=%d, style=filled, fillcolor=\"0.0,%f,1.0\" ];" % (k, average/10.0, std/10.0, 30, average/avg_max))

	return
Example #29
def run_testkit_cfgs(testkit_cfgs, nworkers, force_distributed=False, local_only=False, force_chunk_mode=False, disable_chunk_mode=False, ignore_test_failures=True):
    """Run all the butler cfgs in parallel or serial (nworkers=1)

    :param testkit_cfgs: (list of str) list of absolute paths to butler.cfgs)
    :param nworkers: (int) Number of workers to spawn.

    :type testkit_cfgs: list
    :type nworkers: int

    :rtype: bool
    """
    started_at = time.time()
    log.info("Starting with nworkers {n} and {m} butler cfg files".format(n=nworkers, m=len(testkit_cfgs)))
    results = []
    misc_opts = []
    if disable_chunk_mode:
        misc_opts.append("--disable-chunk-mode")
    elif force_chunk_mode:
        misc_opts.append("--force-chunk-mode")
    if local_only:
        misc_opts.append("--local-only")
    elif force_distributed:
        misc_opts.append("--force-distributed")
    if ignore_test_failures:
        misc_opts.append("--ignore-test-failures")
    misc_opts = " ".join(misc_opts)
    if nworkers == 1:
        log.info("Running in serial mode.")
        for testkit_cfg in testkit_cfgs:
            bcfg, rcode, stdout, stderr, job_run_time = _run_testkit_cfg(testkit_cfg, misc_opts=misc_opts)
            d = dict(x=testkit_cfg, r=rcode, s=int(job_run_time), m=job_run_time / 60.0)
            log.info("Completed running {x}. exit code {r} in {s} sec ({m:.2f} min).".format(**d))
            results.append((testkit_cfg, rcode, stdout, stderr, job_run_time))
    else:
        __run_testkit_cfg = functools.partial(_run_testkit_cfg,
            misc_opts=misc_opts)
        pool = multiprocessing.Pool(nworkers)
        results = pool.map_async(__run_testkit_cfg, testkit_cfgs).get(9999999)

    log.info("Results:")
    for bcfg, rcode, _, _, job_run_time in results:
        d = dict(r=rcode, j=bcfg, s=int(job_run_time), m=job_run_time / 60.0)
        log.info("exit code {r} in {s} sec ({m:.2f} min). job {j}".format(**d))

    njobs = len(results)
    rcodes = [operator.getitem(r, 1) for r in results]
    nfailed = len([r for r in rcodes if r != 0])
    run_time = time.time() - started_at

    d = dict(n=njobs, x=nfailed, s=int(run_time), m=run_time / 60.0)
    msg = "Completed {n} jobs in {s} sec ({m:.2f} min) {x} failed.".format(**d)
    print(msg)
    log.info(msg)

    # should this propagate the rcodes from siv_butler calls?
    return 0 if nfailed == 0 else -1
Example #30
 def from_dict(d):
     """Create a new record from the raw CSV header dict"""
     f = lambda x: operator.getitem(d, x)
     r = MotifRecord(f('motifString'), f('centerPos'), f('modificationType'),
                     f('fraction'), f('nDetected'), f('nGenome'),
                     f('groupTag'), f('partnerMotifString'), f('meanScore'),
                     f('meanIpdRatio'), f('meanCoverage'), f('objectiveScore'))
     return r
Example #31
def process_mod(target, mod):
    """Apply a *mod* to the target, mutating it."""
    for key in mod["path"]:
        target = getitem(target, key)
    action = mod["action"]
    if action == "append":
        target.append(mod["x"])
    elif action == "insert":
        target.insert(mod["i"], mod["x"])
    elif action == "pop":
        target.pop(mod["i"])
    elif action == "setitem":
        target.__setitem__(mod["key"], mod["value"])
    elif action == "delitem":
        target.__delitem__(mod["key"])
    else:
        raise ValueError
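A minimal sketch of the mod format this expects: path walks getitem by getitem into the target, then action mutates the container that was reached:

doc = {"items": [1, 2, 3]}

process_mod(doc, {"path": ["items"], "action": "append", "x": 4})
assert doc == {"items": [1, 2, 3, 4]}

process_mod(doc, {"path": ["items"], "action": "pop", "i": 0})
assert doc == {"items": [2, 3, 4]}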
Example #32
def test_sbyte_array_conversion():
    """Test sbyte array conversion."""
    ob = ConversionTest()

    assert ob.SByteArrayField is None

    ob.SByteArrayField = [0, 1, 2, 3, 4]
    array = ob.SByteArrayField
    assert len(array) == 5
    assert array[0] == 0
    assert array[4] == 4

    value = b"testing"
    ob.SByteArrayField = value
    array = ob.SByteArrayField
    for i, _ in enumerate(value):
        assert array[i] == operator.getitem(value, i)
Example #33
	def evaluate(self, thing):
		resolved_obj = self.container.evaluate(thing)
		if resolved_obj is None:
			if self.safe:
				return resolved_obj
			raise errors.EvaluationError('data type mismatch')

		resolved_item = self.item.evaluate(thing)
		_assert_is_integer_number(resolved_item)
		resolved_item = int(resolved_item)
		try:
			value = operator.getitem(resolved_obj, resolved_item)
		except (IndexError, KeyError):
			if self.safe:
				return None
			raise errors.LookupError(resolved_obj, resolved_item)
		return coerce_value(value, verify_type=False)
Example #34
	def evaluate(self, thing):
		resolved_obj = self.container.evaluate(thing)
		if resolved_obj is None:
			if self.safe:
				return resolved_obj
			raise errors.EvaluationError('data type mismatch')

		resolved_start = self.start.evaluate(thing)
		if resolved_start is not None:
			_assert_is_integer_number(resolved_start)
			resolved_start = int(resolved_start)
		resolved_stop = self.stop.evaluate(thing)
		if resolved_stop is not None:
			_assert_is_integer_number(resolved_stop)
			resolved_stop = int(resolved_stop)
		value = operator.getitem(resolved_obj, slice(resolved_start, resolved_stop))
		return coerce_value(value, verify_type=False)
Example #35
    def __find_next_task(self):

        self.tasks_lock.acquire()

        by_time = lambda x: operator.getitem(x, 1).scheduled_time

        # sorted() returns a new list; the original call discarded its result
        items = sorted(self.tasks.items(), key=by_time)

        print("items >>  %s " % items)

        receipt = items[0][0]
        print("receipt >> %s" % receipt)
        self.tasks_lock.release()

        return receipt
Example #36
def interpret_json_form(pairs):
    """The application/json form encoding algorithm.

    <https://www.w3.org/TR/html-json-forms/#dfn-application-json-encoding-algorithm>

    """
    result = {}
    for key, value in pairs:
        steps = parse_path(key)
        context = result
        for step in steps:
            try:
                current_value = operator.getitem(context, step[0])
            except LookupError:
                current_value = None
            context = set_value(context, step, current_value, value)
    return result
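Per the W3C algorithm linked in the docstring, bracketed keys build nested structures. parse_path and set_value are the spec's helpers and are not shown here, so this is an expected-behavior sketch rather than a runnable test:

pairs = [("pet[0][name]", "rex"),
         ("pet[1][name]", "fido")]
# interpret_json_form(pairs) should produce:
#   {"pet": [{"name": "rex"}, {"name": "fido"}]}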
Example #37
def get_or_default(a, b, default):
    """
    get b from a if exists else return default
    :param a:
    :param b:
    :param default:
    :return:
    """
    if isinstance(a, list):
        a = a[0]
    if hasattr(a, '__contains__'):
        if b in a:
            return getitem(a, b)
        else:
            return default
    else:
        return default
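A few hypothetical calls covering the branches above (list unwrapping, membership hit, and the two default fallbacks):

assert get_or_default({"a": 1}, "a", default=0) == 1    # key present
assert get_or_default({"a": 1}, "b", default=0) == 0    # key missing
assert get_or_default([{"a": 1}], "a", default=0) == 1  # lists are unwrapped to their first element
assert get_or_default(42, "a", default=0) == 0          # no __contains__ at all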
Example #38
def get_from(collection, keys, default=None):
    """
    toolz-style getter that will attempt both getattr and getitem (intended
    for named tuples nested inside of dicts, etc)
    (hierarchical list of keys, collection ->
    item of collection, possibly from a nested collection)
    """
    level = collection
    for key in keys:
        try:
            level = getitem(level, key)
        except (KeyError, IndexError, TypeError):
            try:
                level = getattr(level, key)
            except AttributeError:
                return default
    return level
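For example, it can reach into a namedtuple stored in a dict: getitem fails with TypeError on the string key, so the lookup falls back to getattr. A hypothetical demo:

from collections import namedtuple

Point = namedtuple("Point", "x y")
data = {"origin": Point(0, 1)}

assert get_from(data, ["origin", "y"]) == 1  # getitem, then getattr
assert get_from(data, ["origin", "z"], default="?") == "?"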
Example #39
 def leaderboard(self):
     """
     Players Round Score Rank
     Ram     10      36  1
     Shiva   12      39  2
     """
     click.echo("\n----Leaderboard----")
     players = OrderedDict(
         sorted(self.players.items(), key=lambda x: getitem(x[1], "round"))
     )
     columns = ["Rank", "Name", "Round", "Score"]
     row_format = "{:>15}" * len(columns)
     click.echo(row_format.format(*columns))
     rank = 1
     for k, v in players.items():
         click.echo(row_format.format(rank, v["name"], v["round"], v["score"]))
         rank = rank + 1
Example #40
def jac_rank(filename):
    dic = [271955, 218445, 34466, 127372, 614, 219270, 1051, 246704, 34620, 75542, 221932, 34582, 644, 127417, 617, 246616, 218722, 35882, 137565, 1214, 216888, 46984, 553, 839, 14904, 35877, 135191, 34438, 75646, 215984, 921, 75761, 137384, 135194, 271641, 866, 75683, 36285, 218726, 34682, 1766, 216881, 216895, 34302, 845, 34549, 611, 34518, 225899, 271950, 1072, 75604, 34560, 219217, 225900, 218721, 36299, 36287, 34513, 75623, 34904, 34608, 865, 566, 14892, 34625, 137861, 222630, 271725, 34552, 216885, 810, 37188, 552, 271940, 35883, 573, 75657, 355584, 34970, 271727, 34439, 34883, 34786, 271884, 219275, 608, 75644, 271096, 34520, 218958, 1267, 75654, 34314, 246395, 273371, 36320, 267511, 26324, 802]
    vals = read_file("ms-project/grp_ontology.csv")
    ranked_genesets = {}
    # get ranked genesets
    for i in dic:
        x = vals[i]
        y = [i[0] for i in sorted(x.items(), key=lambda xx: getitem(xx[1], 'weight'), reverse=True)[:5]]
        if i in y:
            y.remove(i)
        ranked_genesets[i] = y
    ranked_genesets = dict(sorted(ranked_genesets.items()))
    # write_file('ranking_results/--JCD--Homology.csv', ranked_genesets)
    # sys.exit()

    # get related homology of ranked
    assoc_homs = {}
    for rank in ranked_genesets:
        for geneset in ranked_genesets[rank]:
            if rank in assoc_homs:
                assoc_homs[rank] |= to_onr_set(get_homology(geneset))
            else:
                assoc_homs[rank] = to_onr_set(get_homology(geneset))
    print("loop2")

    enrich = {}
    for geneset in assoc_homs.keys():
        # Iterate over homology
        _homs = set()
        _genes = set()
        _refs = set()
        for homology in assoc_homs[geneset]:
            res = get_genes(homology)
            for i in res.fetchall():
                _genes.add(i[0])
                _refs.add(i[1])
            _homs.add(homology)
        enrich[geneset] = [_homs, _genes, _refs]
    print("loop3")

    for i in enrich:
        print(i, len(enrich[i][1]), enrich[i])

    # Just for future use
    write_file('enrich/' + filename + '.csv', enrich)
Example #41
 def set_text_entry(field, output):
     txt = field.get_string_value().encode('utf-8').strip()
     if len(txt) == 0:
         operator.setitem(output, 'value', None)
         mforms.Utilities.add_timeout(
             0.1, self.call_create_preview_table)
     elif len(txt) == 1:
         operator.setitem(output, 'value', txt)
         mforms.Utilities.add_timeout(
             0.1, self.call_create_preview_table)
     else:
         field.set_value(
             operator.getitem(output,
                              'value').encode('utf-8').strip())
         mforms.Utilities.show_error(
             "Import Wizard",
             "Due to the nature of this wizard, you can't use unicode characters in this place, as also only one character is allowed.",
             "Ok", "", "")
Example #42
File: siu.py Project: machow/siuba
    def __call__(self, x):
        args, kwargs = self.map_subcalls(self.evaluate_calls, args=(x, ))
        inst, *rest = args

        #inst, *rest = (self.evaluate_calls(arg, x) for arg in self.args)
        #kwargs = {k: self.evaluate_calls(v, x) for k, v in self.kwargs.items()}

        # TODO: temporary workaround, for when only __get_attribute__ is defined
        if self.func == "__getattr__":
            return getattr(inst, *rest)
        elif self.func == "__getitem__":
            return operator.getitem(inst, *rest)
        elif self.func == "__call__":
            return getattr(inst, self.func)(*rest, **kwargs)

        # in normal case, get method to call, and then call it
        f_op = getattr(operator, self.func)
        return f_op(inst, *rest, **kwargs)
Example #43
def iunzip(iterable, n=None):
    """Takes an iterator that yields n-tuples and returns n iterators
    which index those tuples.  This function is the reverse of zip().
    n is the length of the n-tuple and will be autodetected if not
    specified.  If the iterable contains tuples of differing sizes,
    the behavior is undefined."""
    # a braindead implementation for now (since it relies on tee() which is
    # braindead in this module (but not in Python 2.4+))
    iterable = iter(iterable)  # ensure we're dealing with an iterable
    if n is None:  # check the first element for length
        first = next(iterable)
        n = len(first)
        # now put it back so the iterable is unchanged
        iterable = chain([first], iterable)

    iter_tees = tee(iterable, n)
    selector = lambda index: lambda item: getitem(item, index)
    return [map(selector(i), it) for i, it in zip(count(), iter_tees)]
Example #44
    def __find_next_task(self):

        self.tasks_lock.acquire()

        items = self.tasks.items()

        by_time = lambda x: operator.getitem(x, 1).scheduled_time
        items = list(items)
        items.sort(key=by_time)
        log.info("items:  %s " % items)

        receipt = items[0][0]

        log.info("receipt: %s" % receipt)

        self.tasks_lock.release()

        return receipt
Example #45
    def __getitem__(self, key):
        if isinstance(key, tuple):
            if len(key) != 1:
                raise TypeError("Index must be one-dimensional")
            key, = key

        if isinstance(key, slice):
            try:
                val = operator.getitem(self._datap, key)
            except TypeError:
                if key.step:
                    raise TypeError("Key cannot have a step size other than 1")
                # operator.getslice is Python 2 only; the Python 3 equivalent of
                # this fallback is self._datap[key.start:key.stop]
                val = operator.getslice(self._datap, key.start, key.stop)
            value = (self.dtype * len(val))()
            value[:] = val
            return array(value)
        else:
            return self._backcast(self._datap[key])
Example #46
def view_issues_2(request, repo):
    # Security check: require a validated session token
    try:
        if not request.session[SESSION_VALIDATED] == "Validated":
            return HttpResponseRedirect(reverse('index_page'))
    except KeyError:  # the original bare "except:" also swallowed unrelated errors
        return HttpResponseRedirect(reverse('index_page'))

    api_repo_issues = None
    api_repo_name = ""
    # Get selected Repo
    if repo == GITHUB_ORG_REPO_NAME_WASF:
        api_repo_issues = GITHUB_ORG_API_REPO_ISSUES_WASF
        api_repo_name = GITHUB_ORG_REPO_NAME_WASF
    elif repo == GITHUB_ORG_REPO_NAME_SFT:
        api_repo_issues = GITHUB_ORG_API_REPO_ISSUES_SFT
        api_repo_name = GITHUB_ORG_REPO_NAME_SFT
    else:
        return HttpResponseRedirect(reverse('index_page'))

    error = None
    issue_items = None

    # Set Auth
    github = OAuth2Session(
        GITHUB_APP_USERNAME,
        token=request.session[SESSION_APP_GITHUB_OAUTH_TOKEN])
    # Get data
    issue_items = jsonify(github.get(api_repo_issues).json())

    # Check if Request got no error
    # Sort by Number
    if issue_items:
        ln = lambda x: operator.getitem(x, 'number')
        issue_items = sorted(issue_items, key=ln)
    else:
        error = "Request error for {0}".format(api_repo_issues)
    t = loader.get_template('Issues/view_issues_2.html')
    c = Context({
        'error': error,
        'issue_items': issue_items,
        'GITHUB_ORG_REPO_NAME': api_repo_name,
    })
    return HttpResponse(t.render(c))
Example #47
def get_top_three_genres(animes): 
    data = animes["data"]
    # add genre to dictionary if the genre is not in dictionary keys, otherwise increase value by 1
    dict_of_genres = {} # {"Shounen": 5, "Adventure": 10}
    for a in data:
        if a["score"] < 0:
            continue
        for g in a["genres"]:
            if g not in dict_of_genres.keys():
                dict_of_genres[g] = {}
                dict_of_genres[g]["animes"] = []
                dict_of_genres[g]["occurrences"] = 1
            else:
                dict_of_genres[g]["occurrences"] += 1
            #if this anime is within the top 20? 30?
            dict_of_genres[g]["animes"].append(a)
    dict_of_genres = OrderedDict(sorted(dict_of_genres.items(), key=lambda x: getitem(x[1], 'occurrences'), reverse=True))
    top_three = dict(itertools.islice(dict_of_genres.items(), 3))
    return top_three
Example #48
    def get(self, request):
        link = request.GET.get('link', None)
        seller_slug = link.split('/')[-3]  # third-from-last path segment

        try:
            user = Seller.objects.get(slug=seller_slug)
            userData = SellerSerializer(user)
            userId = int(userData.data['id'])

            #TODO: Explore a more efficient approach, if time permits
            with connection.cursor() as cursor:
                cursor.execute(
                    "select sp.*, CC.name as categ_name, CC.count from seller_product sp inner join (select sp1.category_id, sc.name, count(category_id) from seller_product sp1 inner join seller_category sc on sp1.category_id = sc.id where sp1.seller_id = %s group by sp1.category_id, sc.name) as CC on CC.category_id = sp.category_id where sp.seller_id = %s order by CC.count",
                    [userId, userId]  # the original hard-coded seller_id = 1 and never used userId
                )
                desc = [
                    dict(zip([col[0] for col in cursor.description], row))
                    for row in cursor.fetchall()
                ]
                dict_x = {}
                for item in desc:
                    value = {
                        'name': item['name'],
                        'description': item['description'],
                        'id': item['id']
                    }
                    try:
                        dict_x[item['categ_name']]['count'] = item['count']
                        dict_x[item['categ_name']]['items'].append(value)
                        values = dict_x[item['categ_name']]
                    except KeyError:
                        dict_x[item['categ_name']] = {
                            'count': item['count'],
                            'items': [value]
                        }
                data = OrderedDict(
                    sorted(dict_x.items(),
                           key=lambda x: getitem(x[1], 'count'),
                           reverse=True))
            return JsonResponse(data, status=200, safe=False)
        except Seller.DoesNotExist:
            return HttpResponse(status=400)

        return HttpResponse(status=500)
Example #49
def get_statement_code_ids(statement_code_value):
    results = {}
    if "fundamentals" not in json_data:
        # if fundamentals key is not part of the JSON
        # check https://api-v2.intrinio.com/fundamentals/ID/reported_financials
        print("Cannot get statement for ID", statement_code_value)
        pprint(json_data)
        return None
    for sub in json_data["fundamentals"]:
        key, value = 'statement_code', statement_code_value
        if key in sub and value == sub[key]:
            # don't get the YTD, QxYTD or QxTTM
            if len(sub["fiscal_period"]) == 2:
                results[sub["id"]] = {}
                results[sub["id"]]["fiscal_year"] = sub["fiscal_year"]
                results[sub["id"]]["fiscal_period"] = sub["fiscal_period"]
    # have the dictionary sorted
    product_list = OrderedDict(
        sorted(results.items(), key=lambda k: getitem(k[1], 'fiscal_year')))
    return product_list
Example #50
    def filtering(self):
        global globalOrder
        globalOrder = []
        vD = {}
        combine = []

        # Replace special characters with a space
        # These can crash the search
        filterText = self.values['videoFilter'].translate(
            {ord(c): " "
             for c in "!@#$%^&*()[]{};:,./<>?\\|`~-=_+"})

        videos = self.db.search(
            (self.link.videoId.search(filterText, flags=re.IGNORECASE))
            | (self.link.videoId.search(filterText[-11:], flags=re.IGNORECASE))
            |  # For youtube URL
            (self.link.title.search(filterText, flags=re.IGNORECASE))
            | (self.link.uploader.search(filterText, flags=re.IGNORECASE)))

        for e, x in enumerate(videos):
            # Converting order to float to fix sorting
            vD[e + 1] = {
                'videoId': x['videoId'],
                'duration': x['duration'],
                'title': x['title'],
                'order': float(x['order'])
            }

        for i in OrderedDict(
                sorted(vD.items(), key=lambda x: getitem(x[1], 'order'))):
            combine.append(
                f"{vD[i]['videoId']} - {vD[i]['duration']} - {vD[i]['title']}")
            # Convert float -> int -> str
            # Variable used for manual sorting
            globalOrder.append(str(int(vD[i]['order'])))

        # Quick Shuffle
        if self.shufflePlaylist is True:
            random.shuffle(combine)

        return combine
Example #51
    def viewData(self):
        global globalOrder
        globalOrder = []
        vD = {}
        combine = []

        try:
            # Add all videos in db to a dictionary
            for e, x in enumerate(self.db):
                # Converting order to float to fix sorting
                vD[e + 1] = {
                    'videoId': x['videoId'],
                    'duration': x['duration'],
                    'title': x['title'],
                    'order': float(x['order'])
                }

            # Sort dictionary based on key and add to a list
            for i in OrderedDict(
                    sorted(vD.items(), key=lambda x: getitem(x[1], 'order'))):
                # print(vD[i]['title'])
                # print(vD[i]['order'])
                combine.append(
                    f"{vD[i]['videoId']} - {vD[i]['duration']} - {vD[i]['title']}"
                )
                # Convert float -> int -> str
                # Variable used for manual sorting
                globalOrder.append(str(int(vD[i]['order'])))

            # Quick Shuffle
            if self.shufflePlaylist is True:
                random.shuffle(combine)

        # In case of missing video order information run script
        except KeyError:
            print("Database is corrupt")
            # runScript(2)
            # viewData(db)
            return None

        return combine
Example #52
def covidHome():
    list_states = lines
    totaldictionary = {}
    try:
        get_state = request.args.get('state', default='Tamil Nadu')
        covid19 = covid(get_state, None)
        data = covid19.getStateData()["districtData"]
        # Data Sorting as per the number of active cases
        sorted_data = OrderedDict(
            sorted(data.items(),
                   key=lambda x: getitem(x[1], 'active'),
                   reverse=True))
        totaldictionary = covid19.totalstats(data)
        return render_template("covidtable.html",
                               Statedata=sorted_data,
                               State=get_state,
                               totalstats=totaldictionary,
                               Statelist=list_states)
    except Exception as e:
        print(e)
        return render_template("error-404.html")
Example #53
def get_completed_tasks(
        task_client: TaskWarrior, start_date: date,
        end_date: typing.Optional[date]) -> typing.List[task.Task]:
    """
    Returns a list of completed tasks between a start and end date
    """
    ret_tasks = []
    logger.debug("Fetching list of completed tasks.")
    completed_tasks = task_client.filter_tasks(
        filter_dict={"status":
                     Status.COMPLETED})  # type: typing.List[typing.Dict]
    for completed_task in completed_tasks:  # "task" would shadow the imported task module
        # parse completed on
        completed_on = completed_task.get("end")
        # completed on in window, then append
        if all([
                completed_on.date() >= start_date,
                completed_on.date() <= end_date
        ]):
            ret_tasks.append(completed_task)
    return sorted(ret_tasks, key=lambda x: operator.getitem(x, "end"))
Example #54
 def get_at(self, path: Union[str, int], convert: bool = True) -> Optional[Union['Configuration', Any]]:
     """Returns Configuration branch at given address
     Args:
         path (Union[str,int]): path to get
         convert (Boolean): (deprecated) Embed target into Configuration object if if target element is an iterable
     Returns:
         [type]: [description]
     """
     try:
         if type(path) == int:
             res = operator.getitem(self._config_object, path)
         else:
             res = reduce(operator.getitem, path.split('.'), self._config_object)
         # if convert and ( type(res) == dict or type(res) == list):
         #     res = self._to_config_object(res)
     except (KeyError, TypeError) as e:
         return None
     
     if isinstance(res, Configuration) and self._is_native(res._config_object):
         return res.as_dict()
     return res
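The heart of get_at is the reduce(operator.getitem, ...) walk over the dotted path; the same trick works on a bare dict, independent of the Configuration wrapper:

import operator
from functools import reduce

config = {'db': {'host': 'localhost', 'ports': [5432]}}
assert reduce(operator.getitem, 'db.host'.split('.'), config) == 'localhost'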
Example #55
 def _permsForSwiping(self):
     """Return the correct permutations of blocks for all swiping direction.
     """
     # We use an identity permutation to generate the permutations from by
     # slicing correctly.
     identity = scipy.array(list(range(self.sequenceLength)))
     # floor division: shape entries must be ints (true division yields floats on Python 3)
     identity.shape = tuple(s // b for s, b in zip(self.shape, self.blockshape))
     permutations = []
     # Loop over all possible directions: from each corner to each corner
     for direction in crossproduct([('+', '-')] * self.timedim):
         axises = []
         for _, axisdir in enumerate(direction):
             # Use a normal complete slice for forward...
             if axisdir == '+':
                 indices = slice(None, None, 1)
             # ...and a reversed complete slice for backward
             else:
                 indices = slice(None, None, -1)
             axises.append(indices)
         # modern NumPy requires a tuple, not a list, for multi-axis indexing
         permutations.append(operator.getitem(identity, tuple(axises)).flatten())
     return permutations
Example #56
    def test_getitem_float(self):
        trace = self.sdk.current_trace
        td = datetime.timedelta(seconds=1)
        self.assertEqual(operator.getitem(trace, td), [])

        start_time = datetime.datetime.utcnow()

        spans = []
        end_times = []

        for index, _ in enumerate(range(10)):
            end_time = start_time + datetime.timedelta(seconds=index + 1)
            end_times.append(datetime_to_float(end_time))
            spans.append(trace.span(start_time=start_time, end_time=end_time))

        start = datetime_to_float(start_time)

        for t in range(10):
            stop = end_times[t]
            trace_spans = trace[start:stop]
            self.assertEqual(spans[:t], trace_spans)
Example #57
def guide(total_points, graph_period):
    """Gerenate guide line with dayoffs."""

    def weekday(day):
        return not day.weekday() in (5, 6)

    days = [(day, weekday(day)) for day in graph_period]
    n_days = len(list(filter(lambda x: getitem(x, 1), days)))

    median_points = total_points / (n_days - 1)
    last_value = total_points
    out = []

    for n, (date, valid) in enumerate(days):
        if valid and n != 0:
            last_value = last_value - median_points
            out.append(last_value)
        else:
            out.append(last_value)

    return out
Example #58
def create_sort_metadata_ajax(property_qualifiers_stats_file):
    pstats = json.load(open(property_qualifiers_stats_file))

    sort_metadata = {}
    for property in pstats:
        key = property
        p_datatype = pstats[property]['datatype']
        qualifiers = pstats[property].get('qualifiers', {})
        if not qualifiers:
            sort_metadata[key] = {
                'sort_by': sort_order_dict[p_datatype],
                'value_counts': pstats[key]['value_counts'],
                'property_datatype': p_datatype,
                'qratio': 0.0
            }
        else:
            top_qualifier = \
                sorted(qualifiers.items(), key=lambda x: operator.getitem(x[1], 'value_counts'), reverse=True)[0]
            sort_metadata[key] = {
                'qualifier': top_qualifier[0],
                'sort_by': sort_order_dict[top_qualifier[1]['datatype']],
                'qualifier_value_counts': top_qualifier[1]['value_counts'],
                'property_value_counts': pstats[key]['value_counts'],
                'property_datatype': p_datatype,
                'qualifier_datatype': top_qualifier[1]['datatype'],
                'qratio': (float(top_qualifier[1]['value_counts']) /
                           float(pstats[key]['value_counts']))
            }

    # sorted_by_q_ratio = sorted(sort_metadata.items(), key=lambda x: operator.getitem(x[1], 'qratio'), reverse=True)

    return sort_metadata
Example #59
    def get(self, *args, **kwargs):
        '''return base page with form to select an order to import
        '''
        if not self.request.user.is_staff or not self.request.user.is_superuser:
            messages.warning(self.request, "You don't have permission to see this view.")
            return redirect('dashboard')

        # Get all current shipments
        shipments = get_shippo_shipments()

        # Get set of shipments we currently have as orders
        haves = set()
        for order in Order.objects.all():
  
            # These should be the same
            haves.add(order.label.get('object_id'))
            haves.add(order.transaction.get('object_id'))

        haves.discard(None)  # discard(), unlike remove(), is safe when None is absent

        results = {}
        for s in shipments:
            if s['object_id'] not in haves and s['status'] == "SUCCESS" and not s['test'] and s.get('order') is not None:
                results[s['object_id']] = {"name": s['address_to']['name'],
                                           "address": s['address_to']['street1'],
                                           "created": s['object_created']}

        # Sort by date
        results = OrderedDict(sorted(results.items(), 
                              key = lambda x: getitem(x[1], 'created'), reverse=True))

        # only consider Awaiting Countersign and Generating Label
        statuses = ['Awaiting Countersign', 'Generating Label']

        # Add distributions
        context = {'shipments': results,
                   'distributions': Distribution.objects.all(),
                   'orders': Order.objects.filter(status__in=statuses)}

        return render(self.request, "shipping/import_shippo.html", context)
Example #60
 def get_selenzyme_annotation(self, rpsbml_path: str) -> Dict:
     rpsbml = rpSBML(str(rpsbml_path))
     pathway = rpPathway.from_rpSBML(rpsbml=rpsbml)
     for idx_rxn, rxn_id in enumerate(pathway.get_reactions_ids()):
         # Stop if too many reactions
         if idx_rxn > self._max_rxn_per_construct:
              raise ValueError(
                  f'Number of reactions exceeds the allowed number of '
                  f'enzymes: {self._max_rxn_per_construct}. Execution cancelled.'
              )
         #
         rxn = pathway.get_reaction(rxn_id)
         enzymes = rxn.get_selenzy()
         # Stop if no enzyme available
         if len(enzymes) == 0:
             raise ValueError(
                 f'Missing UniProt IDs from selenzyme annotation for '
                 f'reaction {rxn_id}. Execution cancelled.')
         # Collect enzyme ordered by score, the first is the best
         for idx_enz, enz in enumerate(sorted(
                 enzymes.items(),
                 key=lambda x: getitem(x[1], 'score'),
                 reverse=True),
                                       start=1):
             # Skip worst enzyme if too many
             if idx_enz > self._max_enz_per_rxn:
                 logging.warning(
                     f'Max number of enzyme per reaction reached ({self._max_enz_per_rxn}) '
                     f'for reaction {rxn_id}. Only the best one(s) are kept.'
                 )
                 break
             uniprot_id, _ = enz
             if uniprot_id in self._parts:
                 self._parts[uniprot_id].cds_steps.append(rxn_id)
             else:
                 self._parts[uniprot_id] = Part(id=uniprot_id,
                                                basic_role='part',
                                                biological_role='cds',
                                                cds_steps=[rxn_id],
                                                seq='atgc')