Example #1
from functools import partial

from toolz import identity, mapcat


def test_mapcat():
    assert (list(mapcat(identity, [[1, 2, 3], [4, 5, 6]])) ==
            [1, 2, 3, 4, 5, 6])

    assert (list(mapcat(reversed, [[3, 2, 1, 0], [6, 5, 4], [9, 8, 7]])) ==
            list(range(10)))

    inc = lambda i: i + 1
    assert ([4, 5, 6, 7, 8, 9] ==
            list(mapcat(partial(map, inc), [[3, 4, 5], [6, 7, 8]])))
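For reference, mapcat(func, seqs) maps func over a sequence of sequences and
concatenates the results into a single lazy sequence. A minimal sketch of
equivalent behavior (not toolz's actual implementation):

import itertools

def mapcat_sketch(func, seqs):
    # Apply func to each subsequence, then flatten the results one level.
    return itertools.chain.from_iterable(map(func, seqs))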
Example #2
def get_taxid(db, seqids):  # (Path, list[str]) -> dict[str, str]
    #res = sh.blastdbcmd(db=db, outfmt="'%g %T'", entry="'%s'" % seqid, _long_prefix='-')
    # batch the ids so a single blastdbcmd invocation's -entry list stays short
    max_ids = 1000 // 80
    if len(seqids) > max_ids:
        xs = [seqids[i * max_ids:(i + 1) * max_ids]
              for i in range(len(seqids) // max_ids)]
        rest = seqids[sum(map(len, xs)):]  # ids left over after the full batches
        if rest:
            xs.append(rest)
    else:
        xs = [seqids]
    res = mapcat(
        lambda x: blastdbcmd(
            db=db, outfmt="'%g %T'", entry="'%s'" % ','.join(x)), xs)
    #res = blastdbcmd(db=db, outfmt="'%g %T'", entry="'%s'" % ','.join(xs))
    res = filter(bool, res)
    res = map(lambda s: s.strip("'"), res)
    return dict(map(str.split, res))
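The manual batching above can be written more directly with toolz.partition_all,
which chunks a sequence into tuples of at most n items, remainder included. A
hypothetical variant of the same function:

from toolz import mapcat, partition_all

def get_taxid_batched(db, seqids):
    # partition_all handles the remainder chunk, avoiding the off-by-one
    # risk of slicing by hand.
    batches = partition_all(1000 // 80, seqids)
    res = mapcat(
        lambda x: blastdbcmd(
            db=db, outfmt="'%g %T'", entry="'%s'" % ','.join(x)), batches)
    return dict(s.strip("'").split() for s in res if s)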
Example #3
def converge_launch_server(desired_state, servers_with_cheese,
                           load_balancer_nodes, load_balancers,
                           now, timeout=3600):
    """
    Create steps that indicate how to transition from the state provided
    by the given parameters to the :obj:`DesiredServerGroupState` described by
    ``desired_state``.

    :param DesiredServerGroupState desired_state: The desired group state.
    :param set servers_with_cheese: a set of :obj:`NovaServer` instances.
        This must only contain servers that are being managed for the
        specified group.
    :param load_balancer_nodes: a set of :obj:`ILBNode` providers. This
        must contain all the load balancer mappings for all the load balancers
        (of all types) on the tenant.
    :param dict load_balancers: Mapping of load balancer objects keyed by
        ID. The objects are opaque and are not used by the planner
        directly; they are intended to carry extra info for a specific LB
        provider.
    :param float now: number of seconds since the POSIX epoch indicating the
        time at which the convergence was requested.
    :param float timeout: Number of seconds after which we will delete a server
        in BUILD.
    :rtype: :obj:`pbag` of `IStep`

    """
    newest_to_oldest = sorted(servers_with_cheese, key=lambda s: -s.created)

    servers = defaultdict(list, groupby(get_destiny, newest_to_oldest))
    servers_in_active = servers[Destiny.CONSIDER_AVAILABLE]

    building_too_long, waiting_for_build = partition_bool(
        lambda server: now - server.created >= timeout,
        servers[Destiny.WAIT_WITH_TIMEOUT])

    create_server = CreateServer(server_config=desired_state.server_config)

    # delete any servers that have been building for too long
    delete_timeout_steps = [DeleteServer(server_id=server.id)
                            for server in building_too_long]

    # create servers
    create_steps = [create_server] * (
        desired_state.capacity - (
            len(servers_in_active) +
            len(waiting_for_build) +
            len(servers[Destiny.WAIT]) +
            len(servers[Destiny.AVOID_REPLACING])))

    # Scale down over capacity, starting with building, then WAIT, then
    # AVOID_REPLACING, then active, preferring older.  Also, finish
    # draining/deleting servers already in draining state
    servers_in_preferred_order = (
        servers_in_active +
        servers[Destiny.AVOID_REPLACING] +
        servers[Destiny.WAIT] +
        waiting_for_build)
    servers_to_delete = servers_in_preferred_order[desired_state.capacity:]

    def drain_and_delete_a_server(server):
        return _drain_and_delete(
            server,
            desired_state.draining_timeout,
            [node for node in load_balancer_nodes if node.matches(server)],
            now)

    try:
        scale_down_steps = list(
            mapcat(drain_and_delete_a_server,
                   servers_to_delete + servers[Destiny.DRAIN]))
    except DrainingUnavailable as de:
        return pbag([fail_convergence(de)])

    # delete all servers in error - draining does not need to be
    # handled because servers in error presumably are not serving
    # traffic anyway
    delete_error_steps = [DeleteServer(server_id=server.id)
                          for server in servers[Destiny.DELETE]]

    # clean up all the load balancers from deleted and errored servers
    cleanup_errored_and_deleted_steps = [
        remove_node_from_lb(lb_node)
        for server in servers[Destiny.DELETE] + servers[Destiny.CLEANUP]
        for lb_node in load_balancer_nodes if lb_node.matches(server)]

    # converge all the servers that remain to their desired load balancer state
    still_active_servers = filter(lambda s: s not in servers_to_delete,
                                  servers_in_active)
    try:
        lb_converge_steps = [
            step
            for server in still_active_servers
            for step in _converge_lb_state(
                server,
                [node for node in load_balancer_nodes if node.matches(server)],
                load_balancers,
                now,
                # Temporarily using build timeout as node offline timeout.
                # See https://github.com/rackerlabs/otter/issues/1905
                timeout)
            ]
    except DrainingUnavailable as de:
        return pbag([fail_convergence(de)])

    # Converge again if we expect state transitions on any servers
    converge_later = []
    if any(s not in servers_to_delete for s in waiting_for_build):
        converge_later = [
            ConvergeLater(reasons=[ErrorReason.String('waiting for servers')])]

    unavail_fmt = ('Waiting for server {server_id} to transition to ACTIVE '
                   'from {status}')
    reasons = [ErrorReason.UserMessage(unavail_fmt.format(server_id=s.id,
                                                          status=s.state.name))
               for s in servers[Destiny.WAIT] if s not in servers_to_delete]
    if reasons:
        converge_later.append(ConvergeLater(limited=True, reasons=reasons))

    return pbag(create_steps +
                scale_down_steps +
                delete_error_steps +
                cleanup_errored_and_deleted_steps +
                delete_timeout_steps +
                lb_converge_steps +
                converge_later)
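partition_bool above is a helper from the surrounding codebase; judging by how
its result is unpacked, it splits a sequence by a predicate. A minimal sketch
of that assumed behavior:

def partition_bool(pred, seq):
    # Assumed: return (items where pred holds, items where it does not).
    trues, falses = [], []
    for item in seq:
        (trues if pred(item) else falses).append(item)
    return trues, falses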
Example #4
from glob import glob
from functools import partial as P

from toolz import compose, mapcat

SEGMENTS = ('HA', 'MP', 'NA', 'NP', 'NS', 'PA', 'PB1', 'PB2')
print('|'.join(map('({0})'.format, SEGMENTS)))

alns = glob('fastas/*.fas')
sdf = fastas2df(alns)
sdf.tail()

#map(lambda x: x[-1], filter(lambda x: len(x) > 1, map(str.split, map(str, mdf.columns))))
#mdf[mdf.SequenceName.str.startswith('>A/Wisconsin')]
#sdf.loc['>A/Wisconsin/18/2011']

all_segments = lambda df, s: df.loc[s]

def fetch_segment(df, id, segment):
    return df[(df.index == id) & (df.segment == segment)]

def get_fields(df, fields):
    return [df[f].values[0] for f in fields]

extract = lambda df: (df.index.values[0], df.seq.values[0])
to_fasta = compose('\n'.join, P(get_fields, fields=('id', 'seq')))
#TODO: figure out how to get index speed for id
#      without having to declare id as an index,
#      so it can be accessed like the other fields.

fetch_segment(sdf, '>A/Wisconsin/18/2011', 'HA')
res = all_segments(sdf, '>A/Wisconsin/18/2011')
# there should be an entry for each segment in the sequence dataframe
assert len(res) == 8

# FASTA files alternate id and sequence lines, so even line indices are ids
xs = mapcat(open, glob('fastas/*.fas'))
ids = [s for i, s in enumerate(xs) if i % 2 == 0]
sdf[sdf.index.str.startswith('>A/Wisconsin')]
len(sdf[sdf.index.str.startswith('>A/Wisconsin')].segment.values)
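fastas2df is not defined in this snippet. A minimal sketch of what it might
look like, assuming two-line FASTA records and a DataFrame indexed by the
header line with seq and segment columns (the segment guessed from the file
name, since the code filters on sdf.segment):

import pandas as pd
from os.path import basename, splitext

def fastas2df(paths):
    # Hypothetical reconstruction: one row per FASTA record.
    records = []
    for path in paths:
        with open(path) as fh:
            lines = [ln.strip() for ln in fh if ln.strip()]
        for header, seq in zip(lines[::2], lines[1::2]):
            records.append({'id': header,
                            'segment': splitext(basename(path))[0],
                            'seq': seq})
    return pd.DataFrame(records).set_index('id')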

Example #5
from toolz import itertoolz as itz  # assumed: itz aliases toolz.itertoolz

def get_all_children(branch):
    children = get_children(branch)
    return list(itz.mapcat(get_all_children,
                           strip_upstreams(children))) + children
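get_children and strip_upstreams come from the surrounding codebase. A toy run
with hypothetical stand-ins, treating a branch as a dict of name -> child
branches:

tree = {'feat': {'feat-2': {}}, 'fix': {}}
get_children = lambda branch: list(branch.values())
strip_upstreams = lambda children: children  # no-op in this toy example
print(get_all_children(tree))  # every descendant branch, grandchildren first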
Example #6
import random

from toolz import mapcat, second, take

def sample_images(xs):
    # `limit` comes from the enclosing scope in the original code; xs
    # presumably holds (ad, images) pairs, since `second` pulls each
    # sampled ad's image list before flattening.
    ads = random.sample(xs, min(limit, len(xs)))
    images = mapcat(second, ads)
    return list(take(limit, images))
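A usage sketch, assuming xs holds (ad, images) pairs and limit is set in the
enclosing scope:

limit = 3
xs = [('ad1', ['a.png', 'b.png']), ('ad2', ['c.png']), ('ad3', ['d.png'])]
print(sample_images(xs))  # at most 3 image paths from the sampled ads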