Example #1
def vpss_from_cm(cm):
    return (set(redis_shell.lrange(cm + ':vpss', 0, -1))
            | set([
                entry.split('*')[0]
                for entry in redis_shell.lrange(cm + ':destroyq', 0, -1)
                if not entry.startswith('-1')
            ]))
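The only redis call here is lrange over two lists; the rest is string parsing. A minimal illustration of the ':destroyq' parsing above, using made-up entries (the exact entry format, a name and extra data joined by '*', is an assumption; entries whose name field is '-1' are treated as placeholders and skipped):

destroyq = ['fp-example-001*1467158400', '-1*1467158400']  # hypothetical entries
pending_destroy = set(entry.split('*')[0]
                      for entry in destroyq
                      if not entry.startswith('-1'))
assert pending_destroy == set(['fp-example-001'])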
Example #2
def filter_queue(qname, fn):
    bakname = qname + '.bak'
    redis_shell.rename(qname, bakname)
    p = redis_shell.pipeline()
    for entry in redis_shell.lrange(bakname, 0, -1):
        p.lpush(qname, fn(entry))
    p.execute()
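Because lpush prepends, rebuilding the queue this way reverses its order relative to the backup (fix_queue further down uses rpush and preserves order). A hedged usage sketch, assuming redis_shell is the module-level redis client these examples share and that the queue holds plain string entries; the key name is only illustrative:

# Hypothetical call: strip stray whitespace from every entry of a queue.
filter_queue('doams3:srvq', lambda entry: entry.strip())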
Example #3
def actually_retire_proxy(name, ip, srv=None, pipeline=None):
    """
    While retire_proxy just enqueues the proxy for retirement, this actually
    updates the redis tables.
    """
    name, ip, srv = nameipsrv(name=name, ip=ip, srv=srv)
    cm = cm_by_name(name)
    region = region_by_name(name)
    txn = pipeline or redis_shell.pipeline()
    if srv:
        actually_close_proxy(name, ip, srv, txn)
        txn.hdel('srv->cfg', srv)
        txn.hdel('server->config', name)
        txn.hdel('srv->name', srv)
        txn.hdel('srv->srvip', srv)
        txn.hdel('name->srv', name)
        txn.hdel('srvip->srv', ip)
        # For debugging purposes; we can delete these anytime if they're a
        # space problem.
        txn.hset('history:srv->name', srv, name)
        txn.hset('history:name->srv', name, srv)
        txn.hset('history:srv->srvip', srv, ip)
        # An IP may be used by multiple servers through history.
        txn.rpush('history:srvip->srv:%s' % ip, srv)
        txn.incr('srvcount')
    else:
        print "No configs left to delete for %s." % name
    # Check whether this server is in the queue (because of recycling).
    for cfg in redis_shell.lrange(region + ':srvq', 0, -1):
        if cfg.split('|')[0] == ip:
            txn.lrem(region + ':srvq', cfg)
    txn.lrem(cm + ':vpss', name)
    txn.incr(cm + ':vpss:version')
    if txn is not pipeline:
        txn.execute()
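The optional pipeline argument lets a caller fold this retirement into a larger transaction: when a pipeline is passed in, the commands are only queued and the caller commits them, while the `txn is not pipeline` check makes the function execute only a pipeline it created itself. A sketch of both call styles, with hypothetical proxy names and IPs:

# Standalone: the function builds and executes its own pipeline.
actually_retire_proxy('fp-example-001', '203.0.113.10')

# Batched: queue several retirements on one pipeline and commit once.
p = redis_shell.pipeline()
actually_retire_proxy('fp-example-001', '203.0.113.10', pipeline=p)
actually_retire_proxy('fp-example-002', '203.0.113.11', pipeline=p)
p.execute()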
Example #4
def filter_queue(qname, fn):
    bakname = qname + '.bak'
    redis_shell.rename(qname, bakname)
    p = redis_shell.pipeline()
    for entry in redis_shell.lrange(bakname, 0, -1):
        p.lpush(qname, fn(entry))
    p.execute()
Example #5
def retire_lcs(name,
               ip,
               # It's safe to cache this because at least 24h will elapse
               # between the time a proxy is recycled (and thus new server IDs
               # can be entered for it) and the time it's destroyed. To be more
               # precise, 24h must elapse since the time it was _split_. For
               # this to work, it's crucial to remove the
               # /home/lantern/server_split flag file whenever we recycle
               # proxies.
               byip=util.Cache(timeout=60*60,
                               update_fn=srv_cfg_by_ip)):
    cm = cm_by_name(name)
    region = region_by_name(name)
    srvs = byip.get().get(ip, (None, []))[1]
    txn = redis_shell.pipeline()
    if srvs:
        scores = [redis_shell.zscore(region + ':slices', srv) for srv in srvs]
        pairs = {"<empty:%s>" % int(score): score
                 for score in scores
                 if score}
        if pairs:
            txn.zadd(region + ":slices", **pairs)
            txn.zrem(region + ":slices", *srvs)
        txn.hdel('srv->cfg', *srvs)
        txn.incr('srvcount')
    else:
        print "No configs left to delete for %s." % name
    # Check whether this server is in the queue (because of recycling).
    for cfg in redis_shell.lrange(region + ':srvq', 0, -1):
        if cfg.split('|')[0] == ip:
            txn.lrem(region + ':srvq', cfg)
    txn.lrem(cm + ':vpss', name)
    txn.incr(cm + ':vpss:version')
    txn.execute()
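The byip default argument doubles as a per-process cache: util.Cache is constructed once, when the function is defined, so calls within the timeout reuse one srv_cfg_by_ip() result. util.Cache itself isn't shown in these snippets; the following is only a guess at its interface, a minimal stand-in that refreshes lazily on get():

import time

class Cache(object):
    """Hypothetical stand-in for util.Cache: memoize update_fn() for
    `timeout` seconds and refresh lazily when get() is called."""

    def __init__(self, timeout, update_fn):
        self.timeout = timeout
        self.update_fn = update_fn
        self.value = None
        self.fetched_at = None

    def get(self):
        now = time.time()
        if self.fetched_at is None or now - self.fetched_at > self.timeout:
            self.value = self.update_fn()
            self.fetched_at = now
        return self.value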
Example #6
def actually_retire_proxy(name, ip, pipeline=None):
    """
    While retire_proxy just enqueues the proxy for retirement, this actually
    updates the redis tables.
    """
    name, ip, srv = nameipsrv(name=name, ip=ip)
    cm = cm_by_name(name)
    region = region_by_name(name)
    txn = pipeline or redis_shell.pipeline()
    if srv:
        actually_close_proxy(name, ip, srv, txn)
        txn.hdel('srv->cfg', srv)
        txn.hdel('server->config', name)
        txn.hdel('srv->name', srv)
        txn.hdel('srv->srvip', srv)
        txn.hdel('name->srv', name)
        txn.hdel('srvip->srv', ip)
        # For debugging purposes; we can delete these anytime if they're a
        # space problem.
        txn.hset('history:srv->name', srv, name)
        txn.hset('history:name->srv', name, srv)
        txn.hset('history:srv->srvip', srv, ip)
        # An IP may be used by multiple servers through history.
        txn.rpush('history:srvip->srv:%s' % ip, srv)
        txn.incr('srvcount')
    else:
        print "No configs left to delete for %s." % name
    # Check whether this server is in the queue (because of recycling).
    for cfg in redis_shell.lrange(region + ':srvq', 0, -1):
        if cfg.split('|')[0] == ip:
            txn.lrem(region + ':srvq', cfg)
    txn.lrem(cm + ':vpss', name)
    txn.incr(cm + ':vpss:version')
    if txn is not pipeline:
        txn.execute()
Example #7
def retire_lcs(name,
               ip,
               byip=util.Cache(timeout=60*60,
                               update_fn=srv_cfg_by_ip)):
    cm = cm_by_name(name)
    region = region_by_name(name)
    srvs = byip.get().get(ip, (None, []))[1]
    txn = redis_shell.pipeline()
    if srvs:
        scores = [redis_shell.zscore(region + ':slices', srv) for srv in srvs]
        pairs = {"<empty:%s>" % int(score): score
                 for score in scores
                 if score}
        if pairs:
            txn.zadd(region + ":slices", **pairs)
            txn.zrem(region + ":slices", *srvs)
        txn.hdel('srv->cfg', *srvs)
        txn.incr('srvcount')
    else:
        print "No configs left to delete for %s." % name
    # Check whether this server is in the queue (because of recycling).
    for cfg in redis_shell.lrange(region + ':srvq', 0, -1):
        if cfg.split('|')[0] == ip:
            txn.lrem(region + ':srvq', cfg)
    txn.lrem(cm + ':vpss', name)
    txn.incr(cm + ':vpss:version')
    txn.execute()
Example #8
def proxy_status(name=None, ip=None, srv=None):
    name, _, srv = nameipsrv(name, ip, srv)
    if srv is None:
        if name is not None:
            region = region_by_name(name)
            for qentry in redis_shell.lrange(region + ':srvq', 0, -1):
                if qentry.split('|')[1] == name:
                    return 'enqueued'
        return 'baked-in'
    elif redis_shell.zscore(region_by_name(name) + ':slices', srv) is None:
        return 'closed'
    else:
        return 'open'
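The ':srvq' entries consulted here are '|'-separated ip|name|cfg strings, the same format fix_queue and srvq_integrity below take apart, so field [1] is the proxy name. A small illustration with a made-up entry:

entry = '203.0.113.10|fp-example-001|cfg-goes-here'  # hypothetical; the real cfg is a long blob
ip, name, cfg = entry.split('|')
assert name == 'fp-example-001'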
Example #9
def vpss_from_cm(cm):
    try:
        local_version = file(cm + "_vpss_version").read()
    except IOError:
        local_version = None
    remote_version = redis_shell.get(cm + ":vpss:version")
    if local_version == remote_version:
        return set(map(str.strip, file(cm + "_vpss")))
    else:
        ret = redis_shell.lrange(cm + ":vpss", 0, -1)
        file(cm + "_vpss", "w").write("\n".join(ret))
        file(cm + "_vpss_version", "w").write(remote_version)
        return set(ret)
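This variant keeps a local file cache of the ':vpss' list and refetches only when the ':vpss:version' counter has moved; that is the same counter the retire functions above bump with incr(cm + ':vpss:version'). A sketch of the writer side of that contract, with a hypothetical key prefix and VPS name (rpush here stands in for whatever actually registers the VPS):

# Any mutation of the list must also bump the version so cached readers refresh.
p = redis_shell.pipeline()
p.rpush('doams3:vpss', 'fp-example-001')  # hypothetical registration
p.incr('doams3:vpss:version')
p.execute()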
Example #10
def vpss_from_cm(cm):
    try:
        local_version = file(cm + '_vpss_version').read()
    except IOError:
        local_version = None
    remote_version = redis_shell.get(cm + ':vpss:version')
    if local_version == remote_version:
        return set(map(str.strip, file(cm + '_vpss')))
    else:
        ret = redis_shell.lrange(cm + ':vpss', 0, -1)
        file(cm + '_vpss', 'w').write('\n'.join(ret))
        file(cm + '_vpss_version', 'w').write(remote_version)
        return set(ret)
Example #11
def vpss_from_cm(cm):
    try:
        local_version = file(cm + '_vpss_version').read()
    except IOError:
        local_version = None
    remote_version = redis_shell.get(cm + ':vpss:version')
    if local_version == remote_version:
        return set(map(str.strip, file(cm + '_vpss')))
    else:
        ret = redis_shell.lrange(cm + ':vpss', 0, -1)
        file(cm + '_vpss', 'w').write('\n'.join(ret))
        file(cm + '_vpss_version', 'w').write(remote_version)
        return set(ret)
Example #12
def proxy_status(name=None, ip=None, srv=None):
    name, _, srv = nameipsrv(name, ip, srv)
    if srv is None:
        if name is not None:
            region = region_by_name(name)
            for qentry in redis_shell.lrange(region + ':srvq', 0, -1):
                if qentry.split('|')[1] == name:
                    return 'enqueued'
        return 'baked-in'
    elif redis_shell.zscore(region_by_name(name) + ':slices', srv) is None:
        return 'closed'
    else:
        return 'open'
Example #13
def fix_queue(qname, fix_fn):
    # The backup is left around just in case you bork something.
    # Delete it manually or by calling `delete_q_backups` after
    # making sure everything went well.
    print "handling %s..." % qname
    bakname = qname + '.bak'
    redis_shell.rename(qname, bakname)
    p = redis_shell.pipeline()
    for entry in redis_shell.lrange(bakname, 0, -1):
        ip, name, cfg = entry.split('|')
        needs_fixing, good_cfg = fix_fn(cfg)
        if needs_fixing:
            print "enqueued proxy %s needs fixing" % name
            entry = '|'.join([ip, name, good_cfg])
        p.rpush(qname, entry)
    print "fixing enqueued proxies..."
    p.execute()
    print "Done with %s" % qname
Example #14
def fix_queue(qname, fix_fn):
    # The backup is left around just in case you bork something.
    # Delete it manually or by calling `delete_q_backups` after
    # making sure everything went well.
    print "handling %s..." % qname
    bakname = qname + '.bak'
    redis_shell.rename(qname, bakname)
    p = redis_shell.pipeline()
    for entry in redis_shell.lrange(bakname, 0, -1):
        ip, name, cfg = entry.split('|')
        needs_fixing, good_cfg = fix_fn(cfg)
        if needs_fixing:
            print "enqueued proxy %s needs fixing" % name
            entry = '|'.join([ip, name, good_cfg])
        p.rpush(qname, entry)
    print "fixing enqueued proxies..."
    p.execute()
    print "Done with %s" % qname
Example #15
def srvq_integrity(region, cache=None):
    """
    Perform sanity checks on the region's server queue:

    (i) all VPSs listed there actually exist;

    (ii) the IPs and names of the VPSs match those recorded in the queue.

    An actual proxying check is not performed. As of this writing, there's a
    ticket to make checkfallbacks do that.

    This assumes that you have checked for duplicate proxies.
    """
    if cache is None:
        cache = model.make_cache()
    if cache.srvq is None:
        cache.srvq = {}
    if region not in cache.srvq:
        cache.srvq[region] = redis_shell.lrange(region + ':srvq', 0, -1)
    cache.all_vpss = cache.all_vpss or vps_util.all_vpss()
    vps_by_name = {v.name: v for v in cache.all_vpss}
    not_ours = []
    bad_ip = []
    for entry in cache.srvq[region]:
        ip, name, cfg = entry.split('|')
        if name not in vps_by_name:
            not_ours.append((ip, name, entry))
            # XXX: factor out fixes from here.
            redis_shell.lrem(region + ':srvq', entry)
            continue
        actual_ip = vps_by_name[name].ip
        if actual_ip != ip:
            # XXX: factor out fixes from here.
            redis_shell.lrem(region + ':srvq', entry)
            bad_ip.append((ip, actual_ip, name, entry))
    ret = []
    if not_ours:
        ret.append(('Queued proxy no longer ours', not_ours))
    if bad_ip:
        ret.append(('Inconsistent IP in queued proxy', bad_ip))
    return ret
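The cache argument exists so repeated checks can share one model cache (and a single all_vpss fetch) across regions; the return value is a list of (description, offending entries) pairs, empty when the queue passes both checks. A usage sketch, assuming the regions() helper seen in the later examples:

# Reuse one cache so all_vpss is fetched only once for every region checked.
cache = model.make_cache()
problems = {region: srvq_integrity(region, cache) for region in regions()}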
Example #16
def retire_lcs(name,
               ip,
               byip=util.Cache(timeout=60 * 60, update_fn=srv_cfg_by_ip)):
    cm = cm_by_name(name)
    region = region_by_name(name)
    srvs = byip.get().get(ip, (None, []))[1]
    txn = redis_shell.pipeline()
    if srvs:
        scores = [redis_shell.zscore(region + ':slices', srv) for srv in srvs]
        pairs = {"<empty:%s>" % int(score): score for score in scores if score}
        if pairs:
            txn.zadd(region + ":slices", **pairs)
            txn.zrem(region + ":slices", *srvs)
        txn.hdel('srv->cfg', *srvs)
        txn.incr('srvcount')
    else:
        print "No configs left to delete for %s." % name
    # Check whether this server is in the queue (because of recycling).
    for cfg in redis_shell.lrange(region + ':srvq', 0, -1):
        if cfg.split('|')[0] == ip:
            txn.lrem(region + ':srvq', cfg)
    txn.lrem(cm + ':vpss', name)
    txn.incr(cm + ':vpss:version')
    txn.execute()
Example #17
def queued_names():
    nbyip = name_by_ip()
    return set(nbyip.get(cfg.split('|')[0])
               for region in regions()
               for cfg in redis_shell.lrange('%s:srvq' % region, 0, -1))
Example #18
def bakedin():
    return set(x.split('|')[1]
               for region in regions()
               for x in redis_shell.lrange(region + ':bakedin', 0, -1))
Example #19
def vpss_from_cm(cm):
    return (set(redis_shell.lrange(cm + ':vpss', 0, -1))
            | set([entry.split('*')[0]
                   for entry in redis_shell.lrange(cm + ':destroyq', 0, -1)
                   if not entry.startswith('-1')]))
Example #20
def get_registered_vpss():
    return set(
        map(toascii, (redis_shell.lrange('doams3:vpss', 0, -1) +
                      redis_shell.lrange('dosgp1:vpss', 0, -1) +
                      redis_shell.lrange('vltok1:vpss', 0, -1))))
Example #21
def get_registered_vpss():
    return set(map(toascii, (redis_shell.lrange('doams3:vpss', 0, -1)
                             + redis_shell.lrange('dosgp1:vpss', 0, -1)
                             + redis_shell.lrange('vltok1:vpss', 0, -1))))