Code example #1
def retire_lcs(name,
               ip,
               # It's safe to cache this because at least 24h will elapse
               # between the time a proxy is recycled (and thus new server
               # IDs can be entered for it) and the time it's destroyed. To
               # be more precise, 24h must elapse since the time it was
               # _split_. For this to work, it's crucial to remove the
               # /home/lantern/server_split flag file whenever we recycle
               # proxies.
               byip=util.Cache(timeout=60*60,
                               update_fn=srv_cfg_by_ip)):
    cm = cm_by_name(name)
    region = region_by_name(name)
    srvs = byip.get().get(ip, (None, []))[1]
    txn = redis_shell.pipeline()
    if srvs:
        scores = [redis_shell.zscore(region + ':slices', srv) for srv in srvs]
        pairs = {"<empty:%s>" % int(score): score
                 for score in scores
                 if score}
        if pairs:
            txn.zadd(region + ":slices", **pairs)
            txn.zrem(region + ":slices", *srvs)
        txn.hdel('srv->cfg', *srvs)
        txn.incr('srvcount')
    else:
        print "No configs left to delete for %s." % name
    # Check whether this server is in the queue (because of recycling).
    for cfg in redis_shell.lrange(region + ':srvq', 0, -1):
        if cfg.split('|')[0] == ip:
            txn.lrem(region + ':srvq', cfg)
    txn.lrem(cm + ':vpss', name)
    txn.incr(cm + ':vpss:version')
    txn.execute()
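
Every example here leans on a `util.Cache` helper that isn't shown. Judging only from the call sites (`util.Cache(timeout=..., update_fn=...)` plus `.get()`), a minimal sketch might look like the following; the real helper may differ in details such as locking or error handling:

import time

class Cache(object):
    # A guess at util.Cache based only on how it's used above: memoize the
    # result of update_fn and refresh it once it's older than timeout seconds.
    def __init__(self, timeout, update_fn):
        self.timeout = timeout
        self.update_fn = update_fn
        self.value = None
        self.fetched_at = None  # None means we've never fetched yet.

    def get(self):
        now = time.time()
        if self.fetched_at is None or now - self.fetched_at > self.timeout:
            self.value = self.update_fn()
            self.fetched_at = now
        return self.value
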
Code example #2
def destroy_vps(name,
                server_cache=util.Cache(timeout=60*60,
                                        update_fn=lambda: vultr.server_list(None).values())):
    for d in server_cache.get():
        if d['label'] == name:
            vultr.server_destroy(d['SUBID'])
            break
    time.sleep(10)
    os.system('salt-key -yd ' + name)
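
Note that the cache is created as a default argument value, so it is instantiated once, at definition time, and shared across every call in the process; the usual "mutable default argument" pitfall is exploited deliberately here. A toy illustration of the same pattern, using the `Cache` sketch above and a made-up `expensive_list` stand-in:

def expensive_list():
    print "fetching..."  # stands in for vultr.server_list(None).values()
    return ['fp-jp-001', 'fp-nl-002']

def is_known(name, cache=Cache(timeout=60 * 60, update_fn=expensive_list)):
    # `cache` was built once when `def` ran, so within an hour every call
    # reuses the same fetched list instead of hitting the API again.
    return name in cache.get()

is_known('fp-jp-001')  # prints "fetching..." on the first call only
is_known('fp-nl-002')  # served from the cache
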
Code example #3
def destroy_vps(name,
                server_cache=util.Cache(
                    timeout=60 * 60,
                    update_fn=lambda: retrying_server_list().values())):
    server_list = server_cache.get()
    for d in server_list[:]:
        if d['label'] == name:
            try:
                try_vultr_cmd(vultr.server_destroy, d['SUBID'])
            except VultrError as e:
                if not e.message.lower().strip().startswith('invalid server'):
                    raise
                print "Subid %s with name %r not there anymore; ignoring..." % (
                    d['SUBID'], name)
            server_list.remove(d)
            time.sleep(2)
            # We don't `break` here because it's sadly possible, due to a bug,
            # that there is more than one VPS with the same name.
    time.sleep(10)
    os.system('salt-key -yd ' + name)
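
This variant assumes two retry helpers, `retrying_server_list` and `try_vultr_cmd`, that aren't shown. A rough sketch under the assumption that they simply retry flaky Vultr API calls with backoff; the retry count and delays are our guesses, and the real helpers may well skip retries for permanent failures such as the "invalid server" error handled above:

import time

def try_vultr_cmd(cmd, *args, **kwargs):
    # Naively retry a Vultr API call with exponential backoff, re-raising
    # on the last attempt.
    for attempt in range(5):
        try:
            return cmd(*args, **kwargs)
        except VultrError:
            if attempt == 4:
                raise
            time.sleep(2 ** attempt)

def retrying_server_list():
    return try_vultr_cmd(vultr.server_list, None)
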
Code example #4
def retire_lcs(name,
               ip,
               cfgcache=util.Cache(timeout=60*60,
                                   update_fn=lambda: redis_shell.hgetall('cfgbysrv'))):
    # Derive the datacenter from the proxy's name prefix.
    if name.startswith('fp-jp-'):
        dc = 'vltok1'
    elif name.startswith('fp-nl-'):
        dc = 'doams3'
    else:
        assert False, "Can't tell the datacenter for %r" % name
    srvs = [srv
            for srv, cfg in cfgcache.get().iteritems()
            if yaml.load(cfg).values()[0]['addr'].split(':')[0] == ip]
    if srvs:
        redis_shell.hdel('cfgbysrv', *srvs)
        redis_shell.incr('srvcount')
    else:
        print "No configs left to delete for %s." % name
    redis_shell.lrem(dc + ':vpss', name)
    redis_shell.incr(dc + ':vpss:version')
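
The list comprehension above implies that each value in the `cfgbysrv` hash is a YAML document with a single top-level entry whose `addr` field holds `host:port`. An invented value, shown only to make the parsing chain concrete:

import yaml

cfg = """
some-server-id:
  addr: 192.0.2.10:443
"""
# .values()[0] picks the single top-level entry (a list in Python 2);
# .split(':')[0] drops the port, leaving just the IP to compare.
print yaml.load(cfg).values()[0]['addr'].split(':')[0]  # -> 192.0.2.10
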
Code example #5
def retire_lcs(name,
               ip,
               byip=util.Cache(timeout=60 * 60, update_fn=srv_cfg_by_ip)):
    cm = cm_by_name(name)
    region = region_by_name(name)
    srvs = byip.get().get(ip, (None, []))[1]
    txn = redis_shell.pipeline()
    if srvs:
        scores = [redis_shell.zscore(region + ':slices', srv) for srv in srvs]
        pairs = {"<empty:%s>" % int(score): score for score in scores if score}
        if pairs:
            txn.zadd(region + ":slices", **pairs)
            txn.zrem(region + ":slices", *srvs)
        txn.hdel('srv->cfg', *srvs)
        txn.incr('srvcount')
    else:
        print "No configs left to delete for %s." % name
    # Check whether this server is in the queue (because of recycling).
    for cfg in redis_shell.lrange(region + ':srvq', 0, -1):
        if cfg.split('|')[0] == ip:
            txn.lrem(region + ':srvq', cfg)
    txn.lrem(cm + ':vpss', name)
    txn.incr(cm + ':vpss:version')
    txn.execute()
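
Both retire_lcs variants batch their Redis writes in a pipeline, so the removals, the `<empty:...>` placeholders, and the version bump are sent in one round trip and applied together (redis-py pipelines wrap commands in MULTI/EXEC by default). The pattern in isolation, with example key names mirroring the ones above:

import redis

r = redis.StrictRedis()
txn = r.pipeline()  # transactional (MULTI/EXEC) unless transaction=False
txn.zrem('eu:slices', 'srv-1')
txn.hdel('srv->cfg', 'srv-1')
txn.incr('srvcount')
txn.execute()  # nothing is applied until here, then all at once
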
Code example #6
    if not vps_util.highstate_pid(name):
        print("Highstate not running yet; waiting for a bit just in case...")
        time.sleep(10)
    while vps_util.highstate_pid(name):
        print("Highstate still running...")
        time.sleep(10)
    print("Highstate done!")
    return vps_util.hammer_the_damn_thing_until_it_proxies(
        name, ssh_tmpl % ip, fetchaccessdata_tmpl % ip)


def droplets_by_name():
    return {d.name: d for d in do.get_all_droplets()}


dbn_cache = misc_util.Cache(timeout=60 * 60, update_fn=droplets_by_name)


def destroy_vps(name):
    try:
        droplet = dbn_cache.get()[name]
        # We use the DO API directly and not salt-cloud here because the latter
        # takes forever and generates lots of API requests, which may make us run
        # out of our per-hour quota in busy times.
        requests.delete('https://api.digitalocean.com/v2/droplets/%s' %
                        droplet.id,
                        headers={"Authorization": "Bearer " + do_token})
    except KeyError:
        print >> sys.stderr, "Droplet not found:", name
    os.system('salt-key -yd ' + name)
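
The `requests.delete` call above discards the response; DigitalOcean's v2 API answers a successful droplet deletion with HTTP 204. A variant with a minimal status check (our addition, not part of the original):

        resp = requests.delete(
            'https://api.digitalocean.com/v2/droplets/%s' % droplet.id,
            headers={"Authorization": "Bearer " + do_token})
        if resp.status_code != 204:
            print >> sys.stderr, "Couldn't destroy %s: HTTP %d" % (
                name, resp.status_code)
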