def actually_retire_proxy(name, ip, pipeline=None): """ While retire_proxy just enqueues the proxy for retirement, this actually updates the redis tables. """ name, ip, srv = nameipsrv(name=name, ip=ip) cm = cm_by_name(name) region = region_by_name(name) txn = pipeline or redis_shell.pipeline() if srv: actually_close_proxy(name, ip, srv, txn) txn.hdel('srv->cfg', srv) txn.hdel('server->config', name) txn.hdel('srv->name', srv) txn.hdel('srv->srvip', srv) txn.hdel('name->srv', name) txn.hdel('srvip->srv', ip) # For debugging purposes; we can delete these anytime if they're a # space problem. txn.hset('history:srv->name', srv, name) txn.hset('history:name->srv', name, srv) txn.hset('history:srv->srvip', srv, ip) # An IP may be used by multiple servers through history. txn.rpush('history:srvip->srv:%s' % ip, srv) txn.incr('srvcount') else: print "No configs left to delete for %s." % name # Check whether this server is in the queue (because of recycling). for cfg in redis_shell.lrange(region + ':srvq', 0, -1): if cfg.split('|')[0] == ip: txn.lrem(region + ':srvq', cfg) txn.lrem(cm + ':vpss', name) txn.incr(cm + ':vpss:version') if txn is not pipeline: txn.execute()
def retire_proxy(name=None, ip=None, srv=None, reason='failed checkfallbacks',
                 pipeline=None, offload=False):
    """
    Enqueue a proxy for retirement (or offloading) and log the operation.

    Refuses to act on proxies currently serving as fallbacks or honeypots
    for their region.
    """
    name, ip, srv = nameipsrv(name, ip, srv)
    region = region_by_name(name)
    # Guard: never retire a server the region still depends on.
    if redis_shell.sismember(region + ':fallbacks', srv):
        print >> sys.stderr, "I'm *not retiring* %s (%s) because it is a fallback server for region '%s'." % (name, ip, region)
        print >> sys.stderr, "Please remove it as a fallback first."
        return
    if redis_shell.sismember(region + ':honeypots', srv):
        print >> sys.stderr, "I'm *not retiring* %s (%s) because it is a honeypot server for region '%s'." % (name, ip, region)
        print >> sys.stderr, "Please remove it as a honeypot first."
        return
    pipe = pipeline or redis_shell.pipeline()
    # Offloaded proxies go to the region queue; plain retirements go to the
    # cloudmaster queue.
    qname = ('%s:offloadq' % region_by_name(name) if offload
             else '%s:retireq' % cm_by_name(name))
    pipe.rpush(qname, '%s|%s' % (name, ip))
    log2redis({'op': 'retire',
               'name': name,
               'ip': ip,
               'srv': srv,
               'reason': reason},
              pipeline=pipe)
    if not pipeline:
        pipe.execute()
def retire_proxy(name=None, ip=None, srv=None, reason='failed checkfallbacks',
                 pipeline=None, offload=False):
    """
    Queue up a proxy so the retirement (or offload) machinery will pick it up.

    Fallback and honeypot servers are protected: they must be demoted before
    they can be retired.
    """
    name, ip, srv = nameipsrv(name, ip, srv)
    region = region_by_name(name)
    is_fallback = redis_shell.sismember(region + ':fallbacks', srv)
    if is_fallback:
        print >> sys.stderr, "I'm *not retiring* %s (%s) because it is a fallback server for region '%s'." % (
            name, ip, region)
        print >> sys.stderr, "Please remove it as a fallback first."
        return
    is_honeypot = redis_shell.sismember(region + ':honeypots', srv)
    if is_honeypot:
        print >> sys.stderr, "I'm *not retiring* %s (%s) because it is a honeypot server for region '%s'." % (
            name, ip, region)
        print >> sys.stderr, "Please remove it as a honeypot first."
        return
    txn = pipeline or redis_shell.pipeline()
    if offload:
        queue = '%s:offloadq' % region_by_name(name)
    else:
        queue = '%s:retireq' % cm_by_name(name)
    txn.rpush(queue, '%s|%s' % (name, ip))
    entry = {'op': 'retire', 'name': name, 'ip': ip, 'srv': srv,
             'reason': reason}
    log2redis(entry, pipeline=txn)
    # Only execute when we created the pipeline ourselves.
    if not pipeline:
        txn.execute()
def actually_retire_proxy(name, ip, srv=None, pipeline=None): """ While retire_proxy just enqueues the proxy for retirement, this actually updates the redis tables. """ name, ip, srv = nameipsrv(name=name, ip=ip, srv=srv) cm = cm_by_name(name) region = region_by_name(name) txn = pipeline or redis_shell.pipeline() if srv: actually_close_proxy(name, ip, srv, txn) txn.hdel('srv->cfg', srv) txn.hdel('server->config', name) txn.hdel('srv->name', srv) txn.hdel('srv->srvip', srv) txn.hdel('name->srv', name) txn.hdel('srvip->srv', ip) # For debugging purposes; we can delete these anytime if they're a # space problem. txn.hset('history:srv->name', srv, name) txn.hset('history:name->srv', name, srv) txn.hset('history:srv->srvip', srv, ip) # An IP may be used by multiple servers through history. txn.rpush('history:srvip->srv:%s' % ip, srv) txn.incr('srvcount') else: print "No configs left to delete for %s." % name # Check whether this server is in the queue (because of recycling). for cfg in redis_shell.lrange(region + ':srvq', 0, -1): if cfg.split('|')[0] == ip: txn.lrem(region + ':srvq', cfg) txn.lrem(cm + ':vpss', name) txn.incr(cm + ':vpss:version') if txn is not pipeline: txn.execute()
def proxy_status(name=None, ip=None, srv=None):
    """
    Report a proxy's state: 'baked-in' (no srv entry), 'closed' (not in the
    region's slice table), or 'open'.
    """
    name, _, srv = nameipsrv(name, ip, srv)
    if srv is None:
        return 'baked-in'
    slices_key = region_by_name(name) + ':slices'
    score = redis_shell.zscore(slices_key, srv)
    return 'closed' if score is None else 'open'
def proxy_status(name=None, ip=None, srv=None):
    """
    Report a proxy's state: 'open'/'closed' when it has a srv entry,
    'enqueued' when it sits in its region's server queue, and 'baked-in'
    otherwise.
    """
    name, _, srv = nameipsrv(name, ip, srv)
    if srv is not None:
        in_slices = redis_shell.zscore(
            region_by_name(name) + ':slices', srv) is not None
        return 'open' if in_slices else 'closed'
    if name is not None:
        # No srv yet -- it may still be waiting in the region's queue
        # (e.g. because of recycling).
        region = region_by_name(name)
        if any(entry.split('|')[1] == name
               for entry in redis_shell.lrange(region + ':srvq', 0, -1)):
            return 'enqueued'
    return 'baked-in'
def actually_close_proxy(name=None, ip=None, srv=None, pipeline=None):
    """
    Remove a proxy's entries from its region's slice table, replacing each
    one with an '<empty:score>' placeholder so the slot remains reserved.
    """
    name, ip, srv = nameipsrv(name, ip, srv)
    region = region_by_name(name)
    slices_key = region + ':slices'
    pipe = pipeline or redis_shell.pipeline()

    def evict(member):
        # True iff `member` was present; queue up its replacement by a
        # placeholder carrying the same score.
        score = redis_shell.zscore(slices_key, member)
        if score is None:
            return False
        pipe.zrem(slices_key, member)
        pipe.zadd(slices_key, "<empty:%s>" % int(score), score)
        return True

    evict(srv)
    # Split proxies are stored as 'srv|0', 'srv|1', ...; sweep until the
    # first missing suffix.
    for n in it.count():
        if not evict('%s|%s' % (srv, n)):
            break
    if pipe is not pipeline:
        pipe.execute()
def actually_offload_proxy(name=None, ip=None, srv=None, pipeline=None): name, ip, srv = nameipsrv(name, ip, srv) region = region_by_name(name) client_table_key = region + ':clientip->srv' packed_srv = redis_util.pack_srv(srv) #XXX: a proxy -> {clients} index is sorely needed! # Getting the set of clients assigned to this proxy takes a long time # currently. Let's get it done before pulling the replacement proxy, # so we're less likely to be left with an empty proxy if interrupted. clients = set(pip for pip, psrv in redis_shell.hgetall(client_table_key).iteritems() if psrv == packed_srv) dest = pull_from_srvq(region) # It's still possible that we'll crash or get rebooted here, so the # destination server will be left empty. The next closed proxy compaction # job will find this proxy and assign some users to it or mark it for # retirement. dest_psrv = redis_util.pack_srv(dest.srv) redis_shell.hmset(client_table_key, {pip: dest_psrv for pip in clients}) print "Offloaded clients from %s (%s) to %s (%s)" % (name, ip, dest.name, dest.ip)
def actually_offload_proxy(name=None, ip=None, srv=None, pipeline=None): name, ip, srv = nameipsrv(name, ip, srv) region = region_by_name(name) client_table_key = region + ':clientip->srv' packed_srv = redis_util.pack_srv(srv) #XXX: a proxy -> {clients} index is sorely needed! # Getting the set of clients assigned to this proxy takes a long time # currently. Let's get it done before pulling the replacement proxy, # so we're less likely to be left with an empty proxy if interrupted. clients = set( pip for pip, psrv in redis_shell.hgetall(client_table_key).iteritems() if psrv == packed_srv) dest = pull_from_srvq(region) # It's still possible that we'll crash or get rebooted here, so the # destination server will be left empty. The next closed proxy compaction # job will find this proxy and assign some users to it or mark it for # retirement. dest_psrv = redis_util.pack_srv(dest.srv) redis_shell.hmset(client_table_key, {pip: dest_psrv for pip in clients}) print "Offloaded clients from %s (%s) to %s (%s)" % (name, ip, dest.name, dest.ip)
def assign_clientip_to_srv(clientip, srvname=None, srvip=None, srv=None):
    """
    Record in the region's client table that `clientip` is served by the
    given server.
    """
    nis = redis_util.nameipsrv(srvname, srvip, srv)
    table = '%s:clientip->srv' % region_by_name(nis.name)
    packed_ip = redis_util.pack_ip(clientip)
    packed_srv = redis_util.pack_srv(nis.srv)
    redis_shell.hset(table, packed_ip, packed_srv)