def get_lcs_name():
    """Generate a unique name for a new LCS: 'fp-<CM>-<yyyymmdd>-<serial>'.

    The per-day serial lives in redis under <CM>:lcsserial, alongside
    <CM>:lcsserial:date recording which day the serial belongs to.
    """
    today = vps_util.todaystr()
    if redis_shell.get(CM + ':lcsserial:date') == today:
        # Still the same day as the previous launch: just bump the serial.
        serial = redis_shell.incr(CM + ':lcsserial')
    else:
        # First launch of a new day: reset date and serial in one batch.
        serial = 1
        batch = redis_shell.pipeline()
        batch.set(CM + ':lcsserial:date', today)
        batch.set(CM + ':lcsserial', 1)
        batch.execute()
    return 'fp-%s-%s-%03d' % (CM, today, serial)
def retire_lcs(name, ip,
               cfgcache=util.Cache(timeout=60*60,
                                   update_fn=lambda: redis_shell.hgetall('cfgbysrv'))):
    """Retire the LCS called `name` at address `ip`.

    Deletes any server configs whose address matches `ip` from the
    'cfgbysrv' hash, and removes `name` from its datacenter's VPS roster,
    bumping the roster version so consumers notice the change.

    The `cfgcache` default is evaluated once at def time on purpose: it is a
    shared one-hour cache of the 'cfgbysrv' hash across calls.
    """
    if name.startswith('fp-jp-'):
        dc = 'vltok1'
    elif name.startswith('fp-nl-'):
        dc = 'doams3'
    else:
        assert False
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; these configs come from our own redis, but consider
    # yaml.safe_load anyway.
    srvs = [srv for srv, cfg in cfgcache.get().iteritems()
            if yaml.load(cfg).values()[0]['addr'].split(':')[0] == ip]
    if srvs:
        redis_shell.hdel('cfgbysrv', *srvs)
        redis_shell.incr('srvcount')
    else:
        # BUG FIX: this message was a bare string expression (a no-op);
        # it was clearly meant to be printed.
        print("No configs left to delete for %s." % name)
    redis_shell.lrem(dc + ':vpss', name)
    redis_shell.incr(dc + ':vpss:version')
def get_lcs_name(req):
    """Generate a unique LCS name: 'fp-<type>-<CM>-<yyyymmdd>-<serial>'.

    The type prefix is 'obfs4' when the request carries an 'obfs4_port'
    entry, 'https' otherwise. The daily serial is kept in redis under
    <CM>:lcsserial / <CM>:lcsserial:date.
    """
    today = vps_util.todaystr()
    if redis_shell.get(CM + ':lcsserial:date') == today:
        # Same day as the last launch: bump the existing serial.
        serial = redis_shell.incr(CM + ':lcsserial')
    else:
        # New day: reset the serial and record today's date in one batch.
        serial = 1
        batch = redis_shell.pipeline()
        batch.set(CM + ':lcsserial:date', today)
        batch.set(CM + ':lcsserial', 1)
        batch.execute()
    type_prefix = 'obfs4' if 'obfs4_port' in req else 'https'
    return 'fp-%s-%s-%s-%03d' % (type_prefix, CM, today, serial)
def pull_from_srvq(prefix, refill=True):
    """Pop one pre-launched server off the <prefix>:srvq queue.

    Assigns the server a fresh serial number, records all the
    name/ip/serial/config mappings in redis, and (when `refill` is true)
    pushes a replacement request onto <prefix>:srvreqq so the queue gets
    topped back up.

    Raises RuntimeError when the queue is empty.
    Returns a redis_util.nis tuple for the pulled server.
    """
    entry = redis_shell.rpop(prefix + ':srvq')
    if entry is None:
        raise RuntimeError("No servers to pull from the %s queue" % prefix)
    ip, name, cfg = entry.split('|')
    srv = redis_shell.incr('srvcount')
    batch = redis_shell.pipeline()
    if refill:
        # Ask the launcher for a replacement server.
        batch.lpush(prefix + ':srvreqq', srv)
    batch.hset('server->config', name, cfg)
    # Bidirectional lookup tables between serial, name and ip.
    for hsh, field, value in (('srv->cfg', srv, cfg),
                              ('srv->name', srv, name),
                              ('name->srv', name, srv),
                              ('srvip->srv', ip, srv),
                              ('srv->srvip', srv, ip)):
        batch.hset(hsh, field, value)
    batch.execute()
    return redis_util.nis(name, ip, srv)
def get_lcs_name(dc, redis_shell):
    """Generate a unique LCS name: 'fp-<country>-<yyyymmdd>-<serial>'.

    The country code is derived from the datacenter prefix ('vltok*' ->
    'jp', 'doams*' -> 'nl').  The per-day serial is kept in redis under
    <dc>:lcsserial, with <dc>:lcsserial:date recording which day it
    belongs to.

    :param dc: datacenter identifier, e.g. 'vltok1' or 'doams3'.
    :param redis_shell: a redis client (get/incr/pipeline are used).
    :raises ValueError: for an unrecognized datacenter.  (Was `assert
        False`, which is stripped under -O and would then fall through to
        a confusing NameError on `country`.)
    """
    if dc.startswith('vltok'):
        country = 'jp'
    elif dc.startswith('doams'):
        country = 'nl'
    else:
        raise ValueError("Unrecognized datacenter: %r" % (dc,))
    now = datetime.utcnow()
    date = "%d%02d%02d" % (now.year, now.month, now.day)
    if redis_shell.get(dc + ':lcsserial:date') == date:
        # Same day as the last launch: just bump the serial.
        serial = redis_shell.incr(dc + ':lcsserial')
    else:
        # New day: reset the serial and record today's date atomically.
        pipe = redis_shell.pipeline()
        pipe.set(dc + ':lcsserial:date', date)
        pipe.set(dc + ':lcsserial', 1)
        pipe.execute()
        serial = 1
    return 'fp-%s-%s-%03d' % (country, date, serial)
def register_vps(redis_shell, dc, name): print "Registering VPS", name redis_shell.rpush(dc + ':vpss', name) redis_shell.incr(dc + ':vpss:version')
def run():
    """Main loop: serve VPS launch requests from the <QPREFIX>:srvreqq queue.

    Pops launch requests, spawns one worker process per request (up to
    MAXPROCS), collects results from the workers over a multiprocessing
    queue, registers unblocked VPSs, quarantines blocked ones, and kills
    workers that exceed LAUNCH_TIMEOUT.  Never returns.
    """
    qname = QPREFIX + ":srvreqq"
    print "Serving queue", qname, ", MAXPROCS:", repr(MAXPROCS)
    quarantine = CM + ":quarantined_vpss"
    reqq = redisq.Queue(qname, redis_shell, LAUNCH_TIMEOUT)
    # Workers report launch results back through this queue.
    procq = multiprocessing.Queue()
    # reqid -> {'name', 'proc', 'starttime', 'remove_req'} for in-flight launches.
    pending = {}

    def kill_task(reqid):
        # Terminate a timed-out worker and destroy its VPS in a detached
        # daemon process so we don't block the main loop.
        print "Killing timed out process and vps..."
        task = pending.pop(reqid)
        task['proc'].terminate()
        proc = multiprocessing.Process(target=vps_shell.destroy_vps,
                                       args=(task['name'],))
        proc.daemon = True
        proc.start()

    while True:
        # If the request queue is totally empty (no tasks enqueued or even in
        # progress), flush the quarantine queue into the destroy queue.
        if redis_shell.llen(qname) == 1:  # 1 for the redisq sentinel entry
            names = redis_shell.smembers(quarantine)
            if names:
                print "Flushing %s VPSs from quarantine." % len(names)
                p = redis_shell.pipeline()
                p.srem(quarantine, *names)
                p.lpush(CM + ":destroyq", *names)
                p.execute()
        # Drain all results the workers have produced so far.
        while not procq.empty():
            try:
                result = procq.get(False)  # non-blocking get
                print "Got result:", result
                task = pending.get(result['reqid'])
                # Ignore stale results whose task was already replaced/killed.
                if task and task['name'] == result['name']:
                    p = redis_shell.pipeline()
                    if result['blocked']:
                        # VPS came up censored/blocked: quarantine it and
                        # requeue a fresh launch request in its place.
                        print "Quarantining %(name)s (%(ip)s)." % result
                        p.sadd(quarantine, result['name'])
                        p.incr(CM + ":blocked_vps_count")  # stats
                        # We'll remove the original request anyway because we
                        # don't want it to stay around until timeout. Insert a
                        # new one to replace it instead.
                        reqid = redis_shell.incr('srvcount')
                        p.lpush(qname, reqid)
                    else:
                        p.incr(CM + ":unblocked_vps_count")  # stats
                        # NOTE(review): `pending` is only cleaned up on the
                        # unblocked path; a blocked result appears to leave its
                        # entry in `pending` until a timeout — confirm intended.
                        del pending[result['reqid']]
                        vps_util.enqueue_cfg(result['name'],
                                             result['access_data'],
                                             result['srvq'])
                        register_vps(task['name'])
                    task['remove_req'](p)
                    p.execute()
            except Empty:
                # procq.empty() said there was an item but get(False) raced.
                print "Wat?"
                break
        if len(pending) < MAXPROCS:
            req_string, remover = reqq.next_job()
            if req_string:
                print "Got request", req_string
                req = json.loads(req_string)
                if isinstance(req, int):
                    # Transition: support the old format while we are updating
                    # the config server etc.
                    req = {'id': req, 'srvq': QPREFIX + ':srvq'}
                    req_string = json.dumps(req)
                reqid = req['id']
                if reqid in pending:
                    # The same request came back around: its previous worker
                    # must have exceeded the queue timeout.
                    print "Killing task %s because of queue timeout" % reqid
                    kill_task(reqid)
                name = new_proxy_name(req)
                proc = multiprocessing.Process(target=launch_one_server,
                                               args=(procq,
                                                     reqid,
                                                     name,
                                                     req_string))
                proc.daemon = True
                pending[reqid] = {
                    'name': name,
                    'proc': proc,
                    'starttime': time.time(),
                    'remove_req': remover}
                print "Starting process to launch", name
                proc.start()
            else:
                # Since we're not checking the queue when we've maxed out our
                # processes, we need to manually check for expired tasks.
                for reqid, d in pending.items():
                    if time.time() - d['starttime'] > LAUNCH_TIMEOUT:
                        print "Killing task %s because of local timeout" % reqid
                        kill_task(reqid)
        time.sleep(10)
def register_vps(name): print "Registering VPS", name redis_shell.rpush(CM + ':vpss', name) redis_shell.incr(CM + ':vpss:version')