def get_user(user=None, user_id=None):
    '''Get a user from the DataStore using a User object or a user ID.

    Args:
        user: an App Engine account object exposing user_id()/nickname()
            (optional).
        user_id: a raw user-id string, used when *user* is not given.

    Returns:
        The User entity (cached in memcache for 10 seconds under the
        'profile_data' namespace), or False for the placeholder id 'us'.
    '''
    # Renamed from `id` to avoid shadowing the builtin id().
    uid = user.user_id() if user else user_id
    if uid == 'us':
        # 'us' is the anonymous/placeholder id -- no entity exists for it.
        return False
    else:
        e = mget(key=uid, namespace='profile_data')
        if e is None:
            try:
                q = User.all().filter('id', uid).fetch(1)
                e = q[0]
            except IndexError:
                # No such user yet -- create one. (Was a bare `except:`,
                # which also hid real datastore errors; IndexError is what
                # q[0] raises on an empty fetch.)
                # NOTE(review): this path assumes *user* is not None --
                # user.nickname() would raise otherwise; confirm callers.
                u = User(
                    id=uid,
                    user=user,
                    nickname=user.nickname(),
                )
                e = db.get(u.put())
            # Lazily attach the satellite entities that older rows may lack.
            if e.bio is None or e.shared is None:
                if e.bio is None:
                    m = User_Bio().put()
                    e.bio = m
                if e.shared is None:
                    p = User_Permissions().put()
                    e.shared = p
                e = db.get(db.put(e))
            mset(key=uid, value=e, time=10, namespace='profile_data')
        return e
def get_gravatar(email):
    '''Generates a gravatar image url for the passed email address'''
    # Serve from the memcache namespace when possible.
    cached = mget(key=email, namespace='gravatars')
    if cached is not None:
        return cached
    # The placeholder id 'us' gets a fixed masked address instead.
    normalized = email.lower()
    if normalized == 'us':
        normalized = '*****@*****.**'
    query = urlencode({'s': '150', 'd': 'retro'})
    url = 'https://secure.gravatar.com/avatar/%s?%s' % (
        md5(normalized).hexdigest(), query)
    mset(key=email, value=url, namespace='gravatars')
    return url
def data(**kwargs):
    '''Makes sure that certain pieces of information are always sent to the
    template engine along with the information supplied by the different
    handlers.

    Returns:
        dict with at least 'site', 'user', 'log_in_out' and 'gravatar';
        'logged_in' and 'user'['id'/'nickname'] are added for signed-in users.
    '''
    data = {
        'site': web.ctx.homedomain,
        'user': {},
    }
    data.update(kwargs)
    user = users.get_current_user()
    if user:
        data['log_in_out'] = users.create_logout_url('/')
        data['logged_in'] = True
        data['user']['id'] = user.user_id()
        try:
            # Prefer the memcached nickname; fall back to the datastore.
            nickname = mget(key=user.user_id(), namespace='usernames')
            if nickname is None:
                q = User.all().filter('id', user.user_id()).fetch(1)
                nickname = q[0].nickname
                if not mset(key=user.user_id(), value=nickname, time=10,
                            namespace='usernames'):
                    logging.error('Could not set memcache value!')
            data['user']['nickname'] = nickname
        except Exception:
            # Lookup failed (e.g. empty fetch) -- use the account nickname.
            # (Was a bare `except:`, which also traps SystemExit.)
            data['user']['nickname'] = user.nickname()
        try:
            # BUG FIX: the id was stored as data['user']['id'] above but was
            # read back as data['user_id'], which always raised KeyError and
            # forced the 'us' fallback below.
            data['gravatar'] = get_gravatar(
                get_user(user_id=data['user']['id'] or 'us').user.email())
        except (KeyError, AttributeError):
            # AttributeError covers get_user() returning False / entity
            # without a usable .user.
            data['gravatar'] = get_gravatar('us')
    else:
        data['log_in_out'] = users.create_login_url('/')
        data['gravatar'] = get_gravatar('*****@*****.**')
    return data
def real_main(): args = parse_qs(environ['QUERY_STRING']) if not args: print "Status: 301 Moved Permanantly\nLocation: /\n\n", return for a in ('info_hash', 'port'): if a not in args or len(args[a]) != 1: if ERRORS: resps(bencode({'failure reason': "You must provide %s!" % a})) return key = args['info_hash'][0] if STATS: key_complete = '%s!complete' % key key_incomplete = '%s!incomplete' % key left = args.pop('left', [None])[0] err = None if (len(key) > 128): err = "Insanely long key!" else: try: port = int(args['port'][0]) if port > 65535 or port < 1: err = "Invalid port number!" except: err = "Invalid port number!" if err: if ERRORS: resps(bencode({'failure reason': err})) return # Crop raises chance of a clash, plausible deniability for the win! #phash = md5("%s/%d" % (ip, port)).hexdigest()[:16] # XXX TODO Instead of a hash, we should use the packed ip+port i = environ['REMOTE_ADDR'].split( '.') # TODO Check that it is an v4 address phash = pack('>4BH', int(i[0]), int(i[1]), int(i[2]), int(i[3]), port) # TODO BT: If left=0, the download is done and we should not return any peers. event = args.pop('event', [None])[0] if event == 'stopped': # Maybe we should only remove it from this track, but this is good enough. mdel(phash, namespace='P') if STATS: # XXX Danger of incomplete underflow! if left == '0': decr(key_complete, namespace='S') else: decr(key_incomplete, namespace='S') return # They are going away, don't waste bw/cpu on this. 
resps(bencode({'interval': INTERVAL, 'peers': []})) elif STATS and event == 'completed': decr(key_incomplete, namespace='S') incr(key_complete, namespace='S') updatetrack = False # Get existing peers PEER_SIZE = 6 MAX_PEERS = 32 MAX_PEERS_SIZE = MAX_PEERS * PEER_SIZE a = get(key, namespace='T') # TODO: perhaps we should use the array module: http://docs.python.org/library/array.html if a: als = [a[x:x + PEER_SIZE] for x in xrange(0, l, PEER_SIZE)] l = len(als) if l > MAX_PEERS: i = randrange(0, l - MAX_PEERS) ii = i * PEER_SIZE rs = a[ii:ii + MAX_PEERS_SIZE] rls = als[i:i + MAX_PEERS] else: rs = a rls = als rrls = get_multi(rls, namespace='P').keys() # NOTE Do not use a generator, generators are always true even if empty! lostpeers = [p for p in rls if p not in rrls] if lostpeers: # Remove lost peers rs = ''.join(rrls) [als.remove(p) for p in lostpeers if p in als] a = ''.join(als) updatetrack = True if STATS: # XXX medecau suggests we might use len(s) instead of counting leechers. # XXX If we underflow, should decrement from '!complete' decr(key_incomplete, len(lostpeers), namespace='S') # Remove self from returned peers # XXX Commented out as we are shorter on CPU than bw #if phash in peers: # peers.pop(phash, None) # New track! else: a = rs = '' als = [] if STATS: mset(key_complete, '0', namespace='S') mset(key_incomplete, '0', namespace='S') if phash not in als: # Assume new peer # XXX We don't refresh the peers expiration date on every request! mset(phash, 1, namespace='P') a += phash updatetrack = True if STATS: # Should we bother to check event == 'started'? Why? if left == '0': incr(key_complete, namespace='S') else: incr(key_incomplete, namespace='S') if updatetrack: mset(key, a, namespace='K') if STATS: resps( bencode({ 'interval': INTERVAL, 'peers': rs, 'complete': (get(key_complete, namespace='S') or 0), 'incomplete': (get(key_incomplete, namespace='S') or 0) })) else: resps(bencode({'interval': INTERVAL, 'peers': rs}))
def real_main(): args = parse_qs(environ['QUERY_STRING']) if not args: print "Status: 301 Moved Permanantly\nLocation: /\n\n", return for a in ('info_hash', 'port'): if a not in args or len(args[a]) != 1: if ERRORS: resps(bencode({'failure reason': "You must provide %s!"%a})) return key = args['info_hash'][0] if STATS: key_complete = '%s!complete'%key key_incomplete = '%s!incomplete'%key left = args.pop('left', [None])[0] err = None if(len(key) > 128): err = "Insanely long key!" else: try: port = int(args['port'][0]) if port > 65535 or port < 1: err = "Invalid port number!" except: err = "Invalid port number!" if err: if ERRORS: resps(bencode({'failure reason': err})) return # Crop raises chance of a clash, plausible deniability for the win! #phash = md5("%s/%d" % (ip, port)).hexdigest()[:16] # XXX TODO Instead of a hash, we should use the packed ip+port i = environ['REMOTE_ADDR'].split('.') # TODO Check that it is an v4 address phash = pack('>4BH', int(i[0]), int(i[1]), int(i[2]), int(i[3]), port) # TODO BT: If left=0, the download is done and we should not return any peers. event = args.pop('event', [None])[0] if event == 'stopped': # Maybe we should only remove it from this track, but this is good enough. mdel(phash, namespace='P') if STATS: # XXX Danger of incomplete underflow! if left == '0': decr(key_complete, namespace='S') else: decr(key_incomplete, namespace='S') return # They are going away, don't waste bw/cpu on this. 
resps(bencode({'interval': INTERVAL, 'peers': []})) elif STATS and event == 'completed': decr(key_incomplete, namespace='S') incr(key_complete, namespace='S') updatetrack = False # Get existing peers PEER_SIZE = 6 MAX_PEERS = 32 MAX_PEERS_SIZE = MAX_PEERS*PEER_SIZE a = get(key, namespace='T') # TODO: perhaps we should use the array module: http://docs.python.org/library/array.html if a: als = [a[x:x+PEER_SIZE] for x in xrange(0, l, PEER_SIZE)] l = len(als) if l > MAX_PEERS: i = randrange(0, l-MAX_PEERS) ii = i*PEER_SIZE rs = a[ii:ii+MAX_PEERS_SIZE] rls = als[i:i+MAX_PEERS] else: rs = a rls = als rrls = get_multi(rls, namespace='P').keys() # NOTE Do not use a generator, generators are always true even if empty! lostpeers = [p for p in rls if p not in rrls] if lostpeers: # Remove lost peers rs = ''.join(rrls) [als.remove(p) for p in lostpeers if p in als] a = ''.join(als) updatetrack = True if STATS: # XXX medecau suggests we might use len(s) instead of counting leechers. # XXX If we underflow, should decrement from '!complete' decr(key_incomplete, len(lostpeers), namespace='S') # Remove self from returned peers # XXX Commented out as we are shorter on CPU than bw #if phash in peers: # peers.pop(phash, None) # New track! else: a = rs = '' als = [] if STATS: mset(key_complete, '0', namespace='S') mset(key_incomplete, '0', namespace='S') if phash not in als: # Assume new peer # XXX We don't refresh the peers expiration date on every request! mset(phash, 1, namespace='P') a += phash updatetrack = True if STATS: # Should we bother to check event == 'started'? Why? if left == '0': incr(key_complete, namespace='S') else: incr(key_incomplete, namespace='S') if updatetrack: mset(key, a, namespace='K') if STATS: resps(bencode({'interval':INTERVAL, 'peers':rs, 'complete':(get(key_complete, namespace='S') or 0), 'incomplete':(get(key_incomplete, namespace='S') or 0)})) else: resps(bencode({'interval':INTERVAL, 'peers':rs}))
def real_main():
    '''Handle one tracker announce request (CGI entry).

    This revision keys peers by a cropped md5 of "ip/port" and stores the
    per-torrent peer list as a '|'-separated string in memcache namespace
    'K'; individual peers live in namespace 'P' as packed ip+port, and
    optional seeder/leecher counters in namespace 'S'.
    '''
    args = parse_qs(environ['QUERY_STRING'])
    if not args:
        # Bare request: redirect to the site root (CGI-style headers).
        print "Status: 301 Moved Permanantly\nLocation: /\n\n",
        return
    # Both parameters are required and must appear exactly once.
    for a in ('info_hash', 'port'):
        if a not in args or len(args[a]) != 1:
            if ERRORS:
                resps(bencode({'failure reason': "You must provide %s!"%a}))
            return
    ip = environ['REMOTE_ADDR']
    key = args['info_hash'][0]
    if STATS:
        # Counter keys for seeders ('complete') and leechers ('incomplete').
        key_complete = '%s!complete'%key
        key_incomplete = '%s!incomplete'%key
        left = args.pop('left', [None])[0]
    err = None
    if(len(key) > 128):
        err = "Insanely long key!"
    else:
        try:
            port = int(args['port'][0])
            if port > 65535 or port < 1:
                err = "Invalid port number!"
        except:
            err = "Invalid port number!"
    if err:
        if ERRORS:
            resps(bencode({'failure reason': err}))
        return
    # Crop raises chance of a clash, plausible deniability for the win!
    phash = md5("%s/%d" % (ip, port)).hexdigest()[:16]
    # XXX TODO Instead of a hash, we should use the packed ip+port
    # TODO BT: If left=0, the download is done and we should not return any peers.
    event = args.pop('event', [None])[0]
    if event == 'stopped':
        # Maybe we should only remove it from this track, but this is good enough.
        mdel(phash, namespace='P')
        if STATS:
            # XXX Danger of incomplete underflow!
            if left == '0':
                decr(key_complete, namespace='S')
            else:
                decr(key_incomplete, namespace='S')
        return # They are going away, don't waste bw/cpu on this.
        # Unreachable by design: the empty-peers reply is skipped to save bw.
        resps(bencode({'interval': INTERVAL, 'peers': []}))
    elif STATS and event == 'completed':
        # Peer turned from leecher into seeder.
        decr(key_incomplete, namespace='S')
        incr(key_complete, namespace='S')
    updatetrack = False
    # Get existing peers
    r = get(key, namespace='K')
    if r:
        s = r.split('|')
        # Cap the reply at 32 randomly sampled peers.
        if len(s) > 32:
            ks = sample(s, 32)
        else:
            ks = s
        peers = get_multi(ks, namespace='P')
        # NOTE Do not use a generator, generators are always true even if empty!
        # Peers sampled from the track but missing from 'P' have expired.
        lostpeers = [p for p in ks if p not in peers]
        if lostpeers:
            # Remove lost peers
            s = [k for k in s if k not in lostpeers]
            updatetrack = True
            if STATS:
                # XXX medecau suggests we might use len(s) instead of counting leechers.
                # XXX If we underflow, should decrement from '!complete'
                decr(key_incomplete, len(lostpeers), namespace='S')
        # Remove self from returned peers
        # XXX Commented out as we are shorter on CPU than bw
        #if phash in peers:
        #    peers.pop(phash, None)
    # New track!
    else:
        s = []
        peers = {}
        if STATS:
            mset(key_complete, '0', namespace='S')
            mset(key_incomplete, '0', namespace='S')
    if phash not in s:
        # Assume new peer
        # XXX We don't refresh the peers expiration date on every request!
        #mset(phash, '|'.join((ip, str(port))), namespace='I')
        # Store the peer as big-endian packed IPv4 + port (compact form).
        i = ip.split('.')
        mset(phash, pack('>4BH', int(i[0]), int(i[1]), int(i[2]), int(i[3]), port), namespace='P')
        s.append(phash)
        updatetrack = True
        if STATS:
            # Should we bother to check event == 'started'? Why?
            if left == '0':
                incr(key_complete, namespace='S')
            else:
                incr(key_incomplete, namespace='S')
    if updatetrack:
        # Persist the updated '|'-separated track list.
        mset(key, '|'.join(s), namespace='K')
    #ps = dict((k, peers[k].split('|')) for k in peers)
    #pl = [{'ip': ps[h][0], 'port': ps[h][1]} for h in ps]
    # Compact peers reply: concatenation of the packed 6-byte entries.
    cpl = ''.join(peers.values())
    if STATS:
        resps(bencode({'interval':INTERVAL, 'peers':cpl, 'complete':(get(key_complete, namespace='S') or 0), 'incomplete':(get(key_incomplete, namespace='S') or 0)}))
    else:
        resps(bencode({'interval':INTERVAL, 'peers':cpl}))