def POST(self):
    """Persist the current user's sharing preferences.

    Re-renders the preferences page when the form fails validation;
    otherwise stores the names of the checked fields as the public
    list, evicts the cached profile data and redirects the client
    back to /preferences.
    """
    current = users.get_current_user()
    field_names = ("first_name", "middle_name", "last_name", "city",
                   "state", "postal_code", "country", "bio")
    # Default every expected field to False so missing boxes read as unchecked.
    posted = web.input(**dict.fromkeys(field_names, False))
    form = profile_form(**dict((name, posted[name]) for name in field_names))
    if form.validate():
        entity = util.get_user(user=current)
        # Keep only the names of the fields the user ticked.
        entity.shared.public = [field.name for field in form if field.data]
        entity.shared.put()
        # Drop the cached profile blob for this user.
        mdel(key=current.user_id(), namespace="profile_data")
        raise web.seeother("/preferences")
    # Validation failed: show the form again with its error messages.
    return t.render(
        util.data(
            form=form,
            title="Preferences",
            instructions="Please indicate which items you wish to make public."
        )
    )
def POST(self):
    """Validate and store a newly submitted episode.

    On a bad form the submit template is re-rendered with errors; on
    success the Episode entity is saved, the cached post lists are
    invalidated and the client is redirected to the home page.
    """
    form = submit_form()
    if form.validates():
        data = form.d
        Episode(
            title=data.title,
            number=int(data.number),
            href=db.Link(data.href),
            # Fall back to the placeholder image when no thumb was given.
            thumb=db.Link(data.thumb or BLANK),
            writeup=markdown(data.writeup),
        ).put()
        # Evict both cached front-page post lists so the new episode shows up.
        for cache_key in ('4_posts', '10_posts'):
            mdel(key=cache_key, namespace='posts')
        raise web.seeother(web.ctx.homedomain)
    # Validation failed: re-render the submission form with errors.
    return env.get_template('submit.html').render(form=form.render())
def POST(self):
    """Validate and save the current user's profile information.

    On validation failure the edit form is re-rendered with error
    messages.  On success the nickname (only when one is already set —
    see review note below) and all bio fields are stored, the cached
    profile data is evicted, and the client is redirected to /profile.
    """
    user = users.get_current_user()
    d = web.input()
    # Field names shared by the request, the form and the bio entity.
    # Replaces eight copy-pasted assignments with one loop.
    bio_fields = ("first_name", "middle_name", "last_name", "city",
                  "state", "postal_code", "country", "bio")
    f = profile_form(nickname=d.nickname,
                     **dict((name, d[name]) for name in bio_fields))
    if not f.validate():
        return t.render(
            util.data(
                form=f,
                title="Edit Profile",
                instructions="""Please enter whatever information you feel comfortable sharing. (Please note that your information is not shared.public until you grant us permission to share it in your Preferences)""",
            )
        )
    else:
        e = util.get_user(user=user)
        # NOTE(review): the nickname is only UPDATED when one already
        # exists; a user whose nickname is empty can never set it here.
        # Looks suspicious — confirm whether this guard is intentional.
        if e.nickname:
            e.nickname = f.nickname.data
            db.put(e)
        # Copy every bio field, normalising None/empty to "" so the
        # datastore never holds a null string property.
        for name in bio_fields:
            setattr(e.bio, name, getattr(f, name).data or "")
        e.bio.put()
        # Drop the cached profile blob for this user.
        mdel(key=user.user_id(), namespace="profile_data")
        raise web.seeother("/profile")
def real_main(): args = parse_qs(environ['QUERY_STRING']) if not args: print "Status: 301 Moved Permanantly\nLocation: /\n\n", return for a in ('info_hash', 'port'): if a not in args or len(args[a]) != 1: if ERRORS: resps(bencode({'failure reason': "You must provide %s!" % a})) return key = args['info_hash'][0] if STATS: key_complete = '%s!complete' % key key_incomplete = '%s!incomplete' % key left = args.pop('left', [None])[0] err = None if (len(key) > 128): err = "Insanely long key!" else: try: port = int(args['port'][0]) if port > 65535 or port < 1: err = "Invalid port number!" except: err = "Invalid port number!" if err: if ERRORS: resps(bencode({'failure reason': err})) return # Crop raises chance of a clash, plausible deniability for the win! #phash = md5("%s/%d" % (ip, port)).hexdigest()[:16] # XXX TODO Instead of a hash, we should use the packed ip+port i = environ['REMOTE_ADDR'].split( '.') # TODO Check that it is an v4 address phash = pack('>4BH', int(i[0]), int(i[1]), int(i[2]), int(i[3]), port) # TODO BT: If left=0, the download is done and we should not return any peers. event = args.pop('event', [None])[0] if event == 'stopped': # Maybe we should only remove it from this track, but this is good enough. mdel(phash, namespace='P') if STATS: # XXX Danger of incomplete underflow! if left == '0': decr(key_complete, namespace='S') else: decr(key_incomplete, namespace='S') return # They are going away, don't waste bw/cpu on this. 
resps(bencode({'interval': INTERVAL, 'peers': []})) elif STATS and event == 'completed': decr(key_incomplete, namespace='S') incr(key_complete, namespace='S') updatetrack = False # Get existing peers PEER_SIZE = 6 MAX_PEERS = 32 MAX_PEERS_SIZE = MAX_PEERS * PEER_SIZE a = get(key, namespace='T') # TODO: perhaps we should use the array module: http://docs.python.org/library/array.html if a: als = [a[x:x + PEER_SIZE] for x in xrange(0, l, PEER_SIZE)] l = len(als) if l > MAX_PEERS: i = randrange(0, l - MAX_PEERS) ii = i * PEER_SIZE rs = a[ii:ii + MAX_PEERS_SIZE] rls = als[i:i + MAX_PEERS] else: rs = a rls = als rrls = get_multi(rls, namespace='P').keys() # NOTE Do not use a generator, generators are always true even if empty! lostpeers = [p for p in rls if p not in rrls] if lostpeers: # Remove lost peers rs = ''.join(rrls) [als.remove(p) for p in lostpeers if p in als] a = ''.join(als) updatetrack = True if STATS: # XXX medecau suggests we might use len(s) instead of counting leechers. # XXX If we underflow, should decrement from '!complete' decr(key_incomplete, len(lostpeers), namespace='S') # Remove self from returned peers # XXX Commented out as we are shorter on CPU than bw #if phash in peers: # peers.pop(phash, None) # New track! else: a = rs = '' als = [] if STATS: mset(key_complete, '0', namespace='S') mset(key_incomplete, '0', namespace='S') if phash not in als: # Assume new peer # XXX We don't refresh the peers expiration date on every request! mset(phash, 1, namespace='P') a += phash updatetrack = True if STATS: # Should we bother to check event == 'started'? Why? if left == '0': incr(key_complete, namespace='S') else: incr(key_incomplete, namespace='S') if updatetrack: mset(key, a, namespace='K') if STATS: resps( bencode({ 'interval': INTERVAL, 'peers': rs, 'complete': (get(key_complete, namespace='S') or 0), 'incomplete': (get(key_incomplete, namespace='S') or 0) })) else: resps(bencode({'interval': INTERVAL, 'peers': rs}))
def real_main(): args = parse_qs(environ['QUERY_STRING']) if not args: print "Status: 301 Moved Permanantly\nLocation: /\n\n", return for a in ('info_hash', 'port'): if a not in args or len(args[a]) != 1: if ERRORS: resps(bencode({'failure reason': "You must provide %s!"%a})) return key = args['info_hash'][0] if STATS: key_complete = '%s!complete'%key key_incomplete = '%s!incomplete'%key left = args.pop('left', [None])[0] err = None if(len(key) > 128): err = "Insanely long key!" else: try: port = int(args['port'][0]) if port > 65535 or port < 1: err = "Invalid port number!" except: err = "Invalid port number!" if err: if ERRORS: resps(bencode({'failure reason': err})) return # Crop raises chance of a clash, plausible deniability for the win! #phash = md5("%s/%d" % (ip, port)).hexdigest()[:16] # XXX TODO Instead of a hash, we should use the packed ip+port i = environ['REMOTE_ADDR'].split('.') # TODO Check that it is an v4 address phash = pack('>4BH', int(i[0]), int(i[1]), int(i[2]), int(i[3]), port) # TODO BT: If left=0, the download is done and we should not return any peers. event = args.pop('event', [None])[0] if event == 'stopped': # Maybe we should only remove it from this track, but this is good enough. mdel(phash, namespace='P') if STATS: # XXX Danger of incomplete underflow! if left == '0': decr(key_complete, namespace='S') else: decr(key_incomplete, namespace='S') return # They are going away, don't waste bw/cpu on this. 
resps(bencode({'interval': INTERVAL, 'peers': []})) elif STATS and event == 'completed': decr(key_incomplete, namespace='S') incr(key_complete, namespace='S') updatetrack = False # Get existing peers PEER_SIZE = 6 MAX_PEERS = 32 MAX_PEERS_SIZE = MAX_PEERS*PEER_SIZE a = get(key, namespace='T') # TODO: perhaps we should use the array module: http://docs.python.org/library/array.html if a: als = [a[x:x+PEER_SIZE] for x in xrange(0, l, PEER_SIZE)] l = len(als) if l > MAX_PEERS: i = randrange(0, l-MAX_PEERS) ii = i*PEER_SIZE rs = a[ii:ii+MAX_PEERS_SIZE] rls = als[i:i+MAX_PEERS] else: rs = a rls = als rrls = get_multi(rls, namespace='P').keys() # NOTE Do not use a generator, generators are always true even if empty! lostpeers = [p for p in rls if p not in rrls] if lostpeers: # Remove lost peers rs = ''.join(rrls) [als.remove(p) for p in lostpeers if p in als] a = ''.join(als) updatetrack = True if STATS: # XXX medecau suggests we might use len(s) instead of counting leechers. # XXX If we underflow, should decrement from '!complete' decr(key_incomplete, len(lostpeers), namespace='S') # Remove self from returned peers # XXX Commented out as we are shorter on CPU than bw #if phash in peers: # peers.pop(phash, None) # New track! else: a = rs = '' als = [] if STATS: mset(key_complete, '0', namespace='S') mset(key_incomplete, '0', namespace='S') if phash not in als: # Assume new peer # XXX We don't refresh the peers expiration date on every request! mset(phash, 1, namespace='P') a += phash updatetrack = True if STATS: # Should we bother to check event == 'started'? Why? if left == '0': incr(key_complete, namespace='S') else: incr(key_incomplete, namespace='S') if updatetrack: mset(key, a, namespace='K') if STATS: resps(bencode({'interval':INTERVAL, 'peers':rs, 'complete':(get(key_complete, namespace='S') or 0), 'incomplete':(get(key_incomplete, namespace='S') or 0)})) else: resps(bencode({'interval':INTERVAL, 'peers':rs}))
def real_main():
    """Handle one BitTorrent tracker announce (CGI entry point).

    Reads the announce parameters from QUERY_STRING, maintains the
    '|'-separated list of peer hashes per torrent (namespace 'K') and
    one packed ip+port entry per peer (namespace 'P') in memcache, and
    replies with a bencoded compact peer string, plus seeder/leecher
    counters (namespace 'S') when STATS is enabled.
    """
    args = parse_qs(environ['QUERY_STRING'])
    if not args:
        # No query string at all: redirect to the site root.
        print "Status: 301 Moved Permanantly\nLocation: /\n\n",
        return
    # Both parameters are required and must appear exactly once.
    for a in ('info_hash', 'port'):
        if a not in args or len(args[a]) != 1:
            if ERRORS:
                resps(bencode({'failure reason': "You must provide %s!"%a}))
            return
    ip = environ['REMOTE_ADDR']
    key = args['info_hash'][0]
    if STATS:
        # Per-torrent seeder/leecher counter keys.
        key_complete = '%s!complete'%key
        key_incomplete = '%s!incomplete'%key
        left = args.pop('left', [None])[0]
    err = None
    if(len(key) > 128):
        err = "Insanely long key!"
    else:
        try:
            port = int(args['port'][0])
            if port > 65535 or port < 1:
                err = "Invalid port number!"
        except:
            err = "Invalid port number!"
    if err:
        if ERRORS:
            resps(bencode({'failure reason': err}))
        return
    # Crop raises chance of a clash, plausible deniability for the win!
    phash = md5("%s/%d" % (ip, port)).hexdigest()[:16]
    # XXX TODO Instead of a hash, we should use the packed ip+port
    # TODO BT: If left=0, the download is done and we should not return any peers.
    event = args.pop('event', [None])[0]
    if event == 'stopped':
        # Maybe we should only remove it from this track, but this is good enough.
        mdel(phash, namespace='P')
        if STATS:
            # XXX Danger of incomplete underflow!
            if left == '0':
                decr(key_complete, namespace='S')
            else:
                decr(key_incomplete, namespace='S')
        return # They are going away, don't waste bw/cpu on this.
        # NOTE(review): the next line is unreachable (after return);
        # presumably the goodbye response was deliberately skipped.
        resps(bencode({'interval': INTERVAL, 'peers': []}))
    elif STATS and event == 'completed':
        decr(key_incomplete, namespace='S')
        incr(key_complete, namespace='S')
    updatetrack = False
    # Get existing peers
    r = get(key, namespace='K')
    if r:
        s = r.split('|')
        # Return at most 32 peers, chosen at random from the full list.
        if len(s) > 32:
            ks = sample(s, 32)
        else:
            ks = s
        peers = get_multi(ks, namespace='P')
        # NOTE Do not use a generator, generators are always true even if empty!
        lostpeers = [p for p in ks if p not in peers]
        if lostpeers:
            # Remove lost peers
            s = [k for k in s if k not in lostpeers]
            updatetrack = True
            if STATS:
                # XXX medecau suggests we might use len(s) instead of counting leechers.
                # XXX If we underflow, should decrement from '!complete'
                decr(key_incomplete, len(lostpeers), namespace='S')
        # Remove self from returned peers
        # XXX Commented out as we are shorter on CPU than bw
        #if phash in peers:
        #    peers.pop(phash, None)
    # New track!
    else:
        s = []
        peers = {}
        if STATS:
            mset(key_complete, '0', namespace='S')
            mset(key_incomplete, '0', namespace='S')
    if phash not in s:
        # Assume new peer
        # XXX We don't refresh the peers expiration date on every request!
        #mset(phash, '|'.join((ip, str(port))), namespace='I')
        i = ip.split('.')
        # Store this peer as 6 packed bytes: 4 IPv4 octets + big-endian port.
        mset(phash, pack('>4BH', int(i[0]), int(i[1]), int(i[2]), int(i[3]), port), namespace='P')
        s.append(phash)
        updatetrack = True
        if STATS:
            # Should we bother to check event == 'started'? Why?
            if left == '0':
                incr(key_complete, namespace='S')
            else:
                incr(key_incomplete, namespace='S')
    if updatetrack:
        mset(key, '|'.join(s), namespace='K')
    #ps = dict((k, peers[k].split('|')) for k in peers)
    #pl = [{'ip': ps[h][0], 'port': ps[h][1]} for h in ps]
    # Compact response: concatenation of every packed peer entry we found.
    cpl = ''.join(peers.values())
    if STATS:
        resps(bencode({'interval':INTERVAL, 'peers':cpl, 'complete':(get(key_complete, namespace='S') or 0), 'incomplete':(get(key_incomplete, namespace='S') or 0)}))
    else:
        resps(bencode({'interval':INTERVAL, 'peers':cpl}))