Example #1
0
 def get(self, game_id, player_id, x, y):
     """Record a player's (x, y) position and echo the player's state.

     Responds 412 (writing the status code as the plain-text body) when
     the game does not exist; otherwise stores the position in the cache
     and writes the player's serialized output.
     """
     resp = self.response
     resp.headers['Content-Type'] = 'text/plain'
     game = Game.get_by_id(game_id)
     if not game:
         resp.status = 412
         resp.write(resp.status)
         return
     Cache.setPos(game_id, player_id, x, y)
     resp.write(game.player(player_id).output())
Example #2
0
 def get_bulk_soup( self, urls, bar=None ):
     """Fetch and parse every URL in *urls*, returning {url: soup}.

     The cache is pre-warmed in bulk first.  If a progress *bar* is
     supplied it is relabelled, resized to len(urls), and advanced one
     tick per parsed URL.
     """
     Cache.cache_bulk(urls, bar=bar)
     if bar:
         bar.label = 'Souping...'
         bar.expected_size = len(urls)
         bar.show(0)

     soups = {}
     for target in urls:
         soups[target] = self.getsoup(target)
         if bar:
             bar.show(bar.last_progress + 1)
     return soups
Example #3
0
 def get(self, game_id):
     """Emit every cached player position for a game as plain text.

     Body format: "player:x,y" entries joined by "|".  Responds 404
     with no body when nothing is cached for the game.
     """
     self.response.headers['Content-Type'] = 'text/plain'
     positions = Cache.getPos(game_id)
     if not positions:
         self.response.status = 404
         return
     self.response.status = 200
     parts = ["%s:%s,%s" % (p, x, y) for p, (x, y) in positions.iteritems()]
     self.response.out.write("|".join(parts))
Example #4
0
 def set_lang_id(self, chat_id, lang_id):
     """Persist the language for *chat_id* (None falls back to "en").

     EAFP upsert: try the INSERT first and fall back to UPDATE on a
     duplicate chat_id.  The chat's cached language is invalidated
     before the write, and the transaction is committed at the end.
     """
     lang_id = "en" if lang_id is None else lang_id
     Cache().invalidate_lang_cache(chat_id)
     insert_sql = "INSERT INTO chats (chat_id, lang_id) VALUES(?, ?);"
     update_sql = "UPDATE chats SET lang_id = ? WHERE chat_id = ?;"
     try:
         self.cursor.execute(insert_sql, [chat_id, lang_id])
     except sqlite3.IntegrityError:
         self.cursor.execute(update_sql, [lang_id, chat_id])
     self.connection.commit()
Example #5
0
 def get(self, game_id):
     """Write each player's pickup-history coordinates as plain text.

     Body format: "player:coords,coords,..." entries joined by "|".
     Responds 404 (status code echoed as the body) when no history is
     cached for the game.
     """
     # Fix: the original also did `game = Game.get_by_id(game_id)` but
     # never used the result -- a dead datastore round-trip, removed.
     hist = Cache.getHist(game_id)
     if not hist:
         self.response.status = 404
         self.response.write(self.response.status)
         return
     self.response.headers['Content-Type'] = 'text/plain'
     self.response.status = 200
     self.response.out.write("|".join([
         "%s:%s" % (p, ",".join([str(h.coords) for h in hls]))
         for p, hls in hist.iteritems()
     ]))
Example #6
0
 def get(self, game_id):
     """Write each player's reachable areas for the requested modes.

     Expects a space-separated "modes" request parameter.  Responds 404
     (status code echoed as the body) when there is no cached history or
     the parameter is absent.  Otherwise emits "player:area,area,..."
     entries joined by "|", where areas come from Map.get_reachable_areas
     applied to a PlayerState rebuilt from the cached pickup history.
     """
     hist = Cache.getHist(game_id)
     # Fix: the original called paramVal(self, "modes") twice; read once.
     modes_param = paramVal(self, "modes")
     if not hist or not modes_param:
         self.response.status = 404
         self.response.write(self.response.status)
         return
     modes = modes_param.split(" ")
     self.response.headers['Content-Type'] = 'text/plain'
     self.response.status = 200
     self.response.out.write("|".join([
         "%s:%s" % (p, ",".join(
             Map.get_reachable_areas(
                 PlayerState([(h.pickup_code, h.pickup_id, h.removed)
                              for h in hls]), modes)))
         for p, hls in hist.iteritems()
     ]))
Example #7
0
    def get(self, game_id, player_id):
        """Store a player's seed, creating game/cache records as needed.

        Reads a comma-separated "seed" request parameter: element 0 is a
        "|"-separated flag segment (mode=..., shared=...), the remaining
        elements are "|"-separated seed lines.  Writes "ok" on success.
        """
        seedlines = []
        lines = paramVal(self, "seed").split(",")
        game = Game.get_by_id(game_id)
        hist = Cache.getHist(game_id)
        # Ensure the cache has history/position entries for this player.
        if not hist:
            Cache.setHist(game_id, player_id, [])
        pos = Cache.getPos(game_id)
        if not pos:
            Cache.setPos(game_id, player_id, 0, 0)
        if not game:
            # No stored game yet: build one from the flag segment.
            flags = lines[0].split("|")
            mode_opt = [f[5:] for f in flags if f.lower().startswith("mode=")]
            shared_opt = [
                f[7:].split(" ") for f in flags
                if f.lower().startswith("shared=")
            ]
            mode = mode_opt[0].lower() if mode_opt else None
            if mode:
                # Named modes translate through mode_map; anything else
                # is expected to be a numeric mode id.
                mode = mode_map[mode] if mode in mode_map else int(mode)

            shared = shared_opt[0] if shared_opt else None
            game = get_new_game(_mode=mode, _shared=shared, id=game_id)
        for l in lines[1:]:
            # Each seed line looks like "location|code|id[|...]" --
            # fields 1 and 2 feed Pickup.name (presumably code and id;
            # verify against the seed format).
            line = l.split("|")
            if len(line) < 3:
                print "ERROR: malformed seed line %s, skipping" % l
            else:
                seedlines.append("%s:%s" %
                                 (line[0], Pickup.name(line[1], line[2])))
        player = game.player(player_id)
        player.seed = "\n".join(seedlines)
        player.put()
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.status = 200
        self.response.out.write("ok")
Example #8
0
 def scrape (self):
     """Fetch, verify and process the US Code XML for one release point.

     Resolves the release point (``self.rp``) and zip URL -- scraping
     the house.gov entry page when no release point was specified --
     then makes sure a usable ``xml/`` working directory exists
     (downloading and extracting the zipfile if not), and finally feeds
     every title XML file through ``processFile`` before running the
     analysis/commit pass.  Heavy network and filesystem side effects.
     """
     if not self.rp:
         r = Cache.get(self.nation.cfg['entrypoint'])
         soup = BS(str(r))
         #find current release point
         log.info("No release point specified, retreiving latest...")
         # this failed fantastically - we'll get the RP from the zipurl
         #self.rp = utf8(soup.findAll('h3', attrs={'class': 'releasepointinformation'})[0].text.split()[-1])
         # NOTE(review): self.rp is still unset here -- this "Found"
         # line logs before rp is assigned a few lines below.
         log.info("Found release point %s" % self.rp)
         #find the download url
         self.zipurl = self.nation.cfg['entrypoint'].rpartition('/')[0] + '/' + soup.findAll('a', title='All USC Titles in XML')[0]['href']
         # new way to set the rp using the zipurl's filename
         self.rp = utf8( self.zipurl.rpartition('@')[-1].partition('.')[0] )
     else:
         log.info('Using specified release point %s...' % self.rp)
         # don't actually need this
         # rpurl = 'http://uscode.house.gov/download/releasepoints/us/pl/%s/%s/usc-rp@%s.htm' % (tuple(self.rp.split()) + (self.rp,))
         self.zipurl = 'http://uscode.house.gov/download/releasepoints/us/pl/%s/%s/xml_uscAll@%s.zip'  % (tuple(self.rp.split('-')) + (self.rp,))

     log.debug("Using zipurl: %s" % self.zipurl)

     # Sentinel exceptions used purely for control flow in the checks below.
     class FileNotThere (Exception): pass
     class XMLNotThere( Exception ): pass
     class AllGood( Exception ): pass

     filename = self.zipurl.rpartition('/')[-1]
     xmldir = self._workdir + os.sep + 'xml' + os.sep

     # check to see if we have xml that works
     # if we don't check to see if we have a zip file
     # if we don't, download it
     # if we do, extract it
     # check the xml again, if it's good, proceed
     # if it's not, error out

     try:
         # Title 1 (usc01.xml) acts as the canary file for the whole tree.
         assert os.path.exists(xmldir + 'usc01.xml')
         soup = BS(open(xmldir + os.sep + 'usc01.xml', 'r').read())
         xmlrp = soup.find('docpublicationname').text.split('@')[-1]
         #old way to get rp, the new way is much better
         # xmlrp = soup.title.first("note", topic="miscellaneous").text.split()[-1]
         if xmlrp == self.rp:
             raise AllGood
         else:
             raise XMLNotThere
     except (XMLNotThere,AssertionError):
         # delete directory if it exists
         if os.path.exists(xmldir):
             shutil.rmtree(xmldir)
         # if there's no xml file, download it
         if not os.path.exists(self._workdir + os.sep + filename):
             log.info('No zipfile found for release point, downloading...')
             self.downloadFile(self.zipurl, filename)
         # now we should have a zipfile and no existing xmldir
         log.info('Extracting file %s...' % filename)
         zf = ZipFile(self._workdir + os.sep + filename, 'r')
         # older release points do not have an interior xml/ dir
         if not all( [ n.startswith('xml/') for n in zf.namelist()]):
             zf.extractall(xmldir)
         else:
             zf.extractall(self._workdir)
         # double check the xml now...
         assert os.path.exists(xmldir + 'usc01.xml')
         # it may be problematic to rely on the RP information in the XML documents provided
         # rp 113-21 (the earliest presently available) does not include this in the 
         # docpublicationname meta tag
         #soup = BS(open(xmldir + os.sep + 'usc01.xml', 'r').read())
         #xmlrp = soup.find('docpublicationname').text.split('@')[-1]
         #if xmlrp != self.rp:
         #    raise XMLNotThere('XML did not check out after extraction.')
     except AllGood:
         pass
     except:
         # NOTE(review): bare re-raise is a no-op -- any other exception
         # propagates unchanged.
         raise

     log.info('All checks passed...')
     xf = os.listdir(xmldir)
     root = self.findOrCreateRoot()
     xf = [xmldir + f for f in xf if f.endswith('.xml')]
     xf.sort()
     log.info("Processing %i files..." % len(xf))
     # Progress is tracked at 1000 ticks per title file.
     self.bar = progress.Bar(label='US', expected_size=1000*len(xf))
     self.progress( i=len(xf) )
     for fn in xf:
         self.processFile(fn, root)
         self.progress(rollup=1000)
     log.info('Analyzing code...')
     self.progress(label="Analyzing")
     root.analyze(commit=True, bar=self.bar)
     store.commit()
     log.info('Scrape completed.')
Example #9
0
from urllib.parse import urlencode
from hashlib import md5
import json
import re
import datetime
import os

from tornado import gen
from tornado.httpclient import AsyncHTTPClient as HTTPClient
from tornado.httpclient import HTTPRequest
import bs4.element
import requests

from util import Cache, unicode_type, bytes_type

# Module-level cache instance shared by the helpers in this module.
cache = Cache()
# Base URL of the "jwc" service (presumably the school's academic-affairs
# portal -- confirm against callers); requests are made against this host.
jwc_domain = 'http://202.119.81.112:9080'


def login_data(username, password):
    """Build the form payload for the login 'verify' request.

    The remote endpoint expects the password as an upper-cased MD5 hex
    digest; text passwords are UTF-8 encoded before hashing.
    """
    if isinstance(password, unicode_type):
        password = password.encode('utf-8')
    digest = md5(password).hexdigest().upper()
    return {
        'method': 'verify',
        'USERNAME': username,
        'PASSWORD': digest,
    }


def login_session(username, password):
Example #10
0
 def geturl( self, url ):
     """Return the raw response body for *url* via the shared Cache."""
     cached = Cache.get( url )
     return cached.resp