def getSummonerByName(self, name):
    """Look up a summoner by nickname via the Riot Summoner-V4 API.

    name: summoner nickname (may contain non-ASCII characters; it is
          percent-encoded before being placed in the URL).
    returns: whatever utils.getJson yields — the parsed JSON on success,
             or the integer HTTP status code (403 invalid API key,
             404 summoner not found) on failure.
    """
    jsonUrl = "{}/lol/summoner/v4/summoners/by-name/{}?api_key={}".format(
        self.urlBase, urllib.parse.quote(name.encode()), self.api_key
    )
    print("Pesquisando pelo nick \"{}\"...".format(name))
    ret = utils.getJson(jsonUrl)
    # utils.getJson signals HTTP failures by returning the status code
    # itself; every path returned `ret` in the original, so the three
    # duplicate returns collapse into one exit point.
    if ret == 403:
        print("Erro 403: chave de API invalida.")
    elif ret == 404:
        print("Erro 404: Invocador não existe.")
    return ret
def getEntityById(self, entityId):
    """Gets entity from LRI

    entityId: ID of the entity to get
    returns: JSON of the entity
    raises: IndexError if no entity with the given id was found
    """
    entities = []
    q = '{"urn:lri:property_type:id":"%s"}' % entityId
    url = self.toUrlForm(q)
    print("StandardQuery.getEntityById: Running query: %s" % url)
    results = requests.get(url)
    statusCode = results.status_code
    j = utils.getJson(results)
    response = j["response"]
    for entity in response:
        # BUG FIX: the original rebound `entityId` here, clobbering the
        # queried id so the error messages below reported the wrong value.
        foundId = entity["props"]["urn:lri:property_type:id"]
        entities.append(entity)
        print("StandardQuery.getEntityById: Added entity: %s" % foundId)
    print("StandardQuery.getEntityById: Got %d entity(s)" % len(entities))
    if len(entities) > 1:
        # BUG FIX: ids are strings, so %d would raise TypeError; use %s.
        print("StandardQuery.getEntityById: ERROR: Too many entities: %d from id: %s"
              % (len(entities), entityId))
    try:
        # BUG FIX: the original computed the entity but never returned it,
        # contradicting the docstring; callers always got None.
        return entities[0]
    except IndexError:
        # `as e`-less re-raise with a clearer message (the Py2-only
        # `except IndexError, e` syntax also broke Python 3 parsing).
        raise IndexError("ERROR getting: %s" % entityId)
def main(planet=None, lang=None, layout=None):
    """Render the print PDFs (catalog pages, intercalars, crib sheets).

    planet: single planet name, or None for all eight planets.
    lang:   single language code, or None for both 'fr' and 'en'.
    layout: forwarded to the page generators; None selects preview mode.
    """
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath='print/templates/'))
    allPlanets = ['mercury', 'venus', 'earth', 'mars',
                  'jupiter', 'saturn', 'uranus', 'neptune']
    names = allPlanets if planet is None else [planet]
    data = [getJson('data/planets/' + n + '.json') for n in names]
    langs = ['fr', 'en'] if lang is None else [lang]
    cssPages = getText('print/stylesheet.css')
    cssInt = getText('print/stylesheet-intercalar.css')
    for planetData in data:
        catalog = Pages(planetData)
        inter = Intercalar(planetData)
        crib = CribSheet(planetData)
        name = planetData['name']['en']
        for l in langs:
            pages = catalog.generate(env, cssPages, l, layout)
            interPages = inter.generate(env, cssInt, l, layout)
            cribPages = crib.generate(env, cssInt, l, None)
            print('Saving ' + name + l)
            if layout is None:
                # Preview mode: intercalars are prepended to the catalog
                # and everything lands in the preview folder.
                saveAsPDF(interPages + pages, name + '-' + l,
                          folder='output/print/preview/')
                saveAsPDF(cribPages, name + '-cribsheet-' + l,
                          folder='output/print/preview/')
            else:
                saveAsPDF(pages, name + '-' + l)
                saveAsPDF(interPages, name + '-' + l,
                          folder='output/print/intercalars/')
                saveAsPDF(cribPages, name + '-cribsheet-' + l)
    print('Done !\n\n\n')
def runQuery(self):
    """Runs the query. Returns QueryResult"""
    web.debug("QueryRunner.runQuery: url = %s" % self.url)
    response = requests.get(self.url)
    # Wrap the HTTP status and parsed body in the project's result type.
    return QueryResult(response.status_code, utils.getJson(response))
def get(self): tic = utils.microtime(True) for n in range(1, 11): json_url = 'http://www.noticiashacker.com/nuevo.json?pagina=%d' noticias = utils.getJson(json_url % n) for noticia in noticias['posts']: user = utils.put_user(noticia['user']) post = utils.put_post(noticia,user) toc = utils.microtime(True) print "Time elapsed %f seconds" % (toc-tic)
def addUrl_TransitLand_Operator(url):
    """Register a TransitLand operator in the catalog.

    url: a TransitLand operator URL; the operator id is its last path
         segment.
    """
    operator = url.rsplit('/', 1)[-1]
    info = getJson(
        f'https://api.transit.land/api/v1/operators/{operator}')
    addEntry({
        'operator': operator,
        'title': info['name'],
        'onestopIds': info['represented_in_feed_onestop_ids'],
        'source': 'transitland'
    })
def getChampions(self):
    """Download champion.json from the CDN and return the parsed data,
    updating the status labels and progress bar along the way."""
    print("Obtendo informações dos campeões...")
    self.status1.set("Obtendo informações sobre os campões...")
    self.barra["maximum"] = 0
    self.barra["value"] = 0
    url = "{}/data/en_US/champion.json".format(self.CDN_URL)
    self.status2.set("champion.json")
    data = utils.getJson(url)
    self.barra["value"] = 1
    return data
def getSummonerSpells(self):
    """Download summoner-spell data, write each spell's cooldown to the
    SS_CDR file (one "key=cooldown" line per spell) and fetch every
    spell icon into self.ss_iconsDir.
    """
    self.barra["value"] = 0
    print("Obtendo informações sobre os feitiços de invocador...")
    self.status1.set(
        "Obtendo informações sobre os feitiços de invocador...")
    self.status2.set("summoner.json")
    jsonUrl = self.CDN_URL + "/data/en_US/summoner.json"
    # Make sure the icon folder exists before downloading into it.
    if not os.path.exists(self.ss_iconsDir):
        os.mkdir(self.ss_iconsDir)
    dados = utils.getJson(jsonUrl)
    self.barra["value"] = 1
    ssIconeUrl = self.CDN_URL + "/img/spell/"
    spells = dados["data"]
    self.status1.set("Baixando icones dos feitiços de invocador...")
    self.barra["value"] = 0
    self.barra["maximum"] = len(spells)
    time.sleep(5)
    # File storing the cooldowns (CDR) of the summoner spells.
    # BUG FIX: use a context manager so the file is closed (and its
    # contents flushed) even if one of the downloads below raises.
    with open(os.path.join(self.o_dir, "SS_CDR"), "w") as ssArq:
        for spellNome in spells:
            # Keep only the spell id, icon file name and cooldown.
            spell = spells[spellNome]
            print("Baixando icones dos feitiços de invocador",
                  spell["name"], "...")
            self.status2.set(spell["name"])
            ssArq.write(spell["key"] + "=" + spell["cooldownBurn"] + "\n")
            request.urlretrieve(
                ssIconeUrl + spell["image"]["full"],
                os.path.join(self.ss_iconsDir, spell["key"] + ".png"))
            self.barra["value"] += 1
def defineFormats(distribution):
    """Compute a paper-format series for every planet.

    For each planet in data/planetsInput.json, format '0' is a rectangle
    whose area matches the planet's surface area; each following format
    halves the long side, like the ISO A-series.

    distribution: indexed by format number; each entry has a 'total'
        count used to compute the area lost per format.
        NOTE(review): assumed from usage here — confirm against
        data/formatsDistribution.json.
    returns: dict mapping English planet name -> enriched planet dict
        (adds 'area', 'size_km', 'formats_mm', 'areaLost', 'serieAequi').
    """
    planets = {}
    planetsData = getJson('data/planetsInput.json')
    for planet in planetsData:
        # 'area' is in km²
        area = areaOfOblateEllipsoid(*planet['radius'])
        symbol = planet['symbol']
        width, height = rectSize(area)
        planet['area'] = area
        planet['size_km'] = [width, height]
        # turn kilometers into millimeter and round to the lowest integrer
        # Planet's paper format '0' area ~= planet area (rounded to the lowest
        # millimeter) so height / width is now ~= √2
        height = floor(height * 1000000)
        width = floor(width * 1000000)
        planet['formats_mm'] = [[width, height]]
        serieAequi = {}
        # Area lost when tiling format 0 (10**-12 converts mm² to km²).
        planet['areaLost'] = [
            area - (width * height * distribution[0]['total'] * 10**-12)
        ]
        i = 1
        a = 0
        # search for folded versions until we reach the equivalent of A10
        while a <= 10:
            # Fold: new height is the old width, new width is half the
            # old height (rounded down).
            [height, width] = [width, floor(height / 2)]
            planet['formats_mm'].append([width, height])
            planet['areaLost'].append(
                area - (width * height * distribution[i]['total'] * 10**-12))
            # `a` only starts counting once the format fits an A0 sheet
            # (long side <= 1189 mm); `i` counts every fold regardless.
            if a == 0 and height <= 1189:
                serieAequi[str(a)] = {'number': i, 'size': [width, height]}
                a = 1
            elif a > 0:
                serieAequi[str(a)] = {'number': i, 'size': [width, height]}
                a += 1
            i += 1
        planet['serieAequi'] = serieAequi
        planets[planet['name']['en']] = planet
    return planets
def getEnemiesInfo(self, summonerId):
    """Return spell/champion/rune info for every enemy of a summoner.

    summonerId: encrypted summoner id of the allied player.
    returns: 404 (int) if the summoner is not in an active game,
             otherwise a list of [spell1Id, spell2Id, championId,
             [rune perk ids]] — one entry per enemy participant.
    raises: ValueError if the API response does not list the summoner
            among the participants.
    """
    jsonUrl = "{}/lol/spectator/v4/active-games/by-summoner/{}?api_key={}".format(
        self.urlBase, urllib.parse.quote(summonerId), self.api_key
    )
    print("Pesquisando pelo jogo atual do invocador \"{}\"...".format(summonerId))
    ret = utils.getJson(jsonUrl)
    if ret == 404:
        print("Erro: o invocador não está em partida (a partida terminou ?)")
        return ret
    players = ret["participants"]
    # Find the requested summoner's team id.
    timeAliado = None
    for player in players:
        if player["summonerId"] == summonerId:
            timeAliado = player["teamId"]
            break
    # BUG FIX: the original raised an accidental NameError here whenever
    # the summoner was missing from the participant list; fail explicitly.
    if timeAliado is None:
        raise ValueError(
            "summoner {} not found among participants".format(summonerId))
    # [spell1, spell2, champion id, [runes]]
    playersInfo = []
    for player in players:
        if player["teamId"] == timeAliado:
            continue  # skip allies; keep only the enemy team
        playersInfo.append([
            player["spell1Id"],
            player["spell2Id"],
            player["championId"],
            player["perks"]["perkIds"]
        ])
    return playersInfo
def getCDN_URL(self):
    """Fetch the realm manifest and build the CDN base URL."""
    # Single request, only to read the current CDN address.
    print("Obtendo URL da CDN...")
    self.barra["maximum"] = 1
    self.barra["value"] = 0
    self.status1.set("Consultando URL da CDN...")
    json_arq = self.realm + ".json"
    self.status2.set(json_arq)
    dados = utils.getJson(
        "https://ddragon.leagueoflegends.com/realms/" + json_arq)
    self.barra["value"] = 1
    # BUGADO (broken upstream): the manifest's "dd" version is not usable,
    # so a known-good version is pinned instead of `dados["dd"]`.
    # return dados["cdn"] + "/" + dados["dd"]
    return dados["cdn"] + "/10.10.3208608"
def main():
    """Render the website: home page plus one page per planet, in French
    and English, then copy the static script into the output tree."""
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath='web/templates/'))
    texts = getYaml('data/texts.yaml')
    names = [p['name']['en'].lower() for p in texts['planets']]
    data = [getJson('data/planets/' + n + '.json') for n in names]
    for lang in ['fr', 'en']:
        HomePage(env, lang, texts[lang], texts['planets']).generate()
        for planetData in data:
            PlanetPage(env, lang, texts[lang],
                       texts['planets'], planetData).generate()
    # Copy web/script.js verbatim to the output directory.
    with open('web/script.js', 'r') as src:
        with open('output/web/script.js', 'w') as dst:
            dst.write(src.read())
def getStandards(self):
    """Gets standards from LRI

    Queries the LRI, then filters the response down to "standard"
    entities: custom standards (type competency, no CCID), CCSS Math
    standards, and CCSS ELA standards (reached either directly or via
    the matching grade_level container). Optionally inlines each
    standard's components (self.getChildren) and anchor standard
    (self.getAnchors).

    returns: JSON of the standards
    """
    print("StandardQuery.getStandards")
    print("StandardQuery.getStandards: self.opts = %r" % self.opts)
    print("StandardQuery.getStandards: Running query: %s" % self.toUrlForm())
    results = requests.get(self.toUrlForm())
    statusCode = results.status_code
    j = utils.getJson(results)
    print("StandardQuery.getStandards: j=",j)
    response = j["response"]
    # Get standards
    standards = []
    for item in response:
        # (Python 2) bare print: emits a blank separator line per item.
        print
        itemId = item["props"]["urn:lri:property_type:id"]
        print("StandardQuery.getStandards: Processing: %s" % itemId)
        # If item has a CCID it is a CCSS type
        if "urn:ccss:property_type:ccid" not in item["props"]:
            # Custom standards will not have ccid
            print("StandardQuery.getStandards: Processing Custom item: %s" % itemId)
            # Custom standards will be type competency
            customType = "urn:lri:entity_type:competency"
            if customType in item["props"]["urn:lri:property_type:types"]:
                standards.append(item)
                print("StandardQuery.getStandards: Added Custom standard: %s" % itemId)
            else:
                print("StandardQuery.getStandards: ERROR: type: %s not in: %r" % (customType, item["props"]["urn:lri:property_type:types"]))
        else:
            ccid = item["props"]["urn:ccss:property_type:ccid"]
            print("StandardQuery.getStandards: Processing CCSS item: %s" % ccid)
            if ccid.find("Math") != -1:
                # Prune CCSS Math structure
                print("StandardQuery.getStandards: Processing CCSS Math item: %s" % ccid)
                if "urn:ccss:entity_type:standard" in item["props"]["urn:lri:property_type:types"]:
                    standards.append(item)
                    print("StandardQuery.getStandards: Added CCSS Math standard: %s" % itemId)
            elif ccid.find("ELA-Literacy") != -1:
                # Prune CCSS ELA structure
                print("StandardQuery.getStandards: Processing CCSS ELA item: %s" % ccid)
                cbProp = "urn:lri:property_type:contained_by"
                containedBy = item["props"][cbProp]
                # Normalize a single contained_by value to a list.
                # NOTE(review): `unicode` is Python 2 only.
                if type(containedBy) == str or type(containedBy) == unicode:
                    containedBy = [containedBy]
                # Handle contained_by domain or grade_level
                for cb in containedBy:
                    print("StandardQuery.getStandards: contained_by = %s" % cb)
                    if cb.find(":domain:") != -1:
                        # contained_by domain means item is grade_level
                        # grade_level contains standard
                        gradeId = item["props"]["urn:lri:property_type:id"]
                        print("StandardQuery.getStandards: Checking: (%s, %s)" % (self.grade_level, gradeId))
                        if gradeId == self.grade_level:
                            print("StandardQuery.getStandards: Processing grade_level: %s" % gradeId)
                            # Pull each contained standard by id.
                            for standardId in item["props"]["urn:lri:property_type:contains"]:
                                print("StandardQuery.getStandards: Processing CCSS ELA standard: %s" % standardId)
                                standard = self.getEntityById(standardId)
                                standards.append(standard)
                                newId = standard["props"]["urn:lri:property_type:id"]
                                if newId != standardId:
                                    print("StandardQuery.getStandards: ERROR: %s != %s" % (newId, standardId))
                                print("StandardQuery.getStandards: Added CCSS ELA standard: %s, (domain = %s, grade = %s)" % (newId, cb, gradeId))
                        else:
                            print("StandardQuery.getStandards: Skipping grade_level: %s" % gradeId)
                    elif cb.find(":grade") != -1:
                        # contained_by grade_level means item is standard
                        standard = self.getEntityById(itemId)
                        standards.append(standard)
                        newId = standard["props"]["urn:lri:property_type:id"]
                        if newId != itemId:
                            print("StandardQuery.getStandards: ERROR: %s != %s" % (newId, itemId))
                        print("StandardQuery.getStandards: Added CCSS ELA standard: %s" % newId)
    print("StandardQuery.getStandards: Processed %d standards" % len(standards))
    # Get components
    if self.getChildren:
        print("StandardQuery.getStandards: Getting standard_components")
        for std in standards:
            if not "urn:lri:property_type:contains" in std["props"]:
                continue
            stdId = std["props"]["urn:lri:property_type:id"]
            print("StandardQuery.getStandards: Getting components of: %s" % stdId)
            components = []
            print("StandardQuery.getStandards: contains: ")
            print(std["props"]["urn:lri:property_type:contains"])
            print("StandardQuery.getStandards: /contains")
            contains = std["props"]["urn:lri:property_type:contains"]
            if type(contains) == str or type(contains) == unicode:
                contains = [contains]
            for componentId in contains:
                print("StandardQuery.getStandards: Getting standard_component: %s" % componentId)
                component = self.getEntityById(componentId)
                components.append(component)
                newId = component["props"]["urn:lri:property_type:id"]
                # NOTE(review): `standardId` here is a leftover from the
                # ELA loop above — this message likely meant `componentId`.
                if newId != componentId:
                    print("StandardQuery.getStandards: ERROR: %s != %s" % (newId, standardId))
            # Replace the id list with the fully-fetched components.
            std["props"]["urn:lri:property_type:contains"] = components
            print("StandardQuery.getStandards: Added: %s components to standard: %s" % (len(std["props"]["urn:lri:property_type:contains"]), stdId))
    # Get anchors
    if self.getAnchors:
        print("StandardQuery.getStandards: Getting anchor_standards")
        for std in standards:
            if not "urn:ccss:property_type:is_anchored_to" in std["props"]:
                continue
            stdId = std["props"]["urn:lri:property_type:id"]
            print("StandardQuery.getStandards: Getting anchor for: %s" % stdId)
            anchorId = std["props"]["urn:ccss:property_type:is_anchored_to"]
            anchor = self.getEntityById(anchorId)
            newId = anchor["props"]["urn:lri:property_type:id"]
            if newId != anchorId:
                print("StandardQuery.getStandards: ERROR: %s != %s" % (newId, anchorId))
            # Replace the anchor id with the fetched anchor entity.
            std["props"]["urn:ccss:property_type:is_anchored_to"] = anchor
            print("StandardQuery.getStandards: Set is_anchored_to: %s" % anchor)
    # Flatten
    r = {}
    r["response"] = []
    r["response"].append({})
    r["response"][0]["standards"] = standards
    r["status"] = "success"
    r["status_code"] = statusCode
    return r
def __init__(self, planetData):
    """Initialise with print texts, the format distribution table, and
    the planet's area-loss values rounded to 3 decimals."""
    super().__init__(planetData)
    self.texts = getYaml('data/textsPrint.yaml')['pages']
    self.distrib = getJson('data/formatsDistribution.json')
    self.lost = list(map(lambda v: round(v, 3), planetData['areaLost']))
# Size of result set returned w/cursor limit = 1 q = query.QueryFactory().CreateQuery(pathInfo, opts, httpConfig, limit) if q is None: code = 404 return code, formatted # Run query once w/small limit # Extract cursor from results # Run subsequent queries in loop # until cursor comes back False print ("doGet: url = %s" % q.getUrl()) response = requests.get(q.getUrl()) j = utils.getJson(response) if "cursor" in j: cursor = j["cursor"] print ("\ndoGet: cursor = %s\n" % cursor) statusCode = response.status_code status = json.dumps(j["status"]) items = [] if cursor is False: items = j["response"] else: count = 0 while cursor is not False: # != "false": print ("doGet: Count = %d" % count) print ("doGet: Getting next %d results" % limit) url = 'http://%s:%s/entity/search?q={"limit":%d,"cursor":%s}&opts=%s' % (
def getLocation(location):
    """Query the TransitFeeds API for feeds at the given location id
    (including descendant locations, first page, up to 100 results)."""
    return getJson(
        'https://api.transitfeeds.com/v1/getFeeds'
        f'?key={TRANSITFEEDS_API_KEY}&location={location}'
        '&descendants=1&page=1&limit=100')
def doPost(pathInfo, opts, data):
    """Does HTTP POST

    Dispatches on the action segment of pathInfo: "/create" builds and
    runs insert queries, "/update" parses the XML payload and runs the
    resulting updates. Returns the formatted response body alongside an
    HTTP status code via the enclosing handler's conventions.
    NOTE(review): uses Python 2 `except X, e` syntax — change to
    `except X as e` when porting to Python 3.
    """
    web.debug("doPost")
    formatted = ""
    results = QueryResult()
    code = 201
    try:
        # Create & run inserts (create, update)
        toFormat = "xml"
        if not "format" in opts:
            # Fall back to the request's own "format" parameter.
            userData = web.input()
            toFormat = userData.get("format", "xml")
        web.debug("to format = %s" % toFormat)
        # pathInfo looks like "/<ccss_type>/<action>".
        pattern = "(/[a-z_]+)(/[a-z]+)"
        m = re.match(pattern, pathInfo)
        ccssType, action = m.groups()
        if action == "/create":
            i = insert.InsertFactory().CreateInsert(ccssType, action, opts, data)
            for url in i.getUrls():
                r = QueryRunner(url)
                results = r.runQuery()
                if not isinstance(results, QueryResult):
                    raise WrongTypeError("doPost: QueryRunner.runQuery(): expected QueryResult, got %s" % type(results))
                code = results.getHttpStatusCode()
            # Convert to requested format
            f = format.FormatterFactory().CreateFormatter(toFormat)
            formatted = f.format(results.getData())
        elif action == "/update":
            web.debug("===== %s =====" % action)
            updates = insert.parseXml(data)
            web.debug("doPost: updates = %r" % updates)
            responses = insert.runUpdates(updates, opts)
            count = 0
            for r in responses:
                f = format.FormatterFactory().CreateFormatter(toFormat)
                formatted += f.format(utils.getJson(r))
                # What to do about multiple <?xml...?> lines?
                # tmp = f.format(r.json)
                # if count > 0:
                #     if toFormat in ["xml", "johnxml", "oldxml"]:
                #         lines = tmp.split("\n")
                #         lines.remove(lines[0])
                #         tmp = "\n".join(lines)
                # formatted += tmp
                # count += 1
    except NotImplementedError, e:
        # Unsupported action/format: return 501 with the traceback inline.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        formatted = "doPost: Caught NotImplementedError: " + str(e) + "\n"
        formatted += "-" * 60 + "\n"
        formatted += traceback.format_exc()
        formatted += "-" * 60 + "\n"
        code = 501
        ]
        i = 1
        a = 0
        # search for folded versions until we reach the equivalent of A10
        while a <= 10:
            # Fold: new height is the old width, new width half the old
            # height (rounded down), as in the ISO A-series.
            [height, width] = [width, floor(height / 2)]
            planet['formats_mm'].append([width, height])
            planet['areaLost'].append(
                area - (width * height * distribution[i]['total'] * 10**-12))
            # `a` starts counting only once the long side fits A0
            # (<= 1189 mm); `i` counts every fold.
            if a == 0 and height <= 1189:
                serieAequi[str(a)] = {'number': i, 'size': [width, height]}
                a = 1
            elif a > 0:
                serieAequi[str(a)] = {'number': i, 'size': [width, height]}
                a += 1
            i += 1
        planet['serieAequi'] = serieAequi
        planets[planet['name']['en']] = planet
    return planets


if __name__ == '__main__':
    # Build the per-planet format data and dump one JSON file per planet.
    distribution = getJson('data/formatsDistribution.json')
    planets = defineFormats(distribution)
    for planet in planets.values():
        dumpJson('data/planets/' + planet['name']['en'].lower() + '.json',
                 planet)
def getFeedVersions(feedId):
    """Fetch up to 10 versions of a TransitFeeds feed, including error
    and warning details, from the TransitFeeds API."""
    params = f'key={TRANSITFEEDS_API_KEY}&feed={feedId}&page=1&limit=10&err=1&warn=1'
    return getJson('https://api.transitfeeds.com/v1/getFeedVersions?' + params)