def pagesToBistracker(pages):
    """Build the nested BIS-tracker mapping {classe: {spe: {phase: slots}}}.

    Pages are grouped by their 'classe' metadata; each page contributes one
    slot entry per item, with ring/trinket/weapon slot-name disambiguation.
    """
    result = {}
    by_classe = lambda p: p.metadata['classe']
    grouped = groupby(sorted(pages, key=by_classe), key=by_classe)
    for classe_key, classe_pages in grouped:
        for page in classe_pages:
            name_classe = ClasseEnum(classe_key).name
            name_spe = (WowIsClassicSpe[page.metadata['spe']].value).name
            name_phase = PhaseEnum(page.metadata['phase']).name
            slots = {}
            for item in page.metadata['items']:
                log(name_classe + ' ' + name_spe + ' ' + name_phase + ' :' + item.url)
                entry = {
                    'itemID': item.id,
                    'obtain': {
                        'Zone': item.location,
                        'Type': item.type,
                        'Method': item.method,
                        'Drop': item.dropRate,
                        'Url': item.url,
                    },
                }
                slotName = item.slot.name
                # Rings and trinkets can be equipped twice: suffix "1"/"2"
                # depending on whether a matching slot key already exists.
                if item.slot in {SlotEnum.Ring, SlotEnum.Trinket}:
                    taken = [k for k in slots if slotName in k]
                    slotName += "2" if taken else "1"
                # Classes with ambidexterity: a second main-hand item is
                # recorded under the off-hand slot name instead.
                if item.slot in {SlotEnum.MainHand}:
                    taken = [k for k in slots if slotName in k]
                    slotName = SlotEnum.OffHand.name if taken else SlotEnum.MainHand.name
                slots = merge(slots, {slotName: entry})
            result = merge(result, {name_classe: {name_spe: {name_phase: slots}}})
    return result
def Section(title, links, description=None, headerStyle=None):
    """Render a titled page section: underlined H2, optional description, link list.

    Args:
        title: Heading text for the section.
        links: Iterable of components rendered inside the html.Ul.
        description: Optional content rendered between the heading and the list.
        headerStyle: Optional dict of extra CSS merged over styles['underline'].

    Returns:
        An html.Div containing the composed section.
    """
    # BUG FIX: the default was a mutable `{}`, which Python shares across all
    # calls (any mutation would leak into later calls). Use a None sentinel.
    header_style = headerStyle if headerStyle is not None else {}
    return html.Div([
        html.H2(title, style=merge(styles['underline'], header_style)),
        # Skip the description wrapper entirely when none is provided.
        html.Div(description) if description is not None else None,
        html.Ul(links),
    ])
def generate_list():
    """HTTP handler: parse a JSON list of recipes from the request body,
    scrape their ingredients, merge duplicates, and return merged JSON.

    Returns:
        A JSON string of the merged ingredient list.
    """
    logger.info("Generating list.")
    list_of_recipes = json.loads(request.data)
    # Scrape the ingredients for every requested recipe.
    # CONSISTENCY FIX: the original mixed logger.info with bare print() calls;
    # route all diagnostics through the module logger.
    logger.info('call scraping')
    ingredients = scrape(list_of_recipes)
    # Merge duplicate ingredients across recipes.
    logger.info('call merge')
    merged_ingredients = merge(ingredients)
    logger.debug('merged ingredients: %s', merged_ingredients)
    logger.info('send response')
    return json.dumps(merged_ingredients)
'''
This module reindexes a Mahir study set from 1 to N.
This is necessary when terms are deleted or merged
and the IDs have gaps.

Usage: python <script> VOCAB_FILE [TERM_ID ...]
'''

import sys
import json

from tools import reindex, merge, save

try:
    file = sys.argv[1]
except IndexError:
    # BUG FIX: the original used a bare `except:` and then fell through,
    # crashing later with a NameError on `file`. Exit cleanly instead.
    print('no file given...doing nothing...')
    sys.exit(1)

# get ids to merge
tomerge = sys.argv[2:]

# load the vocab file
with open(file, 'r') as infile:
    vocab = json.load(infile)

# merge requested term ids
merged = merge(vocab, tomerge)

# reindex term ids from 1 to N
# BUG FIX: the original called reindex(vocab), silently discarding `merged`;
# use the merge result as the comments intend (unless tools.merge mutates
# vocab in place — TODO confirm, in which case both spellings are equivalent).
reindexed = reindex(merged)

# export data back to file
save(reindexed, file)