def main(remove_duplicates=True):
    """Creates a .json file that contains the RGB values for all item icons"""

    if remove_duplicates:
        # remove all duplicate items to reduce the search space for later colour-matching
        # most duplicate items are just noted items, so they make little difference in the final image
        # this step improves the runtime of the later colour-matching by ~50%
        items = [x for x in items_api.load() if not x.duplicate]
    else:
        items = items_api.load()

    # add or remove banned item IDs as needed, e.g. if one item always seems to dominate the output
    banned_IDs = [
        7586,  # Plain cream colour that fills entire icon - makes white areas of final image look bad
        9974,  # Giant eagle - matches too many pale tones
        22704,  # Portal nexus - matches lots of colours that often aren't visually similar
    ]
    # + list(range(3869, 3893))  # board game pieces - red versions of runes, red pixels always go to red fire rune

    rgb_dict = {}
    for item in items:
        try:
            myimg = cv2.imread(f"images/{item.id}.png")
            avg_color = np.average(np.average(myimg, axis=0), axis=0).tolist()
            if item.id not in banned_IDs:
                rgb_dict[item.id] = avg_color

        except np.AxisError:
            # some images that downloaded 'successfully' are still corrupt
            # print(item.id, item.name)
            pass

    with open("RGB_values.json", "w") as f:
        json.dump(rgb_dict, f)
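A minimal sketch (not part of the original script) of how the generated RGB_values.json might be consumed for the colour-matching step mentioned in the comments above; the helper names and the nearest-colour approach are assumptions.

import json

import numpy as np


def load_rgb_values(path="RGB_values.json"):
    # Load the per-item average colours written by main() above
    with open(path) as f:
        data = json.load(f)
    item_ids = list(data.keys())
    # Values were computed from cv2 images, so the channel order is BGR
    colours = np.array([data[i] for i in item_ids])  # shape (n_items, 3)
    return item_ids, colours


def closest_item(pixel, item_ids, colours):
    # Return the item ID whose average colour has the smallest Euclidean
    # distance to the given pixel colour (same channel order as above)
    distances = np.linalg.norm(colours - np.asarray(pixel, dtype=float), axis=1)
    return item_ids[int(np.argmin(distances))]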
Example No. 2
def create_dict():
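    # Build a mapping from each base (linked) item ID to the ID of the
    # variant with the highest `stacked` value.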
    all_db_items = items_api.load()

    stacked_items_link = dict()
    stack_count = dict()
    meta_link = dict()

    for item in all_db_items:
        if item.linked_id_item is not None and item.stacked is not None:
            linked_id = item.linked_id_item

            if linked_id not in meta_link:
                if linked_id > item.id:
                    meta_link[linked_id] = item.id
                else:
                    meta_link[linked_id] = linked_id
            correct_linked_id = meta_link[linked_id]
            if correct_linked_id in stack_count:
                if item.stacked > stack_count[correct_linked_id]:
                    stacked_items_link[correct_linked_id] = item.id
                    stack_count[correct_linked_id] = item.stacked
            else:
                stacked_items_link[correct_linked_id] = item.id
                stack_count[correct_linked_id] = item.stacked

    stacked_items_link[617] = 1004  # coins
    stacked_items_link[4561] = 10483  # purple sweets

    return stacked_items_link
Example No. 3
def init():
    global duels
    duels = {}
    global lastMessages
    lastMessages = {}
    global all_db_items
    all_db_items = items_api.load()
Example No. 4
def main():
    # Load all items from osrsbox item API
    all_db_items = items_api.load()

    # Get a dict with: id -> ItemProperties
    all_item_ids = dict()
    for item in all_db_items:
        all_item_ids[item.id] = item

    # Load the ge-limits-ids.json file from RuneLite
    ge_limits_path = Path(config.DATA_ITEMS_PATH / "ge-limits-ids.json")
    with open(ge_limits_path) as f:
        ge_limits = json.load(f)

    # Make a dict of: name -> buy_limit
    buy_limits = dict()
    for item_id, buy_limit in ge_limits.items():
        item_id = int(item_id)
        item_name = all_item_ids[item_id].name
        buy_limits[item_name] = buy_limit

    # Write out buy limit data file
    out_fi = Path(config.DATA_ITEMS_PATH / "ge-limits-names.json")
    with open(out_fi, "w") as f:
        json.dump(buy_limits, f, indent=4)
Example No. 5
def loadGear(self):
    items = items_api.load()
    for item in items:
        if item.equipable_by_player:
            if item.equipment.slot == 'weapon':
                self.mainHandSlotItems.append(item)
            elif item.equipment.slot == '2h':
                self.twohandSlotItems.append(item)
            elif item.equipment.slot == 'ammo':
                self.ammoSlotItems.append(item)
            elif item.equipment.slot == 'body':
                self.chestSlotItems.append(item)
            elif item.equipment.slot == 'cape':
                self.capeSlotItems.append(item)
            elif item.equipment.slot == 'feet':
                self.footSlotItems.append(item)
            elif item.equipment.slot == 'hands':
                self.handSlotItems.append(item)
            elif item.equipment.slot == 'head':
                self.headSlotItems.append(item)
            elif item.equipment.slot == 'legs':
                self.legSlotItems.append(item)
            elif item.equipment.slot == 'neck':
                self.neckSlotItems.append(item)
            elif item.equipment.slot == 'ring':
                self.ringSlotItems.append(item)
            elif item.equipment.slot == 'shield':
                self.shieldSlotItems.append(item)
Example No. 6
def main():
    # Output dictionary of all items in items-search
    items_search = dict()

    # Start processing all items in database
    all_db_items = items_api.load()

    for item in all_db_items:
        # Make a temporary dictionary for each item
        temp_dict = dict()

        # Add id, name, type and duplicate status
        temp_dict["id"] = item.id
        temp_dict["name"] = item.name
        temp_dict["type"] = None
        if item.noted:
            temp_dict["type"] = "noted"
        elif item.placeholder:
            temp_dict["type"] = "placeholder"
        else:
            temp_dict["type"] = "normal"
        temp_dict["duplicate"] = item.duplicate

        # Add temp_dict to all items
        items_search[item.id] = temp_dict

    # Write out file
    out_fi_path = Path(config.DOCS_PATH / "items-search.json")
    with open(out_fi_path, "w") as f:
        json.dump(items_search, f, indent=4)
Example No. 7
def insert_api_data(db_type: str):
    # Insert database contents using osrsbox-api
    if db_type == "items":
        all_db_entries = items_api.load()
    elif db_type == "monsters":
        all_db_entries = monsters_api.load()
    elif db_type == "prayers":
        all_db_entries = prayers_api.load()

    count = 0
    print(f">>> Inserting {db_type} data...")
    for entry in all_db_entries:
        # Make a dictionary from the ItemProperties object
        entry_dict = entry.construct_json()

        # Dump dictionary to JSON for API parameter
        entry_json = json.dumps(entry_dict)

        # Send POST request
        status, response = perform_api_post(API_ENDPOINT + f"/{db_type}",
                                            entry_json)

        if response["_status"] == "ERR":
            status, response = perform_api_put(API_ENDPOINT + f"/{db_type}",
                                               entry_json)

        if response["_status"] == "ERR":
            print(response)
            print(">>> Data insertion error... Exiting.")
            quit()

        count += 1
        print(f"  > Processed: {count:05} of {len(all_db_entries)}", end="\r", flush=True)
Example No. 8
def main(export_monster: bool = False):
    # Load the current database contents
    monsters_complete_file_path = Path(config.DOCS_PATH /
                                       "monsters-complete.json")
    with open(monsters_complete_file_path) as f:
        all_db_monsters = json.load(f)

    # Load the current item database contents
    all_db_items = items_api.load()

    # Load the monster wikitext file
    wiki_text_file_path = Path(config.EXTRACTION_WIKI_PATH /
                               "extract_page_text_monsters.json")
    with open(wiki_text_file_path) as f:
        all_wikitext_raw = json.load(f)

    # Temp loading of monster ID -> wikitext
    processed_wikitextfile_path = Path(config.EXTRACTION_WIKI_PATH /
                                       "processed_wikitext_monsters.json")
    with open(processed_wikitextfile_path) as f:
        all_wikitext_processed = json.load(f)

    # Load the raw OSRS cache monster data
    # This is the final data load, and is used as baseline data for database population
    all_monster_cache_data_path = Path(config.DATA_PATH /
                                       "monsters-cache-data.json")
    with open(all_monster_cache_data_path) as f:
        all_monster_cache_data = json.load(f)

    # Initialize a list of known monsters
    known_monsters = list()

    # Start processing every monster!
    for monster_id in all_monster_cache_data:
        # Toggle to start, stop at a specific monster ID
        # if int(monster_id) < 231:
        #     continue

        # Initialize the BuildMonster class, used for all monsters
        builder = monster_builder.BuildMonster(monster_id,
                                               all_monster_cache_data,
                                               all_wikitext_processed,
                                               all_wikitext_raw,
                                               all_db_monsters, all_db_items,
                                               known_monsters, export_monster)

        status = builder.preprocessing()
        if status:
            builder.populate_monster()
            known_monster = builder.check_duplicate_monster()
            known_monsters.append(known_monster)
            builder.parse_monster_drops()
            builder.generate_monster_object()
            builder.compare_new_vs_old_monster()
            builder.export_monster_to_json()
            builder.validate_monster()

    # Done processing, rejoice!
    print("Done.")
Example No. 9
def insert_api_data(db_type: str):
    print(f">>> Inserting {db_type} data...")

    # Insert database contents using osrsbox-api
    if db_type == "items" or db_type == "icons_items":
        all_db_entries = items_api.load()
    elif db_type == "monsters":
        all_db_entries = monsters_api.load()
    elif db_type == "prayers" or db_type == "icons_prayers":
        all_db_entries = prayers_api.load()

    all_entries = list()
    bulk_entries = list()

    for entry in all_db_entries:
        # Check if we are processing icons, and strip the entry down to id and icon
        if "icons" in db_type:
            new_entry = dict()
            new_entry["id"] = entry.id
            new_entry["icon"] = entry.icon
            entry = new_entry.copy()
        # Append to a list of all entries
        all_entries.append(entry)

    for db_entries in itertools.zip_longest(*[iter(all_entries)] * 50):
        # Remove None entries from the list
        db_entries = filter(None, db_entries)
        # Cast from filter object to list
        db_entries = list(db_entries)
        # Append to list of bulk entries
        bulk_entries.append(db_entries)
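    # Note: zip_longest(*[iter(seq)] * 50) is the standard idiom for chunking a
    # sequence into blocks of 50, padding the final block with None (hence the
    # filter(None, ...) above). For example,
    # list(itertools.zip_longest(*[iter(range(7))] * 3))
    # -> [(0, 1, 2), (3, 4, 5), (6, None, None)]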

    for i, block in enumerate(bulk_entries):
        print(f"  > Processed: {i*50}")
        to_insert = list()
        for entry in block:
            # Make a dictionary from the *Properties object
            if not isinstance(entry, dict):
                entry = entry.construct_json()
            # Dump dictionary to JSON for API parameter
            entry_json = json.dumps(entry)
            # Append to the to_insert list
            to_insert.append(entry_json)

        # Convert list to JSON list
        to_insert = json.dumps(to_insert)

        # Send POST request, or PUT request if that fails
        status, response = perform_api_post(API_ENDPOINT + f"/{db_type}",
                                            to_insert)

        if response["_status"] == "ERR":
            status, response = perform_api_put(API_ENDPOINT + f"/{db_type}",
                                               to_insert)

        if response["_status"] == "ERR":
            print(response)
            print(">>> Data insertion error... Exiting.")
            quit()
Example No. 10
def main():
    # Get list of all tradeable items
    items = [item for item in items_api.load() if item.tradeable_on_ge]

    # Specify data structure for storage of buy_limits
    buy_limits = dict()

    for item in items:
        # Extract name to lookup from wiki_exchange property
        wiki_name = item.wiki_exchange.split(":")[2]

        # Get URL
        url = f"https://oldschool.runescape.wiki/w/Module:Exchange/{wiki_name}?action=raw"
        r = requests.get(url)
        data = r.text

        # Set default values
        item_id_checker = None
        buy_limit = None

        for line in data.split("\n"):
            if "itemId     =" in line:
                item_id_checker = line.split("=")[1]
                item_id_checker = item_id_checker.strip()
                item_id_checker = item_id_checker.replace(",", "")
                if item.id == int(item_id_checker):
                    continue
                else:
                    print("Warning: Item IDs don't match")
                    print(item.id, item_id_checker, item.name, wiki_name)
            if "limit      =" in line:
                buy_limit = line.split("=")[1]
                buy_limit = buy_limit.strip()
                buy_limit = buy_limit.replace(",", "")
                if buy_limit == "nil":
                    buy_limit = None
                else:
                    buy_limit = int(buy_limit)
        if not item_id_checker:
            print("Warning: No item ID", item.id, item_id_checker)
            buy_limits[item.id] = buy_limit
            continue
        if not buy_limit:
            print("Warning No item buy limit", item.id, item_id_checker)
            buy_limits[item_id_checker] = buy_limit
            continue
        buy_limits[item_id_checker] = buy_limit

    file_name = "ge-limits-ids.json"
    file_path = Path(config.DATA_ITEMS_PATH / file_name)
    with open(file_path, "w") as f:
        json.dump(buy_limits, f, indent=4)
Example No. 11
def insert_data(db_type: str):
    print(f">>> Inserting {db_type} data...")

    # Insert database contents using osrsbox-api
    if db_type == "items" or db_type == "icons_items":
        all_db_entries = items_api.load()
    elif db_type == "monsters":
        all_db_entries = monsters_api.load()
    elif db_type == "prayers" or db_type == "icons_prayers":
        all_db_entries = prayers_api.load()

    all_entries = list()
    bulk_entries = list()

    for entry in all_db_entries:
        # Check if we are processing icons, and strip the entry down to id and icon
        if "icons" in db_type:
            new_entry = dict()
            new_entry["id"] = entry.id
            new_entry["icon"] = entry.icon
            entry = new_entry.copy()
        # Append to a list of all entries
        all_entries.append(entry)

    # Remove all entries in the collection
    collection = db[db_type]
    collection.remove()

    for db_entries in itertools.zip_longest(*[iter(all_entries)] * 50):
        # Remove None entries from the list
        db_entries = filter(None, db_entries)
        # Cast from filter object to list
        db_entries = list(db_entries)
        # Append to list of bulk entries
        bulk_entries.append(db_entries)

    for i, block in enumerate(bulk_entries):
        print(f"  > Processed: {i*50}")
        to_insert = list()
        for entry in block:
            # Make a dictionary from the *Properties object
            if not isinstance(entry, dict):
                entry = entry.construct_json()
                # Convert item ID to string for lookup
                entry["id"] = str(entry["id"])
            # Append to the to_insert list
            to_insert.append(entry)

        # Insert into MongoDB
        collection = db[db_type]
        collection.insert_many(to_insert)
Example No. 12
def create_json(drops):
    all_db_items = items_api.load()

    with open(r"./stacked_items_link.json") as json_file:
        stacked_dict = json.load(json_file)

    actual_chance = []
    base_chances = []
    ids = []

    json_data = dict()
    json_data["name"] = "Rare drop Table"
    #json_data["indexMapping"] = []

    for drop in drops:
        actual_chance.append(drop.actual_chance)
        base_chances.append(drop.base_chance)
        ids.append(all_db_items.lookup_by_item_name(drop.name).id)

    actual_chance_arr = np.array(actual_chance).astype(int)
    base_chance_arr = np.array(base_chances).astype(int)

    lcm = np.lcm.reduce(base_chance_arr)
    base_chance = int(lcm)
    json_data["baseChance"] = base_chance
    scaled_chances = (actual_chance_arr * (lcm / base_chance_arr))
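    # Example: with base chances [128, 64] and actual chances [1, 3], lcm = 128,
    # so the scaled weights become 1 * (128 / 128) = 1 and 3 * (128 / 64) = 6
    # out of a shared baseChance of 128.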

    json_data["basicLoots"] = []

    index_mapping = 0
    for i, drop in enumerate(drops):
        index_mapping = index_mapping + scaled_chances[i]

        basic_loot = dict()
        try:
            basic_loot["id"] = stacked_dict[str(ids[i])]
        except KeyError:
            basic_loot["id"] = ids[i]
        basic_loot["weight"] = scaled_chances[i]
        basic_loot["amountMin"] = drop.amount_min
        basic_loot["amountMax"] = drop.amount_max
        json_data["basicLoots"].append(basic_loot)

        #json_data["indexMapping"].append(index_mapping)

    out_file_name = r"./rare_drop_table.json"
    with open(out_file_name, "w", newline="\n") as out_file:
        json.dump(json_data, out_file, indent=4)
Example No. 13
def main():
    """The main function for generating the `docs/items-complete.json` file"""
    # Read in the item database content
    all_db_items = items_api.load()

    items = {}

    for item in all_db_items:
        json_out = item.construct_json()
        items[item.id] = json_out

    # Save all items to docs/items-complete.json
    out_fi = Path(config.DOCS_PATH / "items-complete.json")
    with open(out_fi, "w") as f:
        json.dump(items, f)
Example No. 14
def main():
    # Output dictionary of DMM-only items
    dmm_only_items = dict()

    # Start processing all items in database
    all_db_items = items_api.load()

    for item in all_db_items:
        if item.name in DMM_MODE_ITEM_NAMES:
            dmm_only_items[item.id] = item.name

    # Write out file
    out_fi_path = Path(config.DATA_ITEMS_PATH / "dmm-only-items.json")
    with open(out_fi_path, "w") as f:
        json.dump(dmm_only_items, f, indent=4)
Example No. 15
def process_weapon_types(weapon_types: Dict):
    """Extract weapon types, correlate to weapons in items_api

    :param weapon_types: A dictionary of weapon types.
    """
    # Load all normalized names
    normalized_names = dict()
    normalized_names_path = Path(config.ITEMS_BUILDER_PATH /
                                 "normalized_names.txt")
    with open(normalized_names_path) as f:
        for line in f:
            line = line.strip()
            if "#" in line or line.startswith("TODO"):
                continue
            line = line.split("|")
            normalized_names[line[0]] = [line[1], line[2], line[3]]

    extracted_weapon_types = dict()
    for weapon_type in weapon_types:
        for weapon_name in weapon_types[weapon_type]:
            extracted_weapon_types[weapon_name] = weapon_type

    weapon_type_dict = dict()

    # Load the osrsbox items API
    all_db_items = items_api.load()
    for item in all_db_items:
        if item.equipable_by_player:
            if item.equipment.slot in ["2h", "weapon"]:
                try:
                    item_name = normalized_names[str(item.id)][1]
                except KeyError:
                    item_name = item.name
                try:
                    weapon_type = extracted_weapon_types[item_name]
                    weapon_type_dict[item.id] = {
                        "name": item.name,
                        "weapon_type": weapon_type
                    }
                except KeyError:
                    weapon_type_dict[item.id] = {
                        "name": item.name,
                        "weapon_type": None
                    }

    weapon_types_file = Path(config.DATA_PATH / "weapon-types.json")
    with open(weapon_types_file, mode='w') as f:
        json.dump(weapon_type_dict, f, indent=4)
Example No. 16
def main(granularity=30):
    URLS = [[
        f'https://rsbuddy.com/exchange/graphs/{granularity}/{item.id}.json',
        item.name, item.id
    ] for item in items_api.load() if item.tradeable_on_ge]
    with concurrent.futures.ProcessPoolExecutor() as executor:
        future_to_url = {
            executor.submit(make_web_call, url[0], url[1], url[2]): url
            for url in URLS
        }
        for future in concurrent.futures.as_completed(future_to_url):
            url = future_to_url[future]
            try:
                data = future.result()
                # Output,item.name, item.id
                get_rsbuddy_price(data, url[1], url[2])

            except Exception as exc:
                print(f'URL: {url}, generated an exception: {exc}')
Example No. 17
def get_trade_limit(item_to_lookup):
    """
	Looks items up by name

	Params:
		item_to_lookup <str>
			Name of the item you want to lookup, case insensitive
	Return:
		trade_limit <int> or False
			Returns False if the item cannot be found in the database
	"""
    trade_limit = False

    all_db_items = items_api.load()
    for item in all_db_items:
        if item_to_lookup.lower() == item.name.lower():
            trade_limit = item.buy_limit
            break

    return trade_limit
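A small usage sketch for get_trade_limit, assuming the function above is importable; the item name is purely illustrative.

limit = get_trade_limit("Cannonball")
if limit is False:
    print("Item not found in the database")
else:
    print(f"Trade limit: {limit}")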
Example No. 18
def main():
    # Start processing all items in database
    all_db_items = items_api.load()

    # Load current file
    iar_file = Path(config.DATA_ITEMS_PATH / "ammo-requirements.json")
    with open(iar_file) as f:
        known_ammo = json.load(f)

    done = list()
    for i in known_ammo:
        done.append(i)

    for item in all_db_items:
        if item.id in BAD_IDS:
            # Item ID is not really ammo, skip to next
            continue
        if str(item.id) in done:
            # Item is already processed...
            if not known_ammo[str(item.id)]:
                # If item has been processed before, but has a null value, print it...
                print(f"{item.name}")
                generic_output = (f'    "{item.id}": {{\n'
                                  f'        "ammo_type": unknown,\n'
                                  f'        "ammo_tier": unknown\n'
                                  f'    }}\n')
                print(generic_output)

            # Item ID is already done, skip to next
            continue

        if item.equipable_by_player and item.equipment.slot == "ammo":
            # If item is equipable, an ammo slot item, process it
            print(f"{item.name}")
            generic_output = (f'    "{item.id}": {{\n'
                              f'        "ammo_type": unknown,\n'
                              f'        "ammo_tier": unknown\n'
                              f'    }},\n')
            print(generic_output)
Example No. 19
def generate_item_slot_files():
    """Generate the `docs/items-slot/` JSON files."""
    # Read in the item database content
    all_db_items = items_api.load()

    items = collections.defaultdict(list)

    # Fetch every equipable item with an item slot value
    for item in all_db_items:
        if item.equipable_by_player:
            items[item.equipment.slot].append(item)

    # Process each item found, and add to an individual file for each equipment slot
    for slot in items:
        json_out = {}
        for item in items[slot]:
            json_out_temp = item.construct_json()
            json_out[item.id] = json_out_temp
        out_fi = Path(config.DOCS_PATH / "items-json-slot" /
                      f"items-{slot}.json")
        with open(out_fi, "w") as f:
            json.dump(json_out, f)
Example No. 20
def main():
    # Start processing all items in database
    all_db_items = items_api.load()

    # Load current file
    isr_file = Path(config.DATA_ITEMS_PATH / "skill-requirements.json")
    with open(isr_file) as f:
        known_items = json.load(f)

    done = list()
    for i in known_items:
        done.append(i)

    for item in all_db_items:
        if str(item.id) in done:
            # Item ID is already done, skip to next
            continue

        if item.equipable_by_player:
            # If item is equipable and not processed... process it!
            # Try to find the name in the existing file
            found = False
            for known_item_id in known_items:
                item_object = all_db_items[int(known_item_id)]
                if item_object.name == item.name:
                    # If we find a name match, fetch the requirements and break
                    requirements = known_items[str(item_object.id)]
                    if not requirements:
                        requirements = "null"
                    found = True
                    break

            # Print out JSON formatted data
            if found:
                print(f'    "{item.id}": \n        {requirements}\n    ,')
            else:
                print(item.name)
                print(f'    "{item.id}": {{\n        "skill": level\n    }},')
Example No. 21
async def main():
    items = items_api.load()
    async with aiohttp.ClientSession() as session:
        await asyncio.gather(*[fetch(session, item) for item in items])
Example No. 22
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""

from osrsbox import items_api


if __name__ == "__main__":
    # Load all items
    all_db_items = items_api.load()

    # Loop through all items in the database and print the item name for each item
    for item in all_db_items:
        if "Scythe of vitur" in item.name:
            print(f'''
    "{item.id}": {{
        "id": {item.id},
        "name": "{item.name}",
        "status": "unequipable",
        "normalized_name": null
    }},''')
Example No. 23
    async def fetch(self):
        """ Fetch the data from the GrandExchange """
        # Event loop
        loop = asyncio.get_event_loop()
        # Find item ID
        url = ge_query_url(self.query)
        match_req = loop.run_in_executor(None, requests.get, url)
        match_response = await match_req
        match_data = match_response.json()
        for item in match_data['items']:
            if self.query.lower() in item['name'].lower():
                self.matches.append(item)
        if len(self.matches) == 1:
            self.item_id = str(self.matches[0]['id'])
        elif len(self.matches) > 0 and self.matches[0]['name'].lower() == self.query.lower():
            self.item_id = str(self.matches[0]['id'])
        elif len(self.matches) == 0:
            # Search JSON file
            file = open('assets/item_ids.json')
            id_list = json.load(file)
            for i in id_list:
                if self.query.lower() in i['name'].lower():
                    self.item_id = str(i['id'])
                    break
            file.close()
        else:
            self.multiple_results = True
            return

        # Price info
        req = loop.run_in_executor(None, requests.get, ge_api_item_url + self.item_id)
        # Graph data
        graph_req = loop.run_in_executor(None, requests.get, f'{ge_graph_url}{self.item_id}.json')
        # Responses
        response = await req
        graph_response = await graph_req
        if response.status_code == 404:
            raise NoResults(f'No results for {self.query} found')
        data = response.json()
        self.graph_data = graph_response.json()

        # Assign variables
        self.icon = data['item']['icon_large']
        self.id = data['item']['id']
        self.name = data['item']['name']
        self.description = data['item']['description']
        self.is_members = (data['item']['members'] == 'true')

        # Current price
        self.current_price = data['item']['current']['price']
        self.current_price_trend = data['item']['current']['trend']

        # Prices over time
        self.todays_price_trend = data['item']['today']['trend']
        self.todays_price_change = data['item']['today']['price']
        self.day30_trend = data['item']['day30']['trend']
        self.day30_change = data['item']['day30']['change']
        self.day90_trend = data['item']['day90']['trend']
        self.day90_change = data['item']['day90']['change']
        self.day180_trend = data['item']['day180']['trend']
        self.day180_change = data['item']['day180']['change']

        # OSRSBox details
        all_db_items = items_api.load()
        for item in all_db_items:
            if str(item.id) == self.item_id:
                if item.buy_limit:
                    self.buy_limit = f'{item.buy_limit:,}'
                if item.highalch:
                    self.high_alch = f'{item.highalch:,}'
Example No. 24
from osrsbox import items_api
import json

items = items_api.load()
store_data = {}
store_data['items'] = []

for item in items:
    if item.tradeable_on_ge and not item.duplicate:
        store_data['items'].append({'id': item.id, 'name': item.name})
        print(f'{item.id},{item.name}')

with open('tradeable_items.json', 'w', encoding='utf-8') as f:
    json.dump(store_data, f, ensure_ascii=False, indent=4)
Example No. 25
    def __init__(self, query):
        # Check if query was entered
        self.query = query
        if query == '':
            raise MissingQuery("You must enter a search term")

        # This boolean is flipped if there is a lot of results from query
        self.multiple_results = False

        # Search using API query
        self.matches = self.get_matches()
        if len(self.matches) == 1:
            item_id = str(self.matches[0]['id'])
        elif len(self.matches) > 0 and self.matches[0]['name'].lower(
        ) == self.query.lower():
            item_id = str(self.matches[0]['id'])
        elif len(self.matches) == 0:
            # Search JSON file
            file = open('assets/item_ids.json')
            id_list = json.load(file)
            item_id = ''
            for i in id_list:
                if query.lower() in i['name'].lower():
                    item_id = str(i['id'])
                    break
            file.close()
        else:
            self.multiple_results = True
            return

        # Request data
        # Price info
        self.item = str(item_id)
        session = requests.session()
        req = session.get(ge_api_item_url + item_id)
        if req.status_code == 404:
            raise NoResults(f'No results for {query} found')
        data = req.json()
        # Graph data
        graph_req = session.get(f'{ge_graph_url}{item_id}.json')
        self.graph_data = graph_req.json()

        # Assign variables
        self.icon = data['item']['icon_large']
        self.id = data['item']['id']
        self.name = data['item']['name']
        self.description = data['item']['description']
        self.is_members = (data['item']['members'] == 'true')

        # Current price
        self.current_price = data['item']['current']['price']
        self.current_price_trend = data['item']['current']['trend']

        # Prices over time
        self.todays_price_trend = data['item']['today']['trend']
        self.todays_price_change = data['item']['today']['price']
        self.day30_trend = data['item']['day30']['trend']
        self.day30_change = data['item']['day30']['change']
        self.day90_trend = data['item']['day90']['trend']
        self.day90_change = data['item']['day90']['change']
        self.day180_trend = data['item']['day180']['trend']
        self.day180_change = data['item']['day180']['change']

        # Details from osrsbox
        all_db_items = items_api.load()
        for item in all_db_items:
            if str(item.id) == item_id:
                self.buy_limit = f'{item.buy_limit:,}'
                self.high_alch = f'{item.highalch:,}'
Example No. 26
def show_all_item_trade_limits():
    all_db_items = items_api.load()
    for item in all_db_items:
        print(item.name + " : " + str(item.buy_limit))
Example No. 27
def __init__(self):
    self.all_db_items = items_api.load()
    self.exchange = GrandExchange.Exchange()
Example No. 28
# Load the monster wikitext file of processed data
with open(
        Path(config.DATA_MONSTERS_PATH /
             "monsters-wiki-page-text-processed.json")) as f:
    all_wikitext_processed = json.load(f)

# Load the raw cache data that has been processed (this is ground truth)
with open(Path(config.DATA_MONSTERS_PATH / "monsters-cache-data.json")) as f:
    all_monster_cache_data = json.load(f)

# Data structure for any monster with multiple drop tables
# Format: id: query_string
multi_drop_tables = dict()

ITEMS = [
    item for item in items_api.load()
    if not item.duplicate and not item.stacked
]


def fetch():
    """Fetch monster drops using SMW queries.

    This is a request-heavy method, querying about 1,000 endpoints
    to get monster drop data.
    """
    for monster_id, monster_list in all_wikitext_processed.items():
        if "drops (level" in monster_list[2].lower():
            name = all_monster_cache_data[monster_id]["name"]
            combat_level = all_monster_cache_data[monster_id]["combatLevel"]
            multi_drop_tables[
Example No. 29
def id_to_name():
    idToName = {}

    for item in items_api.load():
        idToName[item.id] = item.name
    return idToName
Example No. 30
import datetime
import io

from requests.compat import urljoin
from requests.exceptions import Timeout
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
from osrsbox import items_api

import osrs_discord_bot.formatter_discord as f
from osrs_discord_bot.constants import (SKILL_NAMES, WIKI_BASE_URL,
                                        WISE_BASE_URL, EXCHANGE_BASE_URL,
                                        HISCORE_BASE_URL, PRICES_WIKI_URL)
from osrs_discord_bot.settings import DISCORD_CONTACT

ALL_DB_ITEMS = items_api.load()


def get_response(base_url, path=None, params=None, headers=None, timeout=5):
    """Sends a get request to the URL provided.

    The request timesout if the reponse takes longer than 5 seconds.
    
    Args:
        base_url (str): Base URL.
        path (str): Path from base URL.
        params (dict): key:value of param:value, defaults to None.
        headers (dict): header:value, defaults to None.
        timeout (int): Seconds until request is timed out, defaults to 5.
    
    """