Code Example #1
File: access.py  Project: JackHarley/Abyssalearn
def item_matrix(type_id):
    db_conn = database.get_db_conn()
    cur = db_conn.cursor()
    cur.row_factory = sqlite3.Row
    cur.execute(
        '''
        SELECT c.price, ao.dogma_attributes FROM abyssal_observations ao
        LEFT JOIN contracts c ON c.contract_id=ao.contract_id
        WHERE ao.type_id=?''', (type_id, ))

    matrix = []

    # iterate over result rows until the cursor is exhausted
    for item in cur:

        attribs = json.loads(item["dogma_attributes"])

        attribs_dict = {}
        for attrib in attribs:
            if attrib["attribute_id"] not in blacklisted_attribute_ids:
                attribs_dict[attrib["attribute_id"]] = attrib["value"]

        # prepend the contract price so each row is [price, attr_1, attr_2, ...]
        vector = [item["price"]] + list(attribs_dict.values())
        matrix.append(vector)

    return matrix
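A possible consumer of the returned matrix (the price/feature split below is an assumption about downstream use, not code from the project):

rows = item_matrix(some_abyssal_type_id)   # some_abyssal_type_id is illustrative
prices = [row[0] for row in rows]          # first column is the contract price
features = [row[1:] for row in rows]       # remaining columns are dogma attribute values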
Code Example #2
def mark_contract_scraped(data):
    db_conn = database.get_db_conn()
    cur = db_conn.cursor()
    cur.execute("INSERT INTO contracts (contract_id, date_issued, date_expired, price) VALUES (?,?,?,?)", (
        data["contract_id"],
        parse_eve_date(data["date_issued"]),
        parse_eve_date(data["date_expired"]),
        data["price"]
    ))
    db_conn.commit()
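The snippet calls a parse_eve_date helper that is not shown here. A minimal sketch, assuming ESI returns ISO 8601 Zulu timestamps and that dates are stored as Unix timestamps:

from datetime import datetime, timezone

def parse_eve_date(date_string):
    # Assumption: ESI timestamps look like "2019-07-09T12:00:00Z".
    # Returning a Unix timestamp lets the value bind as a plain INTEGER in SQLite.
    dt = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%SZ")
    return int(dt.replace(tzinfo=timezone.utc).timestamp())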
Code Example #3
def scrape_types(type_ids):
    db_conn = database.get_db_conn()
    c = db_conn.cursor()
    c.row_factory = lambda cur, row: row[0]
    c.execute("SELECT type_id FROM types")
    already_scraped = set(c.fetchall())  # set for O(1) membership checks

    for type_id in type_ids:
        if type_id not in already_scraped:
            scrape_type(type_id)
Code Example #4
def scrape_incomplete_abyssal_items():
    db_conn = database.get_db_conn()
    cur = db_conn.cursor()
    cur.execute("SELECT type_id, item_id FROM abyssal_observations WHERE source_type_id ISNULL")

    while True:
        row = cur.fetchone()
        if row == None:
            break
        print("Scraping dynamic data for abyssal item #%d" % row[1])
        scrape_abyssal_item(row[0], row[1])
Code Example #5
def scrape_type(type_id):
    r = requests.get("https://esi.evetech.net/v3/universe/types/%d" % type_id)
    try:
        d = r.json()
    except ValueError:  # response body was not valid JSON
        print("Failed to scrape type id #%d" % type_id)
        return False

    db_conn = database.get_db_conn()
    c = db_conn.cursor()
    c.execute("INSERT OR IGNORE INTO types VALUES (?,?)", (d["type_id"], d["name"]))
    db_conn.commit()
    return True
Code Example #6
def scrape_dogma_attribute(attribute_id):
    r = requests.get("https://esi.evetech.net/latest/dogma/attributes/%d" % attribute_id)
    d = r.json()

    db_conn = database.get_db_conn()
    c = db_conn.cursor()
    c.execute("INSERT INTO dogma_attributes VALUES (?,?,?,?,?,?)", (
        d["attribute_id"],
        d["name"]          if "name" in d else "",
        d["display_name"]  if "display_name" in d else "",
        d["description"]   if "description" in d else "",
        d["high_is_good"]  if "high_is_good" in d else 0,
        d["default_value"] if "default_value" in d else "NULL"
    ))
    db_conn.commit()
Code Example #7
def scrape_dogma_attributes():
    r = requests.get("https://esi.evetech.net/latest/dogma/attributes")
    attribute_ids = r.json()

    db_conn = database.get_db_conn()
    c = db_conn.cursor()
    c.row_factory = lambda cur, row: row[0]
    c.execute("SELECT attribute_id FROM dogma_attributes")
    already_scraped = set(c.fetchall())  # set for O(1) membership checks

    for count, attribute_id in enumerate(attribute_ids, start=1):
        if attribute_id not in already_scraped:
            print("Scraping attribute id #%d (%d/%d)" % (attribute_id, count, len(attribute_ids)))
            scrape_dogma_attribute(attribute_id)
Code Example #8
File: access.py  Project: JackHarley/Abyssalearn
def random_item(type_id):
    db_conn = database.get_db_conn()
    cur = db_conn.cursor()
    cur.row_factory = sqlite3.Row
    cur.execute(
        '''
        SELECT ao.*, c.price, t.name FROM abyssal_observations ao
        LEFT JOIN contracts c ON c.contract_id=ao.contract_id
        LEFT JOIN types t ON t.type_id=ao.type_id
        WHERE ao.type_id=?
        ORDER BY RANDOM()
        LIMIT 1''', (type_id, ))

    item = cur.fetchone()
    if item is None:  # no observations recorded for this type yet
        return "No observations for type #%d" % type_id

    summary = "Type: " + item["name"] + "\n"
    summary += f"List Price: {item['price']:,} ISK\n"
    summary += "Attributes:\n"

    item_attribs = json.loads(item["dogma_attributes"])
    attribute_ids = [attrib["attribute_id"] for attrib in item_attribs]

    q = f"SELECT * FROM dogma_attributes WHERE attribute_id in ({','.join(['?']*len(attribute_ids))}) ORDER BY name"
    cur.execute(q, attribute_ids)
    attribs = cur.fetchall()
    # map each attribute id to its display name, falling back to the internal name
    attrib_ids_to_display_names = {}
    for attrib in attribs:
        attrib_ids_to_display_names[attrib["attribute_id"]] = (
            attrib["display_name"] or attrib["name"])

    for attrib in item_attribs:
        if attrib["attribute_id"] not in blacklisted_attribute_ids:
            name = attrib_ids_to_display_names[attrib["attribute_id"]]
            summary += name.ljust(30) + ("(#%d):" % attrib["attribute_id"]).ljust(10) + "%f\n" % attrib["value"]

    return summary
Code Example #9
File: access.py  Project: JackHarley/Abyssalearn
def generate_summary():
    summary = ""

    db_conn = database.get_db_conn()
    cur = db_conn.cursor()

    q = f"SELECT * FROM types WHERE type_id in ({','.join(['?']*len(scrape.abyssal_type_ids))}) ORDER BY name"
    cur.execute(q, scrape.abyssal_type_ids)
    types = cur.fetchall()

    for t in types:
        cur.execute(
            "SELECT COUNT(*) FROM abyssal_observations WHERE type_id = ?",
            (t[0], ))
        count = cur.fetchone()[0]  # COUNT(*) always returns exactly one row
        summary += "%s - %s (#%d)\n" % (str(count).rjust(6), t[1], t[0])

    return summary
Code Example #10
def scrape_abyssal_item(type_id, item_id):
    r = requests.get("https://esi.evetech.net/v1/dogma/dynamic/items/%d/%d" % (type_id, item_id))
    item = r.json()

    db_conn = database.get_db_conn()
    cur = db_conn.cursor()

    # row-value UPDATE syntax (SET (a, b, ...) = (?, ?, ...)) requires SQLite 3.15+
    cur.execute('''
        UPDATE abyssal_observations
        SET (dogma_attributes, dogma_effects, source_type_id, mutator_type_id) = (?,?,?,?)
        WHERE item_id = ?
        ''', (
            json.dumps(item["dogma_attributes"], separators=(',', ':')),
            json.dumps(item["dogma_effects"], separators=(',', ':')),
            item["source_type_id"],
            item["mutator_type_id"],
            item_id
        ))
    db_conn.commit()
Code Example #11
import bcrypt
import database
import pymongo
import datetime
import json
from bson.json_util import dumps
import base64
import sys
import uuid

# Global variables
db_con = database.get_db_conn()


def prepare():
    database.prepare_db()


def check_login_success(username, password):
    users = db_con.users
    login_user = users.find_one({'username': username})
    print('Trying login', username)
    if login_user:
        stored_password = login_user['password']
        # bcrypt.hashpw() reuses the salt embedded in the stored hash, so
        # hashing the candidate password with it must reproduce that hash
        if bcrypt.hashpw(password.encode('utf8'),
                         stored_password) == stored_password:
            print('Login Success')
            return True
        else:
            print('Login Failed bad password')
    return False
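For reference, the matching write path would hash with a fresh salt. A minimal sketch (register_user and its document layout are assumptions, not code from this file):

def register_user(username, password):
    # Hypothetical counterpart to check_login_success: bcrypt.gensalt()
    # embeds a new random salt into the stored hash.
    hashed = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
    db_con.users.insert_one({'username': username, 'password': hashed})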
Code Example #12
def scrape_public_contracts(region_id):
    r = requests.get("https://esi.evetech.net/v1/contracts/public/%d" % region_id)
    pages = int(r.headers["X-Pages"])
    
    db_conn = database.get_db_conn()
    cur = db_conn.cursor()
    cur.row_factory = lambda cur, row: row[0]
    cur.execute("SELECT contract_id FROM contracts")
    already_scraped = set(cur.fetchall())  # set for O(1) membership checks

    for page in range(1, pages+1):
        print("Pulling contracts page %d/%d" % (page, pages))

        r = requests.get("https://esi.evetech.net/v1/contracts/public/%d?page=%d" % (region_id, page))
        contracts = r.json()

        for contract in contracts:
            # skip contracts we already scraped
            if int(contract["contract_id"]) in already_scraped:
                continue

            print("Scraping contract #%d (page %d/%d)" % (int(contract["contract_id"]), page, pages))

            # only interested in item exchange contracts
            if contract["type"] != "item_exchange":
                mark_contract_scraped(contract)
                continue
            
            # scrape items
            r = requests.get("https://esi.evetech.net/v1/contracts/public/items/%d/" % int(contract["contract_id"]))
            if r.status_code != 200:
                continue

            try:
                items = r.json()
            except ValueError:  # response body was not valid JSON
                print("Error on contract #%d (page %d/%d)" % (int(contract["contract_id"]), page, pages))
                continue

            # only interested in contracts for singleton item
            if len(items) != 1:
                mark_contract_scraped(contract)
                continue

            item = items[0]

            # only interested in 1x quantity of item
            if int(item["quantity"]) != 1:
                mark_contract_scraped(contract)
                continue

            # only interested in abyssal items
            if not is_abyssal(int(item["type_id"])):
                mark_contract_scraped(contract)
                continue

            print("Abyssal item found, item id #%d" % item["item_id"])
            cur.execute("INSERT INTO abyssal_observations (item_id, type_id, contract_id) VALUES (?,?,?)", (
                item["item_id"],
                item["type_id"],
                contract["contract_id"]
            ))

            mark_contract_scraped(contract)
            db_conn.commit()
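The filter above relies on an is_abyssal helper that is not shown. A minimal sketch, assuming the module-level abyssal_type_ids collection that Example #9 references as scrape.abyssal_type_ids:

def is_abyssal(type_id):
    # Assumption: abyssal_type_ids is the module-level set/list of abyssal
    # module type ids (referenced elsewhere as scrape.abyssal_type_ids).
    return type_id in abyssal_type_ids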
Code Example #13
class event_proc(threading.Thread):

    mgdDB = database.get_db_conn(config.mainDB)
    dispatchDB = database.get_db_conn(config.pytavia_dispatchDB)

    g_collection = None
    g_handler = None
    g_handler_name = None
    g_query = None
    g_event_loop = True

    def __init__(self, handler_name, collection, handler, query):
        threading.Thread.__init__(self)

        self.g_handler_name = handler_name
        self.g_collection = collection
        self.g_handler = handler
        self.g_query = query

    # end def

    def get_resume_token(self, params):
        handler_name = params["handler_name"]
        collection_name = params["collection_name"]
        resume_history_rec = self.dispatchDB.db_sys_resume_history.find_one({
            "collection": collection_name,
            "handler_name": handler_name
        })
        if resume_history_rec is not None:
            resume_token = resume_history_rec["resume_token"]
            return resume_token
        # end if
        return None

    # end def

    def save_resume_token(self, params):
        event = params["event"]
        handler_name = params["handler_name"]

        resume_token = event["_id"]
        operation_type = event["operationType"]
        database_name = event["ns"]["db"]
        collection_name = event["ns"]["coll"]
        document_key = event["documentKey"]["_id"]
        mgd_timestamp = event["clusterTime"]

        now_time = int(time.time() * 1000)

        resume_history_rec = self.dispatchDB.db_sys_resume_history.find_one({
            "collection": collection_name,
            "handler_name": handler_name
        })
        if resume_history_rec is not None:
            # Collection.update() was removed in PyMongo 4; update_one is the
            # current equivalent for a single-document $set
            self.dispatchDB.db_sys_resume_history.update_one(
                {
                    "collection": collection_name,
                    "handler_name": handler_name
                }, {
                    "$set": {
                        "resume_token": resume_token,
                        "operation_type": operation_type,
                        "document_key": document_key,
                        "cluster_time": mgd_timestamp,
                        "rec_timestamp": now_time
                    }
                })
        else:
            sys_resume_history = database.new(self.dispatchDB,
                                              "db_sys_resume_history")
            sys_resume_history.put("resume_token", resume_token)
            sys_resume_history.put("operation_type", operation_type)
            sys_resume_history.put("document_key", document_key)
            sys_resume_history.put("cluster_time", mgd_timestamp)
            sys_resume_history.put("handler_name", handler_name)
            sys_resume_history.put("collection", collection_name)
            sys_resume_history.insert()
        # end if

    # end def

    def shutdown(self, params):
        self.g_event_loop = params["event_loop_status"]

    # end def

    def extract_event(self, event):
        operation_type = event["operationType"]
        if operation_type == "delete":
            clusterTime = event["clusterTime"]
            collection = event["ns"]["coll"]
            db = event["ns"]["db"]
            object_id = event["documentKey"]["_id"]
            handler_name = event["handler_name"]
            m_event = msg_event.msg_event(
                object_id, operation_type.upper(), handler_name, collection,
                db, clusterTime,
                operation_type.upper() + "_" + collection.upper(), {})
            return m_event
        elif operation_type == "insert":
            clusterTime = event["clusterTime"]
            collection = event["ns"]["coll"]
            db = event["ns"]["db"]
            object_id = event["documentKey"]["_id"]
            handler_name = event["handler_name"]
            event["fullDocument"]["_id"] = str(event["fullDocument"]["_id"])
            full_document = event["fullDocument"]
            m_event = msg_event.msg_event(
                object_id, operation_type.upper(), handler_name, collection,
                db, clusterTime,
                operation_type.upper() + "_" + collection.upper(),
                full_document)
            return m_event
        else:
            # this is for update
            clusterTime = event["clusterTime"]
            collection = event["ns"]["coll"]
            db = event["ns"]["db"]
            object_id = event["documentKey"]["_id"]
            handler_name = event["handler_name"]
            changed_field = event["updateDescription"]
            m_event = msg_event.msg_event(
                object_id, operation_type.upper(), handler_name, collection,
                db, clusterTime,
                operation_type.upper() + "_" + collection.upper(),
                changed_field)
            return m_event
        # end if

    # end def

    def run(self):
        try:
            print("handler_listener - register() handler_name: " +
                  str(self.g_handler_name))
            while self.g_event_loop:

                resume_token = self.get_resume_token({
                    "handler_name":
                    self.g_handler_name,
                    "collection_name":
                    self.g_collection
                })
                # renamed from "msg_event" to avoid shadowing the msg_event module
                change_stream = self.mgdDB[self.g_collection].watch(
                    resume_after=resume_token, pipeline=self.g_query)
                doc_event = next(change_stream)
                doc_event["handler_name"] = self.g_handler_name
                self.save_resume_token({
                    "event": doc_event,
                    "handler_name": self.g_handler_name
                })
                event_msg = self.extract_event(doc_event)
                self.g_handler.event_switch(event_msg)
            # end while
        except Exception:
            print(traceback.format_exc())
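A minimal wiring sketch (the collection name, pipeline, and the order_handler object with its event_switch(event) method are illustrative assumptions, not part of the original file):

# Hypothetical setup: watch a collection and dispatch each change event.
pipeline = [{"$match": {"operationType": {"$in": ["insert", "update", "delete"]}}}]
worker = event_proc("order_handler", "db_orders", order_handler, pipeline)
worker.start()  # run() loops, resuming from the last saved resume token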