Example #1
import glob
import subprocess
import sys

import db_api

# LOGGER, CONFIG, and process_pool_logmessage() are defined at module level
def process_pool_log():
    global LOGGER
    global CONFIG
    # Get a handle to the DB API
    db = db_api.db_api()

    POOL_LOG = CONFIG["stratum"]["log_dir"] + "/" + CONFIG["stratum"]["log_filename"]

    # (re)Process all logs
    logfiles = glob.glob(POOL_LOG + '*')
    LOGGER.warn("Processing existing logs: {}".format(logfiles))
    sys.stdout.flush()
    for logfile in logfiles:
        with open(logfile) as f:
            for line in f:
                try:
                    process_pool_logmessage(line, db)
                except Exception:
                    LOGGER.error("Failed to process log message: {}".format(sys.exc_info()[0]))

    # Read future log messages
    LOGGER.warn("Processing new logs: {}".format(POOL_LOG))
    sys.stdout.flush()
    poollog = subprocess.Popen(
        ['tail', '-F', POOL_LOG],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
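    # tail -F keeps following across log rotations; readline() blocks until output arrives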
    while True:
        line = poollog.stdout.readline().decode('utf-8')
        try:
            process_pool_logmessage(line, db)
        except Exception:
            LOGGER.warn("Failed to process log message: {}".format(sys.exc_info()[0]))
Example #2
    def run(self, ijson):
        filter_ids = ijson.get('row_ids', [])
        import db_api
        db_obj = db_api.db_api(self.storage_ip + "##" + self.cmp_table)
        client_id = 1
        cmp_info = db_obj.read_company_ids(client_id)
        cmp_id_str = []
        for each in cmp_info:
            row_id, company_id = each
            cmp_id_str.append(str(int(company_id)))
        cmp_id_str1 = ','.join(cmp_id_str)
        all_cmp_info = db_obj.read_meta_info(cmp_id_str1)
        for each_cmp in all_cmp_info:
            row_id, company_name, company_display_name, meta_data = each_cmp
            if row_id not in filter_ids: continue
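            # WARNING: eval() executes arbitrary code; safe only if meta_data is trusted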
            meta_data = eval(meta_data)
            row_id = int(row_id)
            ijson_1 = {
                "user_id": "sunil",
                "ProjectName": company_display_name,
                "oper_flag": 622,
                "ProjectID": row_id
            }
            #{"oper_flag":97030,"excel_name":"/var/www/html/WorkSpaceBuilder_DB/1406/1/upload/1406.xlsx","stage_lst":"1~6~7","project_id":1406,"db_name":"DataBuilder_1406","ws_id":1,"meta_data":{},"doc_type":"HTML2PDF","user_id":"demo_user1"}

            #ijson_2 = {"user_id":"sunil", "ProjectID":row_id, "WSName":company_display_name ,"db_name":"DataBuilder_%s"%(row_id),"oper_flag":90014}
            #print ijson_1
            #print ijson_2
            #res1 = self.execute_url(ijson_1)
            #res2 = self.execute_url(ijson_2)
        db_obj.con.close()
        return 'done'
Example #3
import time

import db_api
import lib

# PROCESS is the module-level name of this job
def main():
    db = db_api.db_api()
    logger = lib.get_logger(PROCESS)
    logger.warn("=== Starting {}".format(PROCESS))

    new_poolshares = db.get_unvalidated_poolshares()
    for pool_share in new_poolshares:
        invalid_reason = "NULL"
        ok = True
        (ps_height, ps_nonce, ps_worker_difficulty, ps_timestamp, ps_found_by,
         ps_validated, ps_is_valid, ps_invalid_reason) = pool_share
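        # Cross-check against the share the grin node itself recorded for this nonce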
        grin_share = db.get_grin_share_by_nonce(ps_nonce)
        if grin_share is None:
            ok = False
            invalid_reason = "no grin_share"
            # continue # Check again later
        else:
            (gs_hash, gs_height, gs_nonce, gs_actual_difficulty,
             gs_net_difficulty, gs_timestamp, gs_found_by,
             gs_is_solution) = grin_share
            if ps_nonce != gs_nonce:
                ok = False
                invalid_reason = "nonce mismatch"
            if ps_worker_difficulty > gs_actual_difficulty:
                ok = False
                invalid_reason = "low difficulty"
        # Update record
        logger.warn("Share {}, {} is {} because {}".format(
            ps_height, ps_nonce, ok, invalid_reason))
        db.set_poolshare_validation(ok, invalid_reason, ps_nonce)

    db.set_last_run(PROCESS, str(time.time()))
    db.close()
    logger.warn("=== Completed {}".format(PROCESS))
Example #4
import sys
import time

import requests

import db_api
import lib

# PROCESS is the module-level name of this job
def main():
    db = db_api.db_api()
    config = lib.get_config()
    logger = lib.get_logger(PROCESS)
    logger.warn("=== Starting {}".format(PROCESS))

    grin_api_url = "http://" + config["grin_node"]["address"] + ":" + config[
        "grin_node"]["api_port"]
    status_url = grin_api_url + "/v1/status"
    blocks_url = grin_api_url + "/v1/blocks/"
    validation_depth = int(config[PROCESS]["validation_depth"])

    response = requests.get(status_url)
    latest = int(response.json()["tip"]["height"])
    last = latest - validation_depth  # start a reasonable distance back
    logger.warn("Starting from block #{}".format(last))
    #    last = 0
    for i in range(last, latest):
        url = blocks_url + str(i)
        response = requests.get(url).json()
        # print("{}: {}".format(response["header"]["height"], response["header"]["hash"]))
        data_block = (response["header"]["hash"],
                      response["header"]["version"],
                      response["header"]["height"],
                      response["header"]["previous"],
                      response["header"]["timestamp"][:-1],
                      response["header"]["output_root"],
                      response["header"]["range_proof_root"],
                      response["header"]["kernel_root"],
                      response["header"]["nonce"],
                      response["header"]["total_difficulty"],
                      response["header"]["total_kernel_offset"])

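        # Compare the stored block with the node's view: flag orphans, backfill gaps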
        try:
            rec = db.get_blocks_by_height([i])
            if len(rec) > 0:
                r = rec[0]
                #print("Got block {} at height {}".format(r[0], r[2]))
                if r[0] != response["header"]["hash"]:
                    logger.warn(
                        "Found an orphan - height: {}, hash: {} vs {}".format(
                            r[2], r[0], response["header"]["hash"]))
                    db.set_block_state("orphan", int(i))
            else:
                logger.warn("Adding missing block - height: {}".format(
                    response["header"]["height"]))
                # XXX TODO:  Probably want to mark it as "missing" so we know it was filled in after the fact?
                db.add_blocks([data_block], True)
        except Exception:
            # XXX TODO: Something
            pass
        sys.stdout.flush()
    db.set_last_run(PROCESS, str(time.time()))
    db.close()
Example #5
import sys
import time

import db_api
import lib

# PROCESS and REWARD are module-level constants
def main():
    db = db_api.db_api()
    logger = lib.get_logger(PROCESS)
    logger.warn("=== Starting {}".format(PROCESS))

    latest_block = 0

    # XXX All in one db transaction....
    # Get unlocked blocks from the db
    unlocked_blocks = db.get_poolblocks_by_state("unlocked")
    for pb in unlocked_blocks:
        logger.warn("Processing unlocked block: {}".format(pb))
        # XXX TODO: If there are no shares for this block don't process it
        (pb_hash, pb_height, pb_nonce, pb_actual_difficulty, pb_net_difficulty,
         pb_timestamp, pb_found_by, pb_state) = pb
        if pb_height > latest_block:
            latest_block = pb_height
        # Get valid pool_shares for that block from the db
        pool_shares = db.get_valid_poolshares_by_height(pb_height)
        # Calculate Payment info:
        worker_shares = {}
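        # Total up each worker's accepted difficulty; shares unknown to the grin node earn nothing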
        for ps in pool_shares:
            logger.warn("Processing pool_shares: {}".format(ps))
            (ps_height, ps_nonce, ps_difficulty, ps_timestamp, ps_found_by,
             ps_validated, ps_is_valid, ps_invalid_reason) = ps
            gs = db.get_grin_share_by_nonce(ps_nonce)
            if gs is None:
                # XXX NOTE: no payout for shares not accepted by grin node
                continue
            (gs_hash, gs_height, gs_nonce, gs_actual_difficulty,
             gs_net_difficulty, gs_timestamp, gs_found_by, gs_state) = gs
            if ps_found_by in worker_shares:
                worker_shares[ps_found_by] += gs_actual_difficulty
            else:
                worker_shares[ps_found_by] = gs_actual_difficulty
        if len(worker_shares) > 0:
            # Calculate reward/difficulty: XXX TODO: Enhance
            #  What algorithm to use?  Maybe: https://slushpool.com/help/manual/rewards
            r_per_d = REWARD / sum(worker_shares.values())
            for worker in worker_shares.keys():
                # Calculate reward per share
                worker_rewards = worker_shares[worker] * r_per_d
                # Add or create worker rewards
                # XXX TODO: Batch these
                db.create_or_add_utxo(worker, worker_rewards)
                logger.warn("Credit to user: {} = {}".format(
                    worker, worker_rewards))
        # Mark the pool_block state="paid" (maybe "processed" would be more accurate?)
        db.set_poolblock_state("paid", int(pb_height))
    db.set_last_run(PROCESS, str(time.time()))
    db.close()
    logger.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
Example #6
import sys
import time

import requests

import db_api
import lib

# PROCESS is the module-level name of this job
def main():
    db = db_api.db_api()
    config = lib.get_config()
    logger = lib.get_logger(PROCESS)
    logger.warn("=== Starting {}".format(PROCESS))

    # Get the list of pool_blocks that are
    # old enough to unlock and
    # are not orphan blocks

    logger.debug(config.sections())

    # XXX TODO: The node may not be synced, may need to wait?

    grin_api_url = "http://" + config["grin_node"]["address"] + ":" + config[
        "grin_node"]["api_port"]
    status_url = grin_api_url + "/v1/status"
    blocks_url = grin_api_url + "/v1/blocks/"
    block_locktime = int(config[PROCESS]["block_locktime"])
    block_expiretime = int(config[PROCESS]["block_expiretime"])

    response = requests.get(status_url)
    latest = int(response.json()["tip"]["height"])
    logger.debug("Latest: {}", format(latest))

    new_poolblocks = db.get_poolblocks_by_state('new')
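    # Classify each new pool block as expired, orphan, or ready to unlock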
    for (pb_hash, pb_height, pb_nonce, pb_actual_difficulty, pb_net_difficulty,
         pb_timestamp, pb_found_by, pb_state) in new_poolblocks:
        if pb_height < latest - block_expiretime:
            # Don't re-process very old blocks - protection against duplicate payouts.
            logger.debug(
                "Processed expired pool block at height: {}".format(pb_height))
            db.set_poolblock_state("expired", int(pb_height))
            continue
        response = requests.get(blocks_url + str(pb_height)).json()
        # print("Response: {}".format(response))
        if int(response["header"]["nonce"]) != int(pb_nonce):
            logger.debug(
                "Processed orphan pool block at height: {}".format(pb_height))
            db.set_poolblock_state("orphan", int(pb_height))
        else:
            if pb_height < (latest - block_locktime):
                logger.debug(
                    "Unlocking pool block at height: {}".format(pb_height))
                db.set_poolblock_state("unlocked", int(pb_height))
        sys.stdout.flush()

    db.set_last_run(PROCESS, str(time.time()))
    db.close()
    logger.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
Example #7
    def get_doc_res(self, row_id, db_obj=''):
        if not db_obj:
            import db_api
            db_obj = db_api.db_api(self.storage_ip + "##" + self.cmp_table)
        docs_res = db_obj.read_doc_meta_info(row_id)
        FYE_num = 12
        filing_map = {}
        for row in docs_res:
            (doc_id, company_id, document_type, filing_type, period, year,
             document_name, document_release_date, document_from, document_to,
             document_download_date, previous_release_date, language, meta_data,
             user_name, date, time, assension_number, source, next_release_date,
             other_info, url_info, sec_filing_number, source_type) = row
            if not document_to:
                year, ptype = '', ''
            else:
                year, ptype = compute_period_and_date_obj.get_ph_from_date(
                    document_to, FYE_num, filing_type)
            #print [doc_id, document_type, filing_type, document_release_date, year, ptype, document_to.strftime('%d-%b-%Y')]
        # NOTE: filing_map and the computed year/ptype values are never used;
        # the function returns an empty list as written.
        return []
Example #8
import sys
from time import sleep

import requests

import db_api
import lib

# get_current_height() and PROCESS are defined at module level
def main():
    db = db_api.db_api()
    config = lib.get_config()
    logger = lib.get_logger(PROCESS)
    logger.warn("=== Starting {}".format(PROCESS))

    grin_api_url = "http://" + config["grin_node"]["address"] + ":" + config[
        "grin_node"]["api_port"]
    status_url = grin_api_url + "/v1/status"
    blocks_url = grin_api_url + "/v1/blocks/"
    check_interval = float(config[PROCESS]["check_interval"])

    last = get_current_height(status_url)
    while True:
        latest = get_current_height(status_url)
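        # Process every block that appeared since the previous poll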
        for i in range(last + 1, latest + 1):
            url = blocks_url + str(i)
            response = requests.get(url).json()
            logger.warn("New Block: {} at {}".format(
                response["header"]["hash"], response["header"]["height"]))
            data_block = (response["header"]["hash"],
                          response["header"]["version"],
                          response["header"]["height"],
                          response["header"]["previous"],
                          response["header"]["timestamp"][:-1],
                          response["header"]["output_root"],
                          response["header"]["range_proof_root"],
                          response["header"]["kernel_root"],
                          response["header"]["nonce"],
                          response["header"]["total_difficulty"],
                          response["header"]["total_kernel_offset"])
            try:
                db.add_blocks([data_block])
            except Exception:
                # Ignore failures (e.g. the block may already be in the db)
                pass
        last = latest
        sys.stdout.flush()
        sleep(check_interval)
    logger.warn("=== Completed {}".format(PROCESS))
Example #9
    def get_page_cords(self, ijson):
        pid, db_string = ijson['Project'].split('__')
        docs = ijson['docs']
        page_cords = {}
        for ddoc in docs:
            if ddoc not in page_cords:
                v_sh_path = "/var/www/cgi-bin/INC_Interface/pysrc_29_01_20/page_cords/"
                v_sh_path = v_sh_path + "/" + pid + "/" + str(ddoc) + ".sh"
                if not os.path.exists(v_sh_path):
                    # Cache file is missing; regenerate page coordinates for this doc
                    import get_txt_info_new
                    vobj = get_txt_info_new.text_layer(self.config_path)
                    mount_path = self.config.get('mount_path', 'value')
                    path = mount_path + pid + "/" + "1" + "/pdata/docs/"
                    try:
                        vobj.page_cords(str(ddoc), path, ijson['Project'], pid)
                    except Exception:
                        pass
                try:
                    d = shelve.open(v_sh_path)
                    page_cords[ddoc] = d['data']
                    d.close()
                except Exception:
                    pass
        import db_api
        doc_str = ','.join(map(str, docs))
        pdf_cloud_data = self.config.get('pdf_cloud_data', 'value')
        db_str = pdf_cloud_data + "#" + db_string
        db_obj = db_api.db_api(db_str)
        docs_info = db_obj.get_docs_meta_info(doc_str)
        p_type_map = {}
        for doc_info in docs_info:
            doc_id, doc_name, doc_type, meta_data = doc_info
            if 'html' in doc_type.lower():
                p_type_map[doc_id] = 'html'
            else:
                p_type_map[doc_id] = doc_type
        return page_cords, p_type_map
Example #10
    def run(self, ijson):
        filter_ids = ijson.get('row_ids', [])
        import db_api
        db_obj = db_api.db_api(self.storage_ip + "##" + self.cmp_table)
        client_id = None
        cmp_info = db_obj.read_company_ids(client_id)
        cmp_id_str = []
        for each in cmp_info:
            row_id, company_id = each
            print(row_id, company_id)
            print(type(row_id), int(row_id))
            print("filter_ids::::::::", filter_ids)
            if int(row_id) not in filter_ids: continue
            cmp_id_str.append(str(int(company_id)))
        cmp_id_str1 = ','.join(cmp_id_str)
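        # NOTE: cmp_id_str1 is built but unused; the metadata read below is driven by filter_ids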
        all_cmp_info = db_obj.read_meta_info(', '.join(map(str, filter_ids)))
        for each_cmp in all_cmp_info:
            print('D::::::::::::::', each_cmp)
            row_id, company_name, company_display_name, meta_data = each_cmp
            print "1::::::::::::::::::::::::", row_id, company_name, company_display_name, meta_data
            print "filter ids:::::::::::::", filter_ids
            if row_id not in filter_ids: continue
            print "y::::::::::::::::::::::", row_id
            meta_data = eval(meta_data) if meta_data else {}
            row_id = int(row_id)
            print "G:::::::::::::", row_id
            company_display_name = company_display_name.replace('&amp;', '&')
            company_display_name = company_display_name.replace('&', ' And ')
            company_display_name = ' '.join(company_display_name.split())
            ijson_1 = {
                "user_id": "sunil",
                "ProjectName": company_display_name,
                "oper_flag": 622,
                "ProjectID": row_id
            }  #http://172.16.20.52:5010/tree_data
            ijson_2 = {
                "user_id": "sunil",
                "ProjectID": row_id,
                "WSName": company_display_name,
                "db_name": "DataBuilder_%s" % (row_id),
                "oper_flag": 90014
            }  #http://172.16.20.52:5010/tree_data
            ijson_3 = {
                "user_id": "sunil",
                "oper_flag": 97031,
                "ws_id": 1,
                "project_id": row_id
            }  #http://172.16.20.52:5010/tree_data
            self.execute_url(ijson_1)
            self.execute_url(ijson_2)
            self.execute_url(ijson_3)

        # cmd = "python cgi_wrapper_python.py '%s'" % (json.dumps(ijson_1))
        # cmd1 = "python cgi_wrapper_python.py '%s'" % (json.dumps(ijson_2))
        # cmd3 = "python cgi_wrapper_python.py '%s'" % (json.dumps(ijson_3))
        # print 'cmd 1', [cmd]
        # res1 = os.system(cmd)   # CALL AJAX
        # print 'cmd 2', [cmd1]
        # res2 = os.system(cmd1)  # CALL AJAX
        # print 'cmd 3', [cmd3]
        # res3 = os.system(cmd3)  # CALL AJAX
        db_obj.con.close()
        return 'done'
Example #11
    def __init__(self, source_video_path, match_id, db_host, db_port, db_user, db_passwd, db_name):
        # Here db_api is used as a class directly (e.g. from db_api import db_api)
        self.api = db_api(db_host=db_host, db_port=db_port, db_user=db_user, db_passwd=db_passwd, db_name=db_name)
        self.match_id = match_id
        self.video_load_path = source_video_path