Code Example #1
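Every example in this section calls a getHtml helper that is not shown. A minimal sketch of what it presumably does, assuming the requests and beautifulsoup4 packages (the timeout and parser choice are my own guesses):

import requests
from bs4 import BeautifulSoup

def getHtml(url):
    # hypothetical stand-in for the unshown helper: fetch the page and
    # parse it into a BeautifulSoup object for the findAll() calls below
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return BeautifulSoup(response.text, "html.parser")
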
def scrapePageOfBlocks(p):
    blocks = []
    url = f"https://vcash.tech/?page={p}"
    soup = getHtml(url)

    # find all the <tr/> html elements - these are table rows
    # (the first one just contains column headers so we discard it)
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": int(tds[0].a.string),
            "Hash": tds[1].a.font.string,
            "Date": getDateFromAge(tds[2].string),
            "Difficulty": float(tds[3].string.replace(",", "")),
            "Reward": float(tds[4].string),
            "Kernels": int(tds[5].string),
            "TokenKernels": int(tds[6].string)
        })

    # sanity check: print the first parsed block of the first page
    if p == 1:
        print("\n", blocks[0])

    return blocks
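
Example #1 also calls a getDateFromAge helper that is not shown. Assuming the explorer's age column holds strings like "3 minutes" or "2 hours" (a guess about the site's format), a sketch might be:

from datetime import datetime, timedelta

def getDateFromAge(age):
    # hypothetical: convert a relative age such as "3 minutes" into an
    # absolute datetime by subtracting it from the current time
    value, unit = age.strip().split()[:2]
    seconds = {"second": 1, "minute": 60, "hour": 3600, "day": 86400}
    return datetime.utcnow() - timedelta(seconds=int(value) * seconds[unit.rstrip("s")])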
Code Example #2
import time

def getTopHoldersForToken(contractId):
    holders = []

    url = "https://etherscan.io/token/" + contractId
    soup = getHtml(url)
    cards = soup.findAll("div", {"class": "card-body"})

    # read the token's decimal count from the "Decimals" row
    div = cards[1].find("div", {
        "id": "ContentPlaceHolder1_trDecimals"
    }).find("div", {"class": "col-md-8"})
    decimals = int(div.text.rstrip())

    # the total supply is displayed human-readably; strip the thousands
    # separators and split off any fractional digits
    div = cards[0].find("div", {"class": "col-md-8 font-weight-medium"})
    sParam = div.span["title"].replace(" ", "").replace(",", "").split(".")

    # fractional digits already shown count against the decimal padding
    if len(sParam) == 2:
        decimals -= len(sParam[1])

    # join the parts and pad with zeros to get the raw integer supply
    sParam = "".join(sParam) + ("0" * decimals)

    # fetch up to 20 pages of holders, stopping early at the first empty page
    for p in range(1, 21):
        h = scrapePageOfTokenTopHolders(contractId, p, sParam)
        if len(h) == 0:
            break
        holders += h
        time.sleep(0.05)

    return holders
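
To make the supply handling above concrete, here is a worked example with invented numbers: a displayed supply of 20,000.75 and 18 decimals reduce to the raw integer amount like so.

# hypothetical values, for illustration only
title = "20,000.75"                                          # span["title"]
decimals = 18
parts = title.replace(" ", "").replace(",", "").split(".")   # ["20000", "75"]
decimals -= len(parts[1])                                    # 18 - 2 = 16
raw = "".join(parts) + "0" * decimals                        # "2000075" + 16 zeros
assert raw == "20000750000000000000000"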
Code Example #3
def scrapePageOfBlocks(p):
    blocks = []
    url = f"https://vechainthorscan.com/blocks?page={p}"
    soup = getHtml(url)
   
    # find all the <tr/> html elements - these are table rows
    # (the first one just contains column headers so we discard it)
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": int(tds[0].a.string),
            "Date": tds[1].span.string, 
            "Transactions": tds[2].string,
            "Clauses": tds[3].string,
            "% Gas used": float(tds[4].div.div.string[:-1]),
            "Gas limit": float(tds[5].string.replace(",","")),
            "VTHO Burned": float(tds[6].string[:-5].replace(",","")),
            "Signer": tds[7].a["href"][9:]
        })

    if p == 0:
        print("\n", blocks[0])

    return blocks
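
None of the page-numbered scrapers show their driver loop. Following the same pattern as getTopHoldersForToken in Example #2, a hypothetical driver (the function name and maxPages cap are my own) pages until the scraper returns an empty list:

import time

def scrapeAllBlocks(maxPages=100):
    blocks = []
    for p in range(1, maxPages + 1):
        page = scrapePageOfBlocks(p)
        if not page:          # an empty page means we ran off the end
            break
        blocks += page
        time.sleep(0.05)      # small delay to be polite to the server
    return blocks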
Code Example #4
def scrapePageOfBlocks(hi):
    blocks = []
    url = f"https://minergate.com/blockchain/btg/blocks/{hi}?json=1"
    soup = getHtml(url)

    # find all the <tr/> html elements - these are table rows
    # (the first one just contains column headers so we discard it)
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": int(tds[0].a.string),
            "Date": tds[1].span.text[:19],
            "Size": int(tds[2].string.replace(",", "")),
            "Transactions": int(tds[3].string),
            "Hash": tds[4].a.string
        })

    if hi == 632606:
        print("\n", blocks[0])

    return blocks
Code Example #5
def scrapePageOfBlocks(hi):
    blocks = []
    url = f"https://block.d.evco.in/chain/Devcoin?hi={hi}&count=1000"
    soup = getHtml(url)
   
    # find all the <tr/> html elements - these are table rows
    # (the first one just contains column headers so we discard it)
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": int(tds[0].a.string),
            "Date": tds[1].string, 
            "Transactions": int(tds[2].string),
            "Value out": float(tds[3].string),
            "Difficulty": float(tds[4].string),
            "Outstanding": float(tds[5].string),
            "Average age": float(tds[6].string),
            "Chain age": float(tds[7].string),
            "% CoinDD": float(tds[8].string[:-1]) if tds[8].string != '' else None 
        })

    if hi == 421106:
        print("\n", blocks[0])

    return blocks
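
Examples #4, #5, #7 and #11 page by block height rather than page number. A hypothetical driver for the explorers that accept hi/count parameters might step the height down in chunks (again, the name and structure are my own sketch):

import time

def scrapeAllBlocksByHeight(topHeight, count=1000):
    # walk from topHeight down towards 0 in steps of `count`, matching the
    # hi/count query parameters used above
    blocks = []
    for hi in range(topHeight, -1, -count):
        page = scrapePageOfBlocks(hi)
        if not page:
            break
        blocks += page
        time.sleep(0.05)
    return blocks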
Code Example #6
def scrapePageOfBlocks(p):
    blocks = []
    url = f"https://grin.blockscan.com/blocks?p={p}"
    soup = getHtml(url)
   
    # find all the <tr/> html elements - these are table rows
    # (the first one just contains column headers so we discard it)
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": int(tds[0].a.string),
            "Date": tds[1].span["title"], 
            "Difficulty": float(tds[2].string),
            "Inputs": int(tds[3].a.string),
            "Outputs": int(tds[4].a.string),
            "Kernels": int(tds[5].a.string),
            "PoW": tds[6].string,
            "Reward": tds[7].string,
            "Mining time": tds[8].string
        })

    if p == 1:
        print("\n", blocks[0])

    return blocks
Code Example #7
def scrapePageOfBlocks(hi, p):
    blocks = []
    url = "https://abe.dash.org/chain/Dash?hi=%d&count=1000" % hi
    soup = getHtml(url)

    # find all the <tr/> html elements - these are table rows
    # (the first one just contains column headers so we discard it)
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": int(tds[0].a.string),
            "Timestamp": parseTimestamp(tds[1].string),
            "Transactions": int(tds[2].string),
            "Value": float(tds[3].string),
            "Difficulty": float(tds[4].string),
            "Outstanding": float(tds[5].string),
            "Chain age": float(tds[7].string),
            "% CoinDD": float(tds[8].string[:-1]) if tds[8].string != '' else None
        })

    if p == 1:
        print("\n", blocks[0])

    return blocks
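
Examples #7, #9 and #12 call a parseTimestamp helper that is not shown. Assuming the explorers print timestamps as "YYYY-MM-DD HH:MM:SS" (an assumption on my part), a minimal sketch:

from datetime import datetime

def parseTimestamp(s):
    # hypothetical: assumes timestamps like "2020-06-15 12:34:56"
    return datetime.strptime(s.strip(), "%Y-%m-%d %H:%M:%S")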
Code Example #8
from datetime import datetime

def scrapePageOfBlocks(p):
    blocks = []
    url = "https://litecoinblockexplorer.net/blocks?page=%d" % p
    soup = getHtml(url)
    # find all the <tr/> html elements and discard the header row
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": tds[0].a.string,
            "Hash": tds[1].string,
            "Date": datetime.strptime(tds[2].string, "%a, %d %b %Y %H:%M:%S UTC"),
            "Transactions": float(tds[3].string),
            "Size": float(tds[4].string)
        })

    return blocks
Code Example #9
def scrapePageOfBlocks(height):
    blocks = []

    url = "https://moneroblocks.info/browser/" + str(height)
    soup = getHtml(url)
    # blocks are rendered as <div> grid rows rather than as a <table>
    rows = soup.findAll("div", {"class": "row show-grid top-row"})

    for row in rows:
        columns = row.findAll("div")
        blocks.append({
            "Height": int(columns[0].strong.string),
            "Hash": columns[4].string,
            "Timestamp": parseTimestamp(columns[3].string),
            "Transactions": int(columns[2].string),
            "Size": int(columns[1].string),
        })

    return blocks
Code Example #10
def scrapePageOfBlocks(pageNum):
    blocks = []
    url = "https://explorer.pivx.link/blocks?page=%d" % pageNum
    soup = getHtml(url)
   
    # find all the <tr/> html elements - these are table rows
    # (the first one just contains column headers so we discard it)
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": tds[0].a.string,
            "Hash": tds[1].string,
            "Date": parseDate(tds[2].string),
            "Transactions": float(tds[3].string),
            "Size": float(tds[4].string)
        })

    return blocks
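
parseDate in Example #10 is likewise not shown. Since explorer.pivx.link appears to serve a block table much like the Litecoin explorer in Example #8, a plausible sketch reuses that example's format string (an assumption, not something the source confirms):

from datetime import datetime

def parseDate(s):
    # hypothetical: reuses the date format seen in Example #8
    return datetime.strptime(s, "%a, %d %b %Y %H:%M:%S UTC")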
Code Example #11
def scrapePageOfBlocks(hi):
    blocks = []
    url = f"https://mainnet.decred.org/blocks?height={hi}&rows=100"
    soup = getHtml(url)

    # find all the <tr/> html elements - these are table rows
    # (the first one just contains column headers so we discard it)
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": None if not tds[0].a.string else int(tds[0].a.string),
            "Date": tds[10].text,
            "Size": tds[7].string,
            "Transactions": int(tds[1].string),
            "Votes": int(tds[2].string),
            "Tickets": int(tds[3].string),
            "Revocations": int(tds[4].string),
            "DCR": tds[6].text,
            "Version": int(tds[8].text)
        })

    if hi == 447366:
        print("\n", blocks[0])

    return blocks
Code Example #12
def scrapePageOfBlocks(pageNum):
    blocks = []
    url = "https://digibyteblockexplorer.com/blocks?page=" + str(pageNum)
    soup = getHtml(url)
   
    # find all the <tr/> html elements - these are table rows
    # (the first one just contains column headers so we discard it)
    rows = soup.findAll("tr")[1:]

    # extract the data from each row of the table
    for row in rows:
        tds = row.findAll("td")
        blocks.append({
            "Height": int(tds[0].a.string),
            "Hash": tds[1].string,
            "Timestamp": parseTimestamp(tds[2].string),
            "Transactions": int(tds[3].string),
            "Size": int(tds[4].string),
        })

    if pageNum == 1:
        print("\n", blocks[0])

    return blocks