Example #1
# Inferred imports (not shown in the snippet); `gt` is assumed to alias requests.get
from typing import Dict
from datetime import datetime as dt

import pandas as pd
from bs4 import BeautifulSoup
from requests import get as gt


def get_table(url) -> Dict:
    """Scrape the results table, write it to data.csv, and return it as a dict."""
    fields = ['Lottery', 'Date', 'Result']
    # writing to csv file
    with open('data.csv', 'w') as file:
        # writing headers (field names)
        file.write(', '.join(fields) + '\n')
        page = gt(url)
        soup = BeautifulSoup(page.text, 'html.parser')

        table = soup.find('table', id='resultados_chances')
        rows = table.find_all('tr')
        result = {}
        for row in rows[1:]:
            data = [cell.text for cell in row.find_all('td')]
            if data[0] not in result:
                result[data[0]] = []
            # keep every other token of the date cell (drops the filler words)
            date = data[2].split(' ')[::2]
            result[data[0]].append({
                'date': dt.strptime(' '.join(date), '%d %B %Y').strftime('%m-%d-%Y'),
                'value': data[-1],
            })
            file.write(
                f"{data[0]:<20}, {dt.strptime(' '.join(date), '%d %B %Y').strftime('%m-%d-%Y')}, {data[-1]:<20}\n"
            )
        # header texts plus the extra draw column the body rows appear to carry
        header = [cell.text for cell in rows[0].find_all(['th', 'td'])]
        header.insert(1, "Sorteo")
        df = pd.DataFrame(
            [[cell.text for cell in row.find_all('td')] for row in rows[1:]],
            columns=header,
        )
    return result
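
A quick usage sketch for the function above; the URL is a hypothetical placeholder, since the snippet only shows that the target page contains a table with id resultados_chances:

results = get_table("https://example.com/resultados")  # hypothetical URL
for lottery, draws in results.items():
    print(lottery, draws[0]["date"], draws[0]["value"])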
Example #2
# Inferred imports (not shown in the snippet)
import requests
from bs4 import BeautifulSoup
from tkinter import Tk, Label, Button

url = "..."  # assumed: the Cricbuzz live-score page to scrape (not shown in the snippet)


def getdata(data):
    team1, team2, team1_score, team2_score = data
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    team_1 = soup.find_all(class_="cb-ovr-flo cb-hmscg-tm-nm")[0].get_text()
    team_2 = soup.find_all(class_="cb-ovr-flo cb-hmscg-tm-nm")[1].get_text()

    team_1_score = soup.find_all(class_="cb-ovr-flo")[8].get_text()
    team_2_score = soup.find_all(class_="cb-ovr-flo")[10].get_text()
    team1.config(text=team_1)
    team2.config(text=team_2)
    team1_score.config(text=team_1_score)
    team2_score.config(text=team_2_score)


# module-level GUI setup
root = Tk()
a = Label(root, text='IPL Viewer', font=("", 40))
a.grid(row=0, columnspan=2)
team1 = Label(root, text="Team 1", font=("", 20))
team1.grid(row=1, column=0)
team2 = Label(root, text="Team 2", font=("", 20))
team2.grid(row=1, column=1)

team1_score = Label(root, text="hit refresh", font=("", 20))
team1_score.grid(row=2, column=0)
team2_score = Label(root, text="hit refresh", font=("", 20))
team2_score.grid(row=2, column=1)
data = [team1, team2, team1_score, team2_score]
refresh = Button(root, text="Refresh", command=lambda: getdata(data),
                 height=2, width=10, font=("", 20))
refresh.grid(row=3, columnspan=2)
root.mainloop()
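
If polling is preferred over the manual Refresh button, tkinter's after scheduler can re-run getdata on a timer; a minimal sketch, assuming it is registered just before root.mainloop() in the example above (the 30-second interval is arbitrary):

def auto_refresh():
    getdata(data)                    # reuse the widget list from the example
    root.after(30000, auto_refresh)  # schedule the next poll in 30 s

root.after(30000, auto_refresh)      # kick off the polling loop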
Example #3
# Inferred imports; `gt` is assumed to alias requests.get, and `files_or_folders`
# and `download_folder` are helpers from the same project (see Example #5)
from os import path, makedirs, chdir
from requests import get as gt


def get_folders(folder_key, folder_name="mediafire download"):

    # if the folder does not exist, create it, then enter it
    if not path.exists(folder_name):
        makedirs(folder_name)
    chdir(folder_name)

    # download all the files in the current folder
    download_folder(folder_key, folder_name)

    # look for subfolders
    folder_content = gt(files_or_folders(
        "folders", folder_key)).json()["response"]["folder_content"]

    # recurse into each subfolder, stepping back up afterwards
    if "folders" in folder_content:
        for folder in folder_content["folders"]:
            get_folders(folder["folderkey"], folder["name"])
            chdir("..")
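
Examples #3 and #5 both call a files_or_folders helper that the snippets never show. A minimal sketch of what it plausibly looks like, assuming it builds a MediaFire folder/get_content API URL (the endpoint and parameter names are assumptions inferred from how the JSON response is parsed):

def files_or_folders(content_type, folder_key, chunk=1):
    # assumed endpoint; the {"response": {"folder_content": ...}} shape
    # parsed above matches this API's response_format=json output
    return ("https://www.mediafire.com/api/1.4/folder/get_content.php"
            f"?folder_key={folder_key}&content_type={content_type}"
            f"&chunk={chunk}&response_format=json")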
Example #4
# Inferred imports; requests.get does not raise HTTPError on its own, so
# raise_for_status() is added to make the except clause reachable
from bs4 import BeautifulSoup as Soup
from requests import get, HTTPError


def download(file):
    """
    used to download direct file links
    """
    try:
        html = get(file["links"]["normal_download"])
        html.raise_for_status()
    except HTTPError:
        print("Deleted file or Dangerous File Blocked")
        return

    soup = Soup(html.text, "html.parser")
    link = (soup.find("div", {
        "class": "download_link"
    }).find("a", {
        "class": "input popsok"
    }).attrs["href"])

    filename = file["filename"]

    print(f"Downloading \"{filename}\".")
    with open(filename, "wb") as f:
        f.write(get(link).content)
    print(f"\"{filename}\" downloaded.")
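
The function expects a file record shaped like the MediaFire API responses used in Example #5; a hand-built call might look like this (field values are placeholders):

sample_file = {
    "filename": "report.pdf",  # placeholder name
    "links": {"normal_download": "https://www.mediafire.com/file/..."},
}
download(sample_file)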
Example #5
# Inferred imports; `gt` is assumed to alias requests.get, `download` is the
# helper from Example #4, and `files_or_folders` builds the folder API URL
from threading import Thread
from requests import get as gt


def download_folder(folder_key, folder_name):

    # collect all the file records
    data = []
    chunk = 1
    more_chunks = True

    try:
        # if there are more than 100 files, make another request
        # and append the result to data
        while more_chunks:
            r_json = gt(files_or_folders("files", folder_key,
                                         chunk=chunk)).json()
            more_chunks = r_json["response"]["folder_content"][
                "more_chunks"] == "yes"
            data += r_json["response"]["folder_content"]["files"]
            chunk += 1

    except KeyError:
        print("Invalid link.")
        return

    threads = []

    # create a download thread for every file
    for file in data:
        threads.append(Thread(target=download, args=(file,)))

    # start all threads
    for thread in threads:
        thread.start()

    # wait for all threads to finish
    for thread in threads:
        thread.join()

    print(f"\"{folder_name}\" download completed.")
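
Putting Examples #3, #4, and #5 together, the entry point is get_folders with a folder key taken from a mediafire.com/folder/<key>/... URL; the key below is a placeholder:

if __name__ == "__main__":
    get_folders("abc123xyzkey")  # placeholder key; files land in "mediafire download"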