Example #1
import os
import sys
import urllib.request
import zipfile

from bs4 import BeautifulSoup


def main():
    parser = make_parser()  # Argument-parser helper defined elsewhere in the script
    args = parser.parse_args()

    manga = args.manga_name
    site  = args.site

    base_path = site + manga + '/'
    folder_name = manga.replace('_', ' ').title()

    if not os.path.exists(folder_name):
        os.makedirs(folder_name) # Create the manga folder

    for n_volume in range(args.fr, args.to):
        # Get the volume string
        str_volume = pad_zeros(str(n_volume), 3)  # Ex: "001"
        filename_volume = "{}/Volume {}.cbz".format(folder_name, str_volume)

        zip_volume = zipfile.ZipFile(filename_volume, "w")

        url = base_path + str_volume

        for page in range(1, 9999):
            # HTML request + souping
            try:
                html = urllib.request.urlopen(url).read() # We get the HTML of the page
                soup = BeautifulSoup(html, 'html.parser') # We cook it into a soup
                image_path = soup.find("img", class_="picture")['src'] # Image URL
                abs_image_path = site+image_path
            except TypeError:
                if page == 1:
                    print("Volume {} does not exist ({}). Exiting.".format(n_volume, url))
                else:
                    print("An error occurred while downloading page {} of volume {}. Exiting.".format(page, n_volume))
                sys.exit()
            # Naming file
            str_page = pad_zeros(str(page), 3)  # Ex: "001"
            filename = '{}.jpg'.format(str_page)  # Zero-padded so pages sort correctly in the archive

            # Downloading page
            image_data = urllib.request.urlopen(abs_image_path.replace(" ", "%20")).read()
            zip_volume.writestr(filename, image_data)
            print('Page {} of volume {} has been downloaded'.format(page, n_volume))

            # Search for next page
            next_a = soup.find(lambda tag: tag.name == 'a' and tag.has_attr('href')
                               and 'Next Page' in tag.get_text())

            if next_a:
                url = site + next_a['href']
            else:
                break
        zip_volume.close()
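
Both examples rely on two helpers, make_parser() and pad_zeros(), that are defined elsewhere in the script. Below is a minimal sketch of what they might look like, assuming an argparse-based CLI; the argument names (manga_name, site, fr, to) are inferred from how main() reads args, and the bodies are guesses rather than the original code:

import argparse

def make_parser():
    # Hypothetical reconstruction: argument names inferred from main()'s usage of args.
    parser = argparse.ArgumentParser(
        description="Download manga volumes into .cbz archives")
    parser.add_argument("manga_name", help="Manga name, underscore-separated (e.g. some_title)")
    parser.add_argument("site", help="Base URL of the site to scrape")
    parser.add_argument("fr", type=int, help="First volume to download")
    parser.add_argument("to", type=int, help="Volume to stop before (exclusive)")
    return parser

def pad_zeros(s, width):
    # Hypothetical helper: left-pad a numeric string with zeros ("1" -> "001").
    return s.zfill(width)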
Example #2
#-----------------------------------
# Let the download begin!
#-----------------------------------
base_path = site + manga + '/'
folder_name = manga.replace('_', ' ').title()

if not os.path.exists(folder_name):
    os.makedirs(folder_name)  # Create the manga folder

is_last_volume = False  # The script stops once the last volume is reached

for n_volume in range(download_from, download_to):

    # Get the volume string
    str_volume = pad_zeros(str(n_volume), 3)  # Ex: "001"
    folder_volume = folder_name + '/Volume ' + str_volume  # Ex: "Volume 001"

    if not os.path.exists(folder_volume):
        os.makedirs(folder_volume)  # Create the volume folder

    # Generate the url
    url = base_path + str_volume

    for page in range(1, 9999):
        # HTML request + souping
        html = urllib.request.urlopen(url).read()  # We get the HTML of the page
        soup = BeautifulSoup(html, 'html.parser')  # We cook it into a soup
        image_path = soup.find("img", class_="picture")['src']  # Image URL
        abs_image_path = site + image_path
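
Example #2 breaks off right after computing abs_image_path. Here is a hedged sketch of how the inner loop might continue under this variant's folder-per-volume layout, mirroring the download and next-page logic of Example #1; treat it as an illustration, not the original continuation:

        # Sketch only: save the page image into the volume folder
        str_page = pad_zeros(str(page), 3)
        with open('{}/{}.jpg'.format(folder_volume, str_page), 'wb') as f:
            f.write(urllib.request.urlopen(abs_image_path.replace(' ', '%20')).read())
        print('Page {} of volume {} has been downloaded'.format(page, n_volume))

        # Follow the "Next Page" link, as in Example #1; stop at the last page
        next_a = soup.find(lambda tag: tag.name == 'a' and tag.has_attr('href')
                           and 'Next Page' in tag.get_text())
        if next_a:
            url = site + next_a['href']
        else:
            break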