Example #1
 def run(self):
     if tinify.key is not None:
         # If aws keys are available, use them to fetch the image and write back
         if aws_key_id and aws_secret and aws_bucket and aws_region:
             source_url_http = urlparse(StaticNode.handle_simple(
                 self.instance.file.name),
                                        scheme='http').geturl()
             source_url_https = urlparse(StaticNode.handle_simple(
                 self.instance.file.name),
                                         scheme='https').geturl()
             source = tinify.from_url(source_url_https)
             path = "%s/%s" % (aws_bucket, self.instance.file.name)
             source.store(service='s3',
                          aws_access_key_id=aws_key_id,
                          aws_secret_access_key=aws_secret,
                          region=aws_region,
                          path=path)
             if cf_zone_id and cf_api_key and cf_api_email:
                 cf = CloudFlare.CloudFlare()
                 cf.zones.purge_cache.delete(
                     cf_zone_id,
                     data={'files': [source_url_http, source_url_https]})
         # Else we grab the local image, optimize it and override the local file
         else:
             path = os.getcwd() + self.instance.url
             source = tinify.from_file(path)
             source.to_file(path)
     else:
         print "No tinify key"
Example #2
 def test_should_compress_from_url(self):
     source = tinify.from_url(
         'https://raw.githubusercontent.com/tinify/tinify-python/master/test/examples/voormedia.png'
     )
     with tempfile.NamedTemporaryFile() as tmp:
         source.to_file(tmp.name)
         self.assertTrue(0 < os.path.getsize(tmp.name) < 1500)
Example #3
 def test_should_return_source(self):
     httpretty.register_uri(httpretty.POST,
                            'https://api.tinify.com/shrink',
                            location='https://api.tinify.com/some/location')
     tinify.key = 'valid'
     self.assertIsInstance(tinify.from_url('http://example.com/test.jpg'),
                           tinify.Source)
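
A natural companion test, sketched under the assumption (per Tinify's documented error classes) that the client raises tinify.AccountError for HTTP 401 responses; the test name and response body are illustrative:

 def test_should_raise_account_error_with_invalid_key(self):
     httpretty.register_uri(
         httpretty.POST, 'https://api.tinify.com/shrink', status=401,
         body='{"error":"Unauthorized","message":"Credentials are invalid"}')
     tinify.key = 'invalid'
     with self.assertRaises(tinify.AccountError):
         tinify.from_url('http://example.com/test.jpg')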
Example #4
def url_image(url):
    output_path = os.path.join(output_dir, 'tiny.png')
    try:
        source = tinify.from_url(url)
        source.to_file(output_path)
        print(url + " converted successfully")
    except tinify.Error:
        print(url + " conversion failed")
Example #5
def resize_image(image_url, index):
    # Download, compress, and crop the image to 512x512 via the Tinify API
    source = tinify.from_url(image_url)
    resized = source.resize(method="cover", width=512, height=512)
    output_name = 'img/output_' + str(index)
    resized.to_file(output_name + '.jpg')

    # Re-encode the result as PNG with Pillow, then remove the JPEG copy
    img = Image.open(output_name + '.jpg')
    img.save(output_name + '.png')

    os.remove(output_name + '.jpg')
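
A variant that avoids the intermediate JPEG on disk by holding the compressed bytes in memory; a sketch assuming Pillow is installed and tinify.key is already set:

from io import BytesIO

from PIL import Image
import tinify

def resize_image_in_memory(image_url, index):
    resized = tinify.from_url(image_url).resize(method="cover", width=512, height=512)
    # to_buffer() returns the compressed image as bytes, so no temp file is needed
    img = Image.open(BytesIO(resized.to_buffer()))
    img.save('img/output_' + str(index) + '.png')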
Example #6
def save_img(url, key, unique):
    print(url)
    img = requests.get(url, stream=True)
    if img.status_code == 200:
        source = tinify.from_url(url)
        source.to_file('vs/' + str(tid) + '/' + char[key] + '-' +
                       str(unique) + '.png')
        return char[key] + '-' + str(unique) + '.png'
    else:
        return ''
Example #7
 def compressFile(inputURL):
     print("compress_file-------------------------------------")
     filename = inputURL.split('/')[-1].strip()
     url = inputURL
     print(url)
     print("Getting:" + filename)
     complete_file = os.path.join(save_path, filename)
     source = tinify.from_url(url)
     print("---almost there---")
     source.to_file(complete_file)
     print("---Success?---")
     return
Example #8
def import_comics(request):
    if request.method == 'POST':
        spreadsheet = request.FILES['comics']
        reader = csv.reader(spreadsheet)
        next(reader)  # skip the header row
        issues_uploaded = 0
        for row in reader:
            (description, stock_no, title, series_code, issue_number,
             publisher, ship_date, price, artist, writer, colorist,
             cover_image_link) = row
            series_title = title.split(' #')[0]
            issue_title = series_title + " #" + issue_number
            ship_date = datetime.datetime.strptime(ship_date, "%m/%d/%Y")
            if Series.objects.filter(series_code=series_code).count() > 0:
                print "SERIES ALREADY EXISTS, REGULAR UPDATE"
                series = Series.objects.get(series_code=series_code)
            else:
                print "CREATING A SERIES"
                series = Series.objects.create(publisher=publisher,
                                               title=series_title,
                                               series_code=series_code)
            if Issue.objects.filter(series_code=series_code,
                                    number=issue_number).count() > 0:
                print "ISSUE ALREADY EXISTS, DONT CREATE IT"
                continue
            else:
                new_issue = Issue.objects.create(title=issue_title,
                                                 description=description,
                                                 ship_date=ship_date,
                                                 series=series,
                                                 number=issue_number,
                                                 price=price,
                                                 artist=artist,
                                                 publisher=publisher,
                                                 series_code=series_code,
                                                 writer=writer)
            cover_image = urllib.request.urlretrieve(cover_image_link)
            new_issue.cover_image = File(open(cover_image[0], 'rb'))
            new_issue.cover_image.name = title + '.jpg'
            new_issue.save()
            source = tinify.from_url(cover_image_link)
            # to_file accepts a plain path; no need to wrap an open file
            source.to_file(settings.BASE_DIR + new_issue.cover_image.url)
            new_issue.save()
            issues_uploaded += 1
            if issues_uploaded > 10:
                break
        return redirect(request.META.get('HTTP_REFERER', '/'))
    else:
        context = {'form': ImportComicForm()}
        return render(request, 'comics/import.html', context)
Example #9
    def test_should_compress_from_url(self):
        source = tinify.from_url('https://raw.githubusercontent.com/tinify/tinify-python/master/test/examples/voormedia.png')
        with tempfile.NamedTemporaryFile() as tmp:
            source.to_file(tmp.name)

            size = os.path.getsize(tmp.name)
            contents = tmp.read()

            self.assertTrue(1000 < size < 1500)

            # width == 137
            self.assertIn(b'\x00\x00\x00\x89', contents)
            self.assertNotIn(b'Copyright Voormedia', contents)
Example #10
def compress_save_image(image):
    print("Processing", image)
    start = time.time()
    img_name = str(image)
    temp_name = os.path.basename(img_name)
    temp_save_location = os.path.join("_temp", temp_name)
    # saving
    orig = tinify.from_url("https://" + str(AWS_BUCKET) + ".s3.amazonaws.com/" + image)
    if RESIZE_WIDTH is not None:
        resized = orig.resize(
            method="scale",
            width=RESIZE_WIDTH
        )
        resized.to_file(temp_save_location)
    else:
        orig.to_file(temp_save_location)
    # TODO refactor and remove redundant try/catch blocks
    if ".jpg" in temp_name or ".jpeg" in temp_name:
        content_type = "image/jpeg"
    elif ".png" in temp_name:
        content_type = "image/png"

    with open(temp_save_location, 'rb') as source:
        img_data = source.read()

    print(image, "has been successfully compressed.")
    # upload to S3, overwrite old file
    save_img = s3.put_object(
        Body = img_data,
        Bucket = AWS_BUCKET,
        Key = image,
        ContentType = content_type
    )

    print(img_name, "has been saved to S3 bucket", AWS_BUCKET)
    s3.put_object_tagging(
        Bucket = AWS_BUCKET,
        Key = image,
        Tagging = {
            "TagSet" : [
                {
                    "Key": "s3-tinify-compressed",
                    "Value": "True"
                }
            ]
        }
    )
    print(image, "has been marked as compressed!")
    end = time.time() - start
    print("This took", round(end, 2), "seconds")
Example #11
async def on_message(message):
    if message.author == client.user:
        return
    elif message.channel.is_private:
        tmp = await client.send_message(message.channel, 'hello')
        await client.edit_message(tmp, 'Hello.')
    elif message.content.startswith('!crop') and len(message.attachments)>0:
        source = tinify.from_url(message.attachments[0]['url'])
        resized = source.resize(
            method="cover",
            width=128,
            height=128
        )
        resized.to_file("img.png")
        await client.send_file(message.channel, "img.png")
    elif message.content.startswith('!crop') and len(message.embeds)>0:
        source = tinify.from_url(message.embeds[0]['url'])
        resized = source.resize(
            method="cover",
            width=128,
            height=128
        )
        resized.to_file("img.png")
        await client.send_file(message.channel, "img.png")
Example #12
def down(num, wxurl):
    folder = f'海賊王/{num}'
    os.makedirs(folder, exist_ok=True)
    res = requests.get(wxurl)
    imgs = BeautifulSoup(res.text,
                         'lxml').find('div', id="js_content").find_all('img')
    for px, img in enumerate(imgs, 1):
        url = img.get("data-src")
        path = os.path.join(folder, f'{px}.jpg')
        source = tinify.from_url(url)  # download and compress the image
        source.to_file(path)
        print(f'{path} downloaded')
    print(f"{folder} download complete!")
Example #13
    def compress_and_resize(self) -> bool:
        try:
            compressed_images = tinify.from_url(self.parser.get_object_url())

            resized_image = compressed_images.resize(method='scale',
                                                     width=1200)

            S3Service.upload_file(resized_image.to_buffer(),
                                  self.parser.get_object_key())

            return True
        except tinify.AccountError as e:
            logger.error('Verify your API key and account limit.')
            logger.exception(f'The error message is: {e.message}')

            return False
        except tinify.ClientError as e:
            logger.error('Check your source image and request options.')
            logger.exception(f'The error message is: {e.message}')

            return False
        except tinify.ServerError as e:
            logger.error('Temporary issue with the Tinify API.')
            logger.exception(f'The error message is: {e.message}')

            return False
        except tinify.ConnectionError as e:
            logger.error('A network connection error occurred.')
            logger.exception(f'The error message is: {e.message}')

            return False
        except Exception as e:
            logger.info(
                'Something else went wrong, unrelated to the Tinify API.')
            logger.exception(f'The error message is: {e}')

            return False
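
The account limit mentioned in the AccountError branch can also be watched proactively: the client exposes a running total after each request. A minimal sketch, assuming a free-tier quota of 500 compressions per month and a placeholder URL:

import tinify

source = tinify.from_url("https://example.com/photo.png")  # placeholder
source.to_file("photo-compressed.png")

# tinify.compression_count is updated by the client after every API call
if tinify.compression_count and tinify.compression_count > 450:
    print("Approaching the monthly compression limit:", tinify.compression_count)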
Example #14
 def load_url(self, url):
     self._tiny_ref = tinify.from_url(url)
Example #15
def image_spider():

    # This creates the working directories alongside the script
    try:
        os.makedirs('CORE FILES')
    except Exception as error_code:
        print(error_code)
        logging.error(error_code)
    try:
        os.makedirs('IMAGES')
    except Exception as error_code:
        print(error_code)
        logging.error(error_code)
    try:
        os.makedirs('IMAGES/ORIGINAL')
    except Exception as error_code:
        print(error_code)
        logging.error(error_code)
    try:
        os.makedirs('IMAGES/COMPRESS')
    except Exception as error_code:
        print(error_code)
        logging.error(error_code)
    try:
        os.makedirs('DATABASE')
    except Exception as error_code:
        print(error_code)
        logging.error(error_code)

    try:
        # This downloads the sitemap file from the given url
        wget.download(sitemap_address, 'CORE FILES/sitemap.xml')
    except Exception as error_code:
        input(
            "The link address to the sitemap is invalid. Press any key to exit now. . ."
        )
        logging.info('Closed Program')
        print(error_code)
        logging.critical(error_code)
        sys.exit()

    # This indicates the file to open, in this case, 'sitemap.xml'
    sitemap = 'CORE FILES/sitemap.xml'
    source_code = open(sitemap, 'r')
    # This tells the computer to convert the xml file into plain text
    plain_text = source_code.read()
    # This uses the 'BeautifulSoup' module  to parse the xml file
    soup = BeautifulSoup(plain_text, 'xml')
    # This counts the occurrences of a specific tag in the document, in this case the 'loc' tag,
    # for which the number is stored as an integer variable
    url_count = len(soup.find_all('loc'))
    # This declares a list element to separate and store parsed links
    link_list = []

    # MICROSOFT EXCEL DATABASE
    # This creates an excel workbook, adds its sheets, then writes the header row
    # Using the 'xlsxwriter' module, the method for writing to a cell is (row, column, text)
    outbook = xlsxwriter.Workbook('analytics.xlsx')
    # This adds format to the excel file elements
    bold = outbook.add_format({'bold': True})
    italic = outbook.add_format({'italic': True})
    images_sheet = outbook.add_worksheet('images')
    statistics_sheet = outbook.add_worksheet('statistics')
    benchmark_sheet = outbook.add_worksheet('benchmark')

    # The 'images' sheet stores image size and corresponding url
    images_sheet.write(0, 0, 'Original Size', bold)
    images_sheet.set_column(0, 0, 17)
    images_sheet.write(0, 1, 'Compressed Size', bold)
    images_sheet.set_column(1, 1, 17)
    images_sheet.write(0, 2, 'File Name', bold)
    images_sheet.set_column(2, 2, 50)
    images_sheet.write(0, 3, 'URL', bold)
    images_sheet.set_column(3, 3, 140)

    # The 'statistics' sheet tracks the results of the program
    statistics_sheet.write(0, 0, 'Number of Links Retrieved', bold)
    statistics_sheet.set_column(0, 0, 50)
    statistics_sheet.write(0, 1, 'Total Images Downloaded', bold)
    statistics_sheet.set_column(1, 1, 50)
    statistics_sheet.write(0, 2, 'Original Collective File Size', bold)
    statistics_sheet.set_column(2, 2, 50)
    statistics_sheet.write(0, 3, 'Compressed Collective File Size', bold)
    statistics_sheet.set_column(3, 3, 50)
    statistics_sheet.write(0, 4, 'Optimization Freed Space', bold)
    statistics_sheet.set_column(4, 4, 50)

    # The 'benchmark' sheet stores data tracing the performance
    benchmark_sheet.write(0, 0, 'Program Execution Time', bold)
    benchmark_sheet.set_column(0, 0, 50)
    benchmark_sheet.write(0, 1, 'Proxy Denied Requests', bold)
    benchmark_sheet.set_column(1, 1, 170)
    benchmark_sheet.write(0, 2, 'Broken URL (404 Not Found)', bold)
    benchmark_sheet.set_column(2, 2, 170)
    benchmark_sheet.write(0, 3, 'Unsupported Media Type', bold)
    benchmark_sheet.set_column(3, 3, 170)
    benchmark_sheet.write(0, 4, 'Other Skipped Links', bold)
    benchmark_sheet.set_column(4, 4, 170)

    # These are local variables
    images_row = 0
    proxy_row = 0
    brokenurl_row = 0
    media_row = 0
    otherlinks_row = 0
    link_counter = 0
    image_counter = 0
    collectivesize_before = 0
    collectivesize_after = 0

    # FOR LOOP
    # This creates a for loop to search through the xml file
    for i in range(url_count):  # iterate over every 'loc' tag counted above

        try:

            # This uses the 'BeautifulSoup' module to find all the 'loc' tags in the xml file
            # Collecting plain text between the opening and closing tags, in this case, '<loc>' and '</loc>"
            url = soup.findAll("loc")[i].text
            # This adds each of the collected links into the list
            link_list.append(url)

            # For recording purposes, this prints the url and writes it to the excel file
            print(' ' + '\n' + 'Page URL: ' + link_list[i])
            images_sheet.merge_range(images_row + 1, 0, images_row + 1, 3, '')
            images_sheet.write(images_row + 1, 0, link_list[i], italic)
            images_row += 1
            link_counter += 1
            logging.info('Page URL: ' + link_list[i])

            # This conditional statement separates the two types of links: image and website
            # Direct image urls include file extensions (listed in the variables below), while website urls do not

            # CONDITIONAL STATEMENT
            media_filetype = [
                '.jpg', '.jpeg', '.png', '.gif', '.tif', '.tiff', '.bmp',
                '.svg', '.webp', '.heic', '.heif'
            ]
            website_filetype = ['.html', '.phtml', '.htm']

            # PART(1/2): If the link pertains to an image file, download the image directly and record it
            if any(media_item in link_list[i]
                   for media_item in media_filetype):
                # This uses the 'urllib' module to open each link
                d = urllib.request.urlopen(link_list[i])

                # This obtains information and specifies the type of properties wanted, in this case, 'Content-Length'
                original_str = d.info(
                )['Content-Length']  # 'Content-Length' is the file size in bytes

                # This converts the default unit from b (bytes) to kb (kilobytes)
                original_size = int(original_str) / 1000

                # This collects images with a file size above a threshold
                # <--INPUT VALUE ADJUSTS THE THRESHOLD--> (recommended minimum file size: 80 KB)
                # Images larger than 80 KB are considered large and take longer to load,
                # thus slowing down website traffic and resulting in a lower Search Engine Ranking
                if int(min_threshold) < original_size:
                    if original_size < 1000:
                        # This converts the default unit from b (bytes) to kb (kilobytes)
                        original_size = int(original_str) / 1000
                        collectivesize_before += original_size
                        original_unit = ' KB '

                    # This reports sizes above 1000 KB in MB
                    # Though images are unlikely to exceed 1 GB, the next level can be added with the same logic
                    elif original_size > 1000:
                        original_size = int(original_str) / 1000000
                        collectivesize_before += (original_size * 1000)
                        original_unit = ' MB '

                    # This downloads the images
                    wget.download(link_list[i], 'IMAGES/ORIGINAL')
                    logging.info('Image Downloaded: ' + link_list[i])

                    # This uses the 'tinify' module to compress images
                    saved = tinify.from_url(link_list[i])
                    name = os.path.basename(link_list[i])
                    location = 'IMAGES/COMPRESS/' + name
                    saved.to_file(str(location))
                    compressed_str = os.path.getsize(str(location))
                    compressed_size = int(compressed_str) / 1000
                    logging.info('Image Compressed: ' + location)

                    if compressed_size < 1000:
                        # This converts the default unit from b (bytes) to kb (kilobytes)
                        compressed_size = int(compressed_str) / 1000
                        collectivesize_after += compressed_size
                        compressed_unit = ' KB '

                    # This reports sizes above 1000 KB in MB
                    # Though images are unlikely to exceed 1 GB, the next level can be added with the same logic
                    elif compressed_size > 1000:
                        compressed_size = int(compressed_str) / 1000000
                        collectivesize_after += (compressed_size * 1000)
                        compressed_unit = ' MB '

                    # For recording purposes, this prints the sizes and writes them to the excel file
                    print(' ' + '\n' + 'Original Size: ' + str(original_size) +
                          original_unit + link_list[i])
                    print('Compressed Size: ' + str(compressed_size) +
                          compressed_unit + link_list[i])
                    logging.info('Original Size: ' + str(original_size) +
                                 original_unit + link_list[i])
                    logging.info('Compressed Size: ' + str(compressed_size) +
                                 compressed_unit + link_list[i])
                    images_sheet.write(images_row + 1, 0,
                                       str(original_size) + original_unit)
                    images_sheet.write(images_row + 1, 1,
                                       str(compressed_size) + compressed_unit)
                    images_sheet.write(images_row + 1, 2, name)
                    images_sheet.write(images_row + 1, 3, link_list[i])
                    images_row += 1
                    image_counter += 1

            # PART(2/2): If the link pertains to a website, parse its HTML source code,
            # search for the images, find them, then download and record them
            elif any(website_item in link_list[i]
                     for website_item in website_filetype):

                # This uses the 'requests' module to access the link
                html_code = requests.get(link_list[i])
                # This stores the plain text of the HTML file
                raw_text = html_code.text
                # This uses the 'BeautifulSoup' module to parse the HTML document
                webs = BeautifulSoup(raw_text, 'html.parser')
                # This counts the occurrences of a specific tag in the document, in this case the 'img' tag,
                # for which the number is stored as an integer variable
                img_count = len(webs.div.find_all('img'))

                # This creates a for loop to search through the HTML file
                for k in range(
                        0, img_count
                ):  # The values in the range expression represent the number of tags

                    try:

                        # This uses the 'BeautifulSoup' module to find all the 'img' tags and retrieve its 'src' path
                        directory = webs.findAll('img')[k].get('src')
                        # These string variables will be used in the following conditional statement
                        tag = 'https://www.viewsonic.com'
                        # If the link contains specific error-incurring text stored in the above string variables,
                        # or if the link does not have a complete url address, take certain actions
                        # (1/2) If the link is healthy, store it in the 'src' string variable
                        if tag in directory:
                            src = directory
                        # (2/2) If the link is missing the website domain (being abbreviated with only the file path),
                        # then add the text in the 'tag' variable to the 'src' string variable
                        else:
                            src = tag + directory

                        # This uses the 'urllib' module to open each link
                        d = urllib.request.urlopen(src)
                        # This obtains information and specifies the type of properties wanted, in this case, 'Content-Length'
                        original_str = d.info(
                        )['Content-Length']  # 'Content-Length' is the file size in bytes
                        # This converts the default unit from b (bytes) to kb (kilobytes)
                        original_size = int(original_str) / 1000

                        # This collects images with a file size above a threshold, in this case, 100 kb
                        # <--CHANGE THE VALUES TO ADJUST THE THRESHOLD--> (recommended minimum file size: 80 KB)
                        # Images larger than 80 KB are considered large and take longer to load,
                        # thus slowing down website traffic and resulting in a lower Search Engine Ranking
                        if int(min_threshold) < original_size:
                            if original_size < 1000:
                                # This converts the default unit from b (bytes) to kb (kilobytes)
                                original_size = int(original_str) / 1000
                                collectivesize_before += original_size
                                original_unit = ' KB '
                            # This reports sizes above 1000 KB in MB
                            # Though images are unlikely to exceed 1 GB, the next level can be added with the same logic
                            elif original_size > 1000:
                                original_size = int(original_str) / 1000000
                                collectivesize_before += (original_size * 1000)
                                original_unit = ' MB '

                            # This downloads the images
                            wget.download(src, 'IMAGES/ORIGINAL')
                            logging.info('Image Downloaded: ' + src)

                            # This uses the 'tinify' module to compress the images
                            saved = tinify.from_url(src)
                            name = os.path.basename(src)
                            location = 'IMAGES/COMPRESS/' + name
                            saved.to_file(str(location))
                            compressed_str = os.path.getsize(str(location))
                            compressed_size = int(compressed_str) / 1000
                            logging.info('Image Compressed: ' + location)

                            if compressed_size < 1000:
                                # This converts the default unit from b (bytes) to kb (kilobytes)
                                compressed_size = int(compressed_str) / 1000
                                collectivesize_after += compressed_size
                                compressed_unit = ' KB '

                            # This reports sizes above 1000 KB in MB
                            # Though images are unlikely to exceed 1 GB, the next level can be added with the same logic
                            elif compressed_size > 1000:
                                compressed_size = int(compressed_str) / 1000000
                                collectivesize_after += (compressed_size *
                                                         1000)
                                compressed_unit = ' MB '

                            # For recording purposes, this prints the sizes and writes them to the excel file
                            print(' ' + '\n' + 'Original Size: ' +
                                  str(original_size) + original_unit + src)
                            print('Compressed Size: ' + str(compressed_size) +
                                  compressed_unit + src)
                            logging.info('Original Size: ' +
                                         str(original_size) + original_unit +
                                         src)
                            logging.info('Compressed Size: ' +
                                         str(compressed_size) +
                                         compressed_unit + src)
                            images_sheet.write(
                                images_row + 1, 0,
                                str(original_size) + original_unit)
                            images_sheet.write(
                                images_row + 1, 1,
                                str(compressed_size) + compressed_unit)
                            images_sheet.write(images_row + 1, 2, name)
                            images_sheet.write(images_row + 1, 3, src)
                            images_row += 1
                            image_counter += 1

                    except Exception as error_code:
                        if '403' in str(error_code):
                            benchmark_sheet.write(proxy_row + 1, 1, src)
                            print('Proxy Denied Requests: ' + src)
                            logging.error('Proxy Denied Requests: ' + src)
                            proxy_row += 1
                        elif '404' in str(error_code):
                            benchmark_sheet.write(brokenurl_row + 1, 2, src)
                            print('Broken URL (404 Not Found): ' + src)
                            logging.error('Broken URL (404 Not Found): ' + src)
                            brokenurl_row += 1
                        elif '415' in str(error_code):
                            benchmark_sheet.write(media_row + 1, 3, src)
                            print('Unsupported Media Type: ' + src)
                            logging.error('Unsupported Media Type: ' + src)
                            media_row += 1
                        elif '429' in str(error_code):
                            print(error_code)
                            input(
                                "The program could not continue due to a fatal error. Press any key to exit now. . ."
                            )
                            logging.info('Closed Program')
                            logging.critical(error_code)
                            sys.exit()
                        elif '401' in str(error_code):
                            input(
                                "Your Tinify API key is invalid. Press any key to exit now. . ."
                            )
                            logging.info('Closed Program')
                            print(error_code)
                            logging.critical(error_code)
                            sys.exit()
                        else:
                            benchmark_sheet.write(otherlinks_row + 1, 4, src)
                            print('Other Skipped Links: ' + src)
                            logging.error('Other Skipped Links: ' + src)
                            otherlinks_row += 1
                        print(str(error_code))
                        pass

        except Exception as error_code:
            if '403' in str(error_code):
                benchmark_sheet.write(proxy_row + 1, 1, link_list[i])
                print('Proxy Denied Requests: ' + link_list[i])
                logging.error('Proxy Denied Requests: ' + link_list[i])
                proxy_row += 1
            elif '404' in str(error_code):
                benchmark_sheet.write(brokenurl_row + 1, 2, link_list[i])
                print('Broken URL (404 Not Found): ' + link_list[i])
                logging.error('Broken URL (404 Not Found): ' + link_list[i])
                brokenurl_row += 1
            elif '415' in str(error_code):
                benchmark_sheet.write(media_row + 1, 3, link_list[i])
                print('Unsupported Media Type: ' + link_list[i])
                logging.error('Unsupported Media Type: ' + link_list[i])
                media_row += 1
            elif '429' in str(error_code):
                input(
                    "The program could not continue due to a fatal error. Press any key to exit now. . ."
                )
                logging.info('Closed Program')
                print(error_code)
                logging.critical(error_code)
                sys.exit()
            elif '401' in str(error_code):
                input(
                    "Your Tinify API key is invalid. Press any key to exit now. . ."
                )
                logging.info('Closed Program')
                print(error_code)
                logging.critical(error_code)
                sys.exit()
            else:
                benchmark_sheet.write(otherlinks_row + 1, 4, link_list[i])
                print('Other Skipped Links: ' + link_list[i])
                logging.error('Other Skipped Links: ' + link_list[i])
                otherlinks_row += 1
            print(str(error_code))
            pass

    # This writes all the data to the excel sheet
    statistics_sheet.write(1, 0, str(link_counter))
    print(' ' + '\n' + 'Number of Links Retrieved: ' + str(link_counter))
    statistics_sheet.write(1, 1, str(image_counter))
    print('Total Images Downloaded: ' + str(image_counter))
    # The collective sizes accumulated above are in KB
    if int(collectivesize_before) < 1000:
        statistics_sheet.write(1, 2, str(collectivesize_before) + ' KB ')
        print('Original Collective File Size: ' + str(collectivesize_before) +
              ' KB')
    elif 1000 <= int(collectivesize_before) < 1000000:
        statistics_sheet.write(1, 2,
                               str(collectivesize_before / 1000) + ' MB ')
        print('Original Collective File Size: ' +
              str(collectivesize_before / 1000) + ' MB')
    elif int(collectivesize_before) >= 1000000:
        statistics_sheet.write(1, 2,
                               str(collectivesize_before / 1000000) + ' GB ')
        print('Original Collective File Size: ' +
              str(collectivesize_before / 1000000) + ' GB')
    if int(collectivesize_after) < 1000:
        statistics_sheet.write(1, 3, str(collectivesize_after) + ' KB ')
        print('Compressed Collective File Size: ' + str(collectivesize_after) +
              ' KB')
    elif 1000 <= int(collectivesize_after) < 1000000:
        statistics_sheet.write(1, 3, str(collectivesize_after / 1000) + ' MB ')
        print('Compressed Collective File Size: ' +
              str(collectivesize_after / 1000) + ' MB')
    elif int(collectivesize_after) >= 1000000:
        statistics_sheet.write(1, 3,
                               str(collectivesize_after / 1000000) + ' GB ')
        print('Compressed Collective File Size: ' +
              str(collectivesize_after / 1000000) + ' GB')
    freed_space = int(collectivesize_before) - int(collectivesize_after)
    if int(freed_space) < 1000:
        statistics_sheet.write(1, 4, str(freed_space) + ' KB ')
        print('Optimization Freed Space: ' + str(int(freed_space)) + ' KB')
    elif 1000 <= int(freed_space) < 1000000:
        statistics_sheet.write(1, 4, str(freed_space / 1000) + ' MB ')
        print('Optimization Freed Space: ' + str(freed_space / 1000) + ' MB')
    elif int(freed_space) >= 1000000:
        statistics_sheet.write(1, 4, str(freed_space / 1000000) + ' GB ')
        print('Optimization Freed Space: ' + str(freed_space / 1000000) +
              ' GB')

    # This ends the timer
    end = time.time()
    # This outputs the total run time of the program
    total_time = (end - start) / 60
    benchmark_sheet.write(1, 0, str(total_time) + ' min.')
    print('Program Execution Time: ' + str(total_time) + ' min.')
Example #16
 def test_should_return_source(self):
     httpretty.register_uri(httpretty.POST, 'https://api.tinify.com/shrink',
         location='https://api.tinify.com/some/location')
     tinify.key = 'valid'
     self.assertIsInstance(tinify.from_url('http://example.com/test.jpg'), tinify.Source)
Example #17
                         'weekRating': '(-)',
                         'class': get_class(item)
                     }
                     deck_info[temp[0]]['deck'].append(deck)
 elif re.match(r'^T.卡组', title):
         level = title[:2]
         if '[/color]' in string and string.startswith('=='):
             if deck_name:
                 deck_info = update_deck(deck, deck_info)
             deck_name = string_
             deck = get_deck(deck_name, deck_info)
         elif string_.endswith('.png') and string_.startswith('./'):
             imgUrl = r'http://img.ngacn.cc/attachments' + string_[
                 string_.index('/'):]
             deck['imgUrl'] = imgUrl
             source = tinify.from_url(imgUrl)
             source.to_file('ts/' + str(tid) + '/' + level + '-' +
                            pinyin.get_pinyin(deck_name, '') + '.png')
             deck['imgSrc'] = href + level + '-' + pinyin.get_pinyin(
                 deck_name, '') + '.png'
         elif string.startswith('套牌代码:') or string.startswith('代码:'):
             deck['code'] = string.split(':')[1]
         elif string.startswith('对阵快攻:'):
             deck['toFast'] = string.split(':')[1]
         elif string.startswith('对阵控制:'):
             deck['toControl'] = string.split(':')[1]
         elif string.startswith('劣势对抗:'):
             deck['cons'] = string.split(':')[1].split('、')
         elif '[' not in string and ']' not in string:
             deck['cardIntro'] = deck.get('cardIntro', '') + string
 print(json.dumps(deck_info, ensure_ascii=False, indent=4))
Example #18
def compress_web_image(src_url, dest_file):
    try:
        source = tinify.from_url(src_url)
        source.to_file(dest_file)
    except Exception as e:
        logging.exception(e)
Example #19
def get_thumbnail(url: str, width: int, height: int) -> BytesIO:
    log.debug("tinifying url %s", url)
    source = tinify.from_url(url)  # pylint: disable=no-member
    resized = source.resize(method="cover", width=width, height=height)
    return BytesIO(resized.to_buffer())
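
A possible call site for the helper above; the URL is a placeholder, and the returned BytesIO is written straight to disk:

thumb = get_thumbnail("https://example.com/photo.png", 200, 200)
with open("thumb.png", "wb") as f:
    f.write(thumb.getbuffer())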
Example #20
try:
    # loop through the image URLs from the JSON
    for item in data['lighthouseResult']['audits']['uses-optimized-images']['details']['items']:
        pic_url = item['url']

        # NGage is causing an issue, this is a workaround
        if "GeneralImage.aspx" in pic_url:
            print("NGage result exempted")
        else:
            pic_name = pic_url.split("/")[-1]
            completeName = os.path.join(save_path, pic_name)

            # tinypng pulls from image URLs, compresses, then saves
            print('Compressing ' + pic_name + '...')
            tinify.from_url(pic_url).to_file(completeName)
            print('Saved to ' + completeName)

    # create a text file listing the image URLs
    with open(save_path + '/' + 'image_list.txt', 'a') as image_list:
        # loop through the image URLs again to record each reference
        for item in data['lighthouseResult']['audits']['uses-optimized-images']['details']['items']:
            image_list.write(item['url'] + "\n")
    print('Image URLs saved to ' + save_path + '/' + 'image_list.txt')

except KeyError:
    print(url + ' is not a valid domain name')

except Exception as e:
    print('Something went wrong:', e)
Example #21
import tinify
import xlrd
import os
from urllib.parse import urlsplit
import requests
tinify.key = '5p5UxEJn7z5jYohyzUzpqeCJ3Ymbadxj'

pre = os.path.dirname(os.path.realpath(__file__))
fname = 'links.xlsx'
path = os.path.join(pre, fname)
book = xlrd.open_workbook(path)
print("Número de abas: ", book.nsheets)
print("Nomes das Planilhas:", book.sheet_names())
sh = book.sheet_by_index(0)
print(f'Nome: {sh.name}, Linhas: {sh.nrows}, Colunas: {sh.ncols}')
for rx in range(sh.nrows):

    # Render the row to text and pull the URL out from between the quotes
    linha = str(sh.row(rx))
    lixo1, url, lixo2 = linha.split("'")
    parts = urlsplit(url)
    paths = parts.path.split('/')
    page = requests.get(url)
    print(url)
    statusCode = page.status_code
    print('Status Code:', statusCode)
    if statusCode == 404:
        os.system("pause")
    source = tinify.from_url(url)
    source.to_file(paths[-1])
Example #22
import tinify
tinify.key = "你的KEY"

image_path = '本地文件路径'
output_path = '压缩后的图片路径'
image_url = '在线图片链接'

source = tinify.from_file(image_path)
source.to_file(output_path)
print("转换完成")

source = tinify.from_url(image_url)
source.to_file(output_path)
print(image_url + "转换完成")
Example #23
from bs4 import BeautifulSoup  # pip install bs4
import datetime
import tinify

tinify.key = "JJN9ZVJysJm6k6L158d6NSC2pLxq1hPB"
d_today = str(datetime.date.today())

try:
    with open("data.html") as f:
        html = f.read()
        soup = BeautifulSoup(html, "html.parser")  # parse the HTML
        images = soup.findAll('img')  # collect every image
        counter = 0
        d = {}

        for image in images:
            if counter != 0 and counter <= 12:
                imgFileName = image['src'].replace("-preview",
                                                   "")  # switch to the full-size image
                if 'limited-2' in imgFileName:
                    imgFileName = imgFileName.replace('limited-2', 'limited')
                dst_path = './static/img/' + d_today + '-' + str(
                    imgFileName).split('/')[-1]  # build the output path and file name
                source = tinify.from_url(imgFileName)  # upload the image to TinyPNG
                source.to_file(dst_path)  # write out the compressed image
            counter += 1

        print("画像の取得が完了しました。")

except Exception as e:
    print('Failed to fetch the images:', e)
Example #24
def get_image():
    tinify.key = os.environ["TINIFY_KEY"]
    source = tinify.from_url(os.environ["IMAGE_URL"])
    source.to_file("optimized.png")