Example #1
    def do_GET(self):
        try:
            action, fields = self.__validate(self.path)
            if action == '/ping':
                self._set_headers()
                self.wfile.write(b'OK')  # wfile expects bytes, not str
                return
            else:
                key = (fields['video_type'], fields['trakt_id'],
                       fields.get('season'), fields.get('episode'))
                if action == '/clear':
                    with self.lock:
                        if key in self.proxy_cache:
                            del self.proxy_cache[key]
                        self._set_headers()
                        self.wfile.write(b'OK')  # wfile expects bytes, not str
                        return
                else:
                    with self.lock:
                        if key in self.proxy_cache:
                            images = self.proxy_cache[key]
                        else:
                            video_ids = json.loads(fields['video_ids'])
                            if fields['video_type'] == image_scraper.OBJ_PERSON:
                                person_ids = json.loads(fields['person_ids'])
                                person = {
                                    'person': {
                                        'name': fields['name'],
                                        'ids': person_ids
                                    }
                                }
                                images = image_scraper.scrape_person_images(
                                    video_ids, person)
                            else:
                                images = image_scraper.scrape_images(
                                    fields['video_type'], video_ids,
                                    fields.get('season', ''),
                                    fields.get('episode', ''))
                            self.proxy_cache[key] = images

                    image_url = images[fields['image_type']]
                    if image_url is None:
                        self._set_headers()
                    elif image_url.startswith('http'):
                        self.__redirect(image_url)
                    else:
                        self._set_headers()
                        if self.command == 'GET':
                            # image data is binary, so read it in 'rb' mode
                            with open(image_url, 'rb') as f:
                                self.wfile.write(f.read())
        except ValidationError as e:
            self.__send_error(e)
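The handler above exposes a small HTTP API: /ping for health checks, /clear to evict a cache entry, and a catch-all route that scrapes an image and either redirects to it or serves it from disk. A minimal client sketch follows; the host, port, the '/image' action path, and the query-string encoding are all assumptions read off the fields that do_GET accesses, not confirmed details of this server:

import json
import urllib.parse
import urllib.request

BASE = 'http://localhost:8080'  # assumed host/port for the proxy

# health check: the /ping route answers with b'OK'
with urllib.request.urlopen(BASE + '/ping') as resp:
    print(resp.read())

# request a poster image; the field names mirror those read in do_GET above
params = urllib.parse.urlencode({
    'video_type': 'movie',                       # assumed value
    'trakt_id': '12345',                         # assumed value
    'image_type': 'poster',                      # assumed value
    'video_ids': json.dumps({'trakt': 12345}),   # assumed payload shape
})
with urllib.request.urlopen('%s/image?%s' % (BASE, params)) as resp:
    print(resp.status, resp.headers.get('Content-Type'))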
Example #2
import image_scraper


def imscrap(site):
    image_scraper.scrape_images(site)
    print("SOM RTUUTU")
Example #3
import image_scraper
image_scraper.scrape_images(
    "http://st.ourhtmldemo.com/new/Shelder1/assets/images/icons/")
Example #4
import ssl
import urllib.request

import certifi
import image_scraper
from bs4 import BeautifulSoup as BS

# urlopen's cafile= parameter was deprecated and removed in Python 3.12;
# build an SSL context from certifi's CA bundle instead
context = ssl.create_default_context(cafile=certifi.where())
with urllib.request.urlopen("https://www.deere.com/en/index.html", context=context) as url:
    html = url.read()

soup = BS(html, 'html.parser')

# collect the src attribute of every <img> tag on the page
imglinks = []
for imglink in soup.find_all('img'):
    print(imglink.attrs['src'])
    imglinks.append(imglink.attrs['src'])
print(imglinks)

# note: imglinks is only printed; the final call scrapes an unrelated page
image_scraper.scrape_images('https://github.com')
Example #5
import image_scraper

url = input("URL=")    # input() already returns str
lang = input("lang=")  # read but never used below

tagval = len(url.split("/"))  # computed but never used below
# '-s ./raw/' is the command-line flag for the save directory; in code,
# pass the directory as the download_path keyword (cf. Example #11)
image_scraper.scrape_images(url, download_path='./raw/')
Example #6
import image_scraper

# use a raw string so the backslashes in the Windows path
# aren't interpreted as escape sequences
path = r'E:\Python Workspace\Scrap specilized\Images'
image_scraper.scrape_images('https://imgur.com/', 40, ["jpg", "png", "gif", "jpeg"],
                            path, 10000, False, False)
Example #7
import glob

import image_scraper

imgs_url = []

for file in glob.glob('*.jpg'):
    imgs_url += [file.replace('.jpg', '')]

print(imgs_url)
print(len(imgs_url))
'''
https://static.wixstatic.com/media/d4a7a2_1fecc1f8206b4de0ad9baafc055eee83.jpg/v1/fill/w_1000,h_1000,al_c,q_90/d4a7a2_1fecc1f8206b4de0ad9baafc055eee83.webp
'''

prefix = 'https://static.wixstatic.com/media/{}.jpg/v1/fill/w_1000,h_1000,al_c,q_90/{}.webp'

for url in imgs_url:
    image_scraper.scrape_images(prefix.format(url, url))
    '''
    # class instantiation
    response = google_images_download.googleimagesdownload()

    # creating list of arguments
    arguments = {'url': prefix.format(url, url), 'format': 'webp',
                 'print_urls': True, 'usage_rights': 'labeled-for-nocommercial-reuse'}

    # passing the arguments to the function
    paths = response.download(arguments)

    # printing absolute paths of the downloaded images
    print(paths)
    '''
Example #8
import image_scraper

URL = './'
imgs = image_scraper.scrape_images(URL)

print(imgs)
Example #9
# pip install ImageScraper
# this example uses Python 3
import image_scraper

max_images = input("How many pepe images do you want to download - not currently working")
source = input("From which sources should images be imported")
path = input("Add the path of test_images folder - not currently working")
image_scraper.scrape_images(source)
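The two inputs marked "not currently working" are collected but never passed to the scraper. A sketch of wiring them in, assuming the second positional argument is a download count (as in Example #6) and that download_path names the target folder (as in Example #11):

import image_scraper

source = input("From which sources should images be imported")
max_images = int(input("How many images do you want to download"))
path = input("Where should the images be saved")

# the count is positional as in Example #6; download_path as in Example #11
image_scraper.scrape_images(source, max_images, download_path=path)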
Example #10
import image_scraper

rc_base_url = "https://redislabs.com/redis-cloud-documentation/"

image_scraper.scrape_images(
    "https://redislabs.com/redis-cloud-documentation/overview/")
# a with-block closes the file, and strip() removes the trailing
# newline that each line read from the file carries
with open("rc_urls.txt", "r") as rc:
    for line in rc:
        url = rc_base_url + line.strip()
        print(url)
        image_scraper.scrape_images(url)
Example #11
from image_scraper import scrape_images


def downloadImages(url):
    scrape_images(url, download_path='./images/')
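A one-line usage sketch (the gallery URL is a placeholder):

downloadImages('https://example.com/gallery')  # images are saved under ./images/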