Code Example #1
def test_download_google(self):
    # Download the Google app icon into the current directory.
    download_image(APP_STORE_US_GOOGLE, print_only=False, save_path='./')
    contain_google = list(
        filter(lambda x: "google" in x.lower(), os.listdir()))
    self.assertTrue(len(contain_google) > 0)
    # Saved file should match the "<Name>_<version>_<size>x0w.<ext>" pattern.
    self.assertIsNotNone(
        re.match(r"Google_[0-9.]+_[0-9]+x0w\.(jpg|png)", contain_google[0]))
    # cleanup
    os.remove(contain_google[0])
Code Example #2
def test_download_staples(self):
    download_image(APP_STORE_US_STAPLES, print_only=False, save_path='./')
    contain_staples = list(
        filter(lambda x: "staples" in x.lower(), os.listdir()))
    self.assertTrue(len(contain_staples) > 0)
    self.assertIsNotNone(
        re.match(r"Staples.*?_[0-9.]+_[0-9]+x0w\.(jpg|png)",
                 contain_staples[0]))
    # cleanup
    os.remove(contain_staples[0])
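Both tests assume os, re, download_image and the APP_STORE_US_* constants are available at module level. A minimal sketch of the surrounding harness, with a hypothetical module name for the code under test:

import os
import re
import unittest

# Hypothetical module name; the real project may organize this differently.
from appstore_downloader import (APP_STORE_US_GOOGLE, APP_STORE_US_STAPLES,
                                 download_image)

class DownloadImageTest(unittest.TestCase):
    # test_download_google and test_download_staples above live here.
    pass

if __name__ == '__main__':
    unittest.main()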
Code Example #3
File: videomaker.py  Project: beingaditya/Major
def clip(line):
    # Fetch an image for the keyword; fall back to a default image on an
    # empty result or any download error.
    try:
        image = downloader.download_image(line['keyword'])
        if image == "":
            image = "default.jpeg"
    except Exception:
        image = "default.jpeg"

    frame = frameGenerator.generateFrame(image, line['sentence'])
    # Show each frame long enough to read it: 20 characters per second.
    duration = len(line['sentence']) / 20
    return ((ImageClip(frame).set_duration(duration).set_pos("center")),
            duration)
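Each call returns an (ImageClip, duration) pair, so a caller can collect the clips and stitch them into a single video. A minimal sketch, assuming moviepy 1.x and a hypothetical lines list of {'keyword': ..., 'sentence': ...} dicts:

from moviepy.editor import concatenate_videoclips

# Hypothetical input: lines would be parsed from a script file elsewhere.
lines = [{'keyword': 'ocean', 'sentence': 'The ocean covers most of Earth.'}]
clips = [clip(line)[0] for line in lines]  # keep the ImageClip, drop duration
video = concatenate_videoclips(clips, method="compose")
video.write_videofile("output.mp4", fps=24)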
Code Example #4
File: server.py  Project: clevin95/final-server
	def do_POST(self):
		self._set_headers()
		# Read the raw request body using the Content-Length header.
		body_data = self.rfile.read(int(self.headers['Content-Length']))
		self.end_headers()

		data = simplejson.loads(body_data)
		file_name = data['file_name']
		print(file_name)
		downloader.download_image(file_name)

		# Crop the signs out of the downloaded image, then parse the red
		# and green groups separately.
		signs = crop.signs_from_image(file_name)
		print(signs)

		red_signs = signs["red"]
		green_signs = signs["green"]

		response = {}
		response["red"] = parser.parse_image(red_signs)
		response["green"] = parser.parse_image(green_signs)

		formatted_json = json.dumps(response)
		self.wfile.write(formatted_json.encode())
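On the client side, this handler expects a JSON body with a file_name key and replies with the parsed red/green results. A minimal sketch using the requests library, with a hypothetical host and port:

import requests

# Hypothetical address; depends on how the HTTP server is started.
resp = requests.post("http://localhost:8000",
                     json={"file_name": "signs.jpg"})
print(resp.json())  # e.g. {"red": ..., "green": ...}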
Code Example #5
                         each['link'])  # filter out other regions & /developer
        if match and match.group(1) in REGIONS:
            results.append(each)
    results = list(filter(lambda x: '?l=' not in x['link'], results))  # remove link with alternative language

    if len(results) == 0:
        exit(0)
    # Create async requests for fetching icon images (iTerm only).
    if FLAG_ITERM:
        (loop, async_img_tasks) = submit_store_urls_to_async(
            map(lambda x: x['link'], results), img_side_len=64)
    else:
        (loop, async_img_tasks) = (None, [])
    for i, each in enumerate(results):
        print(str(i+1) + ".\t" + each['title'].strip())
        print("\t" + urllib.parse.unquote(each['link']))
    if args.lucky:
        print("I'm feeling lucky!")
        chosen_num = 1
    else:
        if FLAG_ITERM:  # iTerm spec
            img_results = wait_async_tasks(loop, async_img_tasks)
            horizontal_show_image_by_store_urls(img_results)
        while True:
            chosen_num = input("select: ")
            try:
                chosen_num = int(chosen_num)
                if 1 <= chosen_num <= len(results):
                    break
            except ValueError:
                pass
    chosen_item = results[chosen_num - 1]

    download_image(chosen_item['link'], False)
Code Example #6
def to_download(card_id: int, is_artwork: bool = False):
    # Download only once; record completion and throttle between requests.
    downloaded = already_downloaded(card_id, is_artwork)
    if not downloaded:
        download_image(card_id, is_artwork)
        mark_as_downloaded(card_id, is_artwork)
        sleep(0.1)
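Because it checks already_downloaded before fetching and marks completion afterwards, the function is safe to re-run over the same batch. A minimal driver sketch, with a hypothetical list of card IDs:

# Hypothetical batch: real IDs would come from a card database or API.
card_ids = [10000, 10001, 10002]
for card_id in card_ids:
    to_download(card_id)                   # card image
    to_download(card_id, is_artwork=True)  # artwork variant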
Code Example #7
def download_image(referer, url, output):
    # Thin wrapper: delegate straight to the downloader module.
    downloader.download_image(referer, url, output)
Code Example #8
import config
import downloader
import groceries
import image_analyzer
import scraper
import time
import uploader

groceries_data = groceries.GROCERY_DATA
desired_items = groceries.GROCERY_ITEMS
grocery_search_results = {}

for i, item in enumerate(groceries_data, start=1):
    store_name = item.get('name')
    url = item.get('link')
    grocery_search_results[store_name] = {'pages': []}

    # Resolve the store's current flyer, then collect its page images.
    flyer_link = scraper.get_flyer_link(url, item.get('excludeProvince'))
    image_links = scraper.get_image_links(flyer_link)

    for idx, link in enumerate(image_links, start=1):
        image_name = store_name + "-" + str(idx) + ".jpg"
        downloader.download_image(link, "./images/" + image_name)
        source = uploader.upload_image(config.BUCKET_NAME, image_name,
                                       "./images/" + image_name)
        detection_results = image_analyzer.detect_text(source, desired_items)
        # Record the flyer page if any desired item was detected on it.
        if detection_results:
            pages = grocery_search_results[store_name].get('pages')
            pages.append(link)

    if i < len(groceries_data):
        print("=== SLEEPING FOR 10 SECONDS ===")
        time.sleep(10)