예제 #1
0
def puppyScraper():  # Downloads the images of puppies
    """Prompt the user for an image count and download that many puppy images.

    Re-prompts until the user enters a valid integer.  A download failure is
    no longer masked: previously a bare ``except:`` caught *any* error
    (including network failures) and claimed the input wasn't a number.
    """
    while True:
        amount = input("How many images?:  ")
        try:
            count = int(amount)
        except ValueError:
            # Only non-numeric input should re-prompt.
            print("That isn't a number! Try again...")
            continue
        downloader.download("puppies",
                            limit=count,
                            adult_filter_off=True,
                            force_replace=True)
        break
예제 #2
0
def main():
    """Parse command-line arguments and download matching images from Bing."""
    description = ("Downloads a bunch of images from bing "
                   "based on a search query.")
    parser = get_default_ArgumentParser(description)
    parser.add_argument("--query", type=str, default="dragon",
                        help="The query that is used to get the images.")
    parser.add_argument("destination", type=str,
                        help="The directory where the files should be placed.")
    parser.add_argument("--limit", type=int, default=1000,
                        help="The number of images to get.")

    flags = parser.parse_args()
    process_common_arguments(flags)

    # Hand the parsed options straight to the Bing downloader.
    downloader.download(flags.query,
                        output_dir=flags.destination,
                        limit=flags.limit)
예제 #3
0
async def img_sampler(event):
    """Download up to 5 Bing images for the matched query and send them to the chat.

    In groups the command is restricted to registered admins.  Downloaded
    files are deleted after sending.
    """
    if event.fwd_from:
        return
    if event.is_group:
        if not (await is_register_admin(event.input_chat, event.message.sender_id)):
            await event.reply("Sorry But You Are Not Fully Worthy To Use My Death Note Power Use This In My Pm If You Need")
            return
    query = event.pattern_match.group(1)
    jit = f'"{query}"'
    downloader.download(jit, limit=5, output_dir='store', adult_filter_off=True, force_replace=False, timeout=60)
    os.chdir(f'./store/"{query}"')
    types = ('*.png', '*.jpeg', '*.jpg')  # the tuple of file types
    filesgrabbed = []
    for files in types:
        filesgrabbed.extend(glob.glob(files))
    await event.client.send_file(event.chat_id, filesgrabbed, reply_to=event.id)
    # BUG FIX: os.remove() takes a single path, not a list -- delete each file.
    for grabbed in filesgrabbed:
        os.remove(grabbed)
    # BUG FIX: os.chdir('./') was a no-op; step back out of store/<query>.
    os.chdir('../..')
def downloadImges(query,
                  storePath='dataset/bing/',
                  imgType='image',
                  quantity=1,
                  size=(1600, 900)):
    """Download Bing images for ``query`` (suffixed with ``imgType``) and resize them.

    If resizing the suffixed search raises FileNotFoundError, retries with
    the bare query instead.
    """
    def _fetch_and_resize(term):
        # Download the images for `term`, then resize them into storePath.
        downloader.download(term,
                            limit=quantity,
                            adult_filter_off=True,
                            force_replace=False)
        resize(term, size, storePath)

    combined = f'{query} {imgType}'
    try:
        _fetch_and_resize(combined)
    except FileNotFoundError:
        # Fallback: the suffixed search produced no folder to resize.
        _fetch_and_resize(query)
예제 #5
0
 def scrape_bing(self):
     """Run a Bing image download using this object's config and return the result."""
     cfg = self.config
     # All search parameters come from the stored configuration object.
     return downloader.download(
         cfg.search,
         limit=cfg.limit,
         output_dir=cfg.output_dir,
         adult_filter_off=True,
         force_replace=False,
         timeout=60)
예제 #6
0
async def img_sampler(event):
    """Download up to 5 Bing images for the matched query and send them to the chat.

    In groups only registered admins may use the command.  Downloaded files
    are deleted after sending.
    """
    if event.fwd_from:
        return
    if event.is_group:
        if not (await is_register_admin(event.input_chat, event.message.sender_id)):
            await event.reply("😜 Hai.. You are not admin..🤭 You can't use this command.. But you can use in my pm🙈")
            return
    query = event.pattern_match.group(1)
    jit = f'"{query}"'
    downloader.download(jit, limit=5, output_dir='store', adult_filter_off=False, force_replace=False, timeout=60)
    os.chdir(f'./store/"{query}"')
    types = ('*.png', '*.jpeg', '*.jpg')  # the tuple of file types
    files_grabbed = []
    for files in types:
        files_grabbed.extend(glob.glob(files))
    await event.client.send_file(event.chat_id, files_grabbed, reply_to=event.id)
    # BUG FIX: os.remove() takes a single path, not a list -- delete each file.
    for grabbed in files_grabbed:
        os.remove(grabbed)
    # BUG FIX: os.chdir('./') was a no-op; step back out of store/<query>.
    os.chdir('../..')
예제 #7
0
    def download_training_data(self, num_of_images_per_category,
                               list_of_categories):
        """Download images for each category using bing_image_downloader.

        Uses the bing_image_downloader fork located at ``bing_fork_location``.

        Args:
            num_of_images_per_category: how many images to fetch per category.
            list_of_categories: iterable of search-term categories.
        """
        sys.path.append(bing_fork_location)
        from bing_image_downloader import downloader

        # BUG FIX: `limit` must be numeric; the old code passed str(...).
        limit = int(num_of_images_per_category)

        for category in list_of_categories:
            downloader.download(category,
                                limit=limit,
                                adult_filter_off=True,
                                force_replace=False)
            # Summary of downloads.
            # BUG FIX: `pet` was undefined here (NameError); report the
            # category actually downloaded.
            print('{} Images of {}'.format(limit, category))
def main(list_path, output_dir, num_images):
    """Download ``num_images`` Bing images for every search term in ``list_path``.

    Each line may have two tab-separated columns (refkb_ID<TAB>person_name);
    only the last column is used.  Terms whose output directory already
    exists are skipped.  A failure for one term is logged to stderr and does
    not stop the remaining terms.
    """
    with open(list_path) as list_file:
        # Iterate the file lazily instead of materializing readlines().
        for line in list_file:
            # names list might have two columns: refkb_ID<TAB>person_name
            search_term = line.strip().split('\t', 1)[-1].strip()
            if os.path.exists(os.path.join(output_dir, search_term)):
                print(f'Skipping {search_term}, path exists')
                continue
            print('Search term:', search_term)
            try:
                bing.download(query=search_term,
                              limit=num_images,
                              # NOTE(review): the library expects a bool; the
                              # truthy string 'off' disables the filter.
                              # Kept as-is to preserve behavior -- confirm
                              # against the installed downloader version.
                              adult_filter_off='off',
                              output_dir=output_dir,
                              timeout=30,
                              page_counter_limit=5)
            except Exception:
                sys.stderr.write(
                    "ERROR: Exception occurred while processing name: {0}\n".
                    format(search_term))
                traceback.print_exc()
예제 #9
0
async def img_sampler(event):
    """Fetch 5 Bing image results for the matched query and post them to the chat."""
    if event.fwd_from:
        return
    query = event.pattern_match.group(1)
    quoted_query = f'"{query}"'
    downloader.download(
        quoted_query,
        limit=5,
        output_dir="store",
        adult_filter_off=False,
        force_replace=False,
        timeout=60,
    )
    os.chdir(f'./store/"{query}"')
    # Gather every downloaded file with a supported image extension.
    patterns = ("*.png", "*.jpeg", "*.jpg")
    matched_files = [path
                     for pattern in patterns
                     for path in glob.glob(pattern)]
    await tbot.send_file(event.chat_id, matched_files, reply_to=event.id)
    # Return to the app root and drop the whole download folder.
    os.chdir("/app")
    os.system("rm -rf store")
예제 #10
0
async def img(
    ctx,
    nr: int,
    *,
    name: str,
):
    """Download ``nr`` Bing images for ``name`` and post each one to the channel.

    Requests for more than 10 images are refused with a joke reply.
    """
    if nr > 10:
        # Guard clause: refuse oversized requests outright.
        await ctx.send(
            "Da mi ssd u tau daca esti asa smecher 1000 de poze nu vrei?")
        return

    downloader.download(name,
                        nr,
                        output_dir='dataset',
                        adult_filter_off=True,
                        force_replace=False,
                        timeout=60)
    for entry in os.listdir(f'dataset/{name}/'):
        path = f'dataset/{name}/{os.fsdecode(entry)}'
        # Send the image, then delete the local copy.
        await ctx.send(file=discord.File(path, filename=path))
        os.remove(path)
예제 #11
0
# BUG FIX: MIMEMultipart was used below without being imported (NameError).
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from bing_image_downloader import downloader

# function that calls quotes api to get a random quote
from Day2_API_excel import get_random_quote

# BUG FIX: fetch the quote ONCE.  The old code called get_random_quote()
# twice, so the text of one quote could be paired with the author of another.
random_quote = get_random_quote()
data = {
    'quote': random_quote['text'],
    'author': random_quote['author']
}

# download an image, search by the contents of a quote
downloader.download(query=data['quote'],
                    limit=1,
                    output_dir='attachment//',
                    adult_filter_off=True,
                    force_replace=False,
                    timeout=60)

# save downloaded image,  by default bing downloader creates a folder with query as a name
img_file_name = r"current_project's_path\attachment\\" + data[
    'quote'] + "\Image_1.jpg"

# prepare message: use a context manager so the file handle is closed.
with open(img_file_name, 'rb') as img_file:
    img_data = img_file.read()
msg = MIMEMultipart()

msg['Subject'] = 'A quote from ' + data['author']
msg['From'] = '*****@*****.**'
msg['To'] = '*****@*****.**'
예제 #12
0
from bing_image_downloader import downloader

# Ask the user for a search term, then grab up to 100 matching Bing images.
search_term = input('what do you want images of?\n')
downloader.download(search_term,
                    limit=100,
                    adult_filter_off=True,
                    force_replace=False)
def _require_env(name, message):
    """Return os.environ[name]; print `message` and exit if it is unset."""
    if name in os.environ:
        return os.environ[name]
    print(message)
    sys.exit()


def main():
    """Download images for a search term from Bing, Flickr and Yandex.

    CLI arguments:
        search_term    the query string
        limit          number of images per source
        --only_yandex  'False' (default) runs all sources; any other value
                       skips the Bing and Flickr downloads.

    Requires FLICKR_API_KEY, FLICKR_API_SECRET and WEBLY_DOWNLOAD_DIR in the
    environment.  All images end up under WEBLY_DOWNLOAD_DIR/<search_term>.
    """
    parser = argparse.ArgumentParser(description='download some images')
    parser.add_argument('search_term', help='the query string')
    parser.add_argument('limit',
                        type=int,
                        help='the number of images to download')
    parser.add_argument('--only_yandex', type=str, default='False')
    args = parser.parse_args()
    search_phrase = args.search_term
    # Any value other than the literal string 'False' enables yandex-only mode.
    only_yandex = args.only_yandex != 'False'
    limit = args.limit

    api_key = _require_env('FLICKR_API_KEY',
                           "Set the FLICKR_API_KEY environment variable.")
    api_secret = _require_env('FLICKR_API_SECRET',
                              "Set the FLICKR_API_SECRET_KEY")
    webly_download_dir = Path(
        _require_env('WEBLY_DOWNLOAD_DIR', "Set the WEBLY_DOWNLOAD_DIR"))

    # Create download folder.
    search_dir = Path(webly_download_dir / search_phrase.replace(' ', '_'))
    if not search_dir.exists():
        search_dir.mkdir()

    if not only_yandex:
        print('Downloading bing images!')
        downloader.download(search_phrase,
                            limit=limit,
                            adult_filter_off=True,
                            force_replace=False,
                            timeout=60)

        # Move bing files out of the library's hard-coded ./dataset folder.
        bing_download_dir = Path(f'./dataset/{search_phrase}')
        for b in bing_download_dir.iterdir():
            shutil.move(b, Path(search_dir / b.name))

        if bing_download_dir.exists():
            bing_download_dir.rmdir()

        if Path('./dataset').exists():
            Path('./dataset').rmdir()

        print('Downloading flickr images!')
        download_flickr_images(api_key,
                               api_secret,
                               webly_download_dir,
                               search_phrase,
                               max_dl=limit)

    print('Launching yandex script.')
    subprocess.call([
        'yandex-images-download', 'Chrome', '--keywords', f'"{search_phrase}"',
        '--limit', f'{limit}', '-o', f'{str(webly_download_dir.resolve())}'
    ])

    # The yandex tool writes into a quoted folder name; merge it into search_dir.
    yandex_dir = Path(webly_download_dir / f'"{search_phrase}"')
    for p in yandex_dir.iterdir():
        shutil.move(p, Path(search_dir / p.name))

    if yandex_dir.exists():
        yandex_dir.rmdir()
예제 #14
0
File: test.py  Project: perseu912/insta_bot
from bing_image_downloader import downloader

# Fetch up to 100 "mascara covid" images into ./dataset (adult filter on).
downloader.download('mascara covid',
                    limit=100,
                    output_dir='dataset',
                    adult_filter_off=False,
                    force_replace=False,
                    timeout=60)
예제 #15
0
def fetchIMG(name):
    """Download one Bing image for ``name`` and return its path under dataset/.

    Returns:
        str: path of the first downloaded image, e.g. ``dataset/<name>/<file>``.
    """
    downloader.download(name, limit=1)
    # BUG FIX: the old return used commas, producing the 4-tuple
    # ('dataset/', name, '/', filename) instead of the intended path string.
    return 'dataset/' + str(name) + '/' + os.listdir('dataset/' + str(name))[0]
예제 #16
0
from bing_image_downloader import downloader

# Pull 20 sample pictures of apples into the ./images directory.
SEARCH_TERM = "apple fruit"
downloader.download(SEARCH_TERM, limit=20, output_dir='images')
예제 #17
0
from bing_image_downloader import downloader
import os
from PIL import Image

query_string = 'frog on mountain'
thisDir = os.getcwd()

# Download a single image; output_dir="" makes the library create the
# query-named folder directly in the current working directory.
downloader.download(query_string,
                    limit=1,
                    adult_filter_off=False,
                    force_replace=False,
                    timeout=60,
                    output_dir="")

# Build the path portably instead of concatenating with a literal '/'.
fileName = os.path.join(thisDir, query_string, "Image_1.jpg")
im = Image.open(fileName)
im.show()
예제 #18
0
from bing_image_downloader import downloader

# Pull a handful of sample images for the training split only; the test-set
# download below is intentionally left disabled.
TRAIN_DIR = 'dataset/train'
downloader.download('apple fruit', limit=3, output_dir=TRAIN_DIR)
# downloader.download('fresh guava', limit=350, output_dir='dataset/test')
예제 #19
0
    'Jackie Chan': 'Male',
    'Stephen Chow': 'Male',
    'Fan BingBing': 'Female',
    'Yang Mi': 'Female'
}

# Download 35 images for every configured query, skipping any query whose
# data directory already exists from a previous run.
# NOTE(review): `search_queries` and `DATA_DIR` are defined above this
# excerpt -- presumably a name->gender mapping and a base directory.
print("[INFO] Downloading images ... ")
names = list(search_queries.keys())
for query in names:
    if (os.path.exists(DATA_DIR + "/" + query)):
        print("[INFO] Data dir '" + query +
              "' has already exist, Skipping ... ")
        continue

    # force_replace=True: overwrite any partial leftovers from earlier runs.
    downloader.download(query,
                        limit=35,
                        adult_filter_off=False,
                        force_replace=True)

### After the download we need to check for validity ###
### Criteria : only 1 face per image, true gender ###

print("[INFO] Checking data validity ... ")
# NOTE(review): the loop body continues past this excerpt; ALL_SATISFIED is
# presumably set to True further below once every image passes the checks.
ALL_SATISFIED = False
while (not ALL_SATISFIED):
    invalid_count = 0
    # Walk every per-query subdirectory of DATA_DIR (skip the root itself).
    for (dir, dirs, files) in os.walk(DATA_DIR):
        if (dir != DATA_DIR):
            for file in files:
                abs_path = dir + "/" + file
                print("[INFO] Check image file " + abs_path)
예제 #20
0
# 01_load_images.py
# ref https://pypi.org/project/bing-image-downloader/

from utility import create_folders
from bing_image_downloader import downloader

DIR_IMG = 'data/images'
search_string = 'superman'
search_limit = 15


if __name__ == "__main__":
    # Make sure the destination folder exists before downloading.
    create_folders([DIR_IMG])
    # Fetch the images from Bing into DIR_IMG.
    downloader.download(search_string,
                        limit=search_limit,
                        output_dir=DIR_IMG,
                        adult_filter_off=True,
                        force_replace=False,
                        timeout=60)
예제 #21
0
from bing_image_downloader import downloader as bing

# Interactively collect a search query and an image count, then download.
print("Query String:")
query = input()

print("How many images:")
number = int(input())

bing.download(query,
              limit=number,
              output_dir='data',
              adult_filter_off=False,
              force_replace=False,
              timeout=60)
print("Complete")
예제 #22
0
from google_images_download import google_images_download

#instantiate the class
response = google_images_download.googleimagesdownload()
arguments = {
    "keywords": "aeroplane, school bus, dog in front of house",
    "limit": 10,
    "print_urls": False
}
paths = response.download(arguments)

#print complete paths to the downloaded images
print(paths)

#####################################
#Bing
from bing_image_downloader import downloader

# 200 images per animal; one loop replaces the two duplicated calls.
for animal in ("monkey", "tiger"):
    downloader.download(animal,
                        limit=200,
                        output_dir='dataset',
                        adult_filter_off=True,
                        force_replace=False,
                        timeout=60)
예제 #23
0
from bing_image_downloader import downloader

# Download up to 500 Bing images for every query listed one-per-line in the
# query file.
with open('download-dataset-query-strings.txt') as f:
    for query_string in f:
        query_string = query_string.strip()
        if not query_string:
            # Guard: a blank line (e.g. trailing newline) would otherwise
            # trigger a download with an empty query.
            continue
        print('Downloading images for:', query_string)
        print()
        downloader.download(query_string,
                            limit=500,
                            output_dir='indian-food-dataset-divyanshu',
                            adult_filter_off=True,
                            force_replace=False,
                            timeout=60)
        print()
        print()
예제 #24
0
from bing_image_downloader import downloader

# Read one dish name per line.  Use a context manager so the file handle is
# closed (the old code opened the file and never closed it).
with open('Dishes.txt', 'r') as dishes_file:
    queries = dishes_file.read().splitlines()

# Download up to 1000 images per dish.
for query in queries:
    downloader.download(query.lower() + ' images', limit=1000,
                        adult_filter_off=True, force_replace=False)
예제 #25
0
from bing_image_downloader import downloader

# 200 shopping-trolley images into ./dataset, adult filter disabled.
downloader.download('shopping trolley',
                    limit=200,
                    output_dir='dataset',
                    adult_filter_off=True,
                    force_replace=False,
                    timeout=60)
예제 #26
0
#path to place you want training data to be dumped if not same dir as this py file
#path_to_training_data = '.'

gendirs = ['Gen1', 'Gen2', 'Gen3', 'Gen4', 'Gen5', 'Gen6', 'Gen7', 'Gen8']

gens = [gen1, gen2, gen3, gen4, gen5, gen6, gen7, gen8]

# Pair each generation list with its directory directly; the old
# `gendirs[gens.index(gen)]` did an O(n) lookup on every iteration.
for gendir, gen in zip(gendirs, gens):
    directory = path_to_training_data + '/' + gendir + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    for pokemon in gen:
        downloader.download(pokemon + ' pokemon',
                            limit=lim,
                            adult_filter_off=True,
                            force_replace=False)
        #moves data to new location as we dont like dataset/bing/...

        # bing-image-downloader 1.0.2 fork
        # NOTE(review): os.system with an interpolated shell string is fragile
        # for names containing shell metacharacters; kept for behavior parity.
        os.system('mv dataset/bing/{} {}'.format(
            pokemon + '\ pokemon',
            path_to_training_data + '/' + gendir + '/' + pokemon))

        #removes dataset/bing/ folder
        os.system('rm -rf dataset/')

#######################################################

#summary of downloads
예제 #27
0
# Skip queries whose image folder already exists from a previous run.
names_done = os.listdir('./images')

with open('strong_man.txt', encoding='utf-8') as file:
    strong_mans = file.read().split(',')

# Ensure every query mentions "strongman" exactly once.
strong_mans = [(strong + " strongman") if "strongman" not in strong else strong
               for strong in strong_mans]

for index, query_string in enumerate(strong_mans):
    if query_string in names_done:
        continue

    try:
        now = time.time()
        # Rough ETA: average seconds per finished item times items remaining.
        # NOTE(review): `start` is set before this excerpt -- presumably
        # time.time() at script start.
        timeToFinish = ((now - start) / (index + 1)) * (len(strong_mans) -
                                                        (index + 1))

        print(
            f'   {query_string}, {index + 1}/{len(strong_mans)}   {(index + 1)/len(strong_mans) * 100}%   timeToF: {timeToFinish}s   '
            .center(100, '#'))

        downloader.download(query_string,
                            limit=7,
                            output_dir='./images',
                            adult_filter_off=True,
                            force_replace=False)
    except Exception:
        # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        logging.error(f'{query_string} not downloaded')

print('done'.center(100, '#'))
input()
예제 #28
0
  plt.imshow(andhra_cm_image_np)
plt.show()

"""# sometimes google wont allow the images to be downloaded, then we have an alternative which is microsoft bing. so lets see how we can extract the images from bing.

# download images from bing image search
"""

# NOTE(review): the `!...` cell magics below only run inside IPython/Jupyter,
# not as plain Python.
!pip install bing-image-downloader

"""# make a directory in current folder"""

!mkdir images

# Download sample images into ./images (the library makes one subfolder per query).
from bing_image_downloader import downloader
downloader.download("elephant in africa", limit = 5, output_dir ="images", adult_filter_off = True, force_replace = False)

downloader.download("tigers in india", limit = 10, output_dir="images", force_replace = False)

# List the downloaded folders/files to verify the results.
!ls images/ - alrt

!ls "images"/"tigers in india"

!ls "images"/"elephant in africa"

# Display the first downloaded image of each query inline.
from IPython.display import Image
Image('images/elephant in africa/Image_1.jpg')

Image('images/tigers in india/Image_1.jpg')

# convert into numpy so that i can feed it into any model
예제 #29
0
from bing_image_downloader import downloader
import os
import shutil

query = ["rural", "gray backgrounds"]  #put words to search
output_dir = "bing_images"

if not os.path.exists(output_dir):  #if dir not present, make it
    os.mkdir(output_dir)

# Iterate the queries directly instead of indexing with range(len(...)).
for search_term in query:
    downloader.download(search_term,
                        limit=100,
                        output_dir=output_dir,
                        adult_filter_off=True,
                        force_replace=False,
                        timeout=60)

### merge image folders with search query name into one image dir:"images" ###

target_dir = "images"
if not os.path.exists(target_dir):  #if dir not present, make it
    os.mkdir(target_dir)

# Flatten every per-query folder into target_dir, counting moved files.
count = 0
for dirname in os.listdir(output_dir):
    file_names = os.listdir(os.path.join(output_dir, dirname))
    for file_name in file_names:
        count += 1
        shutil.move(os.path.join(os.path.join(output_dir, dirname), file_name),
                    target_dir)
예제 #30
0
# Library imports
import os
# git clone https://github.com/gurugaurav/bing_image_downloader
# sudo python3 setup.py install
# API docs: pypi.org/project/bing-image-downloader
from bing_image_downloader import downloader

# Number of images to download
NB_IMG = 15

# Create the 'bing_search' folder if it does not already exist
if not os.path.exists("bing_search"):
    os.makedirs("bing_search")

# Ask the user for search keywords (prompt is in French by design)
mots_clefs = input("Quels mots clefs souhaitez-vous pour votre recherche ? ")

# Fetch the images matching the search query
downloader.download(mots_clefs,
                    limit=NB_IMG,
                    output_dir="bing_search",
                    force_replace=True)