Example #1
def main():
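    # Fetch metadata for the latest SpaceX launch and save each Flickr photo locally.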
    images_url = 'https://api.spacexdata.com/v3/launches/latest'
    response = requests.get(images_url)
    response.raise_for_status()
    image_links = response.json()['links']['flickr_images']
    for link_number, link in enumerate(image_links):
        download_image(link, f'spacex{link_number + 1}.jpg')
Example #2
    def post_reply(self, status):
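        # Reply to the given status with colorized copies of its attached photos.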
        statusID = status.id_str
        print(f"Processing status {statusID}.")
        media = get_status_media(status)
        if media is None or media[0]["type"] != "photo":
            print("No images.")
            return

        urlArray = get_media_url(media)

        print("Downloading images...")

        for i, url in enumerate(urlArray):
            utils.download_image(f"image{i}.jpg", url)

        print("Completed!")
        print("Coloring images...")

        for i, url in enumerate(urlArray):
            colorize_image(f"image{i}.jpg")

        print("Completed!")
        print("Uploading images...")

        newMedias = []
        for i, url in enumerate(urlArray):
            newMedia = self.client.media_upload(f"image{i}.jpg")
            newMedias.append(newMedia.media_id)

        res = self.client.update_status("",
                                        in_reply_to_status_id=statusID,
                                        media_ids=newMedias,
                                        auto_populate_reply_metadata=True)
        print("Completed!")
Example #3
def fetch_hubble_image(image_id, source_path):
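    # The Hubble API lists several files per image; the last entry appears to be the largest.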
    response = requests.get(f'http://hubblesite.org/api/v3/image/{image_id}')
    response.raise_for_status()
    review_result = response.json()
    file_link = review_result['image_files'][-1]['file_url']
    logger.info(f'download http:{file_link}')
    utils.download_image(image_id, f'http:{file_link}', source_path)
Example #4
def selfie2anime():
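    # Run the pretrained UGATIT generator on a downloaded selfie and upload the anime-styled result.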
    img_id = os.environ['id']
    result_id = os.environ['result']
    parser = get_parser()
    args = parser.parse_args("--phase test".split())

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        #sess.reuse_variables()
        gan = UGATIT(sess, args)

        # build graph
        gan.build_model()

        # download target img
        download_path = os.path.join(img_path, img_id)

        download_image(images_bucket, img_id, dest=download_path)
        dataset_tool.create_from_images(record_path, img_path, True)
        # os.remove(del_record)
        
        img = gan.infer(download_path)

        image_url = upload_image(img, result_id)

    return download_path, img
Example #5
def fetch_hubble_images(image_url, collection_name, folder):
    for image_id in get_images_ids(collection_name):
        user_url = get_best_image_url(image_url, image_id)
        filename = os.path.join(os.getcwd(), folder,
                                f'{image_id}{os.path.splitext(user_url)[-1]}')
        download_image(user_url, filename)
        print(f'File {image_id} downloaded')
Example #6
def colorize_image(filename):
    api_url = "https://api.deepai.org/api/colorizer"
    files = {"image": open(filename, 'rb')}
    headers = {"api-key": keys["deepai_key"]}

    res = requests.post(api_url, files=files, headers=headers)
    res_data = res.json()
    utils.download_image(filename, res_data["output_url"])
Example #7
def fetch_hubble_image(image_id):
    response = requests.get(f"{HUBBLE_IMAGE_API}{image_id}")
    response.raise_for_status()
    image_data = response.json()
    url = f"https:{image_data['image_files'][-1]['file_url']}"
    (_, ext) = os.path.splitext(url)
    name = f"{image_id}{ext}"
    utils.download_image(url, name)
Example #8
def fetch_spacex_launch(images_url, folder):
    file_path = os.path.join(os.getcwd(), folder, 'spacex')
    file_extension = '.jpg'
    spacex_response = requests.get(images_url)
    spacex_response.raise_for_status()
    images_links = spacex_response.json()['links']['flickr_images']
    for image_number, image_link in enumerate(images_links, start=1):
        download_image(image_link,
                       f'{file_path}{image_number}{file_extension}')
Example #9
def get_profile_photos(browser, username):
    save_dir = 'profiles/' + username + '/'
    create_dir(save_dir) # need to fix these names
    # Name files by running index; assuming .webp for every thumbnail is fragile.
    for i, image in enumerate(browser.find_elements_by_xpath(
            '//div[@id="profile_thumbs"]//img[@src]')):
        image_url = image.get_attribute('src')
        save_path = save_dir + str(i) + '.webp'
        download_image(image_url, save_path)
Example #10
def download_book_cover(url, image_name):
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'html.parser')
    img_tag = soup.find('img', attrs={'id': 'coverImage'})
    if img_tag:
        image_url = img_tag.attrs['src']
        download_image(image_url=image_url, image_name=image_name)
        return True
    return False
Example #11
def download_book_covers(books):
    for book in books:
        my_logger.debug("Downloading book cover for: {}".format(book['name']))
        image_url = book['image_url']
        extension = image_url.split('.')[-1]
        image_name = "{}.{}".format(book['goodreads_id'], extension)
        download_image(image_url=image_url, image_name=image_name)
        book['image_name'] = image_name
        time.sleep(1)
    return books
Example #12
def fetch_spacex_last_launch(images_path=IMAGES_FOLDER):
    """Download pics from last SpaceX launch."""
    default_filename = "spacex"

    for num, link in enumerate(get_latest_launch_images_links()):
        _, ext = os.path.splitext(link)
        file_name = f"{default_filename}{str(num)}{ext}"

        print("Download by link: {0} ".format(link))
        download_image(url=link, img_path=images_path, img_name=file_name)
        print("File {0} saved".format(file_name))
Example #13
def echo_photo(update, context):
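    # Telegram handler: saves the uploaded photo and asks the user for the order-button link.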
    global message
    global current_command
    if current_command == 'upload':
        current_command = 'o'
        chat = update.message['chat']['id']
        photo_id = update.message['photo'][-1]['file_id']
        random_string = str(time.time())
        download_image(chat, photo_id, random_string)
        #attachments.append('photo.jpg' + random_string)
        temp.append('photo.jpg' + random_string)
        update.message.reply_text("Enter link for order button...")
Example #14
def fetch_spacex_launch(source_path, launch_id):
    logger.info('download SpaceX images')
    response = requests.get(
        f'https://api.spacexdata.com/v4/launches/{launch_id}')
    response.raise_for_status()
    review_result = response.json()
    picture_links = review_result['links']['flickr']['original']

    for picture_number, picture_link in enumerate(picture_links):
        file_name = f'spacex{picture_number}'
        logger.info(f'download {picture_link}')
        utils.download_image(file_name, picture_link, source_path)
Example #15
def download_versioned_cdragon_item_icon(item_id):
    item_id = item_id.lower().split("/")[-1].replace(".dds", ".png")
    if settings.patch['cdragon'] == "pbe":
        # Season 11 items live in a different location
        return utils.download_image(
            constants.cdragon_url +
            f"/{settings.patch['cdragon']}/game/assets/items/icons2d/{item_id}")
    else:
        return utils.download_image(
            constants.cdragon_url +
            f"/{settings.patch['cdragon']}/game/data/items/icons2d/{item_id}")
Example #16
def download_image_file(meta):
    download_image(meta)

    path, dirs, files = next(os.walk(base_path + 'images/logs'))
    file_count = len(files)

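    # 14653 appears to be the hard-coded total number of expected images.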
    printProgressBar(file_count,
                     14653,
                     prefix='Progress:',
                     suffix='Complete',
                     decimals=2,
                     length=50)
Example #17
def fetch_spacex_last_launch():
    response = requests.get(SPACEX_LATEST_API)
    response.raise_for_status()
    launches = response.json()
    spacex_image = launches["links"]["flickr"]["original"]
    if not spacex_image:
        # The latest launch has no Flickr photos; fall back to the full launch list.
        response = requests.get(SPACEX_API)
        launches = response.json()
        for launch in launches:
            if launch["links"]["flickr"]["original"]:
                spacex_image = launch["links"]["flickr"]["original"]
                break
    for num_image, url_image in enumerate(spacex_image):
        utils.download_image(url_image, f"spacex{num_image}.jpg")
Example #18
def scrape_picture(picture_url, save_location, ids, verbose, gallery_name):
    """
    Scrape a single picture.
    
    Parameters
    ----------
    
    - picture_url (str): The URL of the picture to save, e.g.
      `http://www.imagefap.com/photo/361343809/?pgid=&gid=7225770&page=0&idx=2`.
      
    - save_location (str): The location to save to, e.g. `downloads`.
    
    - gallery_name (str): Gallery name used for the save folder.
        
    - ids (boolean): Whether or not to use IDs for gallery and picture names.
    
    - verbose (boolean): Whether or not to print information.
    """

    # Get the download url
    picture_dl_url = picture_dl_url_from_picture_url(picture_url)
    
    # Get information for saving
    username, gallery_id, picture_id, picture_name_full, filetype = (
        information_from_picture_dl_url(picture_dl_url))
    
    # Choose a directory name to save to, 
    # either using gallery ids or gallery name
    if ids:
        save_directory = os.path.join(save_location, gallery_id)
    else:
        save_directory = os.path.join(save_location, gallery_name)
        
    # Create the directory if it does not exist
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)
    
    # Build the final file path, either from the picture id or its full name
    if ids:
        save_path = os.path.join(save_directory, picture_id + '.' + filetype)
    else:
        save_path = os.path.join(save_directory, picture_name_full)

    # Download the image
    download_image(picture_dl_url, save_path)

    if verbose:
        print('Downloaded:', save_path)
Example #19
    def make_background(self):
        photos = self.get_panoramas()
        image_list = []
        for p in photos:
            url = p.get('photo_file_url')
            name = url.split('/').pop()
            print('downloading %s ...' % name)
            download_image(url, self.id, name)
            image_list.append(name)
            print('total: %i' % len(image_list))

        grid = 10
        sizex = 60
        sizey = 60
        xy = (sizex * grid, sizey * grid)

        blank_image = Image.new("RGB", xy)

        ycar = 0
        xcar = 0
        y = 0
        x = 0

        statics_path = settings.MEDIA_ROOT
        downloads_path = os.path.join(settings.DOWNLOAD_IMAGE_PATH, str(self.id))

        for i in range(0, (grid*grid)):

            if i % grid == 0:
                y = ycar * sizey
                ycar = ycar + 1
                xcar = 0
            else:
                xcar = xcar + 1

            x = sizex * xcar

            img = Image.open(os.path.join(downloads_path, image_list[i]))
            blank_image.paste(img, (x,y))

        negro = Image.open(os.path.join(statics_path, 'negro.jpg'))
        blank_image.save(os.path.join(statics_path, 'backgrounds', '%i.jpg' % self.id), quality=100)
        image = Image.blend(blank_image, negro, 0.5)
        image.save(os.path.join(statics_path, 'backgrounds', '%i_black.jpg' % self.id), quality=100)
        image = Image.open(os.path.join(statics_path, 'backgrounds', '%i_black.jpg' % self.id))
        self.image = image.filename.split('static/').pop()

        self.save()
Example #20
def winner_tweet(api, winner_handle, auction_id, image_url, product_url,
                 product_name):

    image_path = utils.download_image(auction_id, image_url)
    tweet = ("The auction for " + product_name[:50] + " is won by: "
             + winner_handle + "\nGet your product here: " + product_url)
    api.update_with_media(image_path, status=tweet)
Example #21
def get_support_card(uri: str, soup: BeautifulSoup):
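    # Scrape a support card's name, illustration, and stats, then persist it via the DB session.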
    wiki_id = int(uri.split('/')[-1])

    h3 = soup.find('h3')
    card_name = h3.text.replace('の性能', '')
    illust_uri = h3.find_next_sibling('img')['data-original']
    image_filename = download_image(illust_uri)

    card_table: BeautifulSoup = soup.find_all('table')[1]
    card_field = card_table.find_all('td')
    rare_degree = card_field[0].text
    card_type = card_field[1].text
    second_name = card_field[2].text

    support_card = SupportCard(card_name=card_name,
                               card_name_kr=card_name,
                               card_type=card_type,
                               card_type_kr=card_type,
                               card_image=image_filename,
                               gamewith_wiki_id=wiki_id,
                               rare_degree=rare_degree,
                               second_name=second_name,
                               second_name_kr=second_name)
    db_session.add(support_card)
    return support_card
Example #22
def check_new_songs(artist, collection):
    """Checks if there is any new song

    It compares the old discography of the artist with the new (already fetched) discography.
    It tweets if there is a new release or featuring of the artist.

    Args:
      - artist: dictionary that contains all the data about the single artist
      - collection: dictionary that contains all the updated discography of the artist

    Returns:
      an artist dictionary with updated discography details
    """
    print("[{}] ({}) Checking new songs...".format(module, artist["name"]))
    old = artist["discography"]

    for album in collection:
        found = False
        for old_album in old:
            if album["name"] == old_album["name"]:
                found = True
                break
        if not found:
            if album["type"] != 'appears_on':
                twitter_post_image(
                    "#{} released a new {} on #Spotify: {}\n{}\n{}".format(artist["name"].upper(), album["type"], album["name"], link_album(album["id"]), hashtags),
                    download_image(album["image"]),
                    None
                    )
            else:
                twitter_post("#{} appeared on {} by {} with the song {}\n{}\n{} #spotify".format(artist["name"].upper(), album["name"], album["artist_collab"], album["tracks"][0]["name"], link_album(album["id"]), hashtags ))
    
    artist["discography"] = collection
    return artist
Example #23
async def main() -> None:
    utils.log.info("kiitensupport bot started")
    api = await create_api()
    while True:
        if not "DRYRUN" in os.environ:
            await asyncio.sleep(utils.next_hour(19).seconds)
        else:
            await asyncio.sleep(1)
        try:
            phrase = await get_random_message()
            if not phrase:
                continue

            kitten_url = await get_random_kitten_image_url()
            if not kitten_url:
                continue

            async with utils.download_image(kitten_url) as path:
                new_image_path = await mememaker.create_meme_tempfile(
                    path, phrase)
                if not new_image_path:
                    continue

            if not "DRYRUN" in os.environ:
                status = await api.update_with_media(new_image_path)
                utils.log.info("Posted %s", await
                               utils.get_tweet_url(api, status))
            else:
                utils.log.debug("DRYRUN, skipping")
        except TweepError as e:
            utils.log.error(f"TweepError %s", e)
        except Exception as e:
            utils.log.error("Uncaught exception %s", e)
Example #24
def main():
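    # Download one reference photo per fish species into a folder named after it.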
    args = parse_args()
    names_to_urls = {
        'australian_bass':
        'https://yaffa-cdn.s3.amazonaws.com/yaffadsp/images/dmImage/StandardImage/bass11.jpg',
        'barramundi':
        'https://www.abc.net.au/news/image/9320982-3x2-700x467.jpg',
        'barred_cheek_coral_trout':
        'https://app.fisheries.qld.gov.au/images/barred-cheek-coral-trout-2.jpg',
        'giant_trevally':
        'https://upload.wikimedia.org/wikipedia/commons/thumb/e/ed/Caranx_ignobilis.jpg/1200px-Caranx_ignobilis.jpg',
        'diamond_scale_mullet':
        'https://us.123rf.com/450wm/aozz85/aozz851708/aozz85170800005/83943872-diamond-scale-mullet-liza-vaigiensis-also-known-as-diamond-scaled-mullet-squaretail-mullet-blackfin-.jpg?ver=6',
        'mahi_mahi':
        'https://cdn-tp2.mozu.com/15440-22239/cms/files/7bb4f59f-02ae-4a62-a788-f985f0951012?maxWidth=960&quality=75&_mzcb=_1536869987056',
        'wahoo':
        'https://www.fishingcairns.com.au/wp-content/uploads/2017/05/Wahoo-e1510617727696.jpg',
    }
    for name, url in names_to_urls.items():
        path = os.path.join(args.path, name)
        if not os.path.isdir(path):
            os.makedirs(path)

        try:
            digest, file_ext, path = download_image(url, path)
            print("Downloaded", url, "to", path)
        except Exception as e:
            print("[INFO] error downloading %s" % url)
            print(e)
Example #25
def check_birthdays(group):
    """Checks if today is the birthday of a member of the group

    It tweets if it is the birthday of someone

    Args:
      group: a dictionary with all the details of the group

    Returns:
      an dictionary containing all the updated data of the group
    """

    now = datetime.datetime.today()
    print("[{}] Today is {}".format(module, now.date()))

    print("[{}] Checking...".format(module))
    for member in group["members"]:
        birthday = datetime.datetime.strptime(member["birthday"], '%d/%m/%Y')
        difference = round((now - birthday).days / 365.25)
        birthday = birthday.replace(year=now.year)
        if birthday.date() == now.date():
            print("[{}] ({}) Birthday: {} years!".format(
                module, member["name"], difference))
            if member["years"] != difference:
                member["years"] = difference
                twitter_post_image(
                    "Today is #{}'s birthday! She did {} years\n#{}bday #blackpink @BLACKPINK"
                    .format(member["name"].upper(), difference,
                            member["name"].lower()),
                    download_image(member["instagram"]["image"]),
                    str(difference))
    print()
    return group
Example #26
def fetch_spacex(args) -> None:
    mission = args.spacex_mission
    try:
        mission_name, images_urls = get_mission_name_and_images_urls(mission)
    except Exception as e:
        logger.error(
            'An error occurred, SpaceX images data cannot be retrieved: %s',
            str(e))
        return

    if not images_urls:
        logger.warning(
            'Images data on the SpaceX site is empty. Try another mission.')
        return

    logger.info('Start downloading SpaceX.')
    logger.info('%i SpaceX images found.', len(images_urls))
    for i, image_url in enumerate(images_urls):
        image_path = utils.get_image_path(mission_name, image_url, i)
        logger.info('Downloading image %s.', image_url)
        try:
            is_downloaded = utils.download_image(image_url, image_path)
        except Exception as e:
            logger.error('Image %s cannot be retrieved: %s', image_path,
                         str(e))
            continue
        if is_downloaded:
            logger.info('%s is fetched.', image_url)
        else:
            logger.warning('Skip %s.', image_url)
    logger.info('SpaceX downloading is finished.')
Example #27
def handler(event, context):

    # Initialize Logger
    log = init_logger()
    log = add_handler(log)

    input_data = json.loads(event['body'])
    log.info(f"Input data: {input_data}")

    # Retrieve inputs
    input_url, n_predictions = (input_data['input_url'],
                                input_data['n_predictions'])

    # Download image
    input_image = download_image(input_url)

    # Process input image
    log.info(f"INFO -- Processing Image")
    batch = preprocess_image(input_image)

    # Generate prediction
    log.info(f"INFO -- Generating Prediction")
    pred = prediction(input_batch=batch, mdl=mobilenet_v2)

    # Top n results
    log.info(f"INFO -- Generating Top n predictions")
    n_results = number_output(mdl_output=pred,
                              mdl_labels=labels,
                              top_n=n_predictions)

    # prediction = model.predict(url)
    response = {"statusCode": 200, "body": json.dumps(n_results)}

    return response
Example #28
async def main():
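    # Each cycle: pick an unused meme from Reddit (deduplicated via Redis), download it, and tweet it.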

    redis = utils.redis()
    reddit = await create_reddit()
    twitter = await utils.create_twitter(
        key=os.environ["MEMESBR_TWITTER_ACCESS_KEY"],
        secret=os.environ["MEMESBR_TWITTER_ACCESS_SECRET"],
        access_token=os.environ["MEMESBR_TWITTER_ACCESS_TOKEN"],
        access_token_secret=os.environ["MEMESBR_TWITTER_ACCESS_TOKEN_SECRET"],
    )

    utils.log.info("memesbr bot started")

    while True:
        utils.log.info("sleeping")
        await asyncio.sleep(utils.next_hour(19).total_seconds())
        memeurl = await getmemeurl(redis, reddit)
        if memeurl is None:
            return

        async with utils.download_image(memeurl) as memepath:
            utils.log.debug("memepath %s", memepath)
            if "DRYRUN" not in os.environ:
                status = await twitter.update_with_media(memepath)
            else:
                utils.log.info("dryrun, skipping")
Example #29
def youtube_check_videos_change(name, old_videos, new_videos, hashtags):
    """Checks if there is any new video

    It compares the old videos list of the artist with the new (already fetched) videos list.
    It tweets if there is a new release or if a video reaches a new views goal.

    Args:
      - name: name of the channel
      - old_videos: list that contains all the old videos
      - new_videos: list that contains all the updated videos
      - hashtags: hashtags to append to the Tweet

    Returns:
      new_videos
    """

    if old_videos is not None:
        for new_video in new_videos:
            found = False
            for old_video in old_videos:
                if new_video["url"] == old_video["url"]:
                    found = True
            if not found:
                twitter_post_image(
                    "{} uploaded a new #video on #YouTube: {}\n{}\n{}".format(
                        name, new_video["name"], url_video + new_video["url"],
                        hashtags),
                    download_image(new_video["image"]),
                    "NEW",
                    text_size=100,
                    crop=True)
    return new_videos
Example #30
    def task(self):
        self.session = scoped_session(SessionFactory)
        images = self.session.query(LVV2Image).filter(
            LVV2Image.status == "new").all()
        download_count = 0
        notfound_count = 0
        exception_count = 0
        for _image in images:
            try:
                status_code = download_image(
                    _image.url,
                    os.path.join(Config.Get('default.base_output'), "lvv2",
                                 _image.date.strftime("%Y-%m-%d"),
                                 self.get_thread_by_id(_image.thread_id).title))
                if status_code == 200:
                    _image.status = 'download'
                    self.logger.info('Downloaded image: {}'.format(_image.url))
                    download_count += 1
                if status_code == 404:
                    _image.status = 'notfound'
                    notfound_count += 1
            except Exception as e:
                _image.status = 'exception'
                _image.exception_string = str(e)
                exception_count += 1
            self.session.commit()
            time.sleep(1)
        self.logger.info(
            'Image downloads: {} succeeded, {} not found, {} failed'.format(
                download_count, notfound_count, exception_count))
        self.session.close()
Example #31
def create_animal_picture_dict(tag: Tag) -> Dict[str, str]:
    """
    extract from tag animal name and animal picture and update dictionary
    """
    animal_name = tag.get(HtmlTags.TITLE)
    animal_image_path = download_image(get_specific_animal_url(tag),
                                       animal_name)
    return {ANIMAL_NAME: animal_name, PICTURE_URL: animal_image_path}
Example #32
def youtube_check_channel_change(old_channel, new_channel, hashtags):
    """Checks if there is any change in the number of subscribers or total views of the channel

    It compares the old channel data with the new (already fetched) data.

    Args:
      - old_channel: dictionary that contains all the old data of the channel
      - new_channel: dictionary that contains all the updated data of the channel
      - hashtags: hashtags to add to the Tweet

    Returns:
      a dictionary with updated data of the channel
    """

    # Tweet if subs reach a new 100 thousands
    if convert_num("100K", new_channel["subs"]) != convert_num(
            "100K", old_channel["subs"]):
        twitter_post_image("{} reached {} subscribers on #YouTube\n{}".format(
            new_channel["name"], display_num(new_channel["subs"],
                                             decimal=True), hashtags),
                           download_image(new_channel["image"]),
                           display_num(new_channel["subs"],
                                       short=True,
                                       decimal=True),
                           text_size=150)
    old_channel["subs"] = new_channel["subs"]

    # Tweet if total views increase and reach a new mark (based on the views_scale)
    if new_channel["views"] > old_channel["total_views"]:
        if convert_num(old_channel["views_scale"],
                       new_channel["views"]) != convert_num(
                           old_channel["views_scale"],
                           old_channel["total_views"]):
            twitter_post_image(
                "{} reached {} total views on #YouTube\n{}".format(
                    new_channel["name"], display_num(new_channel["views"]),
                    hashtags), download_image(new_channel["image"]),
                display_num(new_channel["views"], short=True))
        old_channel["total_views"] = new_channel["views"]

    old_channel["playlist"] = new_channel["playlist"]
    old_channel["name"] = new_channel["name"]
    old_channel["image"] = new_channel["image"]

    return old_channel
Example #33
def get_image_online(**kwargs):
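    # Pick a random, previously unused post from Sankaku Complex that passes the tag filters.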
    if kwargs.get('used images'):
        txt_name = kwargs.get('used images')
        used_links = open(txt_name, 'r').read().splitlines()
    else:
        txt_name = os.path.join(os.getcwd(), "Used sankaku {0}.txt".format(
            kwargs['bot name']))
        try:
            used_links = open(txt_name, 'r').read().splitlines()
        except OSError:
            if not os.path.exists(txt_name):
                print("Didn't find any used links! Creating a TXT!")
                print("Set it to:\n{0}".format(txt_name))
                used_links = []
            else:
                used_links = open(txt_name, 'r').read().splitlines()

    if kwargs.get('highest page'):
        high_page = int(kwargs.get('highest page'))
    else:
        high_page = 50

    tried_pages = [high_page]
    cookie_file = None
    try_count = 0
    low_page = 0
    page = 0
    x = None
    no_images = False
    url_start = "https://chan.sankakucomplex.com"
    url_search = "https://chan.sankakucomplex.com/?tags="
    if utils.is_bool(kwargs.get('login')):
        cookie_file = "../sankakucomplex.txt"
        url_login = "******"
        form_num = 0
        form_user = "******"
        form_password = "******"
        username = kwargs.get('username')
        password = kwargs.get('password')
        if not os.path.exists(cookie_file):
            browser, s = utils.scrape_site(url_login, cookie_file, True)
            form = browser.get_form(form_num)
            form[form_user].value = username
            form[form_password].value = password
            browser.submit_form(form)
            s.cookies.save()

    if utils.is_bool(kwargs.get('save images')):
        if kwargs.get('path'):
            path = kwargs.get('path')
        else:
            path = os.path.abspath(os.path.join(os.getcwd(),
                                                "images"))
            if not os.path.exists(path):
                os.makedirs(path)
    else:
        path = os.path.abspath(os.path.join(os.getcwd()))

    if kwargs.get('tags'):
        if isinstance(kwargs.get('tags'), list):
            tags = '+'.join(kwargs.get('tags'))
        else:
            tags = '+'.join(kwargs.get('tags').split(', '))
    else:
        tags = ""
    if kwargs.get('ignore tags'):
        if isinstance(kwargs.get('ignore tags'), list):
            ignore_tags = kwargs.get('ignore tags')
        else:
            ignore_tags = kwargs.get('ignore tags').split(', ')
    else:
        ignore_tags = []
    if utils.is_bool(kwargs.get('ignore cosplay')):
        ignore_cosplay = utils.is_bool(kwargs.get('ignore cosplay'))
    else:
        ignore_cosplay = False
    if utils.is_bool(kwargs.get('accept webm')):
        accept_webm = utils.is_bool(kwargs.get('accept webm'))
    else:
        accept_webm = False

    tried_pages = [high_page + 1]
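    # Nested retry loops: pick a random page, then a random unused post; give up after repeated misses.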
    while True:
        while True:
            while True:
                while True:
                    no_images = False
                    try_count += 1
                    if try_count == 15:
                        return False, False
                    page = str(random.randint(low_page, high_page))
                    while int(page) in tried_pages:
                        if int(page) == 0:
                            break
                        if not x:
                            x = high_page
                        page = str(random.randint(low_page, high_page))
                        if int(page) > int(x):
                            continue
                    tried_pages.append(int(page))
                    x = min(tried_pages)
                    page_url = "&page=" + str(page)
                    url = "%s%s%s" % (url_search, tags, page_url)
                    browser = utils.scrape_site(url, cookie_file)
                    if browser.find('div', text="No matching posts"):
                        no_images = True
                    time.sleep(1)
                    if not no_images:
                        break
                    elif no_images and int(page) == 0:
                        return False, False
                good_image_links = []
                image_links = browser.find_all('a')
                for link in image_links:
                    try:
                        link['href']
                    except KeyError:
                        continue
                    if "/post/show/" not in link['href']:
                        continue
                    good_image_links.append(link['href'])
                if good_image_links == []:
                    return False, False
                random.shuffle(good_image_links)
                url = "%s%s" % (url_start, random.choice(good_image_links))
                try_count = 0
                while url in used_links:
                    url = "%s/%s" % (
                        url_start, random.choice(good_image_links))
                    try_count = try_count + 1
                    if try_count == 20:
                        break
                used_links.append(url)
                # Make a copy for better use in message
                post_url = url
                browser.open(url)
                if not accept_webm:
                    if browser.find('video', attrs={'id': 'image'}):
                        continue

                image_tags = []
                char_tags = []
                art_tags = []
                sers_tags = []
                tags_tags = []
                site_tag = browser.find('ul', id="tag-sidebar")
                site_tag = site_tag.find_all('li')
                for taga in site_tag:
                    tag = tag_clean(taga)
                    if taga['class'][0] == "tag-type-artist":
                        art_tags.append(tag.title())
                    elif taga['class'][0] == "tag-type-copyright":
                        sers_tags.append(tag.title())
                    elif taga['class'][0] == "tag-type-character":
                        char_tags.append(tag.title())
                    else:
                        tags_tags.append(tag.title())
                    image_tags.append(tag.lower())

                if any([item in [x.lower() for x in ignore_tags]
                        for item in [x.lower() for x in image_tags]]):
                    continue
                if ignore_cosplay:
                    if any(" (cosplay)" in s for s in image_tags):
                        continue
                break

            image_url = browser.find('img', attrs={'id': 'image'})
            if not image_url:
                image_url = browser.find('video', attrs={'id': 'image'})
            try:
                url = urllib.parse.urljoin("https:", image_url['src'])
            except (TypeError, KeyError):
                # Flash File
                continue

            filename = ""
            if not utils.is_bool(kwargs.get('message')):
                message = ""
            sn_kwgs = {}
            sn_url, sn_kwgs = utils.saucenao(url, kwargs['saucenao api'], True)
            re_dict = {
                '{#artist}': ('#' if art_tags else '') + ' #'.join(
                    [x.replace(" ", "_") for x in art_tags]),
                '{#character}': ('#' if char_tags else '') + ' #'.join(
                    [x.replace(" ", "_") for x in char_tags]),
                '{#series}': ('#' if sers_tags else '') + ' #'.join(
                    [x.replace(" ", "_") for x in sers_tags]),
                '{#tags}': ('#' if tags_tags else '') + ' #'.join(
                    [x.replace(" ", "_") for x in tags_tags]),
                '{artist}': ', '.join(art_tags),
                '{character}': ', '.join(char_tags),
                '{series}': ', '.join(sers_tags),
                '{tags}': ', '.join(tags_tags),
                '{url}': post_url,
                '{title}': sn_kwgs.get('title'),
                '{sn title}': sn_kwgs.get('title'),
                '{sn illust id}': sn_kwgs.get('illust id'),
                '{sn illust url}': sn_url,
                '{sn artist}': sn_kwgs.get('artist'),
                '{sn artist id}': sn_kwgs.get('artist id'),
                '{sn artist url}': sn_kwgs.get('artist url')}

            if kwargs.get('filename'):
                filename = utils.replace_all(kwargs.get('filename'), re_dict)
                filename = utils.safe_msg(filename)

            if kwargs.get('message'):
                message = utils.replace_all(kwargs.get('message'), re_dict)
                message = utils.safe_msg(message)

            with open(txt_name, 'w+') as f:
                f.write("\n".join(used_links))

            tweet_image = utils.download_image(url, path, filename, **kwargs)
            if tweet_image:
                break

        if not utils.is_bool(kwargs.get('save images')):
            from threading import Thread
            Thread(name="Delete Image", target=delete_image, args=(
                tweet_image, )).start()
        return message, tweet_image
Example #34
import sys
import os
sys.path.append('plugins/')

account_list = OrderedDict()
api_objects = OrderedDict()
__version__ = '0.1.0'
CHECK_UPDATE = True
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
if os.environ.get('DEBUG') == "travis":
    DEBUG = "travis"
    DEBUG_ACCS = os.environ.get('ACCOUNTS', '').split("||")
    # Download test image for folder testing
    from utils import download_image
    url = "https://www.google.co.uk/images/nav_logo231_hr.png"
    download_image(url)
else:
    # Local testing. Edit this for your own testing.
    DEBUG = False
    DEBUG_ACCS = ["test"]


def latest_ver():
    import urllib.request
    url = "http://ace3df.github.io/AcePictureBot/it_ver.txt"
    try:
        site_ver = urllib.request.urlopen(url).read().strip().decode("utf-8")
        if site_ver != __version__:
            print("!WARNING! A new version is out ({0})!".format(site_ver))
            print("Download it here: http://bombch.us/BWVH")
            print("----------------------------------------\n")
    except Exception:
        # Ignore network errors; the version check is best-effort.
        pass
Example #35
def get_reddit(**kwargs):
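    # Pick an image link from a subreddit RSS feed, skipping links that were already used.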
    if kwargs.get('used images'):
        txt_name = kwargs.get('used images')
        used_links = open(txt_name, 'r').read().splitlines()
    else:
        txt_name = os.path.join(os.getcwd(), "Used reddit {0}.txt".format(
                                             kwargs['bot name']))
        try:
            used_links = open(txt_name, 'r').read().splitlines()
        except:
            if not os.path.exists(txt_name):
                print("Didn't find any used links! Creating a TXT!")
                print("Set it to:\n{0}".format(txt_name))
                used_links = []
            else:
                used_links = open(txt_name, 'r').read().splitlines()
    try:
        sub = used_links[0]
        used_links = used_links[1:]
    except IndexError:
        # No previously used links recorded yet
        pass
    if kwargs.get('save images'):
        if kwargs.get('path'):
            path = kwargs.get('path')
        else:
            path = os.path.abspath(os.path.join(os.getcwd(),
                                                "images"))
            if not os.path.exists(path):
                os.makedirs(path)
    else:
        path = os.path.abspath(os.path.join(os.getcwd()))

    start_url = "https://www.reddit.com/r/"
    subreddits = kwargs.get('subreddits')
    is_random = kwargs.get('random subreddit')
    is_random_link = kwargs.get('random link')
    if subreddits is None:
        return False, False
    if isinstance(subreddits, str):
        subreddits = subreddits.split(", ")
    if utils.is_bool(is_random):
        import random
        sub = random.choice(subreddits)
    else:
        # Get the last used sub and advance to the next one
        try:
            sub = open(os.path.join(os.getcwd(), "Used reddit {0}.txt".format(
                                                         kwargs['bot name'])),
                       'r').read().splitlines()[0]
            sub = subreddits[(subreddits.index(sub) + 1)]
        except (IOError, IndexError, ValueError):
            # File doesn't exist / end of list
            sub = subreddits[0]
    url = start_url + sub + "/.rss"
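    # Collect external (non-reddit) image links and their titles from the RSS items.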
    soup = utils.scrape_site(url, is_rss=True)
    pic_imgs = []
    for a in soup.find_all('item'):
        img_string = a.find('description').string
        img_title = a.find('title').string
        img_link = a.find('link').string
        img_string = img_string[:img_string.index("[link]")]
        img_string = BeautifulSoup(img_string, 'html5lib').find_all('a')
        for item in img_string:
            if "reddit.com" not in item['href'] and "http" in item['href']:
                pic_imgs.append([item['href'], img_title, img_link])

    if utils.is_bool(is_random_link):
        import random
        image = random.choice(pic_imgs)
    else:
        image = pic_imgs[0]
    safe_break = 0
    count = 0
    while image[0] in used_links:
        if utils.is_bool(is_random_link):
            image = random.choice(pic_imgs)
        else:
            image = pic_imgs[count]
            if image[0] in used_links:
                count += 1
                continue
            break
        safe_break += 1
        if safe_break == 50:
            break
    used_links.append(image[0])
    imgTypes = {"jpg": "image/jpeg",
                "jpeg": "image/jpeg",
                "png": "image/png",
                "gif": "image/gif",
                "webm": "video/webm"}
    filepath = urlparse(image[0]).path
    ext = os.path.splitext(filepath)[1].lower()
    if not ext[ext.rfind(".") + 1:] in imgTypes:
        if "imgur" in image[0]:
            # Just make it .png it still returns correct image
            image[0] = "http://i.imgur.com/" + image[0].rsplit(
                       '/', 1)[1] + ".png"
            ext = ".png"

    sn_url, sn_kwgs = None, {}  # defaults so sn_url exists even if SauceNAO isn't queried
    if "(x-post" in image[1].lower() or "(via" in image[1].lower():
        image[1] = re.sub(r'\([^)]*\)', '', image[1])
    if "sn" in kwargs.get('message'):
        sn_url, sn_kwgs = utils.saucenao(fname=image[0],
                                         api_key=kwargs.get('saucenao api'),
                                         metainfo=True)
    re_dict = {'{url}': image[2],
               '{title}': image[1],
               '{sn title}': sn_kwgs.get('title'),
               '{sn illust id}': sn_kwgs.get('illust id'),
               '{sn illust url}': sn_url,
               '{sn artist}': sn_kwgs.get('artist'),
               '{sn artist id}': sn_kwgs.get('artist id'),
               '{sn artist url}': sn_kwgs.get('artist url')}

    if kwargs.get('filename'):
        filename = utils.replace_all(kwargs.get('filename'), re_dict)
        filename = utils.safe_msg(filename)
    else:
        filename = ""
    if kwargs.get('message'):
        message = utils.replace_all(kwargs.get('message'), re_dict)
        message = utils.safe_msg(message)
    else:
        message = ""
    image = utils.download_image(image[0], path, filename, **kwargs)
    used_links = [sub] + used_links
    with open(txt_name, 'w') as f:
        f.write("\n".join(used_links))
    return message, image