Example #1
def check_website():
    """Check to make sure websites are online.
    If they're not, change the settings.ini to say False.
    Use a URL that forces the website to run a query rather than serve a cached page."""
    import time
    config = configparser.RawConfigParser(allow_no_value=True)
    config.read(settings['settings'])
    websites = (dict(config.items('Websites')))
    while True:
        for website in websites:
            browser = False
            if website == "sankakucomplex":
                ping_url = r"https://chan.sankakucomplex.com/?tags=1girl+rating%3Asafe+solo&commit=Search"
            elif website == "safebooru":
                ping_url = r"http://safebooru.org/index.php?page=post&s=list&tags=c.c.+1girl"
            elif website == "danbooru":
                continue
            elif website == "yande":
                continue
            elif website == "konachan":
                continue
            browser = utils.scrape_site(ping_url)
            if not browser or browser is False:
                # Website is offline/timed out.
                print("$ Website {0} has been set to offline!".format(website))
                config_save('Websites', website, 'False', file=0)
            else:
                # Site is online. Turn it back on.
                if websites[website] == "False":
                    print("$ Website {0} has been set to online!".format(website))
                    config_save('Websites', website, 'True', file=0)
        time.sleep(5 * 60)
Example #2
def check_website():
    """Check to make sure websites are online.
    If they're not, change the settings.ini to say False.
    Use a URL that forces the website to run a query rather than serve a cached page."""
    import time
    while True:
        config = configparser.RawConfigParser(allow_no_value=True)
        config.read(settings['settings'])
        websites = (dict(config.items('Websites')))
        for website in websites:
            browser = False
            if website == "sankakucomplex":
                ping_url = (r"https://chan.sankakucomplex.com/"
                            "?tags=1girl+rating%3Asafe+solo&commit=Search")
            elif website == "safebooru":
                ping_url = (r"http://safebooru.org/index.php?"
                            "page=post&s=list&tags=c.c.+1girl")
            elif website == "danbooru":
                continue
            elif website == "yande":
                continue
            elif website == "konachan":
                continue
            browser = utils.scrape_site(ping_url)
            if not browser or browser is False:
                # Website is offline/timed out.
                print("$ Website {0} has been set to offline!".format(website))
                config_save('Websites', website, 'False', file=0)
            else:
                # Site is online. Turn it back on.
                if websites[website] == "False":
                    print("$ Website {0} has been set to online!".format(
                        website))
                config_save('Websites', website, 'True', file=0)
        time.sleep(5 * 60)
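Both versions lean on two helpers that are not shown in this listing: utils.scrape_site, which fetches a page and returns parsed HTML (or a falsy value when the site is down), and config_save, which writes a value back under a section of settings.ini. Below is a minimal sketch of what such helpers could look like, assuming requests and BeautifulSoup; the names, the settings.ini path, and the simplified signatures (the real config_save also takes a file= argument) are inferred from how the examples call them, not taken from the actual project.

import configparser

import requests
from bs4 import BeautifulSoup


def scrape_site(url, timeout=30):
    # Fetch a page and return parsed HTML, or False when the request fails or times out.
    try:
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()
    except requests.RequestException:
        return False
    return BeautifulSoup(resp.text, "html.parser")


def config_save(section, option, value, path="settings.ini"):
    # Persist a single option back to the INI file read by check_website().
    config = configparser.RawConfigParser(allow_no_value=True)
    config.read(path)
    if not config.has_section(section):
        config.add_section(section)
    config.set(section, option, value)
    with open(path, "w") as f:
        config.write(f)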
Example #3
    def get_soup(self, site):
        if site == 0:
            cookie_file = "sankakucomplex.txt"
            url_search = "https://chan.sankakucomplex.com/?tags="
        elif site == 2:
            cookie_file = "safebooru.txt"
            a = "http://safebooru.org/"
            url_search = a + "index.php?page=post&s=list&tags="
        tags = self.name + self.end_tags
        if site == 0 or site >= 2:
            tags += "+rating:safe"
        url = url_search + tags
        return scrape_site(url, cookie_file)
Example #4
    def get_soup(self, site):
        if site == 0:
            cookie_file = settings['secret_key'] + "-sankakucomplex.txt"
            url_search = "https://chan.sankakucomplex.com/?tags="
        elif site == 2:
            cookie_file = settings['secret_key'] + "-safebooru.txt"
            a = "http://safebooru.org/"
            url_search = a + "index.php?page=post&s=list&tags="
        tags = self.name + self.end_tags
        if site == 0 or site > 2:
            tags += "+rating:safe"
        url = url_search + tags
        return scrape_site(url, cookie_file)
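The integer site codes recur across these examples (0 is Sankaku Complex, 2 is Safebooru, and 1 appears to be Danbooru in Example #5). Purely as an illustration, the same dispatch could be written table-driven; the URLs and cookie-file names come from the snippet above, while the function name and structure here are hypothetical.

# Hypothetical table-driven rewrite of the dispatch in get_soup (illustrative only).
SITES = {
    0: ("sankakucomplex.txt", "https://chan.sankakucomplex.com/?tags="),
    2: ("safebooru.txt", "http://safebooru.org/index.php?page=post&s=list&tags="),
}


def build_search(site, name, end_tags):
    cookie_file, url_search = SITES[site]
    tags = name + end_tags
    if site == 0 or site >= 2:
        # Both boorus accept a rating filter; keep results safe-for-work.
        tags += "+rating:safe"
    return url_search + tags, cookie_file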
Example #5
    def info(image):
        url = "http://iqdb.org/?url=%s" % (str(image))
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        if soup.find('th', text="No relevant matches"):
            return False, False, False
        site = None
        links = soup.find_all('a')
        for link in links:
            try:
                link['href']
            except:
                continue
            if link.string == "(hide)":
                # Haven't broken out of the loop yet; only low-relevance results remain
                return False, False, False
            if "chan.sankakucomplex.com/post/show/" in link['href']:
                if "http" not in link['href']:
                    url = "http:" + link['href']
                else:
                    url = link['href']
                site = 0
                break
            elif "http://danbooru.donmai.us/posts/" in link['href']:
                url = link['href']
                site = 1
                break
        if site is None:
            # No link found!
            return False, False, False
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        try:
            if site == 0:
                # Sankaku
                artist = soup.find(
                    'li', class_="tag-type-artist").find_next('a').text.title()

            elif site == 1:
                # Danbooru
                artist = soup.find(
                    'h2', text="Artist").find_next(
                    'a', class_="search-tag").text.title()
        except:
            artist = ""
        try:
            if site == 0:
                # Sankaku
                series = soup.find(
                    'li', class_="tag-type-copyright").find_next(
                    'a').text.title()
            elif site == 1:
                # Danbooru
                series = soup.find(
                    'h2', text="Copyrights").find_next(
                    'a', class_="search-tag").text.title()
        except:
            series = ""
        try:
            if site == 0:
                # Sankaku
                names = soup.find_all('li', class_="tag-type-character")
            elif site == 1:
                # Danbooru
                names = soup.find_all('li', class_="category-4")
            name = []
            for a in names:
                if site == 0:
                    a = a.find_next('a').text.title()
                elif site == 1:
                    a = a.find_next('a').find_next('a').text.title()
                name.append(re.sub(r' \([^)]*\)', '', a))
            name = list(set(name))
            if len(name) >= 2:
                names = utils.make_paste(
                    text='\n'.join(name),
                    title="Source Names")
            else:
                names = ''.join(name)
        except:
            names = ""

        return artist, series, names
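A hedged sketch of how a caller might handle the three sentinels info() can return ("OFFLINE" strings when iqdb or the booru is unreachable, False when no relevant match exists, otherwise the tag text). The image URL and surrounding print calls are made up for illustration.

artist, series, names = info("http://example.com/sample.jpg")  # hypothetical input
if artist == "OFFLINE":
    # iqdb.org or the matched booru did not respond.
    print("Source lookup is offline, try again later.")
elif artist is False:
    # No relevant match (or only low-relevance results) was found.
    print("No source found for this image.")
else:
    print("Artist: {0}\nSeries: {1}\nCharacter(s): {2}".format(artist, series, names))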
Example #6
def airing(args):
    if len(args) <= 3:
        return False
    args = args.replace("Durarara!!x2", "Durarara!!×2")
    air_list_titles = []
    air_list_msg = []
    url = "https://www.livechart.me/summer-2015/all"
    soup = utils.scrape_site(url)
    if not soup:
        return False
    show_list = soup.find_all('h3', class_="main-title")
    today = datetime.datetime.today()
    today = today + datetime.timedelta(hours=8)
    for div in show_list:
        anime_title = div.text
        if len(anime_title) > 60:
            anime_title = anime_title[:50] + "[...]"
        air_list_titles.append(anime_title)
        ep_num_time = div.findNext('div').text
        if "ished" in ep_num_time or \
           "ished" in ep_num_time and "eatrical" in ep_num_time:
            ep_num = "Finished"
        elif "eatrical" in ep_num_time:
            ep_num = "Movie"
        else:
            try:
                if "EP" in ep_num_time:
                    ep_num = "Episode " + ep_num_time.split(":", 1)[0][2:]
                else:
                    if "Movie" in anime_title:
                        ep_num = "Movie"
                    else:
                        ep_num = "Movie/OVA/Special"
            except:
                ep_num = "New Series"

        if "Episode" in ep_num or "Movie" in ep_num:
            try:
                ep_num_time = ep_num_time.split(": ", 1)[1]
            except:
                pass
            try:
                ep_num_time = ep_num_time.split(
                    " JST", 1)[0].replace(" at", "")
                anime_time = datetime.datetime.strptime(re.sub(
                    " +", " ", ep_num_time), '%b %d %I:%M%p')
                anime_time = anime_time.replace(
                    year=datetime.datetime.today().year)
                result = anime_time - today
                msg = ("{0}\n"
                       "{1} airing in\n"
                       "{2} Days, {3} Hours and {4} Minutes").format(
                    anime_title, ep_num, result.days,
                    result.seconds//3600, (result.seconds//60) % 60)
            except:
                msg = "{0}\nNew Series\nUnknown air date!".format(anime_title)
        else:
            msg = "{0} has finished airing!".format(anime_title)
        air_list_msg.append(msg)

    try:
        found = [s for s in air_list_titles if re.sub(
            '[^A-Za-z0-9]+', '', args.lower()) in re.sub(
            '[^A-Za-z0-9]+', '', s.lower())]
        found = ''.join(found[0])
        index = air_list_titles.index(''.join(found))
        air = air_list_msg[index]
        air = ''.join(air)
    except:
        count_trigger("airing")
        return False
    return air
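The countdown string in airing() splits a timedelta into days, hours and minutes using floor division; here is a small worked example of that arithmetic, with made-up timestamps.

import datetime

air_time = datetime.datetime(2015, 7, 12, 22, 30)   # hypothetical JST air time
now = datetime.datetime(2015, 7, 10, 13, 5)
result = air_time - now
days = result.days                     # whole days in the delta
hours = result.seconds // 3600         # leftover seconds converted to hours
minutes = (result.seconds // 60) % 60  # leftover minutes within the hour
print("{0} Days, {1} Hours and {2} Minutes".format(days, hours, minutes))
# prints: 2 Days, 9 Hours and 25 Minutes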
Example #7
def get_image_online(**kwargs):
    if kwargs.get('used images'):
        txt_name = kwargs.get('used images')
        used_links = open(txt_name, 'r').read().splitlines()
    else:
        txt_name = os.path.join(os.getcwd(), "Used sankaku {0}.txt".format(
            kwargs['bot name']))
        try:
            used_links = open(txt_name, 'r').read().splitlines()
        except:
            if not os.path.exists(txt_name):
                print("Didn't find any used links! Creating a TXT!")
                print("Set it to:\n{0}".format(txt_name))
                used_links = []
            else:
                used_links = open(txt_name, 'r').read().splitlines()

    if kwargs.get('highest page'):
        high_page = int(kwargs.get('highest page'))
    else:
        high_page = 50

    tried_pages = [high_page]
    cookie_file = None
    try_count = 0
    low_page = 0
    page = 0
    x = None
    no_images = False
    url_start = "https://chan.sankakucomplex.com"
    url_search = "https://chan.sankakucomplex.com/?tags="
    if utils.is_bool(kwargs.get('login')):
        cookie_file = "../sankakucomplex.txt"
        url_login = "******"
        form_num = 0
        form_user = "******"
        form_password = "******"
        username = kwargs.get('username')
        password = kwargs.get('password')
        if not os.path.exists(cookie_file):
            browser, s = utils.scrape_site(url_login, cookie_file, True)
            form = browser.get_form(form_num)
            form[form_user].value = username
            form[form_password].value = password
            browser.submit_form(form)
            s.cookies.save()

    if utils.is_bool(kwargs.get('save images')):
        if kwargs.get('path'):
            path = kwargs.get('path')
        else:
            path = os.path.abspath(os.path.join(os.getcwd(),
                                                "images"))
            if not os.path.exists(path):
                os.makedirs(path)
    else:
        path = os.path.abspath(os.path.join(os.getcwd()))

    if kwargs.get('tags'):
        if isinstance(kwargs.get('tags'), list):
            tags = '+'.join(kwargs.get('tags'))
        else:
            tags = '+'.join(kwargs.get('tags').split(', '))
    else:
        tags = ""
    if kwargs.get('ignore tags'):
        if isinstance(kwargs.get('ignore tags'), list):
            ignore_tags = kwargs.get('ignore tags')
        else:
            ignore_tags = kwargs.get('ignore tags').split(', ')
    else:
        ignore_tags = []
    if utils.is_bool(kwargs.get('ignore cosplay')):
        ignore_cosplay = utils.is_bool(kwargs.get('ignore cosplay'))
    else:
        ignore_cosplay = False
    if utils.is_bool(kwargs.get('accept webm')):
        accept_webm = utils.is_bool(kwargs.get('accept webm'))
    else:
        accept_webm = False

    tried_pages = [high_page + 1]
    while True:
        while True:
            while True:
                while True:
                    no_images = False
                    try_count += 1
                    if try_count == 15:
                        return False, False
                    page = str(int(random.randint(low_page, high_page) * 1))
                    while int(page) in tried_pages:
                        if int(page) == 0:
                            break
                        if not x:
                            x = high_page
                        page = str(int(
                            random.randint(low_page, high_page) * 1))
                        if int(page) > int(x):
                            continue
                    tried_pages.append(int(page))
                    x = min(tried_pages)
                    page_url = "&page=" + str(page)
                    url = "%s%s%s" % (url_search, tags, page_url)
                    browser = utils.scrape_site(url, cookie_file)
                    if browser.find('div', text="No matching posts"):
                        no_images = True
                    time.sleep(1)
                    if not no_images:
                        break
                    elif no_images and int(page) == 0:
                        return False, False
                good_image_links = []
                image_links = browser.find_all('a')
                for link in image_links:
                    try:
                        link['href']
                    except:
                        continue
                    if "/post/show/" not in link['href']:
                        continue
                    good_image_links.append(link['href'])
                if good_image_links == []:
                    return False, False
                random.shuffle(good_image_links)
                url = "%s%s" % (url_start, random.choice(good_image_links))
                try_count = 0
                while url in used_links:
                    url = "%s/%s" % (
                        url_start, random.choice(good_image_links))
                    try_count = try_count + 1
                    if try_count == 20:
                        break
                used_links.append(url)
                # Make a copy for better use in message
                post_url = url
                browser.open(url)
                if not accept_webm:
                    if browser.find('video', attrs={'id': 'image'}):
                        continue

                image_tags = []
                char_tags = []
                art_tags = []
                sers_tags = []
                tags_tags = []
                site_tag = browser.find('ul', id="tag-sidebar")
                site_tag = site_tag.find_all('li')
                for taga in site_tag:
                    tag = tag_clean(taga)
                    if taga['class'][0] == "tag-type-artist":
                        art_tags.append(tag.title())
                    elif taga['class'][0] == "tag-type-copyright":
                        sers_tags.append(tag.title())
                    elif taga['class'][0] == "tag-type-character":
                        char_tags.append(tag.title())
                    else:
                        tags_tags.append(tag.title())
                    image_tags.append(tag.lower())

                if any([item in [x.lower() for x in ignore_tags]
                        for item in [x.lower() for x in image_tags]]):
                    continue
                if ignore_cosplay:
                    if any(" (cosplay)" in s for s in image_tags):
                        continue
                break

            image_url = browser.find('img', attrs={'id': 'image'})
            if not image_url:
                image_url = browser.find('video', attrs={'id': 'image'})
            try:
                url = urllib.parse.urljoin("https:", image_url['src'])
            except:
                # Flash File
                continue

            filename = ""
            if not utils.is_bool(kwargs.get('message')):
                message = ""
            sn_kwgs = {}
            sn_url, sn_kwgs = utils.saucenao(url, kwargs['saucenao api'], True)
            re_dict = {
                '{#artist}': ('#' if art_tags else '') + ' #'.join(
                    [x.replace(" ", "_") for x in art_tags]),
                '{#character}': ('#' if char_tags else '') + ' #'.join(
                    [x.replace(" ", "_") for x in char_tags]),
                '{#series}': ('#' if sers_tags else '') + ' #'.join(
                    [x.replace(" ", "_") for x in sers_tags]),
                '{#tags}': ('#' if tags_tags else '') + ' #'.join(
                    [x.replace(" ", "_") for x in tags_tags]),
                '{artist}': ', '.join(art_tags),
                '{character}': ', '.join(char_tags),
                '{series}': ', '.join(sers_tags),
                '{tags}': ', '.join(tags_tags),
                '{url}': post_url,
                '{title}': sn_kwgs.get('title'),
                '{sn title}': sn_kwgs.get('title'),
                '{sn illust id}': sn_kwgs.get('illust id'),
                '{sn illust url}': sn_url,
                '{sn artist}': sn_kwgs.get('artist'),
                '{sn artist id}': sn_kwgs.get('artist id'),
                '{sn artist url}': sn_kwgs.get('artist url')}

            if kwargs.get('filename'):
                filename = utils.replace_all(kwargs.get('filename'), re_dict)
                filename = utils.safe_msg(filename)

            if kwargs.get('message'):
                message = utils.replace_all(kwargs.get('message'), re_dict)
                message = utils.safe_msg(message)

            with open(txt_name, 'w+') as f:
                f.write("\n".join(used_links))

            tweet_image = utils.download_image(url, path, filename, **kwargs)
            if tweet_image:
                break

        if not utils.is_bool(kwargs.get('save images')):
            from threading import Thread
            Thread(name="Delete Image", target=delete_image, args=(
                tweet_image, )).start()
        return message, tweet_image
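The ignore-tag check inside get_image_online lowercases both lists and tests for any overlap; the same test can be written as a set intersection. The sketch below is an illustrative rewrite, not the project's helper.

def has_ignored_tag(image_tags, ignore_tags):
    # True if any tag on the post also appears in the ignore list (case-insensitive).
    return bool({t.lower() for t in image_tags} & {t.lower() for t in ignore_tags})


print(has_ignored_tag(["1girl", "Solo"], ["solo"]))  # True -> skip this post
print(has_ignored_tag(["1girl", "solo"], ["webm"]))  # False -> keep it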
Example #10
def get_reddit(**kwargs):
    if kwargs.get('used images'):
        txt_name = kwargs.get('used images')
        used_links = open(txt_name, 'r').read().splitlines()
    else:
        txt_name = os.path.join(os.getcwd(), "Used reddit {0}.txt".format(
                                             kwargs['bot name']))
        try:
            used_links = open(txt_name, 'r').read().splitlines()
        except:
            if not os.path.exists(txt_name):
                print("Didn't find any used links! Creating a TXT!")
                print("Set it to:\n{0}".format(txt_name))
                used_links = []
            else:
                used_links = open(txt_name, 'r').read().splitlines()
    try:
        sub = used_links[0]
        used_links = used_links[1:]
    except:
        # Probably doesn't exist yet (hopefully that's all it is)
        pass
    if kwargs.get('save images'):
        if kwargs.get('path'):
            path = kwargs.get('path')
        else:
            path = os.path.abspath(os.path.join(os.getcwd(),
                                                "images"))
            if not os.path.exists(path):
                os.makedirs(path)
    else:
        path = os.path.abspath(os.path.join(os.getcwd()))

    start_url = "https://www.reddit.com/r/"
    subreddits = kwargs.get('subreddits')
    is_random = kwargs.get('random subreddit')
    is_random_link = kwargs.get('random link')
    if subreddits is None:
        return False, False
    if isinstance(subreddits, str):
        subreddits = subreddits.split(", ")
    if utils.is_bool(is_random):
        import random
        sub = random.choice(subreddits)
    else:
        # Get last used sub and + 1
        try:
            sub = open(os.path.join(os.getcwd(), "Used reddit {0}.txt".format(
                kwargs['bot name'])), 'r').read().splitlines()[0]
            sub = subreddits[(subreddits.index(sub) + 1)]
        except:
            # Doesn't exist / end of list
            sub = subreddits[0]
    url = start_url + sub + "/.rss"
    soup = utils.scrape_site(url, is_rss=True)
    pic_imgs = []
    for a in soup.find_all('item'):
        img_string = a.find('description').string
        img_title = a.find('title').string
        img_link = a.find('link').string
        img_string = img_string[:img_string.index("[link]")]
        img_string = BeautifulSoup(img_string, 'html5lib').find_all('a')
        for item in img_string:
            if "reddit.com" not in item['href'] and "http" in item['href']:
                pic_imgs.append([item['href'], img_title, img_link])

    if utils.is_bool(is_random_link):
        import random
        image = random.choice(pic_imgs)
    else:
        image = pic_imgs[0]
    safe_break = 0
    count = 0
    while image[0] in used_links:
        if utils.is_bool(is_random_link):
            image = random.choice(pic_imgs)
        else:
            image = pic_imgs[count]
            if image[0] in used_links:
                count += 1
                continue
            break
        safe_break += 1
        if safe_break == 50:
            break
    used_links.append(image[0])
    imgTypes = {"jpg": "image/jpeg",
                "jpeg": "image/jpeg",
                "png": "image/png",
                "gif": "image/gif",
                "webm": "video/webm"}
    filepath = urlparse(image[0]).path
    ext = os.path.splitext(filepath)[1].lower()
    if not ext[ext.rfind(".") + 1:] in imgTypes:
        if "imgur" in image[0]:
            # Just make it .png it still returns correct image
            image[0] = "http://i.imgur.com/" + image[0].rsplit(
                       '/', 1)[1] + ".png"
            ext = ".png"

    sn_url = None
    sn_kwgs = {}
    if "(x-post" in image[1].lower() or "(via" in image[1].lower():
        image[1] = re.sub(r'\([^)]*\)', '', image[1])
    if kwargs.get('message') and "sn" in kwargs['message']:
        sn_url, sn_kwgs = utils.saucenao(fname=image[0],
                                         api_key=kwargs.get('saucenao api'),
                                         metainfo=True)
    re_dict = {'{url}': image[2],
               '{title}': image[1],
               '{sn title}': sn_kwgs.get('title'),
               '{sn illust id}': sn_kwgs.get('illust id'),
               '{sn illust url}': sn_url,
               '{sn artist}': sn_kwgs.get('artist'),
               '{sn artist id}': sn_kwgs.get('artist id'),
               '{sn artist url}': sn_kwgs.get('artist url')}

    if kwargs.get('filename'):
        filename = utils.replace_all(kwargs.get('filename'), re_dict)
        filename = utils.safe_msg(filename)
    else:
        filename = ""
    if kwargs.get('message'):
        message = utils.replace_all(kwargs.get('message'), re_dict)
        message = utils.safe_msg(message)
    else:
        message = ""
    image = utils.download_image(image[0], path, filename, **kwargs)
    used_links = [sub] + used_links
    with open(txt_name, 'w') as f:
        f.write("\n".join(used_links))
    return message, image
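get_reddit stores its state in the same text file it reads: the first line is the subreddit used last time (so the non-random mode can advance to the next one), and every following line is a link that has already been posted. A small sketch of that layout, using invented sample data.

sample = "\n".join([
    "awwnime",                        # hypothetical last-used subreddit
    "http://i.imgur.com/abc123.png",  # previously posted links
    "http://i.imgur.com/def456.png",
])
lines = sample.splitlines()
sub, used_links = lines[0], lines[1:]
print(sub)         # awwnime
print(used_links)  # ['http://i.imgur.com/abc123.png', 'http://i.imgur.com/def456.png']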