Example #1
 def _read_vimeo(self, channel, msg):
     res = vimeo_regex.search(msg)
     if not res:
         return
     try:
         video_id = str(res.groups()[0])
         video_info = self._get_vimeo_info(video_id)
         self.bot.say(random_response(VIDEO_RESPONSES) % video_info, channel)
     except Exception as e:
         self.bot.log_error('ERROR could not get title of vimeo link from: "'
                            + msg + '" the exception was: ' + str(e))
         self.bot.say('For some reason I couldn\'t read the title of that '
                      + 'vimeo link.', channel)
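The helper _get_vimeo_info is not shown in these examples. Below is a minimal sketch of one plausible implementation, assuming Vimeo's public oEmbed endpoint and a dict-style return value that matches the % formatting used by the callers; the endpoint choice, the helper body, and the returned keys are assumptions, not the original code.

import json
from urllib.request import urlopen


def _get_vimeo_info(self, video_id):
    # Hypothetical helper (assumption): fetch video metadata from Vimeo's
    # public oEmbed endpoint. The original helper is not shown here.
    url = ('https://vimeo.com/api/oembed.json?url=https://vimeo.com/'
           + video_id)
    with urlopen(url) as resp:
        data = json.loads(resp.read().decode('utf-8'))
    # The callers format VIDEO_RESPONSES templates with a mapping, so
    # return a dict with at least a 'title' key.
    return {'title': data['title']}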
Example #2
 def _read_vimeo(self, channel, msg):
     vimeo_res = regex.VIMEO.search(msg)
     if not vimeo_res:
         return
     try:
         video_info = self._get_vimeo_info(vimeo_res.group('id'))
         self.bot.say(random_response(VIDEO_RESPONSES) % video_info,
                      channel)
     except Exception:
         self.bot.log_error('Could not get title of vimeo link from: "'
                            + msg + '"')
         self.bot.say('For some reason I couldn\'t read the title of that '
                      + 'vimeo link.', channel)
Example #3
 def _read_youtube(self, channel, msg):
     yt_res = regex.YOUTUBE.search(msg)
     if not yt_res:
         return
     try:
         video_info = self._get_youtube_info(yt_res.group('id'))
         self.bot.say(random_response(VIDEO_RESPONSES) % video_info,
                      channel)
     except Exception:
         self.bot.log_error('Could not get title of youtube link from: "'
                            + msg + '"')
         self.bot.say('For some reason I couldn\'t read the title of that '
                      + 'youtube link.', channel)
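Examples #2 and #3 rely on precompiled patterns with a named id group (regex.VIMEO, regex.YOUTUBE) defined in a module that is not shown. The sketch below gives plausible minimal patterns with that shape; the exact expressions in the original module are assumptions here.

import re

# Plausible stand-ins for regex.VIMEO and regex.YOUTUBE (assumptions):
# Vimeo links carry a numeric id; YouTube ids are 11 URL-safe characters.
VIMEO = re.compile(r'vimeo\.com/(?P<id>\d+)')
YOUTUBE = re.compile(
    r'(?:youtube\.com/watch\?\S*?v=|youtu\.be/)(?P<id>[\w-]{11})')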
Example #4
 def _read_websites(self, channel, msg):
     links = regex.WEB_URL.findall(msg)
     for link in links:
         if [r for r in __all_non_web__ if r.search(link)]:
             continue
         try:
             t = lxml.html.parse(urlopen(link))  # noqa: E501 # nosec: regex.WEB_URL only allows http(s)
             t = t.find(".//title").text
             t = t.strip().replace('\n', ' ')
             if len(re.sub("[^a-zA-Z0-9]", "", t)) >= 5:
                 self.bot.say(random_response(WEB_RESPONSES) % {'title': t},
                              channel)
         except Exception:
             self.bot.log_error('Could not get title of webpage: "'
                                + msg + '"')
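The title-scraping step in Example #4 can be exercised on its own. A minimal sketch, assuming only lxml and the standard library, that mirrors the parse/find/strip sequence used above:

from urllib.request import urlopen

import lxml.html


def page_title(url):
    # lxml.html.parse() accepts a file-like object, so the urlopen()
    # response can be fed to it directly, as in Example #4.
    tree = lxml.html.parse(urlopen(url))
    node = tree.find('.//title')
    if node is None or node.text is None:
        return None
    return node.text.strip().replace('\n', ' ')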
Example #5
 def _read_vimeo(self, channel, msg):
     res = vimeo_regex.search(msg)
     if not res:
         return
     try:
         video_id = str(res.groups()[0])
         video_info = self._get_vimeo_info(video_id)
         self.bot.say(
             random_response(VIDEO_RESPONSES) % video_info, channel)
     except Exception as e:
         self.bot.log_error(
             'ERROR could not get title of vimeo link from: "' + msg +
             '" the exception was: ' + str(e))
         self.bot.say(
             'For some reason I couldn\'t read the title of that ' +
             'vimeo link.', channel)
Example #6
 def _read_websites(self, channel, msg):
     links = web_regex.findall(msg)
     for link in links:
         link = link[0]
         if len([r for r in __all_non_web__ if r.search(link)]) > 0:
             continue
         try:
             t = lxml.html.parse(str(link))
             t = t.find(".//title").text
             t = t.strip().replace('\n', ' ')
             if len(re.sub("[^a-zA-Z0-9]", "", t)) >= 5:
                 self.bot.say(random_response(WEB_RESPONSES) % {'title': t},
                              channel)
         except Exception as e:
             self.bot.log_error('ERROR could not get title of a webpage: "'
                                + msg + '" the exception was: ' + str(e))
Example #7
 def _read_websites(self, channel, msg):
     links = web_regex.findall(msg)
     for link in links:
         link = link[0]
         if len([r for r in __all_non_web__ if r.search(link)]) > 0:
             continue
         try:
             t = lxml.html.parse(str(link))
             t = t.find(".//title").text
             t = t.strip().replace('\n', ' ')
             if len(re.sub("[^a-zA-Z0-9]", "", t)) >= 5:
                 self.bot.say(
                     random_response(WEB_RESPONSES) % {'title': t}, channel)
         except Exception as e:
             self.bot.log_error(
                 'ERROR could not get title of a webpage: "' + msg +
                 '" the exception was: ' + str(e))
Example #8
    def handle_message(self, channel, nick, msg, line=None):
        urls = re.findall(r'(https?://[^\s]+)', msg)

        if not urls:
            return

        image_urls = self._get_image_urls(urls)

        if not image_urls:
            return

        nsfw_image_urls = self._process_images(urls=image_urls)

        for url in nsfw_image_urls:
            from response import NSFW_LINKS, random_response
            msg = random_response(NSFW_LINKS) % {'url': url, 'nick': nick}
            self.bot.say(msg, channel)
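Example #8 (and the handlers above) format a randomly chosen template with a dict. The random_response helper and the response lists live in a separate response module that is not shown; a minimal sketch under that assumption, with placeholder templates, might look like:

import random

# Placeholder templates (assumptions); the real NSFW_LINKS entries are not
# shown in these examples. They use %-style named placeholders matching
# the dict built in handle_message above.
NSFW_LINKS = [
    "%(nick)s, that link looks NSFW to me: %(url)s",
    "Careful, %(nick)s, %(url)s does not look safe for work.",
]


def random_response(responses):
    # Pick one template at random; the caller fills in the placeholders.
    return random.choice(responses)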
Example #9
 def _pick_response(self, is_self, is_multiple):
     f = [[REPOSTS, MULTIPLE_REPOST], [SELF_REPOSTS, MULTIPLE_SELF_REPOST]]
     return random_response(f[is_self][is_multiple])
Example #10
 def _pick_response(self, is_self, is_multiple):
     f = [
         [REPOSTS, MULTIPLE_REPOST],
         [SELF_REPOSTS, MULTIPLE_SELF_REPOST]
     ]
     return random_response(f[is_self][is_multiple])
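Examples #9 and #10 index the nested list with boolean flags, which works because bool is a subclass of int in Python (False indexes as 0, True as 1). A small self-contained check of that lookup, using placeholder response lists standing in for the real constants:

# Placeholder response lists (the real constants are defined elsewhere).
REPOSTS = ['Repost!']
MULTIPLE_REPOST = ['That link has been reposted more than once.']
SELF_REPOSTS = ['Reposting your own link?']
MULTIPLE_SELF_REPOST = ['Still reposting your own link?']

f = [[REPOSTS, MULTIPLE_REPOST], [SELF_REPOSTS, MULTIPLE_SELF_REPOST]]

# bool is a subclass of int, so the flags index as 0/1:
assert f[False][True] is MULTIPLE_REPOST   # repost, seen multiple times
assert f[True][False] is SELF_REPOSTS      # first self-repost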