Example 1
# Module-level imports this cog method relies on:
import aiohttp
from io import BytesIO
from random import choice
from bs4 import BeautifulSoup as b_s

async def retrosign(self, *, content: str):
    """Make a retrosign from three words separated by ';', or from a single word placed on the middle line."""
    texts = [t.strip() for t in content.split(';')]
    if len(texts) == 1:
        # Single word: render it on the middle line, leaving the others blank.
        if len(texts[0]) <= 12:
            data = dict(
                bcg=choice([1, 2, 3, 4, 5]),
                txt=choice([1, 2, 3, 4]),
                text1="",
                text2=texts[0],
                text3=""
            )
            # Legacy discord.py (async branch) helpers: type() shows the
            # typing indicator, upload() sends a file to the channel.
            await self.bot.type()
            async with aiohttp.ClientSession() as session:
                async with session.post("http://photofunia.com/effects/retro-wave", data=data) as response:
                    if response.status == 200:
                        soup = b_s(await response.text(), "lxml")
                        download_url = soup.find("div", class_="downloads-container").ul.li.a["href"]
                        async with session.get(download_url) as image_response:
                            if image_response.status == 200:
                                image_data = await image_response.read()
                                with BytesIO(image_data) as temp_image:
                                    await self.bot.upload(temp_image, filename="retro.jpg")
        else:
            await self.bot.say("\N{CROSS MARK} too many characters for one line")
            return
    elif len(texts) != 3:
        await self.bot.say("\N{CROSS MARK} please provide three words separated by ';' or one word")
        return
    # Each line of the sign is limited to 12 characters.
    elif len(texts[0]) > 12:
        await self.bot.say("\N{CROSS MARK} Your first word(s) is/are too long")
        return
    elif len(texts[1]) > 12:
        await self.bot.say("\N{CROSS MARK} Your second word(s) is/are too long")
        return
    elif len(texts[2]) > 12:
        await self.bot.say("\N{CROSS MARK} Your third word(s) is/are too long")
        return
    else:
        data = dict(
            bcg=choice([1, 2, 3, 4, 5]),
            txt=choice([1, 2, 3, 4]),
            text1=texts[0],
            text2=texts[1],
            text3=texts[2]
        )
        await self.bot.type()
        async with aiohttp.ClientSession() as session:
            async with session.post("http://photofunia.com/effects/retro-wave", data=data) as response:
                if response.status == 200:
                    soup = b_s(await response.text(), "lxml")
                    download_url = soup.find("div", class_="downloads-container").ul.li.a["href"]
                    async with session.get(download_url) as image_response:
                        if image_response.status == 200:
                            image_data = await image_response.read()
                            with BytesIO(image_data) as temp_image:
                                await self.bot.upload(temp_image, filename="retro.jpg")
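For context, a minimal sketch of how a command like this would be wired into a cog under the legacy (pre-1.0, async) discord.py commands extension that the snippet targets; the class and setup names are assumptions, not part of the original.

from discord.ext import commands

class Retro:
    """Hypothetical cog holding the retrosign command."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def retrosign(self, *, content: str):
        ...  # body as shown above

def setup(bot):
    bot.add_cog(Retro(bot))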
Example 2
# Relies on the same module-level imports as Example 1
# (aiohttp, BytesIO, choice, and b_s).

async def _top_(self, *, content: str):
    """Make a retrosign with top and middle text."""
    texts = [t.strip() for t in content.split(';')]
    if len(texts) != 2:
        await self.bot.say("\N{CROSS MARK} please provide two words separated by ';'")
        return
    else:
        data = dict(
            bcg=choice([1, 2, 3, 4, 5]),
            txt=choice([1, 2, 3, 4]),
            text1=texts[0],
            text2=texts[1],
            text3=""  # bottom line intentionally left blank
        )
        await self.bot.type()
        async with aiohttp.ClientSession() as session:
            async with session.post("http://photofunia.com/effects/retro-wave", data=data) as response:
                if response.status == 200:
                    soup = b_s(await response.text(), "lxml")
                    download_url = soup.find("div", class_="downloads-container").ul.li.a["href"]
                    async with session.get(download_url) as image_response:
                        if image_response.status == 200:
                            image_data = await image_response.read()
                            with BytesIO(image_data) as temp_image:
                                await self.bot.upload(temp_image, filename="retro.jpg")
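Both commands extract the result image with the same attribute-chain navigation (the downloads-container div, then ul, li, a). A standalone sketch of that chain against synthetic markup; the HTML here is an assumption modeled on what the selectors expect, not a captured PhotoFunia response.

from bs4 import BeautifulSoup as b_s

# Synthetic markup shaped like what the selectors above expect.
html = """
<div class="downloads-container">
  <ul><li><a href="http://example.com/retro.jpg">Download</a></li></ul>
</div>
"""
soup = b_s(html, "html.parser")  # the commands use "lxml"; html.parser avoids the extra dependency
download_url = soup.find("div", class_="downloads-container").ul.li.a["href"]
print(download_url)  # -> http://example.com/retro.jpg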
Example 3
import re
import urllib.request as u_r
from bs4 import BeautifulSoup as b_s

def miner(url):
    """Scrape rank, song, and artist from a year-end chart page that uses
    Billboard's 'ye-chart' markup."""
    song_list = []
    song_list_dict = []

    with u_r.urlopen(url) as f:
        html_doc = f.read()

    soup = b_s(html_doc, "html5lib")

    # Each chart entry lives in a 'ye-chart__item-text' block; strip any
    # leftover tags, then split into non-blank lines.
    cleanr = re.compile('<.*?>')
    for item in soup.find_all(class_='ye-chart__item-text'):
        cleantext = re.sub(cleanr, '', item.text).strip().split('\n')
        cleantext = [x for x in cleantext if x.strip()]
        song_list.append(cleantext)

    # The surviving lines are, in order: rank, song title, artist.
    for info in song_list:
        song_list_dict.append({
            'rank': info[0],
            'song': info[1],
            'artist': info[2],
        })

    return song_list_dict
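A quick usage sketch; the URL is a placeholder assumption (any page carrying the 'ye-chart__item-text' markup), not one taken from the original code.

url = "https://www.billboard.com/charts/year-end/2017/hot-100-songs"  # placeholder
for entry in miner(url)[:5]:
    print(entry['rank'], entry['song'], '-', entry['artist'])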
Example 4
import pandas as pd
import requests
from bs4 import BeautifulSoup as b_s

def craigslist_soup(region, term, last_scrape):
    """Scrape Craigslist results for a queried search, keeping only
    listings newer than last_scrape."""
    url = "https://{region}.craigslist.org/search/sss?query={term}".format(
        region=region, term=term)
    response = requests.get(url=url)
    # craigslist_handler is an external helper from the surrounding project;
    # it appears to return 0 when Craigslist is reachable.
    if craigslist_handler() != 0:
        return "Craigslist not OK"

    soup = b_s(response.content, "html.parser")
    posts = soup.find_all("li", class_="result-row")
    links = []
    image_jpg_list = []
    posting_body = []
    list_results = []

    # First pass: collect the detail-page link for every result row.
    for post in posts:
        title_class = post.find("a", class_="result-title hdrlnk")
        links.append(title_class["href"])

    # Second pass: fetch each detail page for its image and description.
    for link in links:
        response_link = requests.get(url=link)
        link_soup = b_s(response_link.content, "html.parser")
        image_url = link_soup.find("img")
        if image_url is not None:
            image_url = image_url["src"]
        else:
            image_url = "no image provided in this listing"
        image_jpg_list.append(image_url)
        section_body_class = link_soup.find("section", id="postingbody")
        if section_body_class is not None:
            section_body_class = section_body_class.get_text()
        else:
            section_body_class = "No description provided"
        stripped = section_body_class.replace(
            "\n\nQR Code Link to This Post\n", "")
        final_strip = stripped.replace("\n\n", "")
        posting_body.append(final_strip)

    # Third pass: assemble one record per result row and keep the new ones.
    for index, post in enumerate(posts):
        description_full = posting_body[index]
        image_url_jpg = image_jpg_list[index]
        result_price = post.find("span", class_="result-price")
        if result_price is not None:
            result_price_text = result_price.get_text()
        else:
            result_price_text = "No price provided"
        time_class = post.find("time", class_="result-date")
        created_at = time_class["datetime"]
        title_class = post.find("a", class_="result-title hdrlnk")
        listing_url = title_class["href"]
        cl_id = title_class["data-id"]
        title_text = title_class.text
        neighborhood = post.find("span", class_="result-hood")
        if neighborhood is not None:
            neighborhood_text = neighborhood.get_text()
        else:
            neighborhood_text = "No neighborhood provided"
        result_listings = {
            "cl_id": cl_id,
            "created_at": created_at,
            "title_text": title_text,
            "price": result_price_text,
            "neighborhood_text": neighborhood_text,
            "url": listing_url,
            "description": description_full,
            "jpg": image_url_jpg,
        }

        if pd.isnull(pd.to_datetime(last_scrape)):
            list_results.append(result_listings)
            print(
                f"The datetime is null. Listing posted {created_at} and last scrape time {last_scrape}, so we will append this AND POST TO SLACK"
            )
        elif pd.to_datetime(result_listings["created_at"]) > pd.to_datetime(last_scrape):
            list_results.append(result_listings)
            print(
                f"Listing posted {created_at} and last scrape time {last_scrape}, so we will append this AND POST TO SLACK"
            )
        else:
            print(
                f"Listing posted {created_at} and last scrape time {last_scrape}. We will not append this."
            )

    return list_results
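A usage sketch, assuming craigslist_handler is importable from the surrounding project (it is not defined in this snippet); the region, search term, and timestamp are placeholders.

results = craigslist_soup(
    region="seattle",
    term="standing desk",
    last_scrape="2020-01-01T00:00:00",
)
if isinstance(results, str):
    print(results)  # "Craigslist not OK"
else:
    for listing in results:
        print(listing["created_at"], listing["price"], listing["title_text"])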