def generate_movie_embed(self, result: dict) -> discord.Embed:
    """Build a Discord embed presenting a TMDb movie search result.

    :param result: movie details dict as returned by the TMDb movie endpoint
    :return: embed with title, ratings, release date, finances, genres and artwork
    """
    release_date = dt.date.fromisoformat(result['release_date']) if result['release_date'] else None
    # Fix: the original always evaluated result["release_date"][:4] in the title,
    # raising TypeError when the API returned no release date (None).
    title = f'{result["title"]} ({release_date.year})' if release_date is not None else result['title']
    embed = self.bot.generate_embed(
        '🎞', title, result.get('tagline'),
        url=f'https://www.themoviedb.org/movie/{result["id"]}'
    )
    if result.get('original_title') != result['title']:
        embed.add_field(name='Tytuł oryginalny', value=result['original_title'], inline=False)
    embed.add_field(name='Średnia ocen', value=f'{result["vote_average"]:n} / 10')
    embed.add_field(name='Głosów', value=f'{result["vote_count"]:n}')
    if release_date is not None:
        # NOTE: %-d (no zero padding) is glibc-specific and unavailable on Windows
        embed.add_field(name='Data premiery', value=release_date.strftime('%-d %B %Y'))
    if result['runtime']:
        # TMDb runtime is in minutes; the helper expects seconds
        embed.add_field(name='Długość', value=human_amount_of_time(result['runtime'] * 60))
    if result['budget']:
        embed.add_field(name='Budżet', value=f'${result["budget"]:n}')
    if result['revenue']:
        embed.add_field(name='Przychody', value=f'${result["revenue"]:n}')
    if result['genres']:
        genre_parts = (
            f'[{genre["name"]}](https://www.themoviedb.org/genre/{genre["id"]})'
            for genre in result['genres']
        )
        embed.add_field(name='Gatunki' if len(result['genres']) > 1 else 'Gatunek', value=' / '.join(genre_parts))
    if result['overview']:
        embed.add_field(name='Opis', value=text_snippet(result['overview'], 500), inline=False)
    if result.get('poster_path'):
        embed.set_thumbnail(url=f'https://image.tmdb.org/t/p/w342{result["poster_path"]}')
    if result.get('backdrop_path'):
        embed.set_image(url=f'https://image.tmdb.org/t/p/w780{result["backdrop_path"]}')
    return embed
async def file(self, ctx, member: Union[discord.Member, int] = None, *, event_types: Event.comprehend_types = None):
    """Responds with a list of the user's files events on the server."""
    # A raw int means the member converter failed, so resolve the user globally.
    searched_by_raw_id = isinstance(member, int)
    if searched_by_raw_id:
        member_id = member
        try:
            member = await self.bot.fetch_user(member)
        except discord.NotFound:
            member = None
    else:
        member = member or ctx.author
        member_id = member.id
    with data.session() as session:
        query = session.query(Event).filter(Event.server_id == ctx.guild.id, Event.user_id == member_id)
        if event_types is not None:
            query = query.filter(Event.type.in_(event_types))
        events = query.order_by(Event.occurred_at).all()
        address = (
            'Twoja kartoteka' if member == ctx.author
            else f'Kartoteka {member if member else "usuniętego użytkownika"}'
        )
        if events:
            if event_types is None:
                event_types_description = ''
            else:
                event_types_description = ' podanego typu' if len(event_types) == 1 else ' podanych typów'
            event_number_form = word_number_form(len(events), 'zdarzenie', 'zdarzenia', 'zdarzeń')
            embed = self.bot.generate_embed(
                '📂', f'{address} zawiera {event_number_form}{event_types_description}',
                'Pokazuję 25 najnowszych.' if len(events) > 25 else '')
            # Show at most the 25 most recent events (Discord's embed field limit).
            for event in events[-25:]:
                details = text_snippet(event.details, Event.MAX_DETAILS_LENGTH) if event.details is not None else '—'
                embed.add_field(name=await event.get_presentation(self.bot), value=details, inline=False)
        elif searched_by_raw_id:
            embed = self.bot.generate_embed(
                '⚠️', 'Nie znaleziono na serwerze pasującego użytkownika')
        else:
            notice = 'jest pusta' if event_types is None else 'nie zawiera zdarzeń podanego typu'
            embed = self.bot.generate_embed('📂', f'{address} {notice}')
        await self.bot.send(ctx, embed=embed)
async def on_command_error(self, ctx: commands.Context, error: commands.CommandError):
    """Record the exit time and error message on the command's invocation row, if one exists."""
    with data.session(commit=True) as session:
        invocation = session.query(Invocation).get(ctx.message.id)
        if invocation is None:
            return
        invocation.exited_at = dt.datetime.now()
        # discord.py prefixes wrapped exceptions; strip that boilerplate before storing.
        error_description = str(error).replace('Command raised an exception: ', '')
        invocation.error = text_snippet(error_description, Invocation.MAX_ERROR_LENGTH)
async def urban_dictionary(self, ctx, *, query):
    """Returns Urban Dictionary word definition."""
    async with self.bot.session.get(self.API_URL, params={'term': query}) as response:
        if response.status != 200:
            embed = self.bot.generate_embed(
                '⚠️', 'Nie udało się połączyć z serwisem')
        else:
            response_data = await response.json()
            definitions = response_data['list']
            if not definitions:
                embed = self.bot.generate_embed(
                    '🙁', f'Brak wyników dla terminu "{query}"')
            else:
                # The first list entry is the top-voted definition.
                top_result = definitions[0]
                # written_on ends with a 'Z' suffix that fromisoformat can't parse.
                embed = self.bot.generate_embed(
                    None, top_result['word'], url=top_result['permalink'],
                    timestamp=dt.datetime.fromisoformat(top_result['written_on'][:-1]),
                )
                definition_presentation = self.expand_links(text_snippet(top_result['definition'], 500))
                embed.add_field(name='Definicja', value=definition_presentation, inline=False)
                example_presentation = self.expand_links(text_snippet(top_result['example'], 500))
                embed.add_field(name='Przykład', value=f'*{example_presentation}*', inline=False)
                embed.add_field(name='👍', value=f'{top_result["thumbs_up"]:n}')
                embed.add_field(name='👎', value=f'{top_result["thumbs_down"]:n}')
    embed.set_footer(text=self.FOOTER_TEXT)
    await self.bot.send(ctx, embed=embed)
async def roles(self, ctx):
    """Present the server's roles with their colors and member counts."""
    membership_counts = Counter(role for member in ctx.guild.members for role in member.roles)
    # Skip @everyone (index 0) and color roles (🎨 prefix); list highest role first.
    relevant_roles = [role for role in reversed(ctx.guild.roles[1:]) if not role.name.startswith('🎨')]
    role_lines = [
        f'{role.mention} – `{str(role.color).upper()}` – 👥 {membership_counts[role]}'
        for role in relevant_roles
    ]
    embed = self.bot.generate_embed(
        '🔰',
        f'Na serwerze {word_number_form(len(relevant_roles), "jest", "są", "jest", include_number=False)} '
        f'{word_number_form(len(relevant_roles), "rola", "role", "ról")}',
        text_snippet('\n'.join(role_lines), 2048) if role_lines else None)
    await self.bot.send(ctx, embed=embed)
async def search(self, language: str, title: str):
    """Returns the closest matching article or articles from Wikipedia."""
    opensearch_params = {
        'action': 'opensearch',
        'search': title,
        'limit': 25,
        'format': 'json'
    }
    search_result = self.SearchResult(language)
    try:
        # Step 1: OpenSearch resolves the query to an accurate page title.
        async with self.bot.session.get(
                self.link(language, 'w/api.php'), params=opensearch_params
        ) as opensearch_response:
            search_result.status = opensearch_response.status
            if opensearch_response.status == 200:
                opensearch_data = await opensearch_response.json()
                if opensearch_data[1]:
                    # Step 2: feed the resolved title to the REST summary endpoint.
                    resolved_title = opensearch_data[1][0]
                    async with self.bot.session.get(
                            self.link(language, f'api/rest_v1/page/summary/{resolved_title}')
                    ) as summary_response:
                        search_result.status = summary_response.status
                        if summary_response.status == 200:
                            article_data = await summary_response.json()
                            search_result.title = article_data['title']
                            search_result.url = article_data['content_urls']['desktop']['page']
                            if article_data['type'] == 'disambiguation':
                                # Build the option list from the remaining OpenSearch hits;
                                # index 1 holds titles and index 3 the matching URLs.
                                for index, option in enumerate(opensearch_data[1][1:], 1):
                                    option_url = opensearch_data[3][index].replace('(', '%28').replace(')', '%29')
                                    search_result.articles.append({
                                        'title': option,
                                        'summary': None,
                                        'url': option_url,
                                        'thumbnail_url': None
                                    })
                            elif article_data['type'] == 'standard':
                                thumbnail_url = (
                                    article_data['thumbnail']['source'] if 'thumbnail' in article_data else None
                                )
                                search_result.articles.append({
                                    'title': article_data['title'],
                                    'summary': text_snippet(article_data['extract'], 500),
                                    'url': article_data['content_urls']['desktop']['page'],
                                    'thumbnail_url': thumbnail_url
                                })
    except aiohttp.client_exceptions.ClientConnectorError:
        # Network failure: return the result object with its default status untouched.
        pass
    return search_result
async def generate_events_embed() -> discord.Embed:
    """Build the embed for the currently displayed page of the member's file.

    Closure: reads address, event_number_form, event_types_description, events,
    current_page_index and page_count from the enclosing scope.
    """
    embed = self.bot.generate_embed(
        '📂',
        f'{address} zawiera {event_number_form}{event_types_description}',
    )
    page_start = self.PAGE_FIELDS * current_page_index
    page_events = events[page_start:page_start + self.PAGE_FIELDS]
    if page_count > 1:
        embed.description = (
            f"Strona {current_page_index+1}. z {page_count}. Do {self.PAGE_FIELDS} zdarzeń na stronę."
        )
    # Number events globally across pages, newest getting the highest number.
    for event_offset, event in enumerate(page_events, page_start):
        details = text_snippet(event.details, Event.MAX_DETAILS_LENGTH) if event.details is not None else '—'
        embed.add_field(
            name=f"{len(events)-event_offset}. {await event.get_presentation(self.bot)}",
            value=details,
            inline=False,
        )
    return embed
def generate_tv_embed(self, result: dict) -> discord.Embed:
    """Build a Discord embed presenting a TMDb TV series search result.

    :param result: TV series details dict as returned by the TMDb TV endpoint
    :return: embed with title, ratings, air dates, networks, creators, genres,
        seasons and artwork
    """
    first_air_date = dt.date.fromisoformat(result['first_air_date']) if result['first_air_date'] else None
    last_air_date = dt.date.fromisoformat(result['last_air_date']) if result['last_air_date'] else None
    # Fix: the original accessed first_air_date.year (and last_air_date.year)
    # unconditionally, raising AttributeError when the API omitted either date.
    if first_air_date is None:
        air_years_range = '?'
    else:
        air_years_range = str(first_air_date.year)
        if result['in_production']:
            air_years_range += '–'
        elif last_air_date is not None and first_air_date.year != last_air_date.year:
            air_years_range += f'–{last_air_date.year}'
    embed = self.bot.generate_embed(
        '📺', f'{result["name"]} ({air_years_range})',
        url=f'https://www.themoviedb.org/tv/{result["id"]}'
    )
    if result.get('original_name') != result['name']:
        embed.add_field(name='Tytuł oryginalny', value=result['original_name'], inline=False)
    embed.add_field(name='Średnia ocen', value=f'{result["vote_average"]:n} / 10')
    embed.add_field(name='Głosów', value=f'{result["vote_count"]:n}')
    if first_air_date is not None:
        # NOTE: %-d (no zero padding) is glibc-specific and unavailable on Windows
        embed.add_field(
            name='Data premiery pierwszego odcinka',
            value=first_air_date.strftime('%-d %B %Y'),
            inline=False
        )
    if last_air_date is not None and last_air_date != first_air_date:
        # Fix: dropped a pointless f-string prefix on this constant field name.
        embed.add_field(
            name='Data premiery ostatniego odcinka',
            value=last_air_date.strftime('%-d %B %Y'),
            inline=False
        )
    if result['networks']:
        network_parts = (
            f'[{network["name"]}](https://www.themoviedb.org/network/{network["id"]})'
            for network in result['networks']
        )
        embed.add_field(name='Sieci' if len(result['networks']) > 1 else 'Sieć', value=', '.join(network_parts))
    if result['created_by']:
        author_parts = (
            f'[{author["name"]}](https://www.themoviedb.org/person/{author["id"]})'
            for author in result['created_by']
        )
        # TMDb gender code 1 means female — choose the Polish noun form accordingly.
        if len(result['created_by']) > 1:
            are_all_authors_female = all(author.get('gender') == 1 for author in result['created_by'])
            created_by_field_name = 'Autorki' if are_all_authors_female else 'Autorzy'
        else:
            created_by_field_name = 'Autorka' if result['created_by'][0].get('gender') == 1 else 'Autor'
        embed.add_field(name=created_by_field_name, value=', '.join(author_parts))
    if result['genres']:
        genre_parts = (
            f'[{genre["name"]}](https://www.themoviedb.org/genre/{genre["id"]})'
            for genre in result['genres']
        )
        embed.add_field(name='Gatunki' if len(result['genres']) > 1 else 'Gatunek', value=' / '.join(genre_parts))
    season_parts = []
    season_counter = 0
    for season in result['seasons']:
        if season['season_number'] < 1:
            continue  # skip specials (season 0)
        season_counter += 1
        if season['season_number'] > 10:
            continue  # only the first 10 regular seasons are listed individually
        if season['air_date']:
            air_date_presentation = dt.date.fromisoformat(season['air_date']).strftime('%-d %B %Y')
        else:
            air_date_presentation = 'TBD'
        season_parts.append(
            f'[{season["season_number"]}. '
            f'{word_number_form(season["episode_count"] or "?", "odcinek", "odcinki", "odcinków")} '
            f'(premiera {air_date_presentation})]'
            f'(https://www.themoviedb.org/tv/{result["id"]}/season/{season["season_number"]})'
        )
    if season_counter > 10:
        following_form = word_number_form(season_counter - 10, 'kolejny', 'kolejne', 'kolejnych')
        season_parts.append(
            f'[…i jeszcze {following_form}](https://www.themoviedb.org/tv/{result["id"]}/seasons)'
        )
    if season_parts:
        # Fix: guard against adding a field with an empty value (rejected by Discord)
        # when the API lists no regular seasons.
        embed.add_field(name='Sezony', value='\n'.join(season_parts), inline=False)
    if result['overview']:
        embed.add_field(name='Opis', value=text_snippet(result['overview'], 500), inline=False)
    if result.get('poster_path'):
        embed.set_thumbnail(url=f'https://image.tmdb.org/t/p/w342{result["poster_path"]}')
    if result.get('backdrop_path'):
        embed.set_image(url=f'https://image.tmdb.org/t/p/w780{result["backdrop_path"]}')
    return embed
def test_no_text(self):
    """An empty string is returned unchanged regardless of the limit."""
    self.assertEqual(text_snippet('', 16), '')
def test_no_cutting(self):
    """Text already within the limit is returned verbatim."""
    full_text = "This string is not too long, it doesn't need to be cut."
    self.assertEqual(text_snippet(full_text, 420), full_text)
def test_first_word_too_long(self):
    """If even the first word exceeds the limit, only the ellipsis remains."""
    snippet = text_snippet('This string is too long, it needs to be cut.', 4)
    self.assertEqual(snippet, '…')
def test_end_on_period(self):
    """A snippet ending on a period keeps the period before the ellipsis."""
    snippet = text_snippet('This string is too long. It needs to be cut.', 25)
    self.assertEqual(snippet, 'This string is too long.…')
def test_end_on_semicolon(self):
    """A snippet ending on a semicolon keeps the semicolon before the ellipsis."""
    snippet = text_snippet('This string is too long; it needs to be cut.', 25)
    self.assertEqual(snippet, 'This string is too long;…')
def test_limit_from_exact_to_5_over(self):
    """Limits from the exact snippet length up to 5 over all yield the same cut."""
    expected_cut_text = 'This string is too…'
    for limit in range(19, 25):
        with self.subTest(limit=limit):
            cut_text = text_snippet('This string is too long, it needs to be cut.', limit)
            self.assertEqual(cut_text, expected_cut_text)
def test_limit_1_under(self):
    """One character under the needed length cuts back to the previous word."""
    snippet = text_snippet('This string is too long, it needs to be cut.', 18)
    self.assertEqual(snippet, 'This string is…')