def convert_tags(bot, guild):
    """Converts legacy tag data for the given guild into the tags table.

    If no legacy tag data exists, the guild is assumed to have already
    been converted and the function bails out early. The legacy data is
    removed once the conversion completes.
    """
    if not data.get(bot, 'tags.py', 'tags', guild_id=guild.id):
        # logger.warn is a deprecated alias of logger.warning
        logger.warning("Guild %s (%s) already had tags converted", guild.name, guild.id)
        return
    tags = data.get(bot, 'tags.py', 'tags', guild_id=guild.id, default={})
    add_tag = bot.plugins['tags.py']._add_tag
    # Table columns:
    # key, value, length, volume, name, flags, author, hits,
    # created, last_used, last_used_by, complex, extra
    for key, tag in tags.items():
        to_insert = [
            key,  # key
            tag['value'],  # value
            tag['length'],  # length
            tag['volume'],  # volume
            tag['name'],  # name
            tag['flags'],  # flags
            int(tag['author']),  # author (legacy data stored IDs as strings)
            tag['hits'],  # hits
            int(tag['created']),  # created
            int(tag['last_used']),  # last_used
            None,  # last_used_by
            {},  # complex
            {}  # extra
        ]
        add_tag(bot, to_insert, guild.id)
    # Legacy data is no longer needed after conversion
    data.remove(bot, 'tags.py', 'tags', guild_id=guild.id, safe=True)
def db_backup(bot, safe=True):
    """Use the Docker setup to backup the database.

    Sends a pg_dump command to the database container and then reads
    back the command's exit code. If safe is True, failures are logged
    and swallowed instead of raising CBException.
    """
    if not bot.docker_mode:
        return
    try:
        # Typo fix: "Attemping" -> "Attempting"
        logger.debug("Attempting to connect to the database container...")
        if bot.dump_exclusions:
            exclusions = '-T "' + '" -T "'.join(bot.dump_exclusions) + '"'
        else:
            exclusions = ''
        command = (
            'pg_dump -U postgres -F c {} postgres > '
            '/external/data/db_dump'.format(exclusions))
        docker_send_command(command)
        logger.debug("Told database container to backup")
    except Exception as e:
        logger.warning("Failed to communicate with the database container: %s", e)
        if safe:
            return
        raise CBException("Failed to communicate with the database container.", e=e)

    # Read response code from database container
    try:
        return docker_receive_exit_code()
    except Exception as e:
        logger.warning("Failed to receive a response from the database container: %s", e)
        if safe:
            return
        raise CBException("Failed to receive a response from the database container.", e=e)
def convert_core(bot, guild):
    """Converts legacy 'base' data for the given guild into 'core' data.

    ID lists are converted from legacy strings to integers and obsolete
    keys are dropped. Bails out if the guild already has 'core' data.
    """
    if data.get(bot, 'core', None, guild_id=guild.id):
        logger.warning("Guild %s (%s) already had core converted", guild.name, guild.id)
        return
    base_data = data.get(bot, 'base', None, guild_id=guild.id, default={})
    if 'disabled' in base_data:
        # TODO: Iterate through toggled commands
        pass
    if 'blocked' in base_data:
        # Legacy data stored user IDs as strings
        base_data['blocked'] = [int(entry) for entry in base_data['blocked']]
    if 'muted_channels' in base_data:
        base_data['muted_channels'] = [int(entry) for entry in base_data['muted_channels']]
    if 'moderators' in base_data:
        # Moderator data is no longer carried over
        del base_data['moderators']
    if base_data:
        for key, value in base_data.items():
            data.add(bot, 'core', key, value, guild_id=guild.id)
    data.remove(bot, 'base', None, guild_id=guild.id)
def check_folders(bot):
    """Checks that all of the required folders are present at startup.

    Any missing directory is created under bot.path.
    """
    directories = ['audio', 'audio_cache', 'data', 'plugins', 'temp']
    for directory in directories:
        full_path = '{0}/{1}/'.format(bot.path, directory)
        if not os.path.exists(full_path):
            logger.warning("Directory {} does not exist. Creating...".format(directory))
            # exist_ok guards against a race between the check and creation
            os.makedirs(full_path, exist_ok=True)
def check_folders(bot):
    """Checks that all of the required folders are present at startup.

    Any missing directory is created under bot.path.
    """
    directories = ['audio', 'audio_cache', 'data', 'plugins', 'temp']
    for directory in directories:
        full_path = '{0}/{1}/'.format(bot.path, directory)
        if not os.path.exists(full_path):
            logger.warning("Directory {} does not exist. Creating...".format(directory))
            # exist_ok guards against a race between the check and creation
            os.makedirs(full_path, exist_ok=True)
async def bot_on_ready_boot(bot):
    """Constantly updates the schedule data.

    Runs only while the plugin is enabled. On failure it backs off for
    20 minutes before the regular 10-minute refresh delay.
    """
    use_plugin = configurations.get(bot, __name__, key='enable')
    while use_plugin:
        try:
            await _update_schedule(bot)
        except Exception as e:
            logger.warning("Failed to update the GDQ schedule. %s", e)
            await asyncio.sleep(20 * 60)
        await asyncio.sleep(10 * 60)
async def update_schedule_loop(bot):
    """Constantly updates the schedule data.

    Runs only while the plugin is enabled. On failure it backs off for
    10 minutes before the configured refresh delay.
    """
    use_plugin = configurations.get(bot, __name__, key='enable')
    while use_plugin:
        try:
            await _update_schedule(bot)
        except Exception as e:
            logger.warning("Failed to update the GDQ schedule. %s", e)
            await asyncio.sleep(10 * 60)
        # Refresh interval is read each iteration so config changes apply live
        await asyncio.sleep(configurations.get(bot, __name__, key='schedule_refresh_time'))
async def save_loop(self):
    """Runs the loop that periodically saves data (minutes).

    A missing or non-positive save_interval disables the loop.
    """
    try:
        interval = int(self.configurations['core']['save_interval'])
        interval = 0 if interval <= 0 else interval
    except (KeyError, TypeError, ValueError):
        # Narrowed from a bare except: only configuration lookup/parsing errors
        logger.warning("Saving interval not configured.")
        interval = 0
    while interval:
        await asyncio.sleep(interval * 60)
        self.save_data()
async def spam_clear_loop(self):
    """Loop to clear the spam dictionary periodically.

    A missing or non-positive command_limit_timeout disables the loop.
    """
    try:
        interval = self.configurations['core']['command_limit_timeout']
        interval = 0 if interval <= 0 else int(interval)
    except (KeyError, TypeError, ValueError):
        # Narrowed from a bare except: only configuration lookup/parsing errors
        logger.warning("Command limit timeout not configured.")
        interval = 0
    while interval:
        await asyncio.sleep(interval)
        self.spam_dictionary.clear()
async def spam_clear_loop(self):
    """Loop to clear the spam dictionary periodically.

    A missing or non-positive command_limit_timeout disables the loop.
    """
    try:
        interval = self.configurations['core']['command_limit_timeout']
        interval = 0 if interval <= 0 else int(interval)
    except (KeyError, TypeError, ValueError):
        # Narrowed from a bare except: only configuration lookup/parsing errors
        logger.warning("Command limit timeout not configured.")
        interval = 0
    while interval:
        await asyncio.sleep(interval)
        self.spam_dictionary.clear()
async def save_loop(self):
    """Runs the loop that periodically saves data (minutes).

    A missing or non-positive save_interval disables the loop.
    """
    try:
        interval = int(self.configurations['core']['save_interval'])
        interval = 0 if interval <= 0 else interval
    except (KeyError, TypeError, ValueError):
        # Narrowed from a bare except: only configuration lookup/parsing errors
        logger.warning("Saving interval not configured.")
        interval = 0
    while interval:
        await asyncio.sleep(interval * 60)
        self.save_data()
async def _get_advertisement_data(bot, guild, ignore_user_id=None):
    """Gets a dictionary of advertisements in the guild, or builds one if necessary.

    If ignore_user_id is provided, this will ignore the first message by that user.

    Raises:
        CBException: If the commission rules are not configured or the
            channel cannot be found.
    """
    rules = data.get(bot, __name__, 'rules', guild_id=guild.id)
    if not rules:
        raise CBException(
            "Commission channel rules are not configured on this server.")
    advertisement_data = data.get(
        bot, __name__, 'advertisements', guild_id=guild.id, volatile=True)
    if advertisement_data:
        return advertisement_data

    # No data found. Fetch it manually
    channel = data.get_channel(bot, rules['channel'], safe=True)
    if not channel:
        raise CBException("The commission channel was not found.")
    # TODO: Add permission checks for channel access and deleting messages
    advertisement_data = {}
    whitelist = data.get(bot, __name__, 'whitelist', guild_id=guild.id, default=[])
    async for message in channel.history(limit=100):
        author_id = message.author.id
        if (not message.author.bot
                and message.type is discord.MessageType.default
                and not message.pinned
                and author_id not in whitelist):
            if author_id in advertisement_data:
                # Only one advertisement per user is allowed
                logger.warning('Deleting previously undetected message %s', message.id)
                await message.delete()
            else:
                if ignore_user_id == author_id:
                    # Skip this user's most recent message exactly once
                    ignore_user_id = None
                else:
                    advertisement_data[author_id] = message
    data.add(bot, __name__, 'advertisements', advertisement_data,
             guild_id=guild.id, volatile=True)
    return advertisement_data
async def setup_globals(bot):
    """Sets up the DATA_CHANNEL global.

    Falls back to the core upload channel if the configured data channel
    cannot be found, then clears any leftover webhooks on it.
    """
    global DATA_CHANNEL
    DATA_CHANNEL = data.get_channel(bot, configurations.get(bot, __name__, key='data_channel'))
    if not DATA_CHANNEL:
        logger.warning("Failed to find the data channel. Defaulting to the upload channel.")
        DATA_CHANNEL = data.get_channel(bot, configurations.get(bot, 'core', key='upload_channel'))

    # Clear any webhooks (debug)
    webhooks = await DATA_CHANNEL.webhooks()
    for webhook in webhooks:
        logger.debug("Deleting webhook %s", webhook)
        await webhook.delete()
async def _clear_webhook(bot, webhook_id):
    """Clears the webhook from volatile data.

    Returns True if the tracked webhook was found and deleted,
    False otherwise.
    """
    logger.debug("Removing webhook: %s", webhook_id)
    utilities.remove_schedule_entries(bot, __name__, search=str(webhook_id))
    owner = data.remove(bot, __name__, 'owner', user_id=webhook_id, safe=True, volatile=True)
    data.remove(bot, __name__, 'stage', user_id=webhook_id, safe=True, volatile=True)
    if not owner:
        return False
    webhook = data.remove(bot, __name__, 'tracker', user_id=owner.id, volatile=True)
    try:
        await webhook.delete()
        return True
    except Exception as e:
        # Include the caught error in the log (it was previously discarded)
        logger.warning("Failed to delete webhook after data checking failure: %s", e)
        return False
def safe_exit():
    """Logs the bot out and cancels all pending tasks before shutdown."""
    loop = asyncio.get_event_loop()
    try:  # From discord.py client.run
        loop.run_until_complete(bot.logout())
        pending = asyncio.Task.all_tasks()
        gathered = asyncio.gather(*pending)
    except Exception as e:
        logger.error("Failed to log out. %s", e)
    try:
        gathered.cancel()
        loop.run_until_complete(gathered)
        gathered.exception()
    except:  # noqa: E722 -- deliberately broad: CancelledError derives from BaseException
        pass
    logger.warning("Bot disconnected. Shutting down...")
    bot.shutdown()  # Calls sys.exit
def safe_exit():
    """Logs the bot out and cancels all pending tasks before shutdown."""
    loop = asyncio.get_event_loop()
    try:  # From discord.py client.run
        loop.run_until_complete(bot.logout())
        pending = asyncio.Task.all_tasks()
        gathered = asyncio.gather(*pending)
    except Exception as e:
        logger.error("Failed to log out. %s", e)
    try:
        gathered.cancel()
        loop.run_until_complete(gathered)
        gathered.exception()
    except:  # noqa: E722 -- deliberately broad: CancelledError derives from BaseException
        pass
    logger.warning("Bot disconnected. Shutting down...")
    bot.shutdown()  # Calls sys.exit
async def _cycle_timer(bot, scheduled_time, payload, search, destination, late, info, id, *args):
    """Scheduled task that cycles txyz text (or stats on the main bot).

    Always re-schedules itself first so a failure in the body cannot
    break the cycle.
    """
    new_time = time.time() + 60 * 60 * UPDATE_HOURS
    utilities.schedule(bot, __name__, new_time, _cycle_timer, search='txyz_cycler')
    if bot.user.id == MAIN_BOT:
        # Main bot updates the stats voice channel name instead
        txyz_guild = bot.get_guild(TXYZ_GUILD)
        try:
            selected_channel = txyz_guild.voice_channels[2]
            await selected_channel.edit(name='_{}|{}'.format(
                len(bot.guilds), sum(1 for it in bot.get_all_members())))
        except Exception as e:
            logger.warning("Failed to update guild count: %s", e)
    else:
        for text_type in TextTypes:
            try:
                await _cycle_specific(bot, text_type)
            except Exception as e:
                logger.warning("Failed to automatically cycle txyz text: %s", e)
async def check_webhook_messages(bot, message):
    """Intercepts webhook messages to the data channel.

    There are 3 separate stages:
    0 - Starting stage (webhook exists)
    1 - User has submitted the file, edit webhook name with return code
    2 - User acknowledges result, requests that the webhook be deleted
    """
    if message.channel != DATA_CHANNEL:
        return

    # Check for valid webhook messages
    webhook_id = message.author.id
    if webhook_id not in DATA_CHANNEL_WEBHOOK_IDS:
        return

    stage = data.get(bot, __name__, 'stage', user_id=webhook_id, volatile=True)
    if stage is not None:
        if message.content == '1' and stage == 0:
            # Progress to stage 1
            owner = data.get(bot, __name__, 'owner', user_id=webhook_id, volatile=True)
            webhook = data.get(bot, __name__, 'tracker', user_id=owner.id, volatile=True)
            result = await _process_data(bot, owner, message.attachments[0].url)
            # Parse result
            data.add(bot, __name__, 'stage', 1, user_id=webhook_id, volatile=True)
            await webhook.edit(name='ok' if result == 0 else 'err:{}'.format(result))
        elif message.content == '2' and stage == 1:
            # Progress to stage 2
            await _clear_webhook(bot, webhook_id)
        else:
            # Invalid state progression detected (likely duplicate)
            logger.warning("Invalid state progression detected. Message content: %s",
                           message.content)
            await _clear_webhook(bot, webhook_id)
            # TODO: Consider notifying user?
    else:  # Desync
        logger.warning("Webhook state desynchronization detected.")
        await _clear_webhook(bot, webhook_id)
        webhooks = await DATA_CHANNEL.webhooks()
        for webhook in webhooks:  # In case the webhook ID was invalid
            if webhook.id == webhook_id:
                await webhook.delete()
                break
def split_parameters(parameters, include_quotes=False, quote_list=False):
    """Splits up the given parameters by spaces and quotes.

    Whitespace runs are preserved as their own entries in the result.

    Keyword arguments:
    include_quotes -- The quotes attached to the parameters will be included.
    quote_list -- Gets a list of indices that represent parameters that were
        grouped because of quotes.
    """
    if not parameters:
        if quote_list:
            return ([], [])
        else:
            return []
    # Capturing group keeps the whitespace separators in the split result
    split = re.split('( +)', parameters)
    quoted_indices = []
    joined_split = []
    add_start = -1  # Index of the entry that opened a quote
    add_end = -1  # One past the entry that closed the quote
    for index, entry in enumerate(split):
        if entry.startswith('"'):
            add_start = index
        if (entry.endswith('"') and not entry.endswith('\\"')
                and len(entry) > 1 and add_start != -1):
            add_end = index + 1

        if add_start == -1:  # Add entry normally
            joined_split.append(entry)
        elif add_end != -1:  # Join entries in quotes
            quoted_indices.append(len(joined_split))
            combined = ''.join(split[add_start:add_end])
            if include_quotes:
                joined_split.append(combined)
            else:
                joined_split.append(combined[1:-1])
            add_start = -1
            add_end = -1
    if add_start != -1:  # Unclosed quote
        logger.warning("Detected an unclosed quote: " + split[add_start])
        joined_split.append(''.join(split[add_start:index + 1]))
    if quote_list:
        return (joined_split, quoted_indices)
    else:
        return joined_split
def split_parameters(parameters, include_quotes=False, quote_list=False):
    """Splits up the given parameters by spaces and quotes.

    Whitespace runs are preserved as their own entries in the result.

    Keyword arguments:
    include_quotes -- The quotes attached to the parameters will be included.
    quote_list -- Gets a list of indices that represent parameters that were
        grouped because of quotes.
    """
    if not parameters:
        if quote_list:
            return ([], [])
        else:
            return []
    # Capturing group keeps the whitespace separators in the split result
    split = re.split('( +)', parameters)
    quoted_indices = []
    joined_split = []
    add_start = -1  # Index of the entry that opened a quote
    add_end = -1  # One past the entry that closed the quote
    for index, entry in enumerate(split):
        if entry.startswith('"'):
            add_start = index
        if (entry.endswith('"') and not entry.endswith('\\"')
                and len(entry) > 1 and add_start != -1):
            add_end = index + 1

        if add_start == -1:  # Add entry normally
            joined_split.append(entry)
        elif add_end != -1:  # Join entries in quotes
            quoted_indices.append(len(joined_split))
            combined = ''.join(split[add_start:add_end])
            if include_quotes:
                joined_split.append(combined)
            else:
                joined_split.append(combined[1:-1])
            add_start = -1
            add_end = -1
    if add_start != -1:  # Unclosed quote
        logger.warning("Detected an unclosed quote: " + split[add_start])
        joined_split.append(''.join(split[add_start:index + 1]))
    if quote_list:
        return (joined_split, quoted_indices)
    else:
        return joined_split
def db_backup(bot, safe=True):
    """Use the Docker setup to backup the database.

    Connects to the database container over a raw socket and sends a
    pg_dump command. If safe is True, failures are logged and swallowed
    instead of raising CBException.
    """
    try:
        # Typo fix: "Attemping" -> "Attempting"
        logger.debug("Attempting to connect to the database container...")
        command = 'pg_dump -U postgres postgres > /external/data/db_dump.txt'
        host = 'db'
        port = 2345
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.1)
        s.connect((host, port))
        s.send(bytes(command, 'ascii'))
        s.close()
        time.sleep(1)  # Give the container a moment to pick up the command
        logger.debug("Told database container to backup")
    except Exception as e:
        logger.warning("Failed to communicate with the database container: %s", e)
        if safe:
            return
        raise CBException("Failed to communicate with the database container.", e=e)
async def check_webhook_messages(bot, message):
    """Reads webhook messages and calls tags if necessary.

    Recognized commands are '[Retrieve] <tag>' and '[Stop audio]'.
    All intercepted webhook messages are deleted after a short delay.
    """
    if message.author.id in WEBHOOK_SET:
        session_data = data.get(bot, __name__, 'data', guild_id=message.guild.id)
        voice_channel = data.get_channel(bot, session_data['voice_channel'], guild=message.guild)

        # Ignore if nobody is in the channel
        if not [it for it in voice_channel.members if not it.bot]:
            pass

        # Retrieve tag
        elif message.content.startswith('[Retrieve]'):
            tag_name = message.content[10:].strip()
            try:
                tag = TAG_CONVERTER(bot, message, tag_name, channel_bypass=voice_channel)
            except BotException as e:
                logger.warning("Failed to retrieve tag: %s", e)
            else:
                tags_plugin = bot.plugins['tags.py']
                url = random.choice(tag.value)
                try:
                    await tags_plugin._play_sound_tag(bot, tag, url, voice_channel, delay=-1)
                except BotException as e:
                    logger.warning("Failed to play tag: %s", e)
                else:
                    tags_plugin._update_hits(bot, tag.key, message.author.id, message.guild.id)

        # Stop audio
        elif message.content == '[Stop audio]':
            voice_client = message.guild.voice_client
            if (voice_client and voice_client.channel == voice_channel
                    and voice_client.is_playing()):
                voice_client.stop()

        # Always remove messages
        await asyncio.sleep(3)
        try:
            await message.delete()
        except Exception:
            # Best-effort delete: the message may already be gone
            pass
def load_data(bot):
    """Loads the data from the data directory.

    Missing or unreadable files are logged; guild data defaults to an
    empty dict in that case.
    """
    logger.debug("Loading data...")
    directory = bot.path + '/data/'

    for guild in bot.guilds:
        guild_id = str(guild.id)
        try:
            with open(directory + guild_id + '.json', 'r') as guild_file:
                bot.data[guild_id] = json.load(guild_file)
        except Exception:
            # Narrowed from a bare except (covers missing file and bad JSON)
            logger.warning("Data for guild {} not found.".format(guild_id))
            bot.data[guild_id] = {}

    try:
        with open(directory + 'global_plugins.json', 'r') as plugins_file:
            bot.data['global_plugins'] = json.load(plugins_file)
    except Exception:
        logger.warning("Global data for plugins not found.")

    try:
        with open(directory + 'global_users.json', 'r') as users_file:
            bot.data['global_users'] = json.load(users_file)
    except Exception:
        logger.warning("Global data for users not found.")

    logger.debug("Data loaded.")
async def _schedule_timer(bot, entry, delay):
    """Sleeps until the scheduled entry is due, then executes it.

    Bails out if the scheduler task was replaced while sleeping, and
    restarts the scheduler if it woke up before the entry's time.
    """
    task_comparison = bot.schedule_timer
    await asyncio.sleep(0.5)
    logger.debug("Scheduler sleeping for %s seconds...", delay)
    await asyncio.sleep(delay)
    if task_comparison is not bot.schedule_timer:
        logger.debug("_schedule_timer was not cancelled! Cancelling this scheduler...")
        return
    if int(time.time() + 1) < entry.time:
        logger.warning("_schedule_timer was about to delete the entry early! Restarting loop...")
        asyncio.ensure_future(_start_scheduler(bot))
        return
    # Initialize so 'deleted' is defined even if db_delete raises
    # (previously this was a NameError on the failure path)
    deleted = False
    try:
        deleted = data.db_delete(
            bot, 'schedule', where_arg='id=%s', input_args=[entry.id], safe=False)
    except Exception as e:
        logger.warning("_schedule_timer failed to delete a schedule entry. %s", e)
    if deleted:
        try:
            logger.debug("_schedule_timer done sleeping for %s seconds!", delay)
            function = getattr(bot.plugins[entry.plugin], entry.function)
            late = delay < -60  # Flag entries that fired over a minute late
            asyncio.ensure_future(function(
                bot, entry.time, entry.payload, entry.search,
                entry.destination, late, entry.info, entry.id))
        except Exception as e:
            logger.warning("Failed to execute scheduled function: %s", e)
    asyncio.ensure_future(_start_scheduler(bot))
def load_data(bot):
    """Loads the data from the data directory.

    Missing or unreadable files are logged; guild data defaults to an
    empty dict in that case.
    """
    logger.debug("Loading data...")
    directory = bot.path + '/data/'

    for guild in bot.guilds:
        guild_id = str(guild.id)
        try:
            with open(directory + guild_id + '.json', 'r') as guild_file:
                bot.data[guild_id] = json.load(guild_file)
        except Exception:
            # Narrowed from a bare except (covers missing file and bad JSON)
            logger.warning("Data for guild {} not found.".format(guild_id))
            bot.data[guild_id] = {}

    try:
        with open(directory + 'global_plugins.json', 'r') as plugins_file:
            bot.data['global_plugins'] = json.load(plugins_file)
    except Exception:
        logger.warning("Global data for plugins not found.")

    try:
        with open(directory + 'global_users.json', 'r') as users_file:
            bot.data['global_users'] = json.load(users_file)
    except Exception:
        logger.warning("Global data for users not found.")

    logger.debug("Data loaded.")
async def _cycle_timer(bot, scheduled_time, payload, search, destination, late, info, id, *args):
    """Scheduled task that cycles txyz text (or stats on the main bot).

    Always re-schedules itself first so a failure in the body cannot
    break the cycle.
    """
    new_time = time.time() + 60 * 60 * UPDATE_HOURS
    utilities.schedule(bot, __name__, new_time, _cycle_timer, search='txyz_cycler')
    if bot.user.id == MAIN_BOT:
        # Main bot updates the stats voice channel name instead
        txyz_guild = bot.get_guild(TXYZ_GUILD)
        try:
            selected_channel = txyz_guild.voice_channels[2]
            await selected_channel.edit(name='_{}|{}'.format(
                len(bot.guilds), sum(1 for it in bot.get_all_members())))
        except Exception as e:
            logger.warning("Failed to update guild count: %s", e)
    else:
        for text_type in TextTypes:
            try:
                await _cycle_specific(bot, text_type)
            except Exception as e:
                logger.warning("Failed to automatically cycle txyz text: %s", e)
async def backup_loop(self):
    """Runs the loop that periodically backs up data (hours).

    The backup archive is uploaded to the configured debug channel.
    A missing or non-positive backup_interval stops the loop entirely.
    """
    try:
        interval = int(self.configurations['core']['backup_interval'])
        interval = 0 if interval <= 0 else interval * 3600
    except (KeyError, TypeError, ValueError):
        # Narrowed from a bare except: only configuration lookup/parsing errors
        logger.warning("Backup interval not configured - backup loop stopped.")
        return
    channel_id = self.configurations['core']['debug_channel']
    debug_channel = self.get_channel(channel_id)
    while not debug_channel:
        # The channel cache may not be populated yet right after startup
        logger.warning("Debug channel not found. Trying again in 60 seconds...")
        await asyncio.sleep(60)
        debug_channel = self.get_channel(channel_id)
    while interval:
        utilities.make_backup(self)
        discord_file = discord.File('{}/temp/backup1.zip'.format(self.path))
        try:
            await debug_channel.send(file=discord_file)
        except Exception as e:
            # A failed upload must not kill the backup loop
            logger.error("Failed to upload backup file! %s", e)
        await asyncio.sleep(interval)
async def backup_loop(self):
    """Runs the loop that periodically backs up data (hours).

    The backup archive is uploaded to the configured debug channel.
    A missing or non-positive backup_interval stops the loop entirely.
    """
    try:
        interval = int(self.configurations['core']['backup_interval'])
        interval = 0 if interval <= 0 else interval * 3600
    except (KeyError, TypeError, ValueError):
        # Narrowed from a bare except: only configuration lookup/parsing errors
        logger.warning("Backup interval not configured - backup loop stopped.")
        return
    channel_id = self.configurations['core']['debug_channel']
    debug_channel = self.get_channel(channel_id)
    while not debug_channel:
        # The channel cache may not be populated yet right after startup
        logger.warning("Debug channel not found. Trying again in 60 seconds...")
        await asyncio.sleep(60)
        debug_channel = self.get_channel(channel_id)
    while interval:
        utilities.make_backup(self)
        discord_file = discord.File('{}/temp/backup1.zip'.format(self.path))
        try:
            await debug_channel.send(file=discord_file)
        except Exception as e:
            # Guard added: an unguarded failed upload would kill the loop
            logger.error("Failed to upload backup file! %s", e)
        await asyncio.sleep(interval)
async def _delete_session(bot, guild):
    """Deletes the session for the given guild.

    Removes the session data, its webhook, and stops audio playback if
    the bot is connected to the session's voice channel.

    Raises:
        CBException: If no session exists for the guild.
    """
    session_data = data.remove(bot, __name__, 'data', guild_id=guild.id, safe=True)
    if not session_data:
        raise CBException("Session does not exist.")
    channel_id, webhook_id = session_data['channel'], session_data['webhook']
    channel = data.get_channel(bot, channel_id, safe=True)
    webhooks = await channel.webhooks()
    for webhook in webhooks:
        if webhook.id == webhook_id:
            await webhook.delete()
            break
    else:
        # for/else: only reached when no matching webhook was found
        logger.warning('Webhook to delete (%s) not found!', webhook_id)
    try:
        WEBHOOK_SET.remove(webhook_id)
    except KeyError:
        logger.warning("Webhook not found in WEBHOOK_SET")
    data.list_data_remove(bot, __name__, 'webhooks', value=webhook_id, safe=True)
    if guild.voice_client and guild.voice_client.channel.id == session_data['voice_channel']:
        await utilities.stop_audio(bot, guild)
def clean_data(bot):
    """Removes data that is no longer needed.

    Guild entries with no matching guild are dropped; everything else
    is recursively cleaned, then the data is force-saved.
    """
    plugins = list(bot.plugins.keys())
    guilds = list(str(guild.id) for guild in bot.guilds)
    data_items = list(bot.data.items())
    for key, value in data_items:
        if key[0].isdigit():  # Server
            if key not in guilds:  # Server cannot be found, remove it
                logger.warning("Removing guild {}".format(key))
                del bot.data[key]
            else:  # Recursively clean the data
                # NOTE(review): key is a str here; confirm bot.get_guild accepts
                # string IDs in this discord.py version (newer versions expect int)
                guild = bot.get_guild(key)
                channels = [str(channel.id) for channel in guild.channels]
                users = [str(member.id) for member in guild.members]
                clean_location(bot, plugins, channels, users, bot.data[key])
        else:  # Global plugins or users
            clean_location(bot, plugins, [], [], bot.data[key])
    save_data(bot, force=True)
def clean_data(bot):
    """Removes data that is no longer needed.

    Guild entries with no matching guild are dropped; everything else
    is recursively cleaned, then the data is force-saved.
    """
    plugins = list(bot.plugins.keys())
    guilds = list(str(guild.id) for guild in bot.guilds)
    data_items = list(bot.data.items())
    for key, value in data_items:
        if key[0].isdigit():  # Server
            if key not in guilds:  # Server cannot be found, remove it
                logger.warning("Removing guild {}".format(key))
                del bot.data[key]
            else:  # Recursively clean the data
                # NOTE(review): key is a str here; confirm bot.get_guild accepts
                # string IDs in this discord.py version (newer versions expect int)
                guild = bot.get_guild(key)
                channels = [str(channel.id) for channel in guild.channels]
                users = [str(member.id) for member in guild.members]
                clean_location(bot, plugins, channels, users, bot.data[key])
        else:  # Global plugins or users
            clean_location(bot, plugins, [], [], bot.data[key])
    save_data(bot, force=True)
async def _schedule_timer(bot, raw_entry, delay):
    """Sleeps for the given delay, then executes the earliest schedule entry.

    Bails out if the scheduler task was replaced while sleeping. Errors
    during deletion or execution are logged and re-raised.
    """
    task_comparison = bot.schedule_timer
    await asyncio.sleep(0.5)
    logger.debug("_schedule_timer sleeping for %s seconds...", delay)
    await asyncio.sleep(delay)
    if task_comparison is not bot.schedule_timer:
        logger.debug(
            "_schedule_timer was not cancelled! Cancelling this scheduler...")
        return
    try:
        # Remove the entry with the smallest scheduled time
        cursor = data.db_select(bot, select_arg='min(time)', from_arg='schedule')
        minimum_time = cursor.fetchone()[0]
        data.db_delete(bot, 'schedule', where_arg='time=%s',
                       input_args=[minimum_time], safe=False)
    except Exception as e:
        logger.warning("_schedule_timer failed to delete schedule entry. %s", e)
        raise  # Bare raise preserves the original traceback
    try:
        logger.debug("_schedule_timer done sleeping for %s seconds!", delay)
        scheduled_time, plugin, function, payload, search, destination, info = raw_entry
        if payload:
            payload = json.loads(payload)
        plugin = bot.plugins[plugin]
        function = getattr(plugin, function)
        late = delay < -60  # Flag entries that fired over a minute late
        asyncio.ensure_future(
            function(bot, scheduled_time, payload, search, destination, late))
    except Exception as e:
        logger.warning("Failed to execute scheduled function: %s", e)
        raise
    asyncio.ensure_future(_start_scheduler(bot))
def convert_core(bot, guild):
    """Converts legacy 'base' data for the given guild into 'core' data.

    ID lists are converted from legacy strings to integers and obsolete
    keys are dropped. Bails out if the guild already has 'core' data.
    """
    if data.get(bot, 'core', None, guild_id=guild.id):
        logger.warning("Guild %s (%s) already had core converted", guild.name, guild.id)
        return
    base_data = data.get(bot, 'base', None, guild_id=guild.id, default={})
    if 'disabled' in base_data:
        # TODO: Iterate through toggled commands
        pass
    if 'blocked' in base_data:
        # Legacy data stored user IDs as strings
        base_data['blocked'] = [int(entry) for entry in base_data['blocked']]
    if 'muted_channels' in base_data:
        base_data['muted_channels'] = [int(entry) for entry in base_data['muted_channels']]
    if 'moderators' in base_data:
        # Moderator data is no longer carried over
        del base_data['moderators']
    if base_data:
        for key, value in base_data.items():
            data.add(bot, 'core', key, value, guild_id=guild.id)
    data.remove(bot, 'base', None, guild_id=guild.id)
async def check_commission_advertisement(bot, message):
    """Checks new messages in the commissions channel.

    Enforces the advertisement cooldown: a message posted before the
    cooldown has passed is deleted and the author notified. Otherwise
    the author's previous advertisement is replaced with the new one
    and an eligibility notification is scheduled.
    """
    if isinstance(message.channel, discord.abc.PrivateChannel):
        return
    guild_data = data.get(bot, __name__, None, guild_id=message.guild.id, default={})
    if (not guild_data.get('rules')
            or message.channel.id != guild_data['rules']['channel']
            or message.author.id in guild_data.get('whitelist', [])
            or message.author.bot):
        return
    cooldown = guild_data['rules']['cooldown']
    advertisement_data = await _get_advertisement_data(
        bot, message.guild, ignore_user_id=message.author.id)
    deleted_persistence = data.get(
        bot, __name__, 'recently_deleted', guild_id=message.guild.id, default={})
    time_delta = cooldown  # Assume cooldown has been passed
    author_id = message.author.id

    # Check the last advertisement's creation time (if it exists)
    if str(author_id) in deleted_persistence:
        time_delta = time.time() - deleted_persistence[str(author_id)]
    if author_id in advertisement_data:
        last_message = advertisement_data[author_id]
        time_delta = time.time() - last_message.created_at.replace(
            tzinfo=tz.utc).timestamp()

    # Not enough time has passed
    if time_delta < cooldown:
        # content_backup = message.content
        # TODO: Consider sending the user a content backup?
        await message.delete()
        wait_for = utilities.get_time_string(cooldown - time_delta, text=True, full=True)
        warning = ('You cannot send another advertisement at this time. '
                   'You must wait {}.').format(wait_for)
        await message.author.send(embed=discord.Embed(
            colour=discord.Colour(0xffcc4d), description=warning))
        return

    # Enough time has passed - delete the last message
    elif author_id in advertisement_data:
        try:
            await advertisement_data[author_id].delete()
        except Exception:
            # User deleted their advertisement already
            logger.warning("Failed to delete the last advertisement.")

    # Schedule a notification for when a new advertisement post is eligible
    utilities.schedule(
        bot, __name__, time.time() + cooldown, _notify_advertisement_available,
        search='c_ad_{}'.format(message.guild.id),
        destination='u{}'.format(author_id),
        info='Commission advertisement post eligibility.')
    advertisement_data[author_id] = message
    notification = (
        'Hello! Your advertisement post in the commissions channel has been recorded. '
        '**Please remember that there can only be one message per advertisement**.\n\n'
        'If you want to revise your advertisement [(like adding an image)]'
        '(https://imgur.com/a/qXB2v "Click here for a guide on how to add an image '
        'with a message"), you can delete your advertisement and submit it again, '
        'although this only works within the next 10 minutes and if nobody else has '
        'posted another advertisement after yours.\n\nYou are eligible to post a '
        'new advertisement after the waiting period of {}. When you post a new '
        'advertisement, your previous one will be automatically deleted.\n\n'
        'For convenience, you will be notified when you are eligible to make '
        'a new post.').format(
            utilities.get_time_string(cooldown, text=True, full=True))
    await message.author.send(embed=discord.Embed(
        colour=discord.Colour(0x77b255), description=notification))
def start(start_file=None):
    """Entry point: loads configuration, builds the bot, and runs it.

    If start_file is provided, paths are resolved relative to it;
    otherwise the Docker setup path (/external) is used.
    """
    if start_file:
        path = os.path.split(os.path.realpath(start_file))[0]
        logging.debug("Setting directory to " + path)
        docker_mode = False
    else:  # Use Docker setup
        path = '/external'
        logging.info("Bot running in Docker mode.")
        logging.debug("Using Docker setup path, " + path)
        docker_mode = True

    try:
        config_file_location = path + '/config/core-config.yaml'
        with open(config_file_location, 'rb') as config_file:
            config = yaml.safe_load(config_file)
        selfbot_mode, token, debug = config['selfbot_mode'], config['token'], config['debug']
    except Exception as e:
        logging.error("Could not determine token /or selfbot mode.")
        raise e

    if selfbot_mode is True:  # Explicit, for YAML 1.2 vs 1.1
        client_type = discord.Client
        logging.debug("Using standard client (selfbot enabled).")
    else:
        client_type = discord.AutoShardedClient
        logging.debug("Using autosharded client (selfbot disabled).")

    # Set debug logs
    if debug is True:
        log_file = '{}/temp/debug_logs.txt'.format(path)
        if os.path.isfile(log_file):
            shutil.copy2(log_file, '{}/temp/last_debug_logs.txt'.format(path))
        file_handler = RotatingFileHandler(log_file, maxBytes=5000000, backupCount=5)
        file_handler.set_name('jb_debug_file')
        stream_handler = logging.StreamHandler()
        stream_handler.set_name('jb_debug_stream')
        logging.basicConfig(level=logging.DEBUG, handlers=[file_handler, stream_handler])

    # Set regular logs
    else:
        log_file = '{}/temp/logs.txt'.format(path)
        file_handler = RotatingFileHandler(log_file, maxBytes=5000000, backupCount=5)
        file_handler.setFormatter(logging.Formatter(
            '[%(filename)s] %(asctime)s %(levelname)s: %(message)s'))
        file_handler.setLevel(logging.DEBUG)
        file_handler.set_name('jb_log_file')
        logger.addHandler(file_handler)
        logger.setLevel(logging.DEBUG)

    def safe_exit():
        # Logs the bot out and cancels pending tasks before shutdown
        loop = asyncio.get_event_loop()
        try:  # From discord.py client.run
            loop.run_until_complete(bot.logout())
            pending = asyncio.Task.all_tasks()
            gathered = asyncio.gather(*pending)
        except Exception as e:
            logger.error("Failed to log out. %s", e)
        try:
            gathered.cancel()
            loop.run_until_complete(gathered)
            gathered.exception()
        except:  # noqa: E722 -- cancellation errors derive from BaseException
            pass
        logger.warning("Bot disconnected. Shutting down...")
        bot.shutdown()  # Calls sys.exit

    def exception_handler(loop, context):
        # Writes uncaught exceptions to a file and shuts down if disconnected
        e = context.get('exception')
        if e and e.__traceback__:
            traceback_text = ''.join(traceback.format_tb(e.__traceback__))
        else:
            traceback_text = traceback.format_exc()
        if not traceback_text:
            traceback_text = '(No traceback available)'
        error_message = '{}\n{}'.format(e, traceback_text)
        logger.error("An uncaught exception occurred.\n%s", error_message)
        with open(path + '/temp/error.txt', 'w') as error_file:
            error_file.write(error_message)
        logger.error("Error file written.")
        if bot.is_closed():
            safe_exit()

    loop = asyncio.get_event_loop()
    bot = get_new_bot(client_type, path, debug, docker_mode)
    start_task = bot.start(token, bot=not selfbot_mode)
    loop.set_exception_handler(exception_handler)
    try:
        loop.run_until_complete(start_task)
    except KeyboardInterrupt:
        logger.warning("Interrupted!")
        safe_exit()
def start(start_file=None, debug=False):
    """Entry point: loads configuration, builds the bot, and runs it.

    If start_file is provided, paths are resolved relative to it;
    otherwise the Docker setup path (/external) is used. The debug flag
    enables verbose file/stream logging.
    """
    if start_file:
        path = os.path.split(os.path.realpath(start_file))[0]
        logger.debug("Setting directory to " + path)
    else:  # Use Docker setup
        path = '/external'
        logger.info("Bot running in Docker mode.")
        logger.debug("Using Docker setup path, " + path)

    try:
        config_file_location = path + '/config/core-config.yaml'
        with open(config_file_location, 'rb') as config_file:
            # safe_load avoids arbitrary object construction from the config
            # (yaml.load without a Loader is unsafe and deprecated)
            config = yaml.safe_load(config_file)
        selfbot_mode, token = config['selfbot_mode'], config['token']
    except Exception as e:
        logger.error("Could not determine token /or selfbot mode.")
        raise e

    if selfbot_mode:
        client_type = discord.Client
        logger.debug("Using standard client (selfbot enabled).")
    else:
        client_type = discord.AutoShardedClient
        logger.debug("Using autosharded client (selfbot disabled).")

    if debug:
        log_file = '{}/temp/logs.txt'.format(path)
        if os.path.isfile(log_file):
            shutil.copy2(log_file, '{}/temp/last_logs.txt'.format(path))
        logging.basicConfig(level=logging.DEBUG, handlers=[
            RotatingFileHandler(log_file, maxBytes=1000000, backupCount=3),
            logging.StreamHandler()
        ])

    def safe_exit():
        # Logs the bot out and cancels pending tasks before shutdown
        loop = asyncio.get_event_loop()
        try:  # From discord.py client.run
            loop.run_until_complete(bot.logout())
            pending = asyncio.Task.all_tasks()
            gathered = asyncio.gather(*pending)
        except Exception as e:
            logger.error("Failed to log out. %s", e)
        try:
            gathered.cancel()
            loop.run_until_complete(gathered)
            gathered.exception()
        except:  # noqa: E722 -- cancellation errors derive from BaseException
            pass
        logger.warning("Bot disconnected. Shutting down...")
        bot.shutdown()  # Calls sys.exit

    def exception_handler(loop, context):
        # Writes uncaught exceptions to a file and shuts down if disconnected
        e = context.get('exception')
        if e and e.__traceback__:
            traceback_text = ''.join(traceback.format_tb(e.__traceback__))
        else:
            traceback_text = traceback.format_exc()
        if not traceback_text:
            traceback_text = '(No traceback available)'
        error_message = '{}\n{}'.format(e, traceback_text)
        logger.error("An uncaught exception occurred.\n" + error_message)
        with open(path + '/temp/error.txt', 'w') as error_file:
            error_file.write(error_message)
        logger.error("Error file written.")
        if bot.is_closed():
            safe_exit()

    loop = asyncio.get_event_loop()
    bot = get_new_bot(client_type, path, debug)
    start_task = bot.start(token, bot=not selfbot_mode)
    loop.set_exception_handler(exception_handler)
    try:
        loop.run_until_complete(start_task)
    except KeyboardInterrupt:
        logger.warning("Interrupted!")
        safe_exit()