class BotMaintainance:
    """Maintenance tasks run against incoming Discord messages.

    Currently covers profanity censorship for channels flagged as
    censored in the bot database.
    """

    utils: BotUtils
    database: BotDatabase
    profanity_filter: ProfanityFilter

    def __init__(self, database: BotDatabase, utils: BotUtils):
        self.utils = utils
        self.database = database
        # Censored characters are rendered with "^" instead of the default "*".
        self.profanity_filter = ProfanityFilter()
        self.profanity_filter.set_censor("^")

    async def handle_censor_channel(self, message_context: discord.Message) -> None:
        """Re-post a censored copy of a profane message in censored channels."""
        if not await self.database.censored_channels.is_censored(
                message_context.channel.id):
            return
        censored_message = self.profanity_filter.censor(message_context.content)
        if censored_message == message_context.content:
            return
        # Post the censored text first, then remove the offending original.
        await self.utils.messages.replace_message(message_context, censored_message)
        await self.utils.messages.delete_message(message_context)

    async def handle_maintainance(self, message_context: discord.Message) -> None:
        """Entry point: run every maintenance task for one message."""
        await self.handle_censor_channel(message_context)
def __init__(self, websockets, sound_manager, script_manager, light_manager, me_bot):
    """Wire up reward handling: managers, profanity filter, the public sound
    catalogue, the reward-id dispatch table, and the worker thread.

    NOTE(review): the enclosing class is outside this chunk; comments below
    describe only what this constructor itself sets up.
    """
    # FIFO of pending work, presumably consumed by the worker thread — confirm.
    self.queue = queue.Queue()
    self.websockets = websockets
    self.sound_manager = sound_manager
    self.script_manager = script_manager
    self.light_manager = light_manager
    self.voicemod_manager = VoicemodManager()
    self.me_bot = me_bot
    # Presumably set by start_worker() below — confirm in that method.
    self.worker_thread = None
    self.filter = ProfanityFilter()
    # Sound name -> file path for every non-private sound (Django ORM query).
    self.all_sounds = {
        sound.name: sound.sound_file.path
        for sound in Sound.objects.filter(private=False)
    }
    # Reward id (UUID) -> handler method; presumably Twitch channel-point
    # reward ids — confirm against the reward configuration.
    self.rewards = {
        "79dcdf6f-7166-4958-8635-ba2233772008": self.sound_reward,
        "9fca547f-266a-4416-a6fe-f7ede97e4d97": self.shame_cube_reward,
        "5d02b71f-fceb-4ea8-9cca-9da2d749ebda": self.stream_message_reward,
        "529d7869-0bea-4503-9eba-0c59e9943782": self.scramble_camera_reward,
        "63438f03-3cea-4461-b7f3-dce44ba5c7da": self.grant_vip_reward,
        "ba256777-1cbc-4730-9b5c-0e16a1fd1086": self.revoke_vip_reward,
        "173af3e8-2bc0-4a52-adff-91c47c3e891a": self.change_light_color_reward,
        "53bf2ef4-0cbb-4cd6-b4e8-55c1c731c31a": self.light_wave_reward,
        "ac385b50-5be0-49da-bb6a-c95b9d18d9b2": self.change_background_image_reward,
        "00e8bfd4-d44d-4e85-8d45-088e2e09c639": self.birthday_reward,
        "259cdb66-6f68-4647-9671-9b1bb81b483d": self.voicemod_reward
    }
    self.start_worker()
def convert_information(user_list):
    """Build account-information records and random passwords for users.

    :param user_list: mapping of username -> (first_name, last_name)
    :return: tuple (information_list, pass_list) — a deque of CSV rows
             and a dict mapping username -> generated password.
    """
    information_list = deque([])
    pass_list = dict()
    # Hoisted out of the per-user loop: the word list, the CSPRNG and the
    # profanity filter are loop-invariant and each is expensive to build.
    word_list = make_word_file()
    secure_random = SystemRandom()
    pf = ProfanityFilter()

    def pick_word():
        return secure_random.choice(word_list).strip()

    for k in user_list.keys():
        first_word = pick_word()
        second_word = pick_word()
        # Reject candidates that are too long/short, contain an apostrophe,
        # or contain profanity.
        while len(first_word + second_word) > 10 or len(first_word + second_word) < 6 or \
                "'" in first_word or "'" in second_word or \
                pf.is_profane(first_word) or pf.is_profane(second_word):
            first_word = pick_word()
            second_word = pick_word()
        # SECURITY: use the CSPRNG for the digits too — random.randint is
        # not cryptographically secure and these are passwords.
        pwd = (first_word + second_word +
               str(secure_random.randint(0, 9)) + str(secure_random.randint(0, 9)))
        pass_list[k] = pwd
        first_name = user_list[k][0].title()
        last_name = user_list[k][1].title()
        full_name = first_name + ' ' + last_name
        email = k + "@xyz.org"
        information = pwd + ',' + k + ',' + full_name + ',' + last_name + ',' + first_name + ',' + email
        information_list.append(information)
    return information_list, pass_list
def feedback_check(self, description=None):
    """Return 'true'/'false' for a profane/clean description, 'None' if absent."""
    pf = ProfanityFilter()
    if not description:
        return 'None'
    return str(pf.is_profane(description)).lower()
def is_input_valid(input_value):
    """True when the input matches no blacklist pattern and is profanity-free."""
    value = input_value.lower()
    pf = ProfanityFilter()
    # Any hit on the module's invalid-string patterns rejects the input.
    if any(re.search(pattern, value, re.IGNORECASE) for pattern in invalid_list):
        return False
    return pf.is_clean(value)
def getRoomCode(stringLength=4):
    """Generate an unused, profanity-free room code of uppercase letters."""
    pf = ProfanityFilter()
    alphabet = string.ascii_uppercase

    def make_code():
        return ''.join(random.choice(alphabet) for _ in range(stringLength))

    roomCode = make_code()
    # Retry while the code spells profanity or already names a seder room.
    while pf.is_profane(roomCode) or db.seders.count_documents(
            {'roomCode': roomCode}) > 0:
        roomCode = make_code()
    return roomCode
def get_word_web(start_word):
    """Build a word web of perfect rhymes for *start_word*.

    For each clean one-syllable (or, failing that, two-syllable) perfect
    rhyme, collects related WordNet lemma names bucketed by single-letter
    part-of-speech tag, plus a shared antonym list under the "antonyms" key.

    :param start_word: word to rhyme against.
    :return: dict {rhyme_word: {pos: [lemma, ...]}, "antonyms": [...]}.
    """
    ph = Phyme()
    perfect_rhymes = ph.get_perfect_rhymes(start_word)
    pf = ProfanityFilter()
    # Prefer one-syllable perfect rhymes; fall back to two syllables.
    if 1 in perfect_rhymes:
        list_single_rhymes = perfect_rhymes[1]
    else:
        list_single_rhymes = perfect_rhymes[2]

    def _collect(synset, buckets):
        # Append the synset's first lemma under its single-letter POS tag;
        # synset names look like "word.pos.nn".
        parts = synset.name().split(".")
        if len(parts[1]) == 1:
            buckets[parts[1]].append(synset.lemmas()[0].name())

    word_web = {}
    antonyms = []
    for word in list_single_rhymes:
        if not pf.is_clean(word):
            continue
        # POS buckets: verb, noun, satellite/adjective, adverb, etc.
        associated_words = {"v": [], "n": [], "s": [], "a": [], "r": [], "i": []}
        for syn_set in wordnet.synsets(word):
            # The synset itself, its hypernyms and its hyponyms all feed the
            # same buckets (previously three copies of the same code).
            _collect(syn_set, associated_words)
            for hyp in syn_set.hypernyms():
                _collect(hyp, associated_words)
            for hyp in syn_set.hyponyms():
                _collect(hyp, associated_words)
            for lemma in syn_set.lemmas():
                if lemma.antonyms():
                    antonyms.append(lemma.antonyms()[0].name())
        word_web[word] = associated_words
    # All rhyme words share one antonym list.
    word_web["antonyms"] = antonyms
    return word_web
def sanitize_strings(food_location, food_activity, food_duration):
    """Validate the user input fields for a food posting.

    :return: dict of error flags:
        profanity - censored content or a <script> tag was found
        time      - duration is not a number or outside [0.5, 4]
        empty     - a field is shorter than its minimum length
        long      - a field is longer than its maximum length
    """
    profanity = False
    time_invalid = False  # Is the time field out of range, or not a number?
    empty = False         # Are any strings empty? < 10 chars
    too_long = False      # Are any strings > the per-field maximum?
    max_activity_len = 100  # How long can a field be?
    max_location_len = 50
    min_activity_len = 10   # How short can a description be?
    min_location_len = 5    # How short can location description be?
    pf = ProfanityFilter()
    food_location_sanitized = pf.censor(food_location)
    food_activity_sanitized = pf.censor(food_activity)
    if food_location_sanitized != food_location:
        print("food_location: {}".format(food_location))
        print("food_location_sanitized: {}".format(food_location_sanitized))
        print("Mismatched. Censored contents detected.")
        profanity = True
    if food_activity_sanitized != food_activity:
        print("food_activity: {}".format(food_activity))
        print("food_activity_sanitized: {}".format(food_activity_sanitized))
        print("Mismatched. Censored contents detected.")
        profanity = True
    # BUGFIX: the location was previously checked for the bare substring
    # 'script', which rejected innocent words such as "description".
    if '<script>' in food_activity or '<script>' in food_location:
        print("<script> tag detected in input.")
        profanity = True
    if len(food_activity) > max_activity_len or len(
            food_location) > max_location_len:
        too_long = True
    if len(food_activity) < min_activity_len or len(
            food_location) < min_location_len:
        empty = True
    try:
        food_duration = float(food_duration)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures are expected.
        print("food_duration: {}".format(food_duration))
        print("Not a floating point number.")
        time_invalid = True
    if time_invalid is False and (food_duration > 4 or food_duration < 0.5):
        print(
            "Food duration of {} falls out of bounds. ".format(food_duration))
        time_invalid = True
    return {'profanity': profanity, 'time': time_invalid, 'empty': empty, 'long': too_long}
def response(self, flow: HTTPFlow):
    """The full HTTP response has been read."""
    try:
        body_text = flow.response.content.decode('utf8', errors='ignore')
    except UnicodeDecodeError as e:
        body_text = ""
        ctx.log.alert(str(e))
    # Reload the extra blocklist on every response so edits to the file
    # take effect without restarting the proxy.
    with open(FILE_PROFANITY_BLOCKLIST) as blocklist_file:
        extra_words = [entry.strip() for entry in blocklist_file.readlines()]
    word_filter = ProfanityFilter(extra_censor_list=extra_words)
    flow.response.content = bytes(word_filter.censor(body_text), 'utf8')
def __init__(self, autovote=False, prompt="Chat \"!v (suggestion)\"!"):
    """Set up the Twitch chat voting bot and open a blocking websocket.

    :param autovote: whether voting runs automatically.
    :param prompt: the suggestion prompt shown to chat.
    """
    Settings.set_logger()
    # Connection fields; presumably populated by Settings(self) below — confirm.
    self.host = None
    self.port = None
    self.auth = None
    capability = ["tags"]
    self.chan = None
    self.nick = None
    self.sending_message = True
    self.curr_prompt = prompt
    # Process-shared flag marking the prompt as updated.
    self.updated = mp.Value(c_bool, True)
    self.autovote = autovote
    self.log_results = True
    self.skip_voting = False
    self.random_collection = False
    # Timing parameters (seconds).
    self.collecting_time = 120
    self.voting_time = 120
    self.stream_delay = 2
    self.vote_cooldown = 120
    self.commands_collected_max = 5
    self.commands_collected = []
    self.votes_collected = []
    self.prompt = prompt
    # Accepted chat message length bounds.
    self.min_msg_size = 5
    self.max_msg_size = 200
    # Custom censor words from the blacklist file next to this module.
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           "blacklist.txt"), "r") as f:
        censor = [l.replace("\n", "") for l in f.readlines()]
    self.pf = ProfanityFilter(custom_censor_list=censor)
    logging.debug("Setting settings.")
    Settings(self)
    logging.debug("Creating Database instance.")
    self.db = Database(self.chan)
    logging.debug("Creating TwitchWebsocket object.")
    self.ws = TwitchWebsocket(host=self.host,
                              port=self.port,
                              chan=self.chan,
                              nick=self.nick,
                              auth=self.auth,
                              callback=self.message_handler,
                              capability=capability,
                              live=True)
    # Process-shared one-byte mode flag, initial b's'; its meaning is
    # defined by the message handler elsewhere — confirm there.
    self.curr_mode = mp.Value(c_char, b's')
    logging.debug("Starting Websocket connection.")
    # Blocks: nothing after this line runs until the connection ends.
    self.ws.start_blocking()
async def adopt_pet(self, context):
    """Interactive adoption flow: ask for a pet name, reject profanity, save it."""
    # Instance of the user who wants to adopt a pet.
    pet_owner = Users(context.author.id)
    if pet_owner.find_pet() == 1:
        msg = await context.send("Failed! You already have a pet!")
        await asyncio.sleep(5)
        await msg.delete()
        return
    intro_msg = "Welcome to the **Pet Shelter**!\n\nPlease enter your desired pet name now:"
    em = discord.Embed(description=intro_msg, colour=0x607D4A)
    em.set_thumbnail(
        url=
        "https://cdn.discordapp.com/emojis/746904102650249296.gif?size=128"
    )
    await context.send(embed=em)

    def is_author(m):
        # Only accept replies from the invoking user in the same channel.
        return m.author == context.author and m.channel == context.channel

    pf = ProfanityFilter()
    # Keep prompting until the (sanitized) entry passes the profanity check.
    while True:
        entry = await self.client.wait_for("message",
                                           check=is_author,
                                           timeout=60)
        # Strip everything except word characters from the entry.
        pet_name = re.sub(r"\W+", "", entry.clean_content)
        if pf.is_clean(pet_name):
            break
        await context.send(
            "Pet name has profanity! Please enter a new one now:")
    adoption_msg = pet_owner.add_pet(pet_name[:15])
    # Embed the confirmation message.
    em = discord.Embed(description=adoption_msg, colour=0x607D4A)
    em.set_thumbnail(
        url="https://cdn.discordapp.com/emojis/563872560308289536.png?v=1")
    await context.send(embed=em)
def post_check():
    """Validate and store a message post, censoring profanity.

    The posting quota is at most 5 words per public repository of the
    logged-in user.
    """
    pf = ProfanityFilter()
    if 'user_data' in session:
        # BUGFIX: guard against a zero repo count — the original expression
        # divided by public_repos and raised ZeroDivisionError for users
        # with no public repositories.
        repo_count = session['user_data']['public_repos']
        word_count = len(request.form['message'].split(' '))
        if repo_count > 0 and word_count / repo_count <= 5:
            doc = {
                'fname': request.form['fname'],
                'lname': request.form['lname'],
                'message': pf.censor(request.form['message'])
            }
            collection.insert_one(doc)
            return render_template('message.html',
                                   message='You successfully posted')
    return render_template(
        'message.html',
        message=
        'You failed to post due to too many words for your repository count')
def getAnalytics(transcript):
    """Compute meeting analytics from a caption transcript.

    :param transcript: iterable of caption entries with .start and .end
        ('%H:%M:%S.%f' timestamps) and .text ("Speaker: words") attributes —
        presumably WebVTT-style captions; confirm against the caller.
    :return: (participants_speaking_times, num_awkward_silences,
              participants_average_sentiments, num_profane_words)
    """
    participants_speaking_times = {}  # key: participant. value: seconds spoken
    num_awkward_silences = 0  # 10+ seconds of silence
    current_end_time = None  # end timestamp of the previous line (None on first pass)
    # key: participant. value: [sum_polarity, sum_subjectivity, total_lines]
    participants_polarity_subjectivity = {}
    num_profane_words = 0
    pf = ProfanityFilter()
    for line in transcript:
        current_start_time = datetime.datetime.strptime(line.start, '%H:%M:%S.%f')
        # An awkward silence is a >10s gap between the previous line's end
        # and this line's start: (current start) - (previous end).
        if current_end_time != None and current_start_time - current_end_time > datetime.timedelta(seconds=10):
            num_awkward_silences += 1
        current_end_time = datetime.datetime.strptime(line.end, '%H:%M:%S.%f')
        current_speaking_time = current_end_time - current_start_time
        # Lines look like "Name: words"; skip lines without a speaker tag.
        participant_end_index = line.text.find(': ')
        if participant_end_index == -1:
            continue
        current_participant = line.text[0:participant_end_index]
        if current_participant in participants_speaking_times.keys():
            participants_speaking_times[current_participant] += current_speaking_time.total_seconds()
        else:
            participants_speaking_times[current_participant] = current_speaking_time.total_seconds()
        # Sentiment analysis of everything after the "Name: " prefix.
        testimonial = TextBlob(line.text[participant_end_index + 2:])
        if current_participant in participants_polarity_subjectivity.keys():
            participants_polarity_subjectivity[current_participant][0] += testimonial.sentiment.polarity
            participants_polarity_subjectivity[current_participant][1] += testimonial.sentiment.subjectivity
            participants_polarity_subjectivity[current_participant][2] += 1
        else:
            participants_polarity_subjectivity[current_participant] = [testimonial.sentiment.polarity, testimonial.sentiment.subjectivity, 1]
        # Check for profanity: censor() marks profane words with '*', so a
        # '*' in the censored line means at least one profane word; then
        # count them word by word.
        if pf.censor(line.text).find('*') != -1:
            for word in line.text.split(' '):
                if pf.censor(word).find('*') != -1:
                    num_profane_words += 1
    participants_average_sentiments = {}  # key: participant. value: [average_polarity, average_subjectivity]
    for participant in participants_polarity_subjectivity.keys():
        participants_average_sentiments[participant] = [participants_polarity_subjectivity[participant][0]/participants_polarity_subjectivity[participant][2], participants_polarity_subjectivity[participant][1]/participants_polarity_subjectivity[participant][2]]
    return participants_speaking_times, num_awkward_silences, participants_average_sentiments, num_profane_words
def main(argv):
    """Annotate a CSV of comments with a per-row profanity flag.

    :param argv: command-line arguments; argv[1] is the input CSV path,
        argv[2] the output CSV path (opened in append mode).
    """
    # BUGFIX: the function previously ignored its argv parameter and read
    # sys.argv directly, breaking any caller that passed custom arguments.
    with open(argv[1], 'r') as inputfile, open(argv[2], 'a') as outputfile:
        df = pd.read_csv(inputfile)
        pf = ProfanityFilter()
        # One boolean per comment body: True when the text is clean.
        df['profanity'] = [pf.is_clean(row) for row in df['body']]
        df.to_csv(outputfile)
def submit():
    """Handle a warehouse item submission, rejecting profane item names."""
    # no_word_boundaries also catches profanity embedded inside longer words.
    pf = ProfanityFilter(no_word_boundaries=True)
    form = request.form
    serial = form['serial']
    name = form['name']
    location = form['location']
    amount = form['amount']
    # Since this is an open text field accessible on a public website,
    # some form of profanity check is needed.
    if not pf.is_profane(name):
        db = Database()
        db.addItem(serial, name, location, amount)
    return redirect("/warehouse")
def __init__(self):
    """Set up the AI Dungeon 2 Twitch bot: profanity filter, API client,
    settings, database, and a blocking websocket connection."""
    # Initialize variables to None; presumably populated by Settings(self)
    # below — confirm in the Settings class.
    self.host = None
    self.port = None
    self.chan = None
    self.nick = None
    self.auth = None
    capability = ["tags"]
    self.access_token = None
    self.cooldown = 0
    self.last_command_time = 0
    self.allowed_ranks = []
    self.allowed_users = []
    self.custom_prompt = ""
    # Custom censor words, one per line.
    with open("blacklist.txt", "r") as f:
        censor = [l.replace("\n", "") for l in f.readlines()]
    self.pf = ProfanityFilter(custom_censor_list=censor)
    # Create an Api instance to connect to AI Dungeon 2.
    logging.debug("Creating API instance.")
    self.api = API(self)
    # Update variables.
    logging.debug("Setting settings.")
    Settings(self)
    # Create a Database instance for storing which users do not want to be
    # whispered.
    logging.debug("Creating Database instance.")
    self.db = Database(self.chan)
    # Get the session_id from the API.
    self.session_id = self.api.get_session_id()
    # Create Websocket object.
    logging.debug("Creating TwitchWebsocket object.")
    self.ws = TwitchWebsocket(host=self.host,
                              port=self.port,
                              chan=self.chan,
                              nick=self.nick,
                              auth=self.auth,
                              callback=self.message_handler,
                              capability=capability,
                              live=True)
    # Start a blocking websocket connection; nothing after this runs until
    # the connection ends.
    logging.debug("Starting Websocket connection.")
    self.ws.start_bot()
async def update_pet_name(self, context):
    """Interactive rename flow: prompt for a new name, reject profanity, save."""
    # Instance of the user who wants to rename their pet.
    pet_owner = Users(context.author.id)
    current_name = pet_owner.get_user_pet_name()
    intro_msg = f"Welcome to the **Pet Shelter**!\n\nPlease enter your new name for **{current_name}** now:"
    em = discord.Embed(description=intro_msg, colour=0x607D4A)
    em.set_thumbnail(
        url=
        "https://cdn.discordapp.com/emojis/560065150489722880.png?size=128"
    )
    await context.send(embed=em)

    def is_author(m):
        # Only accept replies from the invoking user in the same channel.
        return m.author == context.author and m.channel == context.channel

    pf = ProfanityFilter()
    # Keep prompting until the (sanitized) entry passes the profanity check.
    while True:
        entry = await self.client.wait_for("message",
                                           check=is_author,
                                           timeout=60)
        # Strip everything except word characters from the entry.
        new_name = re.sub(r"\W+", "", entry.clean_content)
        if pf.is_clean(new_name):
            break
        await context.send(
            "Pet name has profanity! Please enter a new one now:")
    confirmation_msg = pet_owner.update_user_pet_name(new_name[:15])
    # Embed the confirmation message.
    em = discord.Embed(description=confirmation_msg, colour=0x607D4A)
    em.set_thumbnail(
        url="https://cdn.discordapp.com/emojis/746904102650249296.gif?v=1")
    await context.send(embed=em)
def _skip_status(self, status): '''filters out statuses that are profane or retweets''' if status.text.lower().startswith('rt'): return True if ProfanityFilter().is_profane(status.text): return True return False
def isTrash(text):
    """Heuristic trash detector: profanity, link spam, or tag/mention spam."""
    # Has profane vocabulary.
    pf = ProfanityFilter()
    if pf.is_profane(text):
        return True
    # Trash filtering: more than one URL, more than 3 hashtags, or more
    # than 2 usernames all mark the text as trash.
    return (
        len(re.findall(URL_REGEX, text)) > 1
        or len(re.findall(r"#\S+", text)) > 3
        or len(re.findall(r"@\S+", text)) > 2
    )
def profanityFilter():
    """Return a generator-style filter that drops revisions whose source or
    target sentence contains a bad word."""
    pf = ProfanityFilter()

    def generator(revision):
        # Short-circuits: the target is only checked when the source is clean.
        if not (pf.has_bad_word(revision['source']['sentence'])
                or pf.has_bad_word(revision['target']['sentence'])):
            yield revision

    return generator
class Swearjar(object):
    """Tracks swear counts per user and converts them to money owed."""

    defaultSwearIncrement = 1

    def __init__(self):
        self.swearlist = swears
        self.defaultMultiplier = 0.25  # dollars charged per swear
        self.storage = BatchPostgres()
        self.userSwearCountCache = {}  # user -> last known swear count
        self.filter = ProfanityFilter()

    def hasSwear(self, text):
        """Return True when *text* contains profanity."""
        return self.filter.is_profane(text)

    def getSwearList(self):
        """Return the configured swear word list.

        BUGFIX: previously returned self.swears, an attribute that does not
        exist (AttributeError); the attribute is named swearlist.
        """
        return self.swearlist

    def addToSwearJar(self, user, swearIncrement=defaultSwearIncrement):
        """Increment *user*'s persisted swear count and refresh the cache."""
        swearCount = self.storage.incrementSwearCount(user, swearIncrement)
        self.userSwearCountCache[user] = swearCount
        return swearCount

    def checkSwearJar(self, user):
        """Return the cached swear count, falling back to storage."""
        if user in self.userSwearCountCache:
            return self.userSwearCountCache[user]
        return self.storage.getSwearCount(user)

    def getMoneyOwed(self, user):
        """Dollar amount *user* owes for their current swear count."""
        return self.swearsToDollarAmount(self.checkSwearJar(user))

    def addNewUser(self, userinfo):
        return self.storage.addNewUser(userinfo)

    def getUserData(self, user):
        return self.storage.getUserData(user)

    def getAllBalances(self):
        """Render every user's dollar balance and warm the cache.

        Rows are indexed as [0]=user key, [1]=display name, [2]=count —
        presumably matching getAllUserSwearCounts; confirm in storage.
        """
        swearCounts = self.storage.getAllUserSwearCounts()
        balances = "Balances: \n"
        for userSwearCount in swearCounts:
            balances += "%s: $%.2f\n" % (
                userSwearCount[1],
                self.swearsToDollarAmount(userSwearCount[2]))
            self.userSwearCountCache[userSwearCount[0]] = userSwearCount[2]
        return balances

    def swearsToDollarAmount(self, swears):
        """Convert a swear count to dollars, rounded up to the cent."""
        money = swears * self.defaultMultiplier
        return math.ceil(money * 100) / 100
def create_message(subject: str = "Insight message!", main_body: str = None) -> str:
    """
    Combines the given subject and main body to formulate an email message.

    Returns a str capable of being transmitted using smtplib and gmail.
    Uses a profanity filter to censor offensive content.

    :param subject: The subject (title/header) of the email.
    :param main_body: The main body of the email.
    :return: The constructed email to be sent.
    :raises TypeError: if subject or main_body is not a str.
    """
    # isinstance is the idiomatic type check (and accepts str subclasses);
    # the error is also now a single formatted message instead of a tuple
    # of fragments passed to TypeError.
    if not isinstance(subject, str):  # Check the email subject
        raise TypeError(
            f"Expected type <class 'str'> got type {type(subject)} for subject")
    if not isinstance(main_body, str):  # Check the email body
        raise TypeError(
            f"Expected type <class 'str'> got type {type(main_body)} for main_body")
    # Bundle the contents in the appropriate format.
    message = f'Subject: {subject}\n{main_body}'
    profanity_filter = ProfanityFilter()  # Create ProfanityFilter object
    # Censor offensive content from the message and return it.
    return profanity_filter.censor(message)
def clean_tweets(username):
    """A program to clean up all your twitter of any profanity."""
    # Load the Twitter API keys.
    keys_path = os.getcwd() + "/clean/twitter/secrets.json"
    with open(keys_path, 'r') as filein:
        try:
            keys = json.load(filein)
        except ValueError:  # simplejson.decoder.JSONDecodeError
            print("Error_JSON could not read json file")
            exit(1)
    # Authorization: consumer key/secret plus the user's access token pair.
    auth = tweepy.OAuthHandler(keys["consumer_key"], keys["consumer_secret"])
    auth.set_access_token(keys["access_token"], keys["access_token_secret"])
    # Calling api.
    api = tweepy.API(auth)
    try:
        redirect_url = auth.get_authorization_url()
    except tweepy.TweepError:
        print('Error! Failed to get request token.')
    # Get tweets: the 200 most recent for the account.
    number_of_tweets = 200
    tweets = api.user_timeline(screen_name=username,
                               tweet_mode='extended',
                               count=number_of_tweets)
    # Profanity filter.
    pf = ProfanityFilter()
    # Print every tweet that contains profanity.
    for tweet in tweets:
        if not pf.is_clean(tweet.full_text):
            print(tweet.full_text + "\n")
def profanity_check(cleaned_comment):
    """Return True when *cleaned_comment* contains profanity.

    Honours a site-specific word list from config when present; otherwise
    falls back to the built-in list extended and whitelisted from files.
    """
    custom_profanity_list = config.get('ckan.comments.profanity_list', [])
    if custom_profanity_list:
        word_filter = ProfanityFilter(
            custom_censor_list=custom_profanity_list.splitlines())
    else:
        # Fall back to original behaviour of built-in Profanity bad words
        # list combined with bad_words_file and good_words_file.
        word_filter = ProfanityFilter(extra_censor_list=load_bad_words())
        for allowed_word in load_good_words():
            word_filter.remove_word(allowed_word)
    return word_filter.is_profane(cleaned_comment)
# coding: utf-8 import re import yaml from difflib import SequenceMatcher YAML_FILE = "story/story_data.yaml" from profanityfilter import ProfanityFilter pf = ProfanityFilter() def console_print(text, width=75): last_newline = 0 i = 0 while i < len(text): if text[i] == "\n": last_newline = 0 elif last_newline > width and text[i] == " ": text = text[:i] + "\n" + text[i:] last_newline = 0 else: last_newline += 1 i += 1 print(text) def get_similarity(a, b): return SequenceMatcher(None, a, b).ratio() def get_num_options(num):
from django.db.models import Q
from django.shortcuts import render, get_object_or_404, redirect
from profanityfilter import ProfanityFilter

from .forms import CaptchaForm, CommentForm, SearchForm
from .models import Recipe, RecipeComment, WrittenPiece, WrittenPieceComment

# BUGFIX: the profanity list file was previously opened at import time and
# never closed (file-descriptor leak / ResourceWarning); a context manager
# guarantees it is closed.
with open('Coffee/profanity_list.txt', 'r') as _profanity_file:
    pf = ProfanityFilter(extra_censor_list=_profanity_file.read().split('\n'))


def index(request):
    """Render the site landing page."""
    return render(request, 'Coffee/index.html')


def recipe_home(request):
    """Render the recipe listing, ordered alphabetically by title."""
    recipe_list = Recipe.objects.order_by('title')
    return render(request, 'Coffee/recipe_home.html', {'recipe_list': recipe_list})


def recipe(request, recipe_id):
    """Render a single recipe page with its comments, newest first."""
    obj = get_object_or_404(Recipe, pk=recipe_id)
    context = {
        'recipe': obj,
        'likeForm': CaptchaForm(),
        'commentForm': CommentForm,
        'comments_ordered': obj.comments.all().order_by('-timestamp')
    }
    return render(request, 'Coffee/recipe.html', context)
# In[2]:

# BUGFIX: the notebook export left a bare `pip install profanityfilter`
# line here, which is a SyntaxError in a plain Python module. Install the
# dependency from a shell instead:
#   pip install profanityfilter

# In[3]:

from profanityfilter import ProfanityFilter

# In[4]:

pf = ProfanityFilter()

# In[5]:

pf.censor("That's bullshit!")

# In[6]:

# Switch the censor character from the default "*" to "@".
pf.set_censor("@")

# In[7]:
# Single-character aliases produced by code minification.
_E = 'data'
_D = 'ascii'
_C = None
_B = True
_A = False
import discord, base64
from operator import itemgetter
import requests, random, asyncio, psutil, urllib, datetime, random, sys, traceback, urllib.parse, urllib.request
from json import loads
from discord.ext.commands import has_permissions, MissingPermissions
from discord.ext import commands
from discord.utils import find
import time, redis, os, json, subprocess
from profanityfilter import ProfanityFilter
import homoglyphs as hg

# Profanity filter that masks bad words with '#'.
pf = ProfanityFilter()
pf.set_censor('#')
# Homoglyph (look-alike character) detection for English text.
homoglyphs = hg.Homoglyphs(languages={'en'}, strategy=hg.STRATEGY_LOAD)
userspecific = _B  # i.e. True
yesemoji = '👍'
noemoji = '👎'
numberemojis = ['1️⃣', '2️⃣', '3️⃣', '4️⃣']
# Mapping built from minified aliases (_W.._y, _z) defined in another part
# of this file — presumably category names mapped to numeric category ids;
# confirm where those aliases are declared.
categories = {_W: '9', _c: _z, _d: '11', _e: '12', _f: '13', _g: '14', _h: '15', _i: '16', _j: '17', _k: '18', _l: '19', _m: '20', _n: '21', _o: '22', _p: '23', _q: '24', _r: '25', _s: '26', _t: '27', _u: '28', _v: '29', _w: '30', _x: '31', _y: '32'}
# Credentials/connection settings from the environment; prompt interactively
# when the required ones are missing.
TOKEN = os.getenv('bottoken')
if TOKEN == _C:
    TOKEN = input('Token Please:')
redisurl = os.getenv('REDIS_URL')
if redisurl == _C:
    redisurl = input('Please enter the REDIS URL:')
dbl_token = os.getenv('DBL_TOKEN')
HEROKU_RELEASE_CREATED_AT = os.getenv('HEROKU_RELEASE_CREATED_AT')
HEROKU_RELEASE_VERSION = os.getenv('HEROKU_RELEASE_VERSION')
HEROKU_SLUG_COMMIT = os.getenv('HEROKU_SLUG_COMMIT')
# coding: utf-8 import re from difflib import SequenceMatcher import yaml from profanityfilter import ProfanityFilter YAML_FILE = "story/story_data.yaml" with open("story/censored_words.txt", "r") as f: censored_words = [l.replace("\n", "") for l in f.readlines()] pf = ProfanityFilter(custom_censor_list=censored_words) def console_print(text, width=75): last_newline = 0 i = 0 while i < len(text): if text[i] == "\n": last_newline = 0 elif last_newline > width and text[i] == " ": text = text[:i] + "\n" + text[i:] last_newline = 0 else: last_newline += 1 i += 1 print(text) def get_similarity(a, b):
def setUp(self):
    """Create one filter that replaces the word list and one that extends it."""
    words = ["chocolate", "orange"]
    # Each filter gets its own copy in case the constructor keeps the list.
    self.custom_pf = ProfanityFilter(custom_censor_list=list(words))
    self.extended_pf = ProfanityFilter(extra_censor_list=list(words))
import yorm
from yorm.types import List, Object
from profanityfilter import ProfanityFilter

import log

# Shared filter for cached items; "damn" is deliberately allowed.
profanity_filter = ProfanityFilter()
profanity_filter.remove_word("damn")


@yorm.attr(items=List.of_type(Object))
@yorm.sync("data/cache/{self.name}.yml", auto_resolve=True)
class Cache:
    # Cache size limit; enforcement is not visible in this chunk — confirm.
    SIZE = 100

    def __init__(self, filtered=True):
        self.items = []  # most-recent-first list of cached kwargs dicts
        self.disabled = False  # presumably disables caching when True — confirm
        self.filtered = filtered  # selects the filtered vs unfiltered store

    @property
    def name(self):
        """Backing YAML file stem, derived from the filtered flag."""
        return 'filtered' if self.filtered else 'unfiltered'

    def add(self, **kwargs):
        """Insert *kwargs* at the front of the cache unless it is skipped.

        NOTE(review): _skip_cache is defined outside this chunk; assumes it
        returns truthy for items that must not be cached — confirm there.
        """
        if self._skip_cache(kwargs):
            return
        log.info("Caching: %s", kwargs)
        self.items.insert(0, kwargs)