def post(self, request, pk, *args, **kwargs):
    """Handle an update to an existing post.

    Rejects edits by anyone other than the author, blocks content that
    contains profanity (including admin-defined words), and re-renders
    the form on validation failure.
    """
    profile = Profile.objects.get(user=request.user)
    try:
        post = Post.objects.get(pk=pk)
    except Post.DoesNotExist:  # narrow except; other errors should surface
        return render(request, 'main/not_found.html')

    # BUG FIX: the old code warned but fell through and still applied the
    # update, letting any user overwrite any post. Stop here instead.
    if profile != post.author:
        messages.warning(request, 'You can only update your own posts.')
        return redirect(request.POST.get('referer'))

    form = PostForm(request.POST, request.FILES)

    # Custom profanity words added by admins.
    custom_badwords = CustomProfanity.objects.values_list('bad_word', flat=True)
    profanity.load_censor_words(custom_badwords)

    valid = form.is_valid()
    # Only read cleaned_data after a successful validation; the key may be
    # missing on an invalid form and contains_profanity would get None.
    if valid and profanity.contains_profanity(form.cleaned_data.get('content')):
        # add_error keeps Django's error container a list of messages,
        # unlike the previous direct assignment of a bare string.
        form.add_error(
            'content',
            'Please remove any profanity/swear words. (Added by an admin. '
            'Contact an admin if you believe this is wrong.)')
        valid = False

    if valid:
        post.content = form.cleaned_data.get('content')
        if form.cleaned_data.get('image'):
            post.image = form.cleaned_data.get('image')
        post.save()
        return redirect(request.POST.get('referer'))
    context = {'form': form, 'post': post}
    return render(request, 'posts/update.html', context)
def __init__(self, conf):
    """Initialise the better_profanity-backed filter with its default word list."""
    from better_profanity import profanity as _profanity
    _profanity.load_censor_words()
    self.name = "better_profanity"
    self.profanity = _profanity
    super().__init__(conf)
async def meme(ctx, args=""):
    """Fetch a random non-NSFW meme, optionally from a given subreddit.

    Gives up after 10 NSFW results in a row. Sends an error message when
    the meme API has no result for the requested subreddit.
    """
    subreddit = ""
    tokens = shlex.split(args)
    if tokens:
        subreddit = tokens[0]
    memejson = json.loads(
        requests.get("https://meme-api.herokuapp.com/gimme/" + subreddit).text)
    if "url" not in memejson:
        await ctx.send("The subreddit you gave me is currently not available.")
        return
    tries = 0
    while memejson["nsfw"]:
        # BUG FIX: the retry previously requested with the raw `args`
        # string instead of the parsed subreddit, which breaks whenever
        # extra arguments were supplied.
        memejson = json.loads(
            requests.get("https://meme-api.herokuapp.com/gimme/" + subreddit).text)
        tries += 1
        if tries == 10:
            await ctx.send(
                "Clean memes were not found after 10 tries, please try again."
            )
            return
    profanity.load_censor_words()
    await ctx.send(
        embed=discord.Embed(title=profanity.censor(memejson["title"]),
                            url=memejson["postLink"]).set_image(
                                url=memejson["url"]))
def clean_transcript(link):
    """Fetch the auto-generated English transcript for a video, censor
    profanity in it, save it to disk (one ``[text, start, duration]``
    entry per line) unless a file for this title already exists, and
    return the censored transcript."""
    video_id = gen_id(link)
    transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
    transcript = transcript_list.find_generated_transcript(['en']).fetch()
    profanity.load_censor_words()
    for entry in transcript:
        entry['text'] = profanity.censor(entry['text'])
    title = get_title(link)
    # Hoisted: this path expression was previously rebuilt four times.
    out_path = os.path.join(SAVE_TRANSCRIPT_PATH, title) + '.txt'
    if not os.path.exists(out_path):
        # `with` guarantees the handle closes even if a write raises.
        with open(out_path, 'a') as out_file:
            for line in transcript:
                out_file.write(str([line['text'], line['start'], line['duration']]))
                out_file.write('\n')
        print('Transcript saved to', out_path)
    else:
        print('File Already Exists!')
    print()
    return transcript
def test_unicode_censorship_5(self):
    """Accented Cyrillic words from a custom list are censored in running text."""
    targets = ["шесто́м", "Нидерла́ндах", "перее́хала", "та́нцы"]
    original = "Маргаре́та (э́то бы́ло её настоя́щее и́мя) родила́сь в 1876 (ты́сяча восемьсо́т се́мьдесят шесто́м) году́ в Нидерла́ндах. В 18 (восемна́дцать) лет Маргаре́та вы́шла за́муж и перее́хала в Индоне́зию. Там она́ изуча́ла ме́стную культу́ру и та́нцы."
    expected = "Маргаре́та (э́то бы́ло её настоя́щее и́мя) родила́сь в 1876 (ты́сяча восемьсо́т се́мьдесят ****) году́ в ****. В 18 (восемна́дцать) лет Маргаре́та вы́шла за́муж и **** в Индоне́зию. Там она́ изуча́ла ме́стную культу́ру и ****."
    profanity.load_censor_words(targets)
    self.assertEqual(profanity.censor(original), expected)
def test_custom_wordlist(self):
    """Loading a custom list replaces the default dictionary entirely."""
    profanity.load_censor_words(["happy", "jolly", "merry"])
    # Default profanity is no longer flagged after the reload...
    self.assertFalse(profanity.contains_profanity("F**k you!"))
    # ...while the custom words now are.
    self.assertTrue(profanity.contains_profanity("Have a merry day! :)"))
async def compliment(ctx, args=""):
    """Scrape the first compliment heading from toykeeper.net and send it censored."""
    profanity.load_censor_words()
    page = requests.get("http://toykeeper.net/programs/mad/compliments", )
    headings = html.fromstring(page.text).xpath("//h3")
    first = [e.text_content() for e in headings][0].replace("\n", "")
    await ctx.send(profanity.censor(first))
def song_name(self):
    """Return the song's title with profanity censored, each word run
    through ``process_word`` and re-joined with single spaces."""
    profanity.load_censor_words()
    profanity.add_censor_words(self.custom_profanity)
    censored = profanity.censor(str(Song.find_song(self.songName).title))
    processed = [self.process_word(word) for word in censored.split()]
    return " ".join(processed)
def test_whitelist_words(self):
    """Whitelisted words survive censoring after a reload."""
    text = "I have boobs"
    # Default dictionary censors the word.
    self.assertEqual(profanity.censor(text), "I have ****")
    # After whitelisting `boobs`, the text passes through unchanged.
    profanity.load_censor_words(whitelist_words=["boobs"])
    self.assertEqual(profanity.censor(text), text)
def taboo_censor(request):
    """Censor profanity in submitted blog text.

    BUG FIX: the body was wrapped in ``if __name__ == "__main__":`` —
    never true inside an imported view module — so the function was a
    silent no-op. The guard is removed and the censored text returned.
    """
    profanity.load_censor_words()
    # TODO: once the blog is implemented, pull the submitted text from
    # `request` here instead of the empty placeholder.
    text = ''
    text = profanity.censor(text)
    return text
def post(self, request, pk, *args, **kwargs):
    """Handle post creation and comment submission inside a group.

    Both forms run through the admin-defined profanity list; invalid or
    profane submissions fall through to re-render the group view with
    the bound form and its errors.
    """
    user = request.user
    profile = Profile.objects.get(user=user)
    group = Group.objects.get(pk=pk)

    # Custom profanity words added by admins.
    custom_badwords = CustomProfanity.objects.values_list('bad_word', flat=True)
    profanity.load_censor_words(custom_badwords)

    # Shared error text (byte-identical to the message users already see).
    profanity_error = ('Please remove any profanity/swear words. (Added by an '
                       'admin. Contact an admin if you believe this is wrong.)')

    # Defaults so the context below never references unbound names when
    # neither submit button is present in the POST data.
    post_form = None
    comment_form = None

    if 'submit_post_form' in request.POST:
        post_form = PostForm(request.POST, request.FILES)
        valid = post_form.is_valid()
        # Only read cleaned_data after a successful validation; the key
        # may be missing on an invalid form.
        if valid and profanity.contains_profanity(
                post_form.cleaned_data.get('content')):
            # add_error keeps the error container a list of messages,
            # unlike the previous direct string assignment.
            post_form.add_error('content', profanity_error)
            valid = False
        if valid:
            post_instance = post_form.save(commit=False)
            post_instance.author = profile
            post_instance.group = group
            post_instance.save()
            return redirect('groups:view-group', pk=pk)
    elif 'submit_comment_form' in request.POST:
        comment_form = CommentForm(request.POST)
        valid = comment_form.is_valid()
        if valid and profanity.contains_profanity(
                comment_form.cleaned_data.get('body')):
            comment_form.add_error('body', profanity_error)
            valid = False
        if valid:
            post_id = request.POST.get("post_id")
            comment_instance = comment_form.save(commit=False)
            comment_instance.user = profile
            comment_instance.post = Post.objects.get(id=post_id)
            comment_instance.save()
            return redirect(request.headers.get('Referer'))

    group_posts = Post.objects.filter(group=group)
    context = {
        'profile': profile,
        'group': group,
        'posts': group_posts,
        'post_form': post_form,
        'comment_form': comment_form
    }
    return render(request, 'groups/view.html', context)
async def on_message(self, message: discord.Message):
    """Run per-guild auto-moderation on every incoming message.

    Checks, in order: blacklisted words (respecting per-guild ignored
    users), zalgo text, Discord invite links, and blacklisted domains.
    """
    if message.author.bot:
        return
    message_guild: discord.Guild = message.guild

    # Blacklisted-word filter.
    profanity.load_censor_words(get_censored_words(message_guild.id))
    if profanity.contains_profanity(message.content):
        # BUG FIX: the old loop deleted/warned once per NON-matching entry
        # in the ignored list and never fired at all when the list was
        # empty. A simple membership test is the intended behavior.
        if message.author.id not in get_ignored_users(message_guild.id):
            await message.channel.send("Blacklisted word!")
            await message.delete()

    # Zalgo (combining-character spam) filter.
    if is_zalgo_enabled(message_guild.id):
        if re.search(r"[\u0300-\u036F\u0489]", message.content):
            await message.delete()
            await message.channel.send("Zalgo detected!")

    # Invite-link filter.
    if is_invite_enabled(message_guild.id):
        if 'discord.gg' in message.content:
            await message.delete()
            await message.channel.send("Invites are not allowed!")

    # Domain blacklist: first hit deletes and stops.
    if is_domains_enabled(message_guild.id):
        for domain in domains_list(message_guild.id):
            if str(domain) in message.content:
                await message.delete()
                await message.channel.send("This domain is blacklisted!")
                break
def remove_profanity(name: str) -> str:
    """Censor profanity in *name* (replacing it with spaces), then collapse spaces."""
    profanity.load_censor_words(whitelist_words=PROFANITY_WHITELIST)
    extra_badwords = ['supremacia ariana', 'fisting', 'retarded', 'erection']
    profanity.add_censor_words(extra_badwords)
    cleaned = profanity.censor(name, ' ').strip()
    # Profanity was replaced with a space, so runs of spaces are squashed.
    return re.sub(' +', ' ', cleaned)
def handleMessage(msg):
    """Censor an incoming chat message, persist it, and broadcast it."""
    print('Message: ' + msg)
    profanity.load_censor_words()
    censored = profanity.censor(msg)
    db.session.add(History(message=censored))
    db.session.commit()
    send(censored, broadcast=True)
def profanity_word(text, lang):
    """Censor profanity in *text*.

    Vietnamese input loads a custom banned-word file and censors with
    '-'; every other language uses the default dictionary and '*'.
    Clean text is returned unchanged.
    """
    if lang == 'vi':
        profanity.load_censor_words_from_file('banned_word.text')
        censor_char = '-'
    else:
        profanity.load_censor_words()
        censor_char = '*'
    if profanity.contains_profanity(text):
        return profanity.censor(text, censor_char)
    return text
def message(data):
    """Relay a chat message to its room, censored and timestamped."""
    print(data)
    profanity.load_censor_words()
    payload = {
        'msg': profanity.censor(data['msg']),
        'username': data['username'],
        'timestamp': strftime('%d-%b %I:%M%p', localtime())
    }
    send(payload, room=data['room'])
def compare():
    """Benchmark profanity_filter against better_profanity on one censor call each."""
    # `pf`, not `filter`: don't shadow the builtin.
    start = time.perf_counter()  # perf_counter: monotonic, higher resolution than time()
    pf = profanity_filter.ProfanityFilter()
    print(pf.censor("Damnnn you"))
    print("Time for 1st filter: " + str(time.perf_counter() - start))

    start = time.perf_counter()
    profanity.load_censor_words()
    print(profanity.censor("D*mn you"))
    print("Time for 2nd filter: " + str(time.perf_counter() - start))
def censorText():
    """Flask endpoint: censor each word of ``text`` with a random emoji.

    Honours a request-supplied whitelist (``white_list``) and extra
    censor words (``censor_list``).
    """
    text = request.json.get('text', '')
    whitelist = request.json.get('white_list', [])
    censorlist = request.json.get('censor_list', [])
    profanity.load_censor_words(whitelist_words=whitelist)
    profanity.add_censor_words(censorlist)
    # join() avoids quadratic string concatenation and fixes the stray
    # leading space the old " " + word accumulator produced.
    censored_text = ' '.join(
        profanity.censor(word, censor_char=random.choice(emojis))
        for word in text.split(' '))
    return {"censored_text": censored_text}
def process_lyrics(self):
    """Censor the song's lyrics and post-process every word.

    Assumes the lyric input is one long string containing no asterisks
    (lyrics already censored with '*' elsewhere may still work, since
    many censors use asterisks themselves).
    """
    profanity.load_censor_words()
    profanity.add_censor_words(self.custom_profanity)
    raw_lyrics = Song.find_song(self.songName).lyrics
    print(raw_lyrics)
    censored = profanity.censor(str(raw_lyrics))
    return " ".join(self.process_word(w) for w in censored.split())
def __check_profanity(self, content):
    """Record a warning for each line of *content* that contains profanity.

    'len' is whitelisted; lines starting with `- "pkg:` are exempt. If
    censoring changes the line count, one generic warning is recorded.
    """
    profanity.load_censor_words(whitelist_words=['len'])
    if not profanity.contains_profanity(content):
        return
    censored_lines = profanity.censor(content).splitlines()
    content_lines = content.splitlines()
    if len(censored_lines) != len(content_lines):
        # Can't attribute per line if censoring altered the structure.
        self.warn_results.append("Contains profanity.")
        return
    for idx, (raw, censored) in enumerate(zip(content_lines, censored_lines)):
        if raw == censored:
            continue
        if raw.startswith('- "pkg:'):
            continue  # package-list entries are allowed to differ
        self.warn_results.append(f"Contains profanity in line #{idx}: [{censored}]")
def user(username):
    """Render a user's profile page with their paginated game list."""
    profanity.load_censor_words()
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    user_game = User_game.query.filter_by(user_id=user.id).order_by(
        desc(User_game.game_id)).paginate(page, Config.GAMES_PER_PAGE, False)
    page_info = {
        "total": user_game.pages,
        "current": user_game.page,
        "item_total": user_game.total
    }
    # Forward/backward navigation links only exist when the page does.
    next_url = last_url = None
    if user_game.has_next:
        next_url = url_for("user", username=user.username, page=user_game.next_num)
        last_url = url_for("user", username=user.username, page=page_info["total"])
    prev_url = first_url = None
    if user_game.has_prev:
        prev_url = url_for("user", username=user.username, page=user_game.prev_num)
        first_url = url_for("user", username=user.username, page=1)
    form = EditGameForm()
    return render_template("user.html",
                           user=user,
                           user_game=user_game.items,
                           page=page_info,
                           form=form,
                           next_url=next_url,
                           prev_url=prev_url,
                           first_url=first_url,
                           last_url=last_url,
                           profanity=profanity)
def removeBadWords(filename):
    """Count whitespace-separated tokens in *filename* that appear in
    better_profanity's default word set; also runs censorwords() on the file.

    Returns the number of matching tokens.
    """
    # `with` closes the handle even if reading raises (the old manual
    # open/close leaked on error).
    with open(filename, "r") as fi:
        allwords = fi.readlines()
    profanity.load_censor_words()
    badwords = profanity.CENSOR_WORDSET
    count = 0
    censorwords(filename)
    for line in allwords:
        for word in line.split():
            if word in badwords:
                count += 1
    return count
def __init__(self, city=None):
    """Set up data sources, scenario lists, and analysis helpers for *city*."""
    self.city = city
    # Result-structure config lives one directory up from the CWD.
    self.structure_file = '{}/config/result.structure.cfg'.format(
        os.path.pardir)
    self.config = ConfigParser()
    # Suburb coordinates loaded from the project database layer.
    self.suburb_info_json = db_connecter.dataLoader(
        self.city).load_city_suburb_coordinates()
    # Scenarios computed at city granularity vs. suburb granularity.
    self.city_scenarios = [
        'covid-19', 'young_twitter_preference', 'tweet_density'
    ]
    self.suburb_scenarios = ['income', 'education', 'migration']
    self.load_city_structure()
    self.load_suburb_structure()
    self.all_user_ids = set()
    self.covid_user_ids = []
    self.sentiment_analyser = SentimentIntensityAnalyzer()
    # Tweepy client configured to sleep through rate limits instead of raising.
    self.api = tweepy.API(get_twitter_auth(),
                          wait_on_rate_limit=True,
                          wait_on_rate_limit_notify=True)
    # Pre-load the default profanity dictionary for later censoring.
    profanity.load_censor_words()
def post(self, request, pk, *args, **kwargs):
    """Add a comment to a post, rejecting profanity, and notify the post author."""
    profile = Profile.objects.get(user=request.user)

    try:
        post = Post.objects.get(pk=pk)
    except Post.DoesNotExist:
        # BUG FIX: was `redirect(request, 'main/not_found.html')`, which
        # passes the request object where a URL belongs; render the page.
        return render(request, 'main/not_found.html')

    # Custom profanity words added by admins.
    custom_badwords = CustomProfanity.objects.values_list('bad_word', flat=True)
    profanity.load_censor_words(custom_badwords)

    form = CommentForm(request.POST)
    valid = form.is_valid()
    # Only read cleaned_data after a successful validation.
    if valid and profanity.contains_profanity(form.cleaned_data.get('body')):
        # add_error keeps Django's error container a list of messages.
        form.add_error(
            'body',
            'Please remove any profanity/swear words. (Added by an admin. '
            'Contact an admin if you believe this is wrong.)')
        valid = False

    if valid:
        comment_instance = form.save(commit=False)
        comment_instance.user = profile
        comment_instance.post = post  # reuse the post fetched above (was queried twice)
        comment_instance.save()
        Notifications.objects.create(notification_type=2,
                                     from_user=request.user,
                                     to_user=post.author.user,
                                     post=post)
        return redirect(request.headers.get('Referer'))
    context = {'post': post, 'profile': profile, 'form': form}
    return render(request, 'posts/view_post.html', context)
def setup_bot(bot):
    """Attach global command checks, Redis + CDN clients, and the profanity
    filter to *bot*."""

    @bot.check
    async def globally_block_bots(ctx):
        # Ignore commands issued by other bots.
        return not ctx.author.bot

    @bot.check
    async def global_blacklist(ctx):
        # Block commands from blacklisted users, servers, or channels.
        blacklists = ctx.bot.blacklists
        return not any(
            (
                ctx.author.id in blacklists["users"],
                ctx.guild.id in blacklists["servers"],
                ctx.channel.id in blacklists["channels"],
            )
        )

    pool = Redis()
    loop = asyncio.get_event_loop()
    rds = os.environ.get("REDISCLOUD_URL", None)
    if rds is not None and rds:
        loop.run_until_complete(pool.connect_pool_url(rds))
    # NOTE(review): this config-based connect runs even when the
    # REDISCLOUD_URL branch above already connected — confirm whether it
    # was meant to be an `else` fallback.
    loop.run_until_complete(
        pool.connect_pool(
            bot.config["REDIS"]["host"],
            bot.config["REDIS"]["port"],
            pw=bot.config["REDIS"].get("password", None),
        )
    )
    bot.db = pool
    bot.cdn = CDN(
        bot.config["CDN"]["host"],
        bot.config["CDN"]["space"],
        bot.config["CDN"]["client"],
        bot.config["CDN"]["secret"],
    )
    # Pre-load the default word list and expose the module on the bot.
    profanity.load_censor_words()
    setattr(bot, "profanity", profanity)
async def profanity_check(bot, message):
    """Delete *message* and warn when the guild's profanity filter flags it.

    Returns True when the message was handled as profanity, False
    otherwise (including when the guild has no config document or the
    toggle is off — missing keys fall through via TypeError/KeyError).
    """
    _data = await bot.config.find_one({"_id": message.guild.id})
    msg = str(message.content).lower()
    try:
        if _data['profanity_toggle']:  # check if profanity is enabled
            try:
                if _data['words']:
                    # per-guild custom word list takes precedence
                    profanity.load_censor_words(_data['words'])
            except (TypeError, KeyError):
                # no custom list configured; fall back to the bundled file
                profanity.load_censor_words_from_file(
                    bot.path + '/assets/profanity.txt')
            # anti-profanity
            if await check_for_profanity(bot, msg):
                # make sure that they're not adding a word;
                # in that case then don't do stuff
                if await profanity_command_check(bot, message):
                    return False
                try:
                    await message.delete()
                except (discord.NotFound, discord.Forbidden):
                    # message already gone or missing permissions
                    pass
                em = SaturnEmbed(
                    description=f"{WARNING} That word is not allowed in **{message.guild}**!",
                    colour=GOLD)
                await message.channel.send(embed=em)
                await automod_log(
                    bot, message, "warning",
                    f"Said || {message.content} || which contains profanity")
                return True
        return False
    except (TypeError, KeyError):
        # guild has no config document at all
        return False
def new():
    """Create a new short link: validate the form, censor profanity in all
    user-supplied fields, normalise and uniquify the slug, then persist."""
    form = LinkForm()
    if form.validate_on_submit():
        link = Link(link=form.link.data,
                    title=form.title.data,
                    name=form.name.data,
                    desc=form.desc.data,
                    image=form.image.data,
                    url='https://www.youtube.com/watch?v=dQw4w9WgXcQ')
        # Extend the default dictionary with the site's own word list.
        with open('bad-words.txt', 'r') as f:
            wordlist = [i.strip() for i in f.readlines()]
        profanity.load_censor_words()
        profanity.add_censor_words(wordlist)
        if profanity.contains_profanity(
                f'{link.link} {link.title} {link.name} {link.desc}'):
            flash(
                'NOTE: EXCESSIVE PROFANITY IS NOT PERMITTED ON THIS PLATFORM. CONTINUED EXCESSIVE PROFANITY MAY RESULT IN AN IP BAN FROM THIS PLATFORM',
                'danger')
            # Replace profanity with 'X' characters in every user field.
            link.link = profanity.censor(link.link, 'X')
            link.title = profanity.censor(link.title, 'X')
            link.name = profanity.censor(link.name, 'X')
            link.desc = profanity.censor(link.desc, 'X')
        # Normalise the slug to [a-zA-Z0-9-].
        link.link = link.link.replace(' ', '-')
        link.link = re.sub(r'[^a-zA-Z0-9-]', '-', link.link)
        # ensure uniqueness of link (append 'X' until the slug is free)
        existinglink = Link.query.filter_by(link=link.link).first()
        while existinglink:
            link.link = link.link + 'X'
            existinglink = Link.query.filter_by(link=link.link).first()
        db.session.add(link)
        db.session.commit()
        # getting config details
        with open('config.json') as f:
            data = json.load(f)
        flash(f"Created link {data['domain']}/l/{link.link}", 'success')
        return redirect(url_for('home'))
    return render_template('new.html', form=form, legend='New Link')
def __init__(self):
    """Load the profanity dictionary, canned responses, and the trained
    chat model from the saved checkpoint."""
    profanity.load_censor_words()
    with open('responses.json', 'r') as f:
        self.responses = json.load(f)
    # Checkpoint saved one directory above this module.
    FILE = os.path.join(os.path.dirname(__file__), '..', 'data.pth')
    # if no cuda, force the checkpoint onto the CPU
    map_location = None if torch.cuda.is_available() else 'cpu'
    data = torch.load(FILE, map_location=map_location)
    self.input_size = data["input_size"]
    self.hidden_size = data["hidden_size"]
    self.output_size = data["output_size"]
    self.all_words = data["all_words"]
    self.tags = data["tags"]
    self.model_state = data["model_state"]
    # Rebuild the network with the saved dimensions on the class-level device.
    self.model = NeuralNet(
        self.input_size, self.hidden_size,
        self.output_size).to(ChatBot.device)
    self.model.load_state_dict(self.model_state)  # load learned parameters
    self.model.eval()
def setUp(self):
    """Warm the default censor word set before each test; show full diffs."""
    # Pre-load CENSOR_WORDSET
    profanity.load_censor_words()
    self.maxDiff = None
def test_unicode_vietnamese_2(self):
    """A custom Vietnamese word is censored wherever it repeats."""
    profanity.load_censor_words(["gâu"])
    sample = "Con chó sủa gâu gâu!"
    self.assertEqual(profanity.censor(sample), "Con chó sủa **** ****!")