def redirect_():
    """Handle the profile-search form.

    Reads ``Search_user`` from the POSTed form.  If the searched name exists
    in ``db``, renders that user's public profile (name censored) — unless
    the visitor searched for themselves, in which case they go home.
    Unknown names also redirect home.
    """
    try:
        person = request.form['Search_user']
        if person in db:
            # Searching for yourself just returns you to your own home page.
            if person == request.cookies.get('username'):
                return redirect(url_for('_home_'))
            if 'AboutUser' in db[person]:
                return render_template('visitor_view.html',
                                       USERNAME=profanity.censor(person),
                                       ABOUT=db[person]['AboutUser'])
            return render_template('visitor_view.html',
                                   USERNAME=profanity.censor(person))
        return redirect(url_for('_home_'))
    except Exception:
        # Missing form field or similar: fall back to the homepage using the
        # identity cookies.  Narrowed from a bare ``except:`` so that
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        _user = request.cookies.get('username')
        _person = request.cookies.get('person')
        _reason = request.cookies.get('reason')
        return render_template('homepage.html',
                               USERNAME=profanity.censor(_user),
                               PERSON=_person,
                               REASON=_reason)
def submit():
    """Create a new top-level post from ``post_form``.

    On successful validation, inserts a post document into ``db.posts`` with
    a pseudo-random monotonically increasing ``post_id`` and redirects to
    the new post; otherwise re-renders the index with existing posts.
    """
    form = post_form()
    # Censor user-supplied text before it is stored or displayed anywhere.
    code_name = profanity.censor(form.post_alias.data.strip(" "))
    post_content = profanity.censor(form.post_content.data.strip(" "))
    tags = profanity.censor(form.tags.data.strip(" ")).split(" ")
    if form.validate_on_submit():
        post_date = datafunctions.get_pst_time()
        post_colour = form.colours.data
        # Advance the global post-id counter by a random step so ids are
        # increasing but not sequential/guessable.  Assumes the
        # ``post_increments`` document exists (NameError otherwise, as before).
        for increment in db.increments.find({'type': "post_increments"}):
            new_post_increment = int(increment['post_increments']) + random.randint(1, 50)
            db.increments.update_one(
                {'type': "post_increments"},
                {'$set': {'post_increments': new_post_increment}})
        this_post = {
            "code_name": code_name,
            "content": post_content,
            "date_posted": post_date,
            "post_id": new_post_increment,
            "colour": post_colour,
            "comments": [],
            "tags": tags,
        }
        db.posts.insert_one(this_post)
        return redirect(url_for('post', post_number=new_post_increment))
    posts_json_data = db.posts.find()
    return render_template("index.html", post_data=posts_json_data,
                           form=form, comment=comment_form())
def handle_message(msg):
    """Persist an incoming chat message and broadcast the censored copy."""
    global profane
    clean_name = profanity.censor(msg['name'])
    clean_content = profanity.censor(msg['content'])
    image = msg['image']
    # e.g. "Monday 3:05:09 PM" — leading zeros stripped from hour/minute.
    # (Computed but currently unused downstream.)
    now = datetime.now().strftime('%A %I:%M:%S %p').lstrip("0").replace(
        " 0", " ")
    record = Meals(name=clean_name, content=clean_content, image=image)
    database.session.add(record)
    database.session.commit()
    payload = {'name': clean_name, 'content': clean_content, 'image': image}
    send({'json_data': payload}, broadcast=True)
def room_post():
    """Create a new post inside a room from ``room_post_form``.

    On validation, bumps the room's per-room post-id counter by a random
    step, appends the post to the room document, and redirects to it.
    On failure, re-renders the room page with the form errors.
    """
    form = room_post_form()
    if form.validate_on_submit():
        code_name = profanity.censor(form.room_post_alias.data.strip(" "))
        post_content = profanity.censor(form.room_post_content.data.strip(" "))
        post_date = datafunctions.get_pst_time()
        post_colour = form.room_colours.data
        room_id = form.room_id.data.strip(" ")
        tags = profanity.censor(form.room_tags.data.strip(" ")).split(" ")
        # Advance this room's post-id counter (random step => non-sequential).
        for thatroom in db.rooms.find({'room_id': str(room_id)}):
            room_post_id = int(thatroom["room_post_increments"]) + random.randint(1, 50)
            db.rooms.update_one({'room_id': str(room_id)},
                                {'$set': {'room_post_increments': room_post_id}})
        this_post = {
            "code_name": code_name,
            "content": post_content,
            "date_posted": post_date,
            "room_post_id": room_post_id,
            "colour": post_colour,
            "comments": [],
            "tags": tags,
        }
        # Push exactly once.  The original looped over find() and called
        # update_one per match, which could append the same post repeatedly
        # if duplicate room documents existed.
        db.rooms.update_one({'room_id': str(room_id)},
                            {'$push': {'posts': this_post}})
        return redirect(url_for('roompost', room_id=room_id, post_id=room_post_id))
    else:
        # Validation failed: send the visitor back to the room they were in.
        room_id = form.room_id.data.strip(" ")
        for thatroom in db.rooms.find({'room_id': str(room_id)}):
            return render_template("room.html", room_data=thatroom, form=form)
def post_room_comment():
    """Attach a comment to a post inside a room, then return to that post.

    Whether or not the form validates, the visitor is redirected back to
    the post they were viewing; the comment is stored only on success.
    """
    form = roomcomment_form()
    room_id = form.room_id.data
    post_id = form.post_id.data
    if form.validate_on_submit():
        comment = {
            "code_name": profanity.censor(form.comment_alias.data.strip(" ")),
            "content": profanity.censor(form.comment_content.data.strip(" ")),
            "date_posted": datafunctions.get_pst_time(),
        }
        # Positional $ targets the matched element of the posts array.
        db.rooms.update_one(
            {'room_id': str(room_id), "posts.room_post_id": int(post_id)},
            {"$push": {"posts.$.comments": comment}})
    return redirect(url_for('roompost', room_id=str(room_id),
                            post_id=str(post_id)))
def create_room():
    """Create a new room from ``makeroom_form``.

    On success, inserts a room document with an id built from a randomly
    advanced counter plus 4 random characters, then redirects to the room.
    On failure (or a plain GET), renders the creation form.
    """
    form = makeroom_form()
    if form.validate_on_submit():
        # Advance the global room counter only when a room is actually
        # created.  The original bumped it on every request — including GETs
        # and failed validation — burning ids for nothing.
        for increment in db.increments.find({'type': "room_increments"}):
            new_beginning = int(increment['beginning']) + random.randint(1, 99)
            db.increments.update_one({'type': "room_increments"},
                                     {'$set': {'beginning': new_beginning}})
        title = profanity.censor(form.title.data.strip(" "))
        description = profanity.censor(form.description.data.strip(" "))
        created_date = datafunctions.get_pst_time()
        # Room ids mix the numeric counter with 4 random characters.
        room_id = str(new_beginning) + datafunctions.random_char(4)
        room = {
            "title": title,
            "description": description,
            "date_posted": created_date,
            "room_id": room_id,
            "room_post_increments": 0,
            'posts': [],
        }
        db.rooms.insert_one(room)
        return redirect(url_for('room', room_id=room_id))
    return render_template("makeroom.html", form=form)
def post_comment():
    """Attach a comment to a top-level post, then redirect back to it.

    The comment is stored only when the form validates and the target post
    exists; in every case the visitor lands back on the post page.
    """
    form = comment_form()
    post_id = form.post_id.data
    if form.validate_on_submit():
        comment = {
            "code_name": profanity.censor(form.comment_alias.data.strip(" ")),
            "content": profanity.censor(form.comment_content.data.strip(" ")),
            "date_posted": datafunctions.get_pst_time(),
        }
        # find_one replaces the original find() loop with its redundant
        # ``len(doc) > 0`` check (a document returned by find() is never empty).
        if db.posts.find_one({'post_id': int(post_id)}) is not None:
            db.posts.update_one({'post_id': int(post_id)},
                                {'$push': {'comments': comment}})
    # Always redirect.  The original fell through and returned None when the
    # post id did not exist, which produced a 500 instead of a redirect.
    return redirect(url_for('post', post_number=str(post_id)))
def test_whitelist_words(self):
    """Whitelisting a censored word must leave it untouched."""
    original = "I have boobs"
    masked = "I have ****"
    # Default dictionary censors the word.
    self.assertEqual(profanity.censor(original), masked)
    # Whitelist the word `boobs`
    profanity.load_censor_words(whitelist_words=["boobs"])
    # The same input now passes through unchanged.
    self.assertEqual(profanity.censor(original), original)
def profanity_word(text, lang):
    """Censor profanity in *text*.

    Vietnamese (``lang == 'vi'``) uses the banned-word file and a ``-``
    marker; every other language uses the default list and ``*``.
    Clean text is returned unchanged.
    """
    if lang == 'vi':
        profanity.load_censor_words_from_file('banned_word.text')
        marker = '-'
    else:
        profanity.load_censor_words()
        marker = '*'
    if profanity.contains_profanity(text):
        return profanity.censor(text, marker)
    return text
def game_color(self, msg, answer=None, rgb=None, color=None):
    """Handle a guess in the colour game.

    When *answer* equals the pending answer, marks the round solved,
    congratulates the channel, and fills the game screen with the colour
    given by *rgb* (hex) or *color* (name), defaulting to green when the
    colour cannot be parsed.
    """
    # First guess seen binds the game to that channel.
    if self.game_channel is None:
        self.game_channel = msg.channel.id
        logger.warn(
            f"game channel set to {self.game_channel}\n{msg.channel}")
    try:
        _answer = float(answer)
        # Ignore the guess if the round is already answered, a new question
        # is being generated, or the value is simply wrong.
        if (self.answered is True or self.generating is True
                or _answer != self.pending_answer):
            return
        self.answered_at = datetime.datetime.now()
        self.answered = True
        self.total += 1
        name = profanity.censor(msg.sender.real_name)
        msg.react("heavy_check_mark")
        msg.say(f":champagne: :fireworks: The answer has been found!"
                f".. it was {self.pending_answer}\nWait for new question")
        try:
            color_tuple = None
            if rgb is not None:
                color_tuple = hex_to_rgb(rgb)
            # NOTE: *color* (a name) takes precedence over *rgb* when both
            # are supplied.
            if color is not None:
                color_tuple = name_to_rgb(color)
        except Exception as e:
            # Unparseable colour: apologise and fall back to green.
            msg.reply(
                f"Sorry, couldn't work out that color, I'll do super green"
            )
            color_tuple = (0, 128, 0)
        if color_tuple is not None:
            name = profanity.censor(msg.sender.real_name)
            logger.warn(f"Changing color to {color_tuple} for {name}")
            self.screen.fill(color_tuple)
            self._center_text(name)
            pygame.display.flip()
        else:
            msg.reply(f"Sorry, couldn't work out that color")
    except Exception as e:
        # Any other failure (e.g. a non-numeric answer) is reported in-channel.
        msg.reply(
            f":slightly_frowning_face: Doh, something went wrong, sorry\n`{e}`"
        )
def listing_handler(video_id):
    '''Handles profanity-check for a video with the given video id.

    Fetches the video's transcript, censors every caption segment that
    contains profanity, and returns a JSON report.
    '''
    response.headers['Content-Type'] = 'application/json'
    response.headers['Cache-Control'] = 'no-cache'
    print(video_id)
    found_profanity = False
    censored_segments = []
    # Scan every caption segment of the transcript.
    for segment in YouTubeTranscriptApi.get_transcript(video_id):
        caption = segment['text']
        if profanity.contains_profanity(caption):
            found_profanity = True
            censored_segments.append(profanity.censor(caption))
    report = {
        'video-id': video_id,
        'profanity-detected': found_profanity,
        'censored': censored_segments
    }
    return json.dumps(report)
def _create_new_room_():
    """Create a debate room from the submitted form and re-render the
    homepage for the (censored) current user.

    Rooms live in ``db['DebateRooms']`` as a list of dicts with the keys
    ``RoomName`` / ``Details`` / ``RoomOwner``.
    """
    try:
        _new_room = request.form['new_room_name']
        _new_room_details = request.form['new_room_details']
        _user = request.cookies.get('username')
        # Use one document shape for both branches.  The original appended
        # ``{_user: _new_room, 'Details': ...}`` — the username as the key —
        # which disagreed with the RoomName/Details/RoomOwner schema created
        # below and made those entries unreadable by that schema.
        new_entry = {
            'RoomName': _new_room,
            'Details': _new_room_details,
            'RoomOwner': _user,
        }
        if 'DebateRooms' in db:
            rooms = db['DebateRooms']
            rooms.append(new_entry)
            db['DebateRooms'] = rooms
        else:
            db['DebateRooms'] = [new_entry]
        _person = request.cookies.get('person')
        _reason = request.cookies.get('reason')
        return make_response(
            render_template('homepage.html',
                            USERNAME=profanity.censor(_user),
                            PERSON=_person,
                            REASON=_reason))
    except Exception:
        # Narrowed from a bare ``except:``.  Missing form fields fall back
        # to this minimal error response.
        return 'nah'
def result():
    """JSON endpoint: report whether the posted ``input`` string contains
    profanity, how many censored words were found, and the censored text.
    """
    req = request.get_json()
    as_string = req['input']
    # contains_profanity already returns a bool; no ``== True`` needed.
    checked = "Yes" if profanity.contains_profanity(as_string) else "No"
    censored = profanity.censor(as_string, '🙉')
    # Each censored word becomes exactly four marker characters, so counting
    # runs of four markers approximates the number of censored words.
    count = censored.count('🙉🙉🙉🙉')
    res = make_response(jsonify(f'Contains profanity? {checked}. Profanity count: {count}. Censored version: "{censored}"'), 200)
    # Removed the unreachable ``return render_template("home.html")`` that
    # followed the return above.
    return res
async def on_message(self, message: discord.Message):
    """Global message hook.

    When the ``profanity-check`` config flag is on, deletes profane
    messages in text channels and posts a self-deleting notice containing
    the censored content.  Also answers direct mentions of the bot with a
    pointer to the help command.
    """
    if self.bot.config["profanity-check"]:
        if profanity.contains_profanity(message.content) and isinstance(message.channel, discord.TextChannel):
            await message.delete()
            # todo check if the content is longer than the max chars for the description
            censored_content = profanity.censor(message.content, '\\*')
            embed = discord.Embed(
                title=f"I have deleted a message from {message.author.display_name} because it contains bad words! ",
                description=f"Content of the message:\n||{censored_content}||\n\uFEFF",
                colour=int(self.bot.config["embed-colours"]["default"], 16)
            )
            embed.set_footer(text="This message will delete itself after 15 seconds!")
            await message.channel.send(embed=embed, delete_after=15)
    if not await self.bot.check_message_reply(message, False):
        return
    prefix = self.bot.get_my_prefix(self.bot, message)
    # if the bot gets mentioned it replies
    if f"<@!{self.bot.user.id}>" in message.content:
        await message.channel.send(embed=discord.Embed(
            # Fixed garbled "I If you need my help ..." in the reply title.
            title=f"If you need my help use `{prefix}help` to get a list of the available commands.",
            colour=int(self.bot.config["embed-colours"]["default"], 16)
        ))
def processDone(self):
    """Render the finished recording: plot the waveform with profane
    segments highlighted, and fill the transcript list with censored lines.
    """
    global transcripts
    global s2t
    global fs
    global data
    # Total clip length in seconds; used to map times onto sample indices.
    wavlength = len(data) / float(fs)
    # First channel of each sample frame.
    ydata = [i[0] for i in data]
    xdata = range(len(ydata))
    graphLineColor = (200, 200, 234)
    graphCriticalLineColor = (255, 128, 128)
    timeCount = 0
    # Each s2t entry appears to be (text, end_time) — TODO confirm.  The
    # waveform span for a segment is drawn red when its text is profane.
    for i in s2t:
        dataPointLeft = int((timeCount / wavlength) * len(ydata))
        dataPointRight = int((i[1] / wavlength) * len(ydata))
        color = graphCriticalLineColor if profanity.contains_profanity(
            i[0]) else graphLineColor
        self.graphWidget.plot(xdata[dataPointLeft:dataPointRight],
                              ydata[dataPointLeft:dataPointRight],
                              pen=pg.mkPen(color=color))
        timeCount = i[1]
    self.transcriptBox.clear()
    criticalBrush = QBrush(QColor(41, 41, 61))
    # Profane transcript lines get a dark foreground on a red background.
    for tr in transcripts:
        item = QListWidgetItem(str(profanity.censor(tr)).capitalize())
        if profanity.contains_profanity(tr):
            item.setForeground(criticalBrush)
            item.setBackground(QColor(255, 128, 128))
        self.transcriptBox.addItem(item)
    # Re-enable the record button now that processing is complete.
    self.recordButtonOn = True
def test_leaves_paragraphs_untouched(self):
    """censor() must return clean prose verbatim."""
    passage = """If you prick us do we not bleed? If you tickle us do we not laugh? If you poison us do we not die? And if you wrong us shall we not revenge?"""
    # No profanity present, so the output equals the input.
    self.assertEqual(passage, profanity.censor(passage))
def main(args: argparse.Namespace) -> None:
    """Censor profanity in text from the command line.

    Reads the input either from ``args.text`` or, failing that, from the
    file at ``args.file``.  Optionally writes the censored result to
    ``args.output``, and always prints it to the console.
    """
    # this will store the passed content
    content = ''
    if args.text is not None:
        # Inline text takes precedence over a file argument.
        content = args.text
    else:
        try:
            with open(args.file) as f:
                for data in f.readlines():
                    content += data
        except FileNotFoundError:
            # \033[91m is ANSI red.
            print('\033[91mERROR: file not found\033[0m')
            # Was exit(0): a missing input file is a failure, so signal a
            # nonzero status to the shell.
            exit(1)
    # censor the content
    censored = profanity.censor(content)
    # writing censored data to a file if asked
    if args.output is not None:
        with open(args.output, 'w') as f:
            f.write(censored)
        # informing the user (\033[92m is ANSI green)
        print(f'\033[92m[+] Censored data written to {args.output}\033[0m')
    # printing the content to the console
    print()
    print('\033[1mCENSORED DATA\033[0m')
    print()
    print(censored)
def clean_transcript(link):
    """Fetch the auto-generated English transcript for a YouTube link,
    censor every caption, save it to disk once, and return it.

    Returns the list of transcript segments (dicts with ``text`` /
    ``start`` / ``duration``), with ``text`` already censored.
    """
    video_id = gen_id(link)
    transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
    transcript = transcript_list.find_generated_transcript(['en']).fetch()
    profanity.load_censor_words()
    for segment in transcript:
        segment['text'] = profanity.censor(segment['text'])
    title = get_title(link)
    # Build the destination path once instead of rebuilding it five times.
    out_path = os.path.join(SAVE_TRANSCRIPT_PATH, title) + '.txt'
    if not os.path.exists(out_path):
        # ``with`` guarantees the file is closed even if a write fails.
        with open(out_path, 'a') as out_file:
            for line in transcript:
                inf = [line['text'], line['start'], line['duration']]
                out_file.writelines(str(inf))
                out_file.write('\n')
        print('Transcript saved to', out_path)
    else:
        print('File Already Exists!')
    print()
    return transcript
def clean_tweets_data(tweets):
    """Concatenate tweets into one censored text blob for markovify.

    Emoji, URLs and @-mentions are stripped from each tweet before
    censoring; tweets are separated by blank lines so markovify treats
    them as distinct sentences.
    """
    text = ""
    # remove emoji from tweets:
    emoji_pattern = re.compile(
        "["
        u"\U0001F600-\U0001F64F"  # emoticons
        u"\U0001F300-\U0001F5FF"  # symbols & pictographs
        u"\U0001F680-\U0001F6FF"  # transport & map symbols
        u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
        "]+",
        flags=re.UNICODE,
    )
    url_pattern = re.compile(r"\S+\/\S+", re.DOTALL)
    mentions_pattern = re.compile(r"@\S+", re.DOTALL)
    for tweet in tweets:
        without_emoji = emoji_pattern.sub(r"", tweet)
        without_url = url_pattern.sub(r"", without_emoji)
        cleaned_text = mentions_pattern.sub(r"", without_url)
        # Bug fix: censor the fully cleaned text.  The original censored
        # ``text_without_emoji`` and silently discarded the URL/mention
        # stripping it had just computed.
        text += (profanity.censor(cleaned_text) + "\n\n"
                 )  # Make sure each tweet is handled properly by markovify
    return text
def remove_profanity(name: str) -> str:
    """Strip profane words from *name* and tidy the gaps they leave."""
    profanity.add_censor_words(['supremacia ariana'])
    censored = profanity.censor(name, ' ').strip()
    # Profanity was replaced with spaces, so squeeze space runs down to one.
    return re.sub(' +', ' ', censored)
def Review():
    """Classify the scraped product title as Negative / Neutral / Positive.

    A title containing profanity (detected as any '*' in the censored text)
    is Negative; otherwise TextBlob sentiment polarity decides.
    NOTE(review): the '*' test also fires if the original title itself
    contains an asterisk — confirm that is acceptable.
    """
    # To get the review from web scraping, leave as is.
    # Use your own element id in place of (id="productTitle") below.
    censored = soup.find(id="productTitle").get_text()
    # To get the review from the user instead, enable the 'censored' input
    # line below and disable the web-scraping 'censored' line above.
    #censored=input("Enter the Review :")
    text = profanity.censor(
        censored
    )  # checks whether any abusive words were used in the given review sentence
    #print(text)  # review sentence, for reference
    if '*' in text:
        return 'The text is Negative'
    else:
        obj = TextBlob(text)
        sentiment = obj.sentiment.polarity  # polarity lies in [-1.0, 1.0]
        #print(sentiment)  # to see the raw sentiment value
        if sentiment < 0:
            return 'The text is Negative'
        elif sentiment == 0:
            return 'The text is Neutral'
        elif sentiment > 0 and sentiment <= 1:
            return 'The text is Positive'
    # NOTE(review): unreachable for normal polarity values (always within
    # [-1, 1]); kept as a defensive fallback.
    return censored
def test_unicode_censorship_5(self):
    """Cyrillic words (with combining accents) loaded as the censor list
    must be masked inside Russian prose."""
    original = "Маргаре́та (э́то бы́ло её настоя́щее и́мя) родила́сь в 1876 (ты́сяча восемьсо́т се́мьдесят шесто́м) году́ в Нидерла́ндах. В 18 (восемна́дцать) лет Маргаре́та вы́шла за́муж и перее́хала в Индоне́зию. Там она́ изуча́ла ме́стную культу́ру и та́нцы."
    expected = "Маргаре́та (э́то бы́ло её настоя́щее и́мя) родила́сь в 1876 (ты́сяча восемьсо́т се́мьдесят ****) году́ в ****. В 18 (восемна́дцать) лет Маргаре́та вы́шла за́муж и **** в Индоне́зию. Там она́ изуча́ла ме́стную культу́ру и ****."
    # Replace the dictionary with exactly the four target words.
    profanity.load_censor_words(
        ["шесто́м", "Нидерла́ндах", "перее́хала", "та́нцы"])
    self.assertEqual(profanity.censor(original), expected)
def _tokenize_for_end_user(self, text):
    """Censor, sound out dollar amounts, and split into word tokens,
    dropping tokens that are nothing but punctuation."""
    prepared = SentenceDiff._sound_out_dollars(profanity.censor(text, 'x'))
    tokens = []
    for token in str(prepared).strip().split():
        # Keep the token only if something remains once punctuation goes.
        if len(self._remove_punctuation(token).strip()) > 0:
            tokens.append(token)
    return tokens
async def meme(ctx, args=""):
    """Post a random SFW meme from meme-api, optionally restricted to the
    subreddit given as the first word of *args*.

    Retries up to 10 times to find a non-NSFW meme before giving up.
    """
    subreddit = ""
    if len(shlex.split(args)) >= 1:
        subreddit = shlex.split(args)[0]
    memejson = json.loads(
        requests.get("https://meme-api.herokuapp.com/gimme/" + subreddit).text)
    if not "url" in memejson:
        await ctx.send("The subreddit you gave me is currently not available.")
    else:
        i = 0
        while memejson["nsfw"] == True:
            # Bug fix: retry with the parsed ``subreddit``, not the raw
            # ``args`` string (which may contain extra words and break the URL).
            memejson = json.loads(
                requests.get("https://meme-api.herokuapp.com/gimme/" + subreddit).text)
            i += 1
            if i == 10:
                await ctx.send(
                    "Clean memes were not found after 10 tries, please try again."
                )
                return
        profanity.load_censor_words()
        await ctx.send(
            embed=discord.Embed(title=profanity.censor(memejson["title"]),
                                url=memejson["postLink"]).set_image(
                                    url=memejson["url"]))
def song_name(self):
    """Return the censored, word-processed title of the configured song."""
    profanity.load_censor_words()
    # Extend the dictionary with this instance's custom banned words.
    profanity.add_censor_words(self.custom_profanity)
    raw_title = str(Song.find_song(self.songName).title)
    censored_title = profanity.censor(str(raw_title))
    processed_words = [self.process_word(word)
                       for word in censored_title.split()]
    return " ".join(processed_words)
def taboo_censor(request):
    """Stub censor hook for blog submissions.

    NOTE(review): the ``__name__ == "__main__"`` guard *inside* a function
    means this body only runs when the module is executed directly — when
    imported as a view, calling this function does nothing.  ``text`` is
    also reset to '' before censoring and the result is discarded;
    presumably the real submission text will be wired in here — confirm
    the intent before relying on this.
    """
    if __name__ == "__main__":
        profanity.load_censor_words()
        # text = #Once Blog Implemented, string of text that gets submitted will be scanned here
        text = ''
        text = profanity.censor(text)
async def compliment(ctx, args=""):
    """Fetch a random compliment from toykeeper.net and post it censored."""
    profanity.load_censor_words()
    page = requests.get("http://toykeeper.net/programs/mad/compliments", ).text
    # The compliments are the <h3> headings of the page; take the first.
    headings = [e.text_content() for e in html.fromstring(page).xpath("//h3")]
    await ctx.send(profanity.censor(headings[0].replace("\n", "")))
def test_censorship_1(self):
    """Default censoring masks every profane token, case-insensitively,
    while leaving clean words intact."""
    result = profanity.censor("Dude, I hate shit. F**k bullshit.")
    # make sure it finds both instances
    self.assertFalse("shit" in result)
    # make sure it's case sensitive
    self.assertFalse("f**k" in result)
    # make sure some of the original text is still there
    self.assertTrue("Dude" in result)
def message(data):
    """Broadcast a censored chat message to everyone in the sender's room."""
    censored = profanity.censor(data['message'], '🤬')
    room_code = data['roomCode']
    # Look up the sender's emoji from their socket session in this room.
    sender_emoji = rooms[room_code].users[request.sid].emoji
    payload = {
        'username': data['userName'],
        'emoji': sender_emoji,
        'message': censored,
    }
    emit('chatMSG', payload, broadcast=True, room=room_code)
def __message(message):
    """Censor and strip URLs from a confession, then forward it to the
    CONFESSION channel tagged with its message id."""
    url_pattern = r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|( \([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])) '''
    # Censor first, run the project cleaner, then drop anything URL-shaped.
    stripped = re.sub(url_pattern, '', cleaner(profanity.censor(message.text)))
    content = f"#{message.message_id}: <i>" + stripped + "</i>"
    bot.send_message(CONFESSION, content, parse_mode='HTML')