def converse(message):
    """
    Catches any text signal emitted by the socketIO client and emits a signal
    to all users in that room to add the message to the chat box.

    :param message: Conversation Message
    """
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    client, db = open_db_connection()
    new_message = {'msg': profanity.censor(message['msg'])}
    print(session)
    room = session['room']
    db_collection = session['room'].replace("-", "_")
    db['chat_log'][db_collection].insert(
        dict(room=session['room'].replace("-", "_").encode("utf-8"),
             message=new_message,
             by=session.get('net_id'),
             type=session.get('type'),
             time=st.encode("utf-8")))
    close_db_connection(client)
    emit('message', {
        'msg': session.get('net_id') + ':' + new_message['msg'],
        'type': session.get('type')
    }, room=room)
def comment():
    try:
        content = profanity.censor(
            unquote(request.args.get('content', type=str)))
        id = request.args.get('id', type=str)
        if id == '':
            return jsonify(error='Invalid id')
        if len(content) > app.config['MAX_COMMENT_LENGTH']:
            return jsonify(error='Comment exceeds 512 characters')
        if content == '':
            return jsonify(error='Comment must contain text')
        if Dish.query.filter_by(id=id).first() is None:
            return jsonify(error='Dish {} does not exist'.format(id))
        if h.post_interval_exists():
            time_remaining = app.config['MIN_POST_INTERVAL'] - (
                int(time()) - g.user.last_activity)
            return jsonify(error='Please wait {} seconds before posting again'.
                           format(time_remaining))
        new_comment = Comment(g.user.id, id, content)
        db.session.add(new_comment)
        h.update_score(app.config['ADD_COMMENT_SCORE'])
        db.session.commit()
        date = new_comment.date.strftime("%B %d, %Y")
        return jsonify(date=date)
    except (KeyError, TypeError):
        return jsonify(error='Invalid content or id')
def test_leaves_paragraphs_untouched(self):
    innocent_text = """If you prick us do we not bleed?
                    If you tickle us do we not laugh?
                    If you poison us do we not die?
                    And if you wrong us shall we not revenge?"""
    censored_text = profanity.censor(innocent_text)
    self.failUnless(innocent_text == censored_text)
def save_posted_tweet_to_redis(self, tweet, p, source):
    msg = json.dumps({
        'text': profanity.censor(tweet),  # for public dashboard
        'polarity': p,
        'source': source
    })
    r.publish('tweet_msgs', msg)
def check_profanity(text_to_check):
    is_it_dirty = profanity.contains_profanity(text_to_check)
    if is_it_dirty:
        suggested_text = profanity.censor(text_to_check)
        print("\nThe following text was censored:\n")
        print(suggested_text)
    return is_it_dirty
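A minimal usage sketch for the check_profanity() helper above, assuming the helper and the profanity import are in scope; the sample strings are made up for illustration, and "shit" is assumed to be on the library's default word list (the test examples in this section rely on that).

# Hypothetical usage of check_profanity(); sample strings are illustrative only.
if check_profanity("I hate this shit"):
    print("Profanity found; the censored version was printed above.")
print(check_profanity("Have a nice day"))  # False: nothing to censor, nothing printed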
def remove_profanity(name: str) -> str:
    profanity.load_words(['bitch', 'penis'])
    profanity.set_censor_characters(' ')
    name = profanity.censor(name).strip()
    # We just replaced profanity with a space so compress spaces.
    name = re.sub(' +', ' ', name)
    return name
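A hypothetical call to remove_profanity() above; the input name is made up. Because load_words() installs the two-word list and the censor character is a space, a matched word is blanked out and the trailing re.sub collapses the leftover whitespace.

# Hypothetical usage of remove_profanity(); the input string is illustrative.
print(remove_profanity("john penis smith"))  # expected to print "john smith"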
def setmessage(user, message, args=[]):
    if len(args) < 1:
        return "No message provided"
    message = profanity.censor(
        message[message.find("!setmessage") + len("!setmessage") + 1:])
    with open("./obs/message.txt", "w") as file:
        file.write(message)
def check(k):
    custom_words = ['f**k', 'bullshit', 'punkass', 'shit', 'pervert']
    profanity.load_words(custom_words)
    censor = profanity.censor(k)
    print(censor, end=' ')
    # Use a context manager so the output file is closed after writing.
    with open(r"withoutabusewords.txt", "a") as file:
        file.write(censor)
        file.write(" ")
def on_data(self, data):
    # pprint(data)
    # saveFile = io.open('tweet_raw.json', 'a', encoding='utf-8')
    # thetweets = json.loads(data)
    print(json.loads(data))
    self.tweet_data.append(json.loads(data))
    tweets = Htweets2()
    for x in self.tweet_data:
        self.just_text.append(x['text'])
        # cl.classify(x['text'])
        # result =
        # result2 = 'ing'  # if cl.classify(x['text']) == cl.labels() == 'ing' else 'none'
        # result3 = 'normal'  # if not (cl.classify(x['text']) == cl.labels() != result and cl.classify(x['text']) == cl.labels() != result2) else 'none'
        tweets.tweet_timestamp = x['timestamp_ms']
        tweets.tweet_id = x['id']
        tweets.tweet_screenname = x['user']['screen_name']
        tweets.tweet_recount = x['retweet_count']
        tweets.tweet_favour_count = x['favorite_count']
        tweets.tweet_text = profanity.censor(x['text'])
        tweets.tweet_location = x['user']['location']
        tweets.tweet_media_entities = x['source']
        # critical_train2 = [(x['text']), 'norm']
        # cl2 = NaiveBayesClassifier(critical_train2)
        classifier = PositiveNaiveBayesClassifier(
            positive_set=critical_train_neg, unlabeled_set=neg_neutral)
        classifier1 = PositiveNaiveBayesClassifier(
            positive_set=critical_train_ing, unlabeled_set=ing_neutral)
        classifier.classify(x['text'])
        classifier1.classify(x['text'])
        if classifier.classify(x['text']) is True and cl.classify(
                x['text']) == 'alert':
            print('not normal - alert')
            tweets.tweet_status = 'not normal'
            tweets.tweet_score = 'alert'
        elif classifier.classify(x['text']) is False:
            print('normal-no alert')
            tweets.tweet_status = 'normal'
            tweets.tweet_score = 'neutral'
        elif cl2.classify(x['text']) == 'neu':
            print('normal-neutral')
            tweets.tweet_score = 'neutral'
            tweets.tweet_status = 'normal'
        elif classifier1.classify(x['text']) is True and cl.classify(
                x['text']) == 'critical':
            print('not normal - critical')
            tweets.tweet_status = 'not normal'
            tweets.tweet_score = 'critical'
        elif classifier1.classify(x['text']) is False:
            print('normal-no critical')
            tweets.tweet_score = 'neutral'
            tweets.tweet_status = 'normal'
        tweets.save()
def check_profanity(text_to_check):
    output = profanity.contains_profanity(text_to_check)
    if "True" in str(output):
        print("profanity alert!")
        print(profanity.censor(text_to_check))
    elif "False" in str(output):
        print("this document is good to go.")
    else:
        print("cannot scan the document properly.")
def test_censorship(self):
    bad_text = "Dude, I hate shit. F**k bullshit."
    censored_text = profanity.censor(bad_text)
    # make sure it finds both instances
    self.failIf("shit" in censored_text)
    # make sure it's case sensitive
    self.failIf("f**k" in censored_text)
    # make sure some of the original text is still there
    self.failUnless("Dude" in censored_text)
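A self-contained, hypothetical harness showing how a test like the one above can be run on its own; the TestProfanity class name and the modern assert methods are assumptions, not part of the original suite.

import unittest

from profanity import profanity


class TestProfanity(unittest.TestCase):
    def test_censor_replaces_listed_words(self):
        censored = profanity.censor("Dude, I hate shit.")
        self.assertNotIn("shit", censored)   # the profane word is masked
        self.assertIn("Dude", censored)      # ordinary text is preserved


if __name__ == "__main__":
    unittest.main()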
def display(request):
    censored_input = profanity.censor(request.POST['user_input'])
    request.session['result'].insert(0, censored_input)
    request.session.modified = True
    return redirect('/')
def log_message(user, message, message_with_case=""):
    global LOG
    message = message.split(" ")
    try:
        index = len(message[0]) + 1
        LOG.append([datetime.datetime.now(), user.name,
                    profanity.censor(message_with_case[index:])])
        pickle.dump(LOG, open("LOG.p", "wb"))
    except:
        return "Invalid message."
def edit_user(id):
    user = User.query.filter_by(id=id).first()
    if user is None or id != str(g.user.id):
        abort(404)
    month_day_year = User.query.filter_by(id=id).first().\
        date.strftime("%B %d, %Y")
    form = EditUserForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            if h.post_interval_exists():
                return render_template('edit_user.html', form=form,
                                       month_day_year=month_day_year,
                                       user=user)
            user = User.query.filter_by(id=id)
            for entry in form:
                if entry.id in app.config['CONTENTS']:
                    user.update({entry.id: h.stb(form[entry.id].data)})
                elif entry.id != 'csrf_token':
                    user.update({entry.id: form[entry.id].data})
            user.update({'last_edited': int(time())})
            user.update({'last_activity': int(time())})
            user.update({'about': profanity.censor(form['about'].data)})
            db.session.commit()
            flash('Thank you for your update!')
            return redirect(url_for('user_profile', id=id))
    if request.method == 'GET':
        user_dict = h.rowtodict(user)
        for entry in form:
            if entry.id == "username":
                if user.username:
                    form.username.data = user.username
                else:
                    form.username.data = user.name
            elif entry.id == "about":
                form.about.data = user.about
            elif entry.id != "csrf_token":
                form[entry.id].data = str(user_dict[entry.id])
    return render_template('edit_user.html', form=form,
                           month_day_year=month_day_year, user=user)
def post(self):
    json_data = request.get_json(force=True)
    try:
        userId = json_data["userId"]
    except:
        userId = None
    current_user = load_user(userId)
    try:
        groupIdPosted = json_data["groupId"]
    except:
        groupIdPosted = None
    try:
        content = json_data["content"]
    except:
        content = None
    if content is not None:
        if profanity.contains_profanity(content):
            res = "The post contains offensive language, therefore we are censoring it"
            content = profanity.censor(content)
    try:
        visibility = json_data["visibility"]
    except:
        visibility = None
    datetimePost = datetime.utcnow()
    if hasattr(current_user, "domain"):
        groupIdOwner = current_user.id
        postt = GroupPost(groupIdPosted=groupIdPosted, content=content,
                          visibility=visibility, datetime=datetimePost,
                          groupIdOwner=groupIdOwner)
    else:
        userIdOwner = current_user.id
        postt = UserPost(groupIdPosted=groupIdPosted, content=content,
                         visibility=visibility, datetime=datetimePost,
                         userIdOwner=userIdOwner)
    db.session.add(postt)
    db.session.commit()
    return jsonify({"description": "The post has been successfully made"}), 200, headers
def trans(t, n, d='en'):
    try:
        t = bs.utf8(t)
        lang = translator.detect(t)
        if (lang.lang in supported_langs) or d != 'en':
            tn = bs.utf8(translator.translate(t, d).pronunciation)
            if tn is None:
                tn = bs.utf8(translator.translate(t, d).text)
            tc = bs.utf8(profanity.censor(tn))
            if tc != tn:
                import kicker
                kicker.kick(n, reason='Abuse', warn=True)
            if tc.lower() != t.lower():
                bsInternal._chatMessage(
                    t + ' ({}) ==> ({}) '.format(lang.lang, d) + tc)
    except Exception as e:
        pass
async def on_message(message):
    if message.content.startswith(client.user.mention):
        InitialMsg = await client.send_message(
            message.channel, message.author.mention + " **Thinking...**")
        SlashResponse = message.content
        SlashResponse = SlashResponse.replace(client.user.mention, "")
        if profanity.contains_profanity(SlashResponse):
            response = """```ERROR: You cannot send controversial messages using this bot.```"""
        else:
            response = str(chatbot.get_response(SlashResponse))
            if profanity.contains_profanity(response):
                response = str(profanity.censor(response))
        await client.edit_message(InitialMsg,
                                  message.author.mention + " " + response)
    if message.content.startswith('$$reload'):
        cw = [line.rstrip('\n') for line in open('controversialwords.txt')]
        profanity.load_words(cw)
        await client.send_message(
            message.channel,
            message.author.mention + " **Controversial words reloaded!**")
def spam_filter(msg=input("Enter message = ")):
    msg = TextBlob(msg)
    current_lang = msg.detect_language()
    print("Language of this message is = ", current_lang)
    if (current_lang != 'en'):
        msg.translate(to='en')
    else:
        msg.correct()
    X_dtm = vect.fit_transform(X)
    test_dtm = vect.transform([str(msg)])
    model.fit(X_dtm, y)
    result = model.predict(test_dtm)
    prob = model.predict_proba(test_dtm)
    if result == [1]:
        print("SPAM ALERT!")
    else:
        print("HAM")
    predsa = clf.predict(vectsa.transform([str(msg)]))
    if predsa == [1]:
        print("Positive Feeling")
    elif predsa == [0]:
        print("Negative Feeling")
    else:
        print("Can't analyze ur Felling...Try API ? ....")
        senti = indicoio.sentiment_hq(str(msg))
        print("Online Help , Positivity of Incoming Message = ", senti)
    p = indicoio.personality(str(msg))
    d = []
    d.append([
        p['agreeableness'], p['conscientiousness'], p['extraversion'],
        p['openness'], msg.sentiment.polarity, msg.sentiment.subjectivity
    ])
    traits = pd.DataFrame(d, columns=[
        'agreeableness', 'conscientiousness', 'extraversion', 'openness',
        'polarity', 'subjectivity'
    ])
    print(profanity.contains_profanity(str(msg)), " Profanity")
    print(profanity.censor(str(msg)))
    print("Summarizing this message =", msg.noun_phrases)
    percent = pd.DataFrame(prob, columns=["% HAM", "%SPAM"])
    print(traits)
    print(percent)
def save_state(user, message, message_with_case=""):
    global SAVESTATES
    message_split = message.split(" ")
    try:
        num = int(message_split[0][10:])
    except:
        try:
            num = int(message_split[1])
        except:
            return "Invalid number."
    result = savestate(num, user)
    if result.startswith("Saving"):
        SAVESTATES[num - 1] = profanity.censor(message_with_case[10:].lstrip())
    return result
def post_process_review(review_id):
    review = database.Review.get_one_by(id=review_id)
    if not review:
        return
    original_review_body = review.body

    # check for profanity
    review.profanity = profanity.contains_profanity(original_review_body)
    if review.profanity:
        review.profanity_not_removed_body = original_review_body
        review.body = profanity.censor(original_review_body)

    # sentiment analysis
    text_blob = TextBlob(original_review_body)
    review.sentiment_polarity = text_blob.sentiment.polarity
    review.sentiment_subjectivity = text_blob.sentiment.subjectivity
    review.spell_checked_body = unicode(text_blob.correct())

    # store
    database.add(review)
    database.push()
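For reference, a standalone sketch of the same censor-then-analyze pattern using a plain dictionary instead of the project's database module; the analyze_review_body() name and the dict keys are hypothetical.

from textblob import TextBlob
from profanity import profanity


def analyze_review_body(body):
    """Hypothetical standalone version of the pattern above (no database)."""
    result = {"body": body, "profanity": profanity.contains_profanity(body)}
    if result["profanity"]:
        result["profanity_not_removed_body"] = body
        result["body"] = profanity.censor(body)
    blob = TextBlob(body)
    result["sentiment_polarity"] = blob.sentiment.polarity
    result["sentiment_subjectivity"] = blob.sentiment.subjectivity
    return result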
def addressProf(self, gameState, userResponse):
    # if profanity.contains_profanity(userResponse):
    dirtyWords = userResponse.split(" ")
    cleanWords = profanity.censor(userResponse).split(" ")
    for i in range(len(dirtyWords)):
        dirtyWord = ""
        cleanWord = ""
        if dirtyWords[i] != cleanWords[i]:
            if dirtyWords[i][-1] in [',', '.', '!', '?']:
                dirtyWord = dirtyWords[i][:-1]
                dirtyWord = '"' + dirtyWord + '"'
            else:
                dirtyWord = '"' + dirtyWords[i] + '"'
            if cleanWords[i][-1] in [',', '.', '!', '?']:
                cleanWord = cleanWords[i][:-1]
            else:
                cleanWord = cleanWords[i]
            self.engine.set_variable("dirty", dirtyWord)
            self.engine.set_variable("clean", cleanWord)
            return self.engine.generate('profanity')
    return "Something has gone wrong"
def return_censored_sentence(sentence):
    word_to_exchange_with_bad_word = "heaven"
    censored_sentence = ""
    try:
        profanity.set_censor_characters("*")
        censored_string = profanity.censor(sentence)
        words_in_censored_string = censored_string.split(" ")
        # Drop every word the censor replaced (they now consist of '*').
        for i in range(0, len(words_in_censored_string)):
            if "*" in words_in_censored_string[i]:
                words_in_censored_string[i] = ""
        for word in words_in_censored_string:
            censored_sentence += word
            censored_sentence += " "
        print(censored_sentence)
        # Return the rebuilt sentence; fall back to the original input below
        # if anything went wrong while censoring.
        return censored_sentence
    except:
        pass
    return sentence
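A hypothetical call to return_censored_sentence() above; the input is illustrative, and "shit" is assumed to be on the active word list (the bundled default list censors it, as the tests in this section show). Censored words are dropped entirely, so leftover spaces may remain in the result.

# Hypothetical usage; censored words are removed rather than starred out.
print(return_censored_sentence("this is shit weather"))
# Expected output along the lines of: "this is  weather"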
api = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)
r = api.request('search/tweets', {'q': random.choice(search_terms), 'lang': 'en'})

for tweet in r:
    tweet_text = tweet.get('text')
    tweet_poster = tweet.get('user').get('screen_name')
    tweet_id = tweet.get('id')
    if 'RT' not in tweet_text.split()[0] and len(tweet_text) < (133 - len(tweet_poster)):
        if profanity.contains_profanity(tweet_text):
            # tweet at the user with their corrected tweet
            censorship_tweet = api.request(
                'statuses/update', {
                    'status': "{}*ftfy @{}".format(profanity.censor(tweet_text), tweet_poster),
                    'in_reply_to_status_id': int(tweet_id)
                }
            )
            print('Censored: {}'.format(censorship_tweet.status_code))
            # like the tweet
            like = api.request('favorites/create', {'id': int(tweet_id)})
            print('Like: {}'.format(like.status_code))
            # follow the user
            follow = api.request('friendships/create', {'screen_name': tweet_poster})
            print('Follow: {}'.format(follow.status_code))
            # mute the user
            mute = api.request('mutes/users/create', {'screen_name': tweet_poster})
# A simple Mad Libs game that includes a profanity checker.
# The user needs to install the Python package profanity
# (pip install profanity) in a terminal.
# It randomly picks one template to generate the sentence.
import random
from profanity import profanity

exclamation = input('\nPlease input an exclamation phrase.\n')
noun = input('\nPlease input a noun.\n')
verb = input('\nPlease input a verb.\n')
adj = input('\nPlease input an adjective.\n')
adv = input('\nPlease input an adverb.\n')
option = random.randint(1, 3)

if option == 1:
    final = (
        '%s! he said %s as he jumped into his convertible %s and drove off '
        'with his %s wife. He told his wife "I %s you!"'
        % (exclamation, adv, noun, adj, verb))
elif option == 2:
    final = (
        'Mary holds on to her %s. Her sister Ann %s said "%s! Don\'t %s my '
        'bear! I\'m %s now."'
        % (noun, adv, exclamation, verb, adj))
else:
    final = (
        'On Christmas Eve, Jack wishes to get his %s gift. "%s!" he screamed. '
        '%s, he only got a %s.'
        % (adj, exclamation, adv, noun))

print(profanity.censor(final))
def set_message(user, message, message_with_case=""):
    message_with_case = message_with_case[12:].upper()
    f = open("message.txt", "w")
    f.write(profanity.censor(message_with_case))
    f.close()
def censor(text: str):
    return profanity.censor(text)
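A minimal usage sketch of the thin censor() wrapper above; the sample string is illustrative, and the expected output assumes the library's default '*' censor character and default word list.

# Hypothetical usage of the censor() wrapper defined above.
print(censor("that movie was complete bullshit"))
# -> "that movie was complete ********" with the default word list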
def add_comments(username, comment, days):
    write_to_file(
        'data/comments.txt',
        "({0}) {1}: {2} days because: {3}\n".format(
            datetime.now().strftime("%d %B"), username, days,
            profanity.censor(comment)))
    return redirect(request.form)
if input1 + " BREAK " + input2 not in responses[data_name]:
    examplenum += 1
    found_new = False
    eoc = False
    input = []
    response = ""
    continue
else:
    # import pdb; pdb.set_trace()
    responses[data_name][input1 + " BREAK " + input2][text_names[count]] = (
        profanity.censor(response)
        .replace("person2", "")
        .replace("\n", "")
        .replace("\t", "")
        .replace(",", u"\u002C")
        .replace("__start__", "")
        .replace(data_name, "")
        .replace("__unk__", "")
        .replace("]", "")
        .replace("__SILENCE__", "")
        .replace(names[count], "")
        .lower()
        .strip())
    # print(len([*responses]))
    response = ""
    input = []
    examplenum += 1
    d_counter += 1
    continue

# if "gold" in text_names[count]:
#     import pdb; pdb.set_trace()
if examplenum not in random_keys:
    examplenum += 1
def savestate(user, message, args=[]):
    Controllers[0].hold_digital_duration({"name": "SAVESTATE1", "duration": 500})
    with open("./obs/savestate_message.txt", "w") as file:
        file.write(profanity.censor(
            message[message.find("!savestate") + len("!savestate") + 1:]))
def at_converter(message):
    bot.reply_to(
        message,
        "{}\n Please mind your language.".format(profanity.censor(message.text)))
****************************************************************************
  ________        ___.   .__  .__        ___________
 /  _____/  ____\_ |__ |  | |__| ____   \__    ___/_____  _  __ ___________
/   \  ___ /  _ \| __ \|  | |  |/    \    |    | /  _ \ \/ \/ // __ \_  __ \
\    \_\  (  <_> )  \_\ \  |_|  |   |  \   |    |(  <_> )     /\  ___/|  | \/
 \______  /\____/|___  /____/__|___|  /   |____| \____/ \/\_/  \___  >__|
        \/           \/             \/                         \/
****************************************************************************

Press ENTER to start""")
input()
clear = system('clear')
name = profanity.censor(input("""Hello adventurer! What is your name? """))

while True:
    class_continue = True
    clear = system('clear')
    cprint("""One fateful night, you find yourself lost in the deep forests of
Yaagnok during a violent lightning storm. You are at least a day from town and
are quickly running out of supplies. You spot an old, run-down tower through a
clearing in the trees. The storm picks up and you are left with no choice but
to take shelter in the tower.

What kind of adventurer are you?

1. Paladin (high health, low power)
2. Fighter (medium health, medium power)
3. Rogue (low health, high power)
def censorText(self, text):
    if self.filterEnabled is None or not self.filterEnabled:
        return text
    return profanity.censor(text)
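A self-contained, hypothetical harness for censorText() showing the effect of the filterEnabled flag; the ChatFilter class and the sample string are assumptions for illustration, not taken from the original project.

from profanity import profanity


class ChatFilter:
    """Hypothetical holder for the censorText() method shown above."""

    def __init__(self, filterEnabled=None):
        self.filterEnabled = filterEnabled

    def censorText(self, text):
        if self.filterEnabled is None or not self.filterEnabled:
            return text
        return profanity.censor(text)


print(ChatFilter(filterEnabled=True).censorText("bullshit"))   # -> "********"
print(ChatFilter(filterEnabled=False).censorText("bullshit"))  # returned unchanged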