示例#1
0
def update_post(post_id):
    """Update an existing post, censoring profanity and penalizing the author.

    Only the post's author may edit (403 otherwise). On a valid submit the
    title and content are run through the profanity classifier; offending
    text is stored censored and the author's reputation is reduced: -1 for
    a first offense (and the profanity flag is set), -5 for any repeat.

    The original duplicated the profanity branch for first/repeat offense
    and compared the flag with `== False` / `== True`, which let an unset
    (None) flag bypass censoring entirely; merged here with a truthiness
    check.
    """
    post = Post.query.get_or_404(post_id)
    if post.author != current_user:
        abort(403)
    form = PostForm()

    if form.validate_on_submit():
        title_check = predict([form.title.data])
        content_check = predict([form.content.data])

        if title_check[0] == 1 or content_check[0] == 1:
            # Profanity found: overwrite the post with censored text.
            post.title = censor.censor(form.title.data)
            post.content = censor.censor(form.content.data)
            if not current_user.profanity:
                # First offense: small penalty, remember the offense.
                current_user.reputation -= 1
                current_user.profanity = True
                db.session.commit()
                flash(
                    'Warning! Profanity detected! Since this is your first offense, Your reputation score has been reduced by 1. Any subsequent offense will reduce your reputation score by 5!',
                    'warning')
            else:
                # Repeat offense: larger penalty.
                current_user.reputation -= 5
                db.session.commit()
                flash(
                    'Warning! Profanity detected! Your reputation score has been reduced by 5. You were warned!',
                    'danger')
            return redirect(url_for('posts.post', post_id=post.id))

        # No profanity: store the raw input. The objects are already in the
        # db and simply being overwritten, so no session.add is needed.
        post.title = form.title.data
        post.content = form.content.data
        db.session.commit()
        flash('Your post has been successfully updated!', 'success')
        return redirect(url_for('posts.post', post_id=post.id))

    if request.method == 'GET':
        # Pre-populate the form with the existing post text (GET only).
        form.title.data = post.title
        form.content.data = post.content

    return render_template(
        'create_post.html',
        title='Update Post',
        form=form,
        legend='Update Post'
    )
示例#2
0
def new_post():
    """Create a new post from the PostForm.

    Clean submissions are censored (a no-op on clean text) and stored.
    Profane submissions are *not* saved — only the author's reputation
    penalty is committed (-1 first offense, -5 thereafter) and a warning
    is flashed.

    The original duplicated the profanity branch for first/repeat offense;
    merged here with a truthiness check on the profanity flag.
    """
    form = PostForm()

    if form.validate_on_submit():
        title_check = predict([form.title.data])
        content_check = predict([form.content.data])

        if title_check[0] == 1 or content_check[0] == 1:
            # NOTE(review): the Post is built but never session.add'ed, so a
            # profane post is discarded; only the reputation change is
            # committed. Preserved from the original logic — confirm intent.
            title = censor.censor(form.title.data)
            content = censor.censor(form.content.data)
            post = Post(title=title, content=content, author=current_user)
            if not current_user.profanity:
                current_user.reputation -= 1
                current_user.profanity = True
                db.session.commit()
                flash(
                    'Warning! Profanity detected! Since this is your first offense, Your reputation score has been reduced by 1. Any subsequent offense will reduce your reputation score by 5!',
                    'warning')
            else:
                current_user.reputation -= 5
                db.session.commit()
                flash(
                    'Warning! Profanity detected! Your reputation score has been reduced by 5. You were warned!',
                    'danger')
            return redirect(url_for('main.home'))

        # Clean submission: censor anyway (no-op on clean text) and persist.
        title = censor.censor(form.title.data)
        content = censor.censor(form.content.data)
        post = Post(title=title, content=content, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created!', 'success')
        return redirect(url_for('main.home'))

    return render_template('create_post.html',
                           title='New Post',
                           form=form,
                           legend='New Post')
示例#3
0
def is_profanity(input):
    """Return whether *input* is profane, memoizing results in profanitydict.

    The input is also checked with its last character stripped so that
    simple "s" plurals of profane words are caught.
    """
    if input in profanitydict:
        return profanitydict[input]

    p1 = predict([input])
    # Re-check without the last letter, in case a trailing "s" is there.
    p2 = predict([input[:-1]])

    # Collapse the classifier outputs to a plain bool: the original cached
    # `p1 or p2`, storing a length-1 classifier array whose truthiness the
    # callers then had to rely on.
    isprofane = bool(p1[0] or p2[0])

    # Memoize for subsequent lookups.
    profanitydict[input] = isprofane

    return isprofane
示例#4
0
def vulgar(difference_list, old_text_list):
    """Return vulgarity measures for an edit.

    ratio1 — raw count of vulgar entries in *difference_list*.
    ratio2 — that count relative to the vulgar count of *old_text_list*;
             0.0 when the old text contained no vulgarity (the original
             raised ZeroDivisionError in that case).
    """
    vulgar_list_edit = pc.predict(difference_list)
    vulgar_list_old = pc.predict(old_text_list)

    count_edit = sum(vulgar_list_edit)
    count_old = sum(vulgar_list_old)

    ratio1 = count_edit  #/ len(difference_list)
    ratio2 = count_edit / count_old if count_old else 0.0

    return ratio1, ratio2
示例#5
0
def get_custom_id(custom_id):
    """Return *custom_id* if it is unused and inoffensive, else a generated id."""
    # flat=True yields plain id values; without it values_list returns
    # 1-tuples, so the membership test below could never match and duplicate
    # ids were silently accepted.
    existing_ids = LinkyItem.objects.all().values_list('id', flat=True)
    if custom_id in existing_ids:
        return generate_id()
    if predict([custom_id])[0] == 1:
        return generate_id()
    return custom_id
示例#6
0
    async def on_message(self, message):
        """Delete profane messages and post a short-lived deletion notice."""
        if not self.enabled:
            return

        author = message.author
        channel = message.channel

        # Private channels have a plain discord.User author; ignore them.
        if isinstance(author, discord.User):
            return

        # Skip whitelisted authors, channels, or any whitelisted role.
        ids = {author.id, channel.id} | {role.id for role in author.roles}
        if self.whitelist.intersection(ids):
            return

        if not bool(predict([message.content])[0]):
            return

        await message.delete()

        notice = await channel.send(f"{author.mention} your message has "
                                    "been deleted for containing profanity.")

        # Leave the notice visible briefly, then clean it up too.
        await asyncio.sleep(5)
        await notice.delete()
示例#7
0
def add_to_highscore():
    """Validate the submitted username and record the session score.

    Returns 400 for missing/overlong/offensive usernames or non-highscores,
    201 on insert, 200 (no insert) for duplicates, 500 on other SQL errors.
    """
    if 'usr' not in request.args:
        return abort(400)
    if len(request.args['usr']) > 50:
        return abort(400, 'Username too long')
    if not is_highscore():
        return abort(400, 'Score is not a highscore')
    if predict([request.args['usr']])[0] == 1:
        return abort(400, 'Username is too offensive')
    conn = connect_db()
    try:
        cur = conn.cursor()
        cur.execute('insert into highscore values (%s, %s, %s)',
                    (request.args['usr'], session['now_score'],
                     datetime.strftime(datetime.now(), '%Y-%m-%d')))
        conn.commit()
        cleanup_database_if_needed()
        session['now_score'] = 0
        return '', 201
    except connector.Error as err:
        # MySQL errno 1062 = duplicate key: the entry already exists.
        if err.errno == 1062:
            print('Not adding duplicate entry to database')
            return '', 200
        print(f'SQL Error in /add-to-highscore: {err}')
        return abort(500)
    except KeyError as err:
        print(f'Key Error in /add-to-highscore: {err}')
        return abort(400)
    finally:
        # The original leaked the DB connection on every path.
        conn.close()
示例#8
0
    def process_record(self, record):
        """Classify one web-archive record, yielding (counter_name, 1) pairs.

        Extracts text from WET/plain-text or HTML records, skips tiny and
        non-English pages, flags adult content, otherwise yields the
        predicted topic.
        """
        # WET/plain text record
        if self.is_wet_text_record(record):
            text = record.content_stream().read().decode('utf-8')
        # html record
        elif self.is_html(record):
            text = self.get_text(record)
        else:
            yield 'error.unknown.record.type', 1
            return

        if len(text) < 500:
            yield 'tiny.page', 1
            return

        try:
            lang = detect(text)
        except Exception:
            # detect() raises on undetectable input. The original used a
            # bare `except:`, which would also swallow KeyboardInterrupt
            # and SystemExit.
            yield 'error.lang.detect', 1
            return
        if lang != 'en':
            # skip non-English pages
            yield 'non.english.page', 1
            return

        if predict([text])[0]:
            yield 'adult', 1
            return

        topic = self.predict(text)
        yield topic, 1
示例#9
0
    def validator(self, postData):
        """Return a dict of validation errors; flags toxic descriptions."""
        errors = {}
        prediction = predict([postData['desc']])
        # Any positive classification marks the description as toxic.
        if int(prediction[0]) > 0:
            errors['toxic'] = "Your post has toxic content!"
        return errors
示例#10
0
def mainMessage(sleep=0):
    """Poll WhatsApp Web chat boxes via the module-level selenium `driver`
    and process unread messages: auto-download PDF attachments, star and
    report offensive messages, and create calendar events for messages
    mentioning "timetable" or "event".

    NOTE(review): depends on module-level `driver`, `strip_tags`,
    `send_message`, `search_downloads`, `create_event`, `predict` and on
    hard-coded, obfuscated WhatsApp CSS class names that break whenever
    WhatsApp ships a new build — confirm the selectors are current.
    """
    time.sleep(sleep)
    # Chat list entries on the right-hand pane.
    rightChatBoxes = driver.find_elements_by_css_selector("._2ko65")
    print(rightChatBoxes)

    i = 1
    for rightChatBox in rightChatBoxes:
        soup = BeautifulSoup(rightChatBox.get_attribute('innerHTML'), 'html.parser')
        print(soup.prettify())
        # Sender name and message timestamp scraped from the chat box markup.
        name = soup.select("._19RFN")[0].get('title')
        mesg_time = soup.select("._0LqQ")[0].get_text()
        chatHead = driver.find_elements_by_css_selector(".P6z4j")[0]
        # Unread-message counter badge for this chat.
        no_messages = int(chatHead.get_attribute('innerHTML'))
        print(no_messages)
        
        rightChatBox.click()

        if i == 1:
            # Give the first opened chat extra time to load before scraping.
            time.sleep(sleep)
            i = i+1

        try :

            # Only the last `no_messages` (i.e. unread) messages are scanned.
            messages = driver.find_elements_by_css_selector("._F7Vk")[-no_messages:]   #_2Wx_5 _3LG3B #_12pGw
            print(messages)

            for message in messages:
                mesg = strip_tags(message.get_attribute('innerHTML'))
                print(mesg)
                if ".pdf" in mesg:
                    # PDF attachment: click its matching download button and
                    # watch the downloads folder on a background thread.
                    download_buttons = driver.find_elements_by_css_selector("._1mrMQ")[-no_messages:]   #[0].click()    #_1zGQT oty3x  #_1mrMQ
                    for download_button in download_buttons:
                        if mesg in download_button.get_attribute('innerHTML'):
                            download_button.click()
                            t1 = threading.Thread(target=search_downloads, args=(mesg,)) 
                            t1.start()
                            break

                    #print(download_button)
                else:
                    message.click()
                    mlist = []
                    mlist.append(mesg)
                    # Profanity classifier decides whether to flag the message.
                    is_offensive = predict(mlist)[0]
                    if is_offensive:
                        # Star the offensive message and report it via chat.
                        driver.find_elements_by_css_selector("._2-qoA")[0].click() # 
                        print("click1")
                        driver.find_element(By.XPATH, '//*[@title="Star message"]').click()
                        print("click2")
                        send_message("'Offensive'", "{} @ {} : {}".format(name, mesg_time, mesg))

                    if "timetable" in mesg.lower():
                        create_event(mesg, 0)
                        send_message("'Timetable'", mesg)
                    if "event" in mesg.lower():
                        create_event(mesg, 1)
                
        except Exception as e:
            # Best-effort scraping: log and move on to the next chat.
            print(e)
            pass
示例#11
0
def addgame():
    """Handle the AddGame form; reject profane game names."""
    form = AddGame()

    if form.validate_on_submit():
        new_game = form.gameName.data
        try:
            # Reject names the classifier flags as inappropriate.
            if 1 in predict([new_game]):
                flash("you can't use inappropraite words , i am smarter than u , bitch")
            else:
                game.CheckGame(new_game)
                session['game'] = new_game
                return redirect(url_for('index'))
        except Exception as e:
            # Best-effort: log the failure and fall through to re-render.
            print(str(e))

    return render_template('addgame.html', form=form)
def readLines(path):
    """Scan a text file and print every line the classifier flags as profane.

    Output uses the `File "...", line N` format so editors (e.g. Visual
    Studio Code) can link straight to the offending line. Undecodable
    files are reported and skipped.
    """
    lines = []
    try:
        with open(path) as fr:
            lines = fr.readlines()
    except UnicodeDecodeError:
        print(path + " is not an accepted file type, either remove extension for accepted.settings or check your file")
        return
    except Exception as e:
        print("Some unkown error occured")
        print(e)
    else:
        if len(lines) > 0:
            arr = predict(lines)
            profanLines = np.argwhere(arr == 1)
            if profanLines.size > 0:
                print("Profanaties detected")
                # Index the lines already in memory instead of re-opening
                # and re-reading the file as the original did.
                for idx in profanLines[:, 0]:
                    print("File \"" + path + "\", line " + str(idx + 1) + "\n" + lines[idx])
示例#13
0
def sent(message: str):
    """Return the binary and probabilistic profanity predictions as JSON."""
    binary = predict([message])
    probability = predict_prob([message])

    payload = {"bin": str(binary[0]), "prob": str(probability[0])}
    return JSONResponse(jsonable_encoder(payload))
示例#14
0
def profanity_clean_fields(FormClass, form_instance):
    """Check every field of a form instance for profanity.

    Raises forms.ValidationError if any non-empty field value is classified
    as profane; otherwise returns the cleaned data. The original tested the
    field *name* for truthiness but passed the possibly-missing value
    straight to the classifier, so `predict([None])` could crash.
    """
    cleaned_data = super(FormClass, form_instance).clean()
    for field in form_instance.fields:
        value = cleaned_data.get(field)
        # Skip absent/empty values: the classifier requires strings.
        if value and predict([value])[0]:
            raise forms.ValidationError('Watch your mouth!')
    return cleaned_data
示例#15
0
def numbadwords():
    """Count profane subtitle lines of a YouTube video (POST only).

    The video id comes from the form, falling back to a JSON body. Videos
    without usable transcripts report zero bad words.
    """
    if request.method == 'POST':
        video_id = request.form.get('video_id')
        if video_id is None:
            # Fall back to a JSON request body.
            packet = dict(request.json)
            video_id = packet['video_id']
        try:
            data = YouTubeTranscriptApi.get_transcript(video_id)
        except (youtube_transcript_api._errors.TranscriptsDisabled,
                youtube_transcript_api._errors.VideoUnavailable,
                youtube_transcript_api._errors.NoTranscriptAvailable):
            # The original listed four separate handlers with identical
            # bodies — including TranscriptsDisabled twice, making the
            # duplicate unreachable. Collapsed into one tuple.
            return json.dumps({"Message": "Subtitles disabled for this video", "num_bad_words": 0})
        df = pd.DataFrame(data)
        print(df)

        # Boolean mask selects the transcript rows classified as profane.
        ixs = df.loc[predict(df.text) == [1]]
        return json.dumps({"num_bad_words": len(ixs.index)})
示例#16
0
def check_profanity(user_message):
    """Return True when *user_message* contains profanity.

    The message is split into words; it is profane when any single word is
    classified profane, or when the average per-word classification exceeds
    0.5. Messages with no actual words are never profane.
    """
    debug_log('in check_profanity, user_message: ' + user_message)
    # Raw string: "\W" in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, SyntaxError in future Python versions).
    user_message_words = re.compile(r"\W+").split(user_message)
    actual_words_in_user_message = [
        word for word in user_message_words if len(word) > 0
    ]
    if len(actual_words_in_user_message) < 1:
        return False
    else:
        debug_log('in check_profanity, user_message_words: ' +
                  str(actual_words_in_user_message))
        overall_profanity = sum(predict(actual_words_in_user_message))
        if overall_profanity > 0:
            return True
        # NOTE(review): because of the early return above, everything below
        # only runs when overall_profanity == 0, so the average check can
        # never fire — kept for its debug output; confirm intent.
        debug_log('in check_profanity, overall_profanity: ' +
                  str(overall_profanity))
        overall_profanity_average = overall_profanity / len(
            actual_words_in_user_message)
        debug_log('in check_profanity, overall_profanity_average: ' +
                  str(overall_profanity_average))
        debug_log('in check_profanity, profanity probability: ' +
                  str(predict_prob(actual_words_in_user_message)))
        debug_log('in check_profanity, profanity probability: ' + str(
            sum(predict_prob(actual_words_in_user_message)) /
            len(actual_words_in_user_message)))
        if overall_profanity_average > 0.5:
            return True
    return False
示例#17
0
def generate_features(text_json):
    """Build a one-row feature DataFrame from a comment payload.

    *text_json* must contain a 'comment_text' entry. The returned frame
    holds character/word/punctuation statistics, stopword and toxicity
    counts, and VADER sentiment scores; the raw text column is dropped.
    """
    df = pd.DataFrame(text_json, index=[0])
    print("text_json: ", text_json)
    df['num_chars'] = df["comment_text"].apply(len)
    df['words'] = df['comment_text'].apply(lambda x: len(x.split()))
    df['prop_words'] = df['words'] / df['num_chars']
    df['capitals'] = df['comment_text'].apply(
        lambda x: sum(1 for char in x if char.isupper()))
    df['prop_capitals'] = df['capitals'] / df['num_chars']
    df['prop_caps_vs_words'] = df['capitals'] / df['words']
    df['paragraphs'] = df['comment_text'].apply(lambda x: x.count('\n'))
    df['prop_paragraphs'] = df['paragraphs'] / df['num_chars']
    df['prop_paragraphs_vs_words'] = df['paragraphs'] / df['words']

    # Bug fix: the original declared `global stopwords` and rebound the
    # imported NLTK corpus object to a set, so every call after the first
    # crashed with AttributeError on `stopwords.words`. Use a local set.
    stopword_set = set(stopwords.words("english"))
    df['num_stopwords'] = df['comment_text'].apply(
        lambda x: sum(x.count(w) for w in stopword_set))
    df['prop_stopwords'] = df['num_stopwords'] / df['num_chars']
    df['prop_stopwords_vs_words'] = df['num_stopwords'] / df['words']

    df['exclamation'] = df['comment_text'].apply(lambda x: x.count("!"))
    df['prop_exclamation'] = df['exclamation'] / df['num_chars']
    df['prop_exclamation_vs_words'] = df['exclamation'] / df['words']

    df['question_marks'] = df['comment_text'].apply(lambda x: x.count("?"))
    df['prop_question'] = df['question_marks'] / df['num_chars']
    df['prop_question_vs_words'] = df['question_marks'] / df['words']

    df['punctuation'] = df['comment_text'].apply(
        lambda x: sum(x.count(w) for w in string.punctuation))
    df['prop_punctuation'] = df['punctuation'] / df['num_chars']
    df['prop_punctuation_vs_words'] = df['punctuation'] / df['words']

    df['unique_words'] = df['comment_text'].apply(
        lambda x: len(set(x.split())))
    df['prop_unique'] = df['unique_words'] / df['num_chars']
    df['prop_unique_vs_words'] = df['unique_words'] / df['words']

    df['repeated_words'] = df['comment_text'].apply(lambda x: num_repeated(x))
    # Per-word profanity classifications summed into a toxicity count.
    df['toxic_count'] = df['comment_text'].apply(
        lambda x: sum(predict(x.split())))
    df['prop_repeated'] = df['repeated_words'] / df['num_chars']
    df['prop_repeated_vs_words'] = df['repeated_words'] / df['words']

    # "User:" occurrences approximate user mentions (wiki-style comments).
    df['mentions'] = df['comment_text'].apply(lambda x: x.count("User:"))
    df['prop_mentions'] = df['mentions'] / df['num_chars']
    df['prop_mentions_vs_words'] = df['mentions'] / df['words']

    sid = SentimentIntensityAnalyzer()
    polarity_scores = df['comment_text'].apply(
        lambda x: sid.polarity_scores(x))
    df['sentiment_compound'] = [p['compound'] for p in polarity_scores]
    df['sentiment_positive'] = [p['pos'] for p in polarity_scores]
    df['sentiment_negative'] = [p['neg'] for p in polarity_scores]
    df['sentiment_neutral'] = [p['neu'] for p in polarity_scores]
    df = df.drop(columns=['comment_text'])

    return df
示例#18
0
def profanity_clean_field(form_instance, field):
    """Check a single cleaned form field for profanity.

    Raises forms.ValidationError when the value is classified as profane;
    otherwise returns the value. The original passed a possibly-None value
    straight to the classifier, so `predict([None])` could crash on an
    absent field.
    """
    data = form_instance.cleaned_data.get(field)

    # Skip absent values: the classifier requires strings.
    if data is not None and predict([data])[0]:
        raise forms.ValidationError('Watch your mouth!')

    return data
示例#19
0
 async def profanity_filter_ml(self, *args):
     """Remove ML-flagged profane messages; optionally warn the author."""
     message = args[-1]
     # Only act when the filter applies to this author, the feature is
     # enabled, and the classifier flags the content.
     should_censor = (self.should_run(message.author)
                      and self.config["profanity_filter_ml"]
                      and predict([message.content]) == [1])
     if should_censor:
         await remove_message(message, "a banned word")
         if self.config["warn_on_censor"]:
             self.warn(message.author)
示例#20
0
def check_profanity(comment):
    """Return 1 when the comment is offensive, else the classifier result.

    A hard-coded trigger word short-circuits the classifier for tests.
    """
    if TRIGGER_WORD_FOR_TEST in comment:
        return 1
    return predict([comment])[0]
示例#21
0
def test_edge_cases():
    """Blank, whitespace-only, long-clean and repetitive text are all clean."""
    samples = [
        '',
        '                    ',
        'this is but a test string, there is no offensive language to be found here! :) '
        * 25,
        'aaaaaaa' * 100,
    ]
    assert [0, 0, 0, 0] == list(predict(samples))
def explicit(song, artist):
    """Return the classifier's 0/1 verdict on a song's lyrics.

    Looks the song up on Genius and classifies its lyrics. Returns None
    (implicitly) when either argument is empty — no lookup is performed.

    Bug fix: the original condition `if song and artist != ""` only
    compared *artist* against the empty string (operator precedence), so
    an empty song title with a non-empty artist slipped through.
    """
    if song != "" and artist != "":
        genius = lyricsgenius.Genius("YOUR-GENIUS-ACCESS-KEY-HERE")
        found = genius.search_song(song, artist)
        sentence = [found.lyrics]
        return predict(sentence)[0]
示例#23
0
def is_profane(message):
    """Return True if the message trips the blacklist or the trained model."""
    # Cheap hard-coded blacklist is consulted before the model.
    if any(word in message for word in blacklist):
        return True

    # Fall back to the trained SVM model; scores above the threshold count.
    return bool(pc.predict([message])[0] > threshold_value)
示例#24
0
def offensive(input_text):
    """Classify each word of *input_text*; print a notice per offensive word.

    Returns the list of per-word classifier outputs.
    """
    offensive_words = [predict([word]) for word in input_text]
    for flags in offensive_words:
        if flags == [1]:
            print()
            print('this sentence contain some offensive word/s')
    return offensive_words
async def check_profanity_filter(event):
    """Delete a profane message in monitored chats, if the bot is able to."""
    chat = await event.get_chat()
    text = event.text

    # Only act in monitored chats where the bot may delete messages and
    # the event actually carries text.
    if chat.id not in PROFANITY_CHECK_CHATS:
        return
    rights = chat.admin_rights
    if not (rights and rights.delete_messages and text):
        return

    if predict([text])[0] == 1:
        await event.delete()
示例#26
0
    async def on_message(self, message):
        """Report high-confidence swearing to the system channel."""
        # Ignore messages posted in the system channel itself.
        if message.channel.id == channels["system"]:
            return
        if not any(predict([message.content])):
            return

        probability = round(predict_prob([message.content])[0], 2) * 100

        # Only report when the classifier is quite confident.
        if probability > 85:
            await message.guild.get_channel(channels["system"]).send(
                f'{message.author.mention} swore in {message.channel.mention}: "{message.content}" ({probability}%)'
            )
示例#27
0
def mean_filter():
    """Render mean.html; on POST, report whether the submitted text is mean."""
    answer = None
    if request.method == 'POST':
        text = request.form['text_provided']
        if predict([text]):
            answer = 'The text is offensive!'
        else:
            answer = 'The text is not offensive!'

    return render_template('mean.html', answer=answer)
示例#28
0
文件: d_pre.py 项目: moon23k/S3_proj
def make_features2(df):
    """Add profanity-count and most-repeated-token features, drop the text column."""
    frame = df.copy()

    col_name = 'text'

    # Number of profane entries in each row's token list.
    frame['profanity'] = frame[col_name].apply(
        lambda x: predict(x).sum() if len(x) > 0 else 0)

    # Frequency of the most repeated token within the row.
    frame['most_rep'] = frame[col_name].apply(
        lambda x: FreqDist(np.hstack(x)).most_common(1)[0][1] if len(x) > 0 else 0)

    return frame.drop(columns=col_name, axis=1)
示例#29
0
文件: main.py 项目: kfriesth/safely
 def check_textual_content(self) -> None:
     """Set self.is_nsfw when any word on any of self.urls is profane."""
     options = webdriver.ChromeOptions()
     for flag in ('--headless', '--disable-gpu', '--no-sandbox'):
         options.add_argument(flag)
     for url in self.urls:
         # A fresh headless Chrome per URL, closed when done.
         with contextlib.closing(webdriver.Chrome(chrome_options=options)) as driver:
             driver.get(url)
             body_text = driver.find_element_by_tag_name('body').get_attribute('innerText')
             word_list = ' '.join(body_text.split('\n')).split(' ')
             if any(predict(word_list)):
                 self.is_nsfw = True
                 return
示例#30
0
def check_abuse(text):
    """Classify *text*; print the verdict and return "Okay" or "Bad".

    The original wrapped the body in a `while not offensive` loop and left
    statements after `return` — all unreachable dead code, removed here;
    the observable behavior (prints and return values) is unchanged.
    """
    result = predict([text])
    print(result)
    if result == [0]:
        print("Text is okay")
        return "Okay"
    return "Bad"