Example #1
    def __init__(self,
                 blur_mask_fade=NUDITY_DETECTION_SETTINGS['blur_mask_fade'],
                 threshold=NUDITY_DETECTION_SETTINGS['threshold']):
        self._classifier = NudeClassifier()
        self._blur = ImageBlur(PixelBlur(30), blur_mask_fade)
        self._threshold = threshold
        self._graph = tf.get_default_graph()
Example #2
def detect(src_path, video):
    frame_size = (160, 90)
    interval_millis = 1000
    classifier = NudeClassifier(settings.NUDE_NET_CLASSIFIER_MODEL_PATH)

    video.status = Video.Status.DETECTING
    video.save()

    src_name = os.path.splitext(os.path.basename(src_path))[0]
    output_dir = os.path.join(os.path.dirname(src_path), src_name)
    os.makedirs(output_dir)
    extract_frames(src_path, output_dir, frame_size, interval_millis)

    detected = list()
    threshold = 3 * interval_millis
    print('start detecting %d files' % (len(os.listdir(output_dir))))
    for frame in sorted(os.listdir(output_dir), key=lambda f: frame_order(f)):
        order = frame_order(frame)
        if order < 0:
            continue
        framepath = os.path.join(output_dir, frame)
        if not os.path.exists(framepath) or os.path.isdir(framepath):
            continue
        result = classifier.classify(framepath)[framepath]
        nudity_prob = result['nude']
        if nudity_prob > 0.8:
            start_millis = order * interval_millis
            end_millis = (order + 1) * interval_millis
            if not detected:
                detected.append(
                    DetectedScene(src_video=video,
                                  start_millis=start_millis,
                                  end_millis=end_millis,
                                  cause=DetectedScene.DetectionCause.NUDITY))
            else:
                latest = detected[-1]
                if latest.end_millis + threshold <= start_millis:
                    detected.append(
                        DetectedScene(
                            src_video=video,
                            start_millis=start_millis,
                            end_millis=end_millis,
                            cause=DetectedScene.DetectionCause.NUDITY))
                else:
                    latest.end_millis = end_millis
    print('the number of detected scenes is %d' % len(detected))
    for scene in detected:
        scene.save()
    video.status = Video.Status.DETECTED
    video.save()
    try:
        shutil.rmtree(output_dir)
    except Exception as e:
        print('failed to remove directory', e)
    return detected
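The scene-building step above joins detections that fall within three sampling intervals of each other. A minimal sketch of the same gap-merging idea using plain (start, end) tuples instead of DetectedScene objects; merge_intervals is a hypothetical helper, not part of the original project:

def merge_intervals(hits, interval_millis=1000, gap_factor=3):
    """Merge per-frame hit indices into scenes, joining hits closer than gap_factor * interval_millis."""
    threshold = gap_factor * interval_millis
    scenes = []
    for order in hits:
        start, end = order * interval_millis, (order + 1) * interval_millis
        if scenes and start < scenes[-1][1] + threshold:
            scenes[-1] = (scenes[-1][0], end)  # close enough: extend the latest scene
        else:
            scenes.append((start, end))        # otherwise start a new scene
    return scenes

# frames 3, 4 and 6 collapse into one scene; frame 20 starts another
print(merge_intervals([3, 4, 6, 20]))  # [(3000, 7000), (20000, 21000)]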
Example #3
async def sex_message_check(redis, db):
    classifier = NudeClassifier()
    while True:
        lst = await redis.hgetall('avr', encoding='utf-8')
        if lst:
            for user, path in lst.items():
                status = classifier.classify(path)
                if status[path]['safe'] < status[path]['unsafe']:
                    await Message.delete_image(db=db, user=user, image=path)

        await asyncio.sleep(10)
Example #4
    def __init__(self, bot: Red):
        super().__init__()
        self.bot = bot
        self.config = Config.get_conf(self, identifier=9811198108111121, force_registration=True)

        default_guild = {"enabled": False, "channel_id": None}

        self.config.register_guild(**default_guild)

        # self.detector = NudeDetector()
        self.classifier = NudeClassifier()

        self.data_path: pathlib.Path = cog_data_path(self)

        self.current_processes = 0
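Elsewhere in a cog like this, the guild defaults registered above would normally be read back through Red's Config accessors before classifying anything. A minimal hedged sketch; the _get_guild_settings helper is hypothetical, not from the original cog:

    async def _get_guild_settings(self, guild):
        # Config returns awaitable accessors for each registered default
        enabled = await self.config.guild(guild).enabled()
        channel_id = await self.config.guild(guild).channel_id()
        return enabled, channel_id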
Example #5
def scan_image(image_url):
    if image_url is None:
        return None

    file_name = ''.join(
        random.choices(string.ascii_uppercase + string.digits, k=18))
    tmp_path = os.path.join(tempfile.gettempdir(), file_name)

    with urllib.request.urlopen(image_url) as response:
        with open(tmp_path, "wb") as output:
            output.write(response.read())

    classifier = NudeClassifier()
    img_scan_result = classifier.classify(tmp_path)
    os.remove(tmp_path)
    return img_scan_result[tmp_path]['unsafe']
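Usage is a single call per URL. A short sketch within the same module; the URL and the 0.8 cut-off are hypothetical, not part of the original snippet:

unsafe_score = scan_image("https://example.com/photo.jpg")  # hypothetical URL
if unsafe_score is not None and unsafe_score > 0.8:
    print("image flagged as unsafe:", unsafe_score)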
Example #6
def check_for_nudity(user_data: dict):
    photo_path_list = user_data["message_photo_paths"]
    print(
        "\nChecking for nudity in message photos. This might take a while depending on the number of photos."
    )
    classifier = NudeClassifier()
    classification = classifier.classify(photo_path_list, 4)
    unsafe_photo_paths = []
    for photo_path in photo_path_list:
        try:
            if classification[photo_path]["unsafe"] > 0.95:
                print("OYASHII: ", photo_path)
                unsafe_photo_paths.append(photo_path)
        except KeyError:
            print("A photo is mysteriously missing.")

    nbr_of_unsafe_photos = len(unsafe_photo_paths)
    output_html = unsafe_photo_carousel_html_generator(unsafe_photo_paths)
    user_data["unsafe_html"] = output_html
    user_data["nbr_of_unsafe_photos"] = nbr_of_unsafe_photos
    user_data["unsafe_photo_paths"] = unsafe_photo_paths
    return user_data
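The function only reads the "message_photo_paths" key and adds three result keys to the same dict. A minimal hedged example of the expected input shape, assuming the module's unsafe_photo_carousel_html_generator helper is importable; the paths are hypothetical:

user_data = {"message_photo_paths": ["photos/img_001.jpg", "photos/img_002.jpg"]}  # hypothetical paths
user_data = check_for_nudity(user_data)
print(user_data["nbr_of_unsafe_photos"], user_data["unsafe_photo_paths"])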
Example #7
File: __init__.py Project: XorgX304/rvt2
    def run(self, path):
        """ Classify an image using NudeNet."""
        # if there is no classifier yet, create it; this takes a long time, so only one classifier is created (global variable)
        global classifier
        if classifier is None:
            self.logger().debug('Creating classifier from model=%s',
                                self.myconfig('model'))
            modelfile = self.myconfig('model')
            classifier = NudeClassifier(modelfile)
        threshold = float(self.myconfig('threshold'))

        self.logger().debug('Classifying path=%s', path)

        # classify path
        if os.path.isabs(path):
            abspath = path
            relpath = relative_path(path, self.myconfig('casedir'))
        else:
            abspath = os.path.join(self.myconfig('casedir'), path)
            relpath = path
        result = dict(path=relpath,
                      aiclassify=dict(classifier='NudeNet'),
                      preview=relpath)
        try:
            # convert the results, since they are returned as numpy objects
            classification = classifier.classify(abspath)[abspath]
            result['aiclassify']['results'] = dict(
                safe=float(classification['safe']),
                unsafe=float(classification['unsafe']))
            result['aiclassify']['is_nude'] = result['aiclassify']['results'][
                'unsafe'] > threshold
        except Exception as exc:
            self.logger().warning('Cannot process path=%s %s', path, exc)
            result['aiclassify']['results'] = dict(safe=None, unsafe=None)
            result['aiclassify']['is_nude'] = None
        yield result
Example #8
import os
import tensorflow as tf

tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

from nudenet import NudeClassifier

classifier = NudeClassifier()

result = classifier.classify('/nsfw/one.jpg')
result2 = classifier.classify('/nsfw/two.xjpg')

print('')
print('')
print('TEST RESULTS:')
print('********************************************')
print('')

print('one.jpg')
print(result)
print('')
print('two.xjpg')
print(result2)

print('')
print('********************************************')
print('')
Example #9
    def __init__(self, mode, specific_formats=None, specific_folder=None):

        self.mode = mode
        self.lst_of = {}
        self.doc_ext = []
        self.img_ext = []
        self.vid_ext = []
        self.sound_ext = []
        self.zip_ext = []
        self.code_ext = []
        self.media_ext = []
        self.data_ext = []
        self.app_ext = []
        self.font_ext = []
        self.sys_ext = []
        self.flags = []
        self.specifics = []
        self.all_files = {}
        self.errors = []
        self.file_structure = {}
        self.load_ext()
        self.now = datetime.now()
        self.dt_string = self.now.strftime("%d-%m-%Y %Hh%M")
        self.nude_classifier = NudeClassifier()
        self.nude_detector = NudeDetector()
        self.s = sched.scheduler(time.time, time.sleep)

        self.number_of_files = 0
        self.time_taken = 0
        self.prev_dir = None
        self.curr_dir = None
        self.faces = None
        self.points = None

        self.walked_dir = "checked.dir"
        self.all_walked = []
        self.load_walked()

        self.available_dirs = []
        self.non_available_dirs = []
        self.attach = ":/"
        self.let_dir = [
            "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
            "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"
        ]

        self.runt = threading.Thread(target=self.find_all_dirs)
        self.runt.start()

        self.master_ext = [
            self.doc_ext, self.img_ext, self.vid_ext, self.sound_ext,
            self.zip_ext, self.code_ext, self.media_ext, self.data_ext,
            self.app_ext, self.font_ext, self.sys_ext, self.flags
        ]

        self.type_s = [
            "Documents", "Images", "Videos", "Sounds", "Compressed_Files",
            "Programming_Files", "Discs_Media", "Databases", "Applications",
            "Fonts", "System_Files", "Infected"
        ]

        self.face_detection = fc.Detection()
        self.face_recognition = fc.Recognition()
        self.face_verification = fc.Verification()

        if specific_formats is not None and specific_folder is not None:
            self.specifics = self.specifics + specific_formats
            self.master_ext.append(self.specifics)

            self.type_s.append(specific_folder)
Example #10
import requests
import base64
import random
import json
from nudenet import NudeClassifier
import os
from datetime import datetime
from termcolor import colored

nsfw_classifier = NudeClassifier()
animals = ["cat", "dog", "wolf", "otter", "panda"]
muted_people = []

cat_images_api = "https://api.thecatapi.com/v1/images/search"
dog_facts_api = "https://dog-facts-api.herokuapp.com/api/v1/resources/dogs?number=1"
trivia_api = "https://opentdb.com/api.php?amount=1&category=9&difficulty=easy&type=boolean&encode=base64"
yoda_api = "http://yoda-api.appspot.com/api/v1/yodish?text="
joke_api = "https://official-joke-api.appspot.com/random_joke"
nasa_image_search_api = "https://images-api.nasa.gov/search?"


def get_json_data(url):
    result = requests.get(url)
    result_json = result.json()
    return result_json


def get_cat_image_url():
    json_object = get_json_data(cat_images_api)
    image_object = json_object[0]
    image_url = image_object["url"]
    return image_url
Example #11
File: __init__.py Project: XorgX304/rvt2
    def run(self, path):
        """ Classify a video using NudeNet."""
        if os.path.isabs(path):
            relpath = relative_path(path, self.myconfig('casedir'))
            abspath = path
        else:
            relpath = path
            abspath = os.path.join(self.myconfig('casedir'), path)

        self.check_params(abspath, check_path=True, check_path_exists=True)
        # if there is no classifier yet, create it; this takes a long time, so only one classifier is created (global variable)
        global classifier
        if classifier is None:
            self.logger().debug('Creating classifier from model=%s',
                                self.myconfig('model'))
            modelfile = self.myconfig('model')
            classifier = NudeClassifier(modelfile)

        previews = list(self.from_module.run(path))
        # if no previews could be created, the video could not be parsed: return a null result
        if not previews:
            return [
                dict(path=relpath,
                     aiclassify=dict(classifier='NudeNet',
                                     results=dict(safe=None, unsafe=None),
                                     is_nude=None))
            ]

        # else, the video was parsed correctly: test each one of the previews.
        max_unsafe = 0
        min_safe = 0
        num_is_nude = 0
        preview_path = None
        for preview_image in previews[0]['preview']:
            # classify preview_image
            result = list(super().run(preview_image))[0]
            if result['aiclassify']['results']['unsafe'] is not None and result[
                    'aiclassify']['results']['unsafe'] > max_unsafe:
                max_unsafe = result['aiclassify']['results']['unsafe']
                min_safe = result['aiclassify']['results']['safe']
            if result['aiclassify']['is_nude']:
                num_is_nude += 1
                # if an image is unsafe: set the preview to this image
                preview_path = relative_path(preview_image,
                                             self.myconfig('casedir'))
        final_result = dict(classifier='NudeNet',
                            results=dict(safe=min_safe, unsafe=max_unsafe),
                            is_nude=None)
        if len(previews[0]['preview']) > 0:
            final_result['is_nude'] = (
                1.0 * num_is_nude / len(previews[0]['preview']) > float(
                    self.myconfig('threshold_preview')))
            if preview_path is None:
                # if no preview path is already set, get the image in the middle
                preview_path = previews[0]['preview'][int(
                    len(previews[0]['preview']) / 2)]
        return [
            dict(path=relpath, aiclassify=final_result, preview=preview_path)
        ]
Example #12
def main(SUBREDDIT_NAMES):
    tempjpg = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           'temp.jpg')
    classifier = NudeClassifier()
    valid_extensions = ['.jpg', '.jpeg', '.bmp', '.png', '.tiff']
    SUBREDDIT_NAMES = SUBREDDIT_NAMES.replace(',', '+').replace(' ', '')
    while True:
        con = sqlite3.connect('log.db')
        cur = con.cursor()
        try:

            for submission in reddit.subreddit(
                    SUBREDDIT_NAMES).stream.submissions():

                gallery = []
                URL = submission.url
                # add .jpg to the image link if it's an imgur link
                if 'imgur.com' in URL:
                    URL += '.jpg'
                    gallery.append(URL)
                # get individual images from the gallery
                elif 'reddit.com/gallery' in URL:
                    ids = [
                        i['media_id'] for i in submission.gallery_data['items']
                    ]
                    for i in ids:
                        try:
                            url = submission.media_metadata[i]['p'][0]['u']
                            url = url.split("?")[0].replace("preview", "i")
                            gallery.append(url)
                        except KeyError:
                            pass
                #normal image url
                else:
                    gallery.append(URL)

                for i in gallery:
                    if i.endswith(tuple(valid_extensions)):
                        try:
                            # save the image as a temp file
                            with urllib.request.urlopen(i) as url:
                                with open(tempjpg, 'wb') as f:
                                    f.write(url.read())
                        except Exception as err:
                            print(err)

                        prediction = classifier.classify(
                            tempjpg)[tempjpg]['unsafe']
                        #remove post if REMOVE_SUBMISSION is True
                        if prediction > NSFW_PROB_THRESHOLD:
                            #print("nsfw")
                            if LOGGING_ON:
                                cur.execute(
                                    "INSERT INTO logbook VALUES (?,?,?)",
                                    (submission.created_utc,
                                     str(submission.author),
                                     submission.permalink))
                                con.commit()
                            if not MOD_TEST:
                                submission.mod.nsfw()
                                if REMOVE_SUBMISSION:
                                    submission.mod.remove()
                                    submission.mod.send_removal_message(
                                        REMOVAL_MESSAGE)
                            #send mod mail to mod discussions for testing
                            else:
                                submission.subreddit.message(
                                    "NSFW image detected!",
                                    "post: " + submission.permalink + ' p = ' +
                                    str(prediction) +
                                    ', threshold is currently ' +
                                    str(NSFW_PROB_THRESHOLD))
                            break
                        else:
                            #print("notnsfw")
                            pass

        except Exception as err:
            con.close()
            print(err)

    con.close()
Example #13
import pydload
import uuid
import json
import time
import requests
import logging

from flask import Flask, request
from flask_cors import CORS, cross_origin

from nudenet import NudeClassifier
classifier = NudeClassifier("models/classifier_model")
app = Flask(__name__)
cors = CORS(app)

# @app.route('/hello')
# @cross_origin()
# def hello():
#     return "Hello World!"


@app.route('/nudenet', methods=['GET', 'POST'])
def nudenet_classifier_from_url():
    if request.method == 'GET':
        url = request.args.get('url')
    elif request.method == 'POST':
        url = request.json['url']

    try:
        path = str(uuid.uuid4())
        dload_status = pydload.dload(url, path)
Example #14
from nudenet import NudeClassifier

m = NudeClassifier()


def predictor(x, batch_size=2, extras=[]):
    preds = m.classify(x, batch_size=batch_size)
    return [preds.get(_) for _ in x]
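The wrapper returns one result dict per input path, in input order, with None for any path classify could not read. A short hedged usage sketch; the image paths are hypothetical:

image_paths = ["cat.jpg", "beach.jpg"]  # hypothetical image paths
for path, pred in zip(image_paths, predictor(image_paths)):
    # pred is a {'safe': ..., 'unsafe': ...} dict, or None if the file could not be classified
    print(path, pred)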
Example #15
import pyautogui
from nudenet import NudeClassifier
import time
classifier = NudeClassifier('models/classifier_model')

def main():
    # while True:
    # for i in range (10):
    #     print("taking screenshot")
    #     myScreenshot = pyautogui.screenshot()
    #     myScreenshot.save('imgs/test.png')

    #     print("starting classification")
    #     classifier = NudeClassifier('models/classifier_model')
    #     print(classifier.classify('imgs/test.png'))

    filepath = 'imgs/test.png'
    print("starting classification")
    classified_dict = classifier.classify(filepath)
    print(classified_dict)
    print(classified_dict[filepath]['unsafe'])

if __name__ == "__main__":
    main()
Example #16
Options:
--tolerance=<tolerance>   Number of video frames to tolerate before issuing a warning. [default: 4]
--debug                   Prints the inference logs.
"""
from collections import deque
import cv2

from docopt import docopt
from nudenet import NudeClassifier
from secureconnect import inference
from secureconnect.exceptions import UnsafeEnvironment


# load the model once for every inference.
mdl = NudeClassifier()


def is_safety_buffer_critical(queue, tolerance):
    """
    Exit the program with an error when the NSFW tolerance is exhausted.

    If every item in the queue is True, we have exhausted our tolerance
    for NSFW content and it is unsafe to keep using the video source.
    """
    unsafe_flags = [item for item in queue if item]
    # compare the count of unsafe frames, not the list itself, to the tolerance
    if len(unsafe_flags) >= tolerance:
        raise UnsafeEnvironment
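A hedged sketch of how the safety buffer might be fed from per-frame classifications, using the module's mdl classifier and deque import; the monitor_frames helper, its frame paths, and the 0.8 cut-off are illustrative, not from the original script:

def monitor_frames(frame_paths, tolerance=4, threshold=0.8):
    # a bounded deque keeps only the most recent `tolerance` safety flags
    buffer = deque(maxlen=tolerance)
    for frame_path in frame_paths:
        result = mdl.classify(frame_path).get(frame_path, {})
        buffer.append(result.get('unsafe', 0.0) > threshold)
        # raises UnsafeEnvironment once every recent frame is flagged unsafe
        is_safety_buffer_critical(buffer, tolerance)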