Example #1
def classifier(img, url):
    image_loader = 'yahoo'
    input_file = img
    input_type = 'tensor'
    model_weights = 'data/open_nsfw-weights.npy'

    model = OpenNsfwModel()

    with tf.Session() as sess:
        input_type = InputType[input_type.upper()]
        model.build(weights_path=model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])
            # fn_load_image = img

        sess.run(tf.global_variables_initializer())
        image = fn_load_image(input_file)
        predictions = sess.run(model.predictions,
                               feed_dict={model.input: image})
        sess.close()
    return {
        'url': url,
        'sfw': str(predictions[0][0]),
        'nsfw': str(predictions[0][1])
    }
def main(argv):
    global sess
    global model
    parser = argparse.ArgumentParser()

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-t",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[
                            InputType.TENSOR.name.lower(),
                            InputType.BASE64_JPEG.name.lower()
                        ])

    args = parser.parse_args()

    model = OpenNsfwModel()
    sess = tf.Session()
    if not (sess):
        exit(1)

    input_type = InputType[args.input_type.upper()]
    model.build(weights_path=args.model_weights, input_type=input_type)
    sess.run(tf.global_variables_initializer())
    print("Session  initialized. Running flask")
    app.run(debug=True, host='0.0.0.0')
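A minimal sketch (not part of the original snippet) of how a Flask route might expose the classifier helper above; the endpoint name and query parameters are assumptions, and note that classifier builds a fresh session and graph on every call.

from flask import request, jsonify

# Hypothetical route for the module-level Flask app used by main() above.
@app.route('/classify', methods=['GET'])
def classify_route():
    path = request.args.get('path')   # local file path (assumed parameter name)
    url = request.args.get('url')     # original URL, echoed back in the response
    return jsonify(classifier(path, url))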
def main(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file",
                        help="Path to the input image.\
                        Only jpeg images are supported.")

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-i",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[
                            InputType.TENSOR.name.lower(),
                            InputType.BASE64_JPEG.name.lower()
                        ])

    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(
                    tf.Session(graph=tf.get_default_graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        image = fn_load_image(args.input_file)

        predictions = \
            sess.run(model.predictions,
                     feed_dict={model.input: image})

        print("Results for '{}'".format(args.input_file))
        print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
def main(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file", help="Path to the input image.\
                        Only jpeg images are supported.")

    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-i", "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[InputType.TENSOR.name.lower(),
                                 InputType.BASE64_JPEG.name.lower()])

    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(tf.Session(graph=tf.Graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array([base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        image = fn_load_image(args.input_file)

        predictions = \
            sess.run(model.predictions,
                     feed_dict={model.input: image})

        print("Results for '{}'".format(args.input_file))
        print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
Example #5
def predict_nsfw_faster(image_path):

    print("predicting nsfw for the image: ", image_path)

    model = OpenNsfwModel()

    with tf.Session() as sess:

        itype = InputType.TENSOR.name.lower()
        image_loader = IMAGE_LOADER_TENSORFLOW

        input_type = InputType[itype.upper()]
        model.build(weights_path="open_nsfw-weights.npy", input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(tf.Session(graph=tf.Graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array([base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        image = fn_load_image(image_path)

        predictions = \
            sess.run(model.predictions,
                     feed_dict={model.input: image})

        sfw_score = predictions[0][0]

        print("\tSFW score:\t{}".format(predictions[0][0]))
        print("\tNSFW score:\t{}".format(predictions[0][1]))

        if sfw_score > 0.94:
            return "sfw"
        else:
            return "nsfw"
Example #6
    def get(self):
        model = OpenNsfwModel()
        fname = request.args.get('fn')

        with tf.Session() as sess:
            # model_weights and input_type are expected as module-level globals
            model.build(weights_path=model_weights, input_type=input_type)
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])
            sess.run(tf.global_variables_initializer())
            image = fn_load_image(fname)
            predictions = sess.run(model.predictions,
                                   feed_dict={model.input: image})
            sfw, nsfw = "{}:{}".format(*predictions[0]).split(":")
            scores = {
                "fileName": fname,
                "sfw": sfw,
                "nsfw": nsfw
            }
            return jsonify(scores)
Example #7
class nsfw:

    def __init__(self):
        """Cart bar detection class

        Arguments
        ---------
        frozen_path: str
            Path to .pb model. Default="carts_model/frozen_inference_graph.pb"
        gpu_memory_fraction: float
            Percentage of how much gpu should the model use. If too little model can fail to load. Default=0.5
        gpu_device: str
            Required to set env variable CUDA_VISIBLE_DEVICES so Tensorflow only uses desired gpu. Default='0'
        """ 

        """Limit visible GPUs"""
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"

        self.model = OpenNsfwModel()
                
        self.model.build()

        self.sess = tf.Session()

        self.sess.run(tf.global_variables_initializer())


    def classify(self, image_str):

        img = decode_img_base64(image_str)
        cv2.imwrite("img.jpg", img)

        fn_load_image = create_tensorflow_image_loader(self.sess)

        image = fn_load_image("img.jpg")

        predictions = self.sess.run(self.model.predictions,feed_dict={self.model.input: image})

        # print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))

        return predictions[0]
Example #8
    def __init__(self,
        model_weights = '/home/citao/github/tensorflow-open_nsfw/data/open_nsfw-weights.npy',
        image_loader = 'IMAGE_LOADER_YAHOO',
        input_type = InputType.TENSOR.name.lower()
    ):
        self._sess = tf.Session()
        self._model = OpenNsfwModel()
        input_type = InputType[input_type.upper()]
        self._model.build(weights_path = model_weights,
                          input_type = input_type)
        
        self.fn_load_image = None
        if input_type == InputType.TENSOR:
            if image_loader == IMAGE_LOADER_TENSORFLOW:
                self.fn_load_image = create_tensorflow_image_loader(tf.Session(graph=tf.Graph()))
            else:
                self.fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            self.fn_load_image = lambda filename: np.array([base64.urlsafe_b64encode(open(filename, "rb").read())])

        self._sess.run(tf.global_variables_initializer())
Example #9
    def __init__(self):
        """Cart bar detection class

        Arguments
        ---------
        frozen_path: str
            Path to .pb model. Default="carts_model/frozen_inference_graph.pb"
        gpu_memory_fraction: float
            Percentage of how much gpu should the model use. If too little model can fail to load. Default=0.5
        gpu_device: str
            Required to set env variable CUDA_VISIBLE_DEVICES so Tensorflow only uses desired gpu. Default='0'
        """ 

        """Limit visible GPUs"""
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"

        self.model = OpenNsfwModel()
                
        self.model.build()

        self.sess = tf.Session()

        self.sess.run(tf.global_variables_initializer())
def nsfw_main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    IMAGE_LOADER_TENSORFLOW = "tensorflow"
    class args:
        pass
    args.input_file = "girl.jpg"
    args.model_weights = "data/open_nsfw-weights.npy"
    args.image_loader = IMAGE_LOADER_TENSORFLOW
    args.input_type = InputType.TENSOR.name.lower()
    model = OpenNsfwModel()
    # This is important for reset graph
    tf.reset_default_graph()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array([base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())
        image = fn_load_image(args.input_file)
        predictions = \
            sess.run(model.predictions,
                     feed_dict={model.input: image})

        print("Results for '{}'".format(args.input_file))
        print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
def main(argv):
    global sess
    global model
    parser = argparse.ArgumentParser()

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-t",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[
                            InputType.TENSOR.name.lower(),
                            InputType.BASE64_JPEG.name.lower()
                        ])

    parser.add_argument("-p", "--port", default=6000, help="port number")

    args = parser.parse_args()

    model = OpenNsfwModel()
    gpuConfig = tf.ConfigProto(
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.3),
        device_count={'GPU': 1})
    sess = tf.Session(config=gpuConfig)
    if not (sess):
        exit(1)

    input_type = InputType[args.input_type.upper()]
    model.build(weights_path=args.model_weights, input_type=input_type)
    sess.run(tf.global_variables_initializer())
    print("Session  initialized. Running flask")
    port = detect_port(int(args.port))
    app.run(debug=False, host='0.0.0.0', port=port)
Example #12
class nsfw_detector():
    def __init__(self,
        model_weights = '/home/citao/github/tensorflow-open_nsfw/data/open_nsfw-weights.npy',
        image_loader = 'IMAGE_LOADER_YAHOO',
        input_type = InputType.TENSOR.name.lower()
    ):
        self._sess = tf.Session()
        self._model = OpenNsfwModel()
        input_type = InputType[input_type.upper()]
        self._model.build(weights_path = model_weights,
                          input_type = input_type)
        
        self.fn_load_image = None
        if input_type == InputType.TENSOR:
            if image_loader == IMAGE_LOADER_TENSORFLOW:
                self.fn_load_image = create_tensorflow_image_loader(tf.Session(graph=tf.Graph()))
            else:
                self.fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            self.fn_load_image = lambda filename: np.array([base64.urlsafe_b64encode(open(filename, "rb").read())])

        self._sess.run(tf.global_variables_initializer())

    def predict(self, input_file):
        image = self.fn_load_image(input_file)
        predictions = self._sess.run(
            self._model.predictions,
            feed_dict = {self._model.input: image}
        )
        probs = predictions[0]
        result = {
            'SFW': probs[0],
            'NSFW': probs[1]
        }
        return result
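A brief usage sketch for the nsfw_detector class above (illustrative, not from the source); "sample.jpg" is a placeholder and the weights path is an assumption matching the other snippets.

detector = nsfw_detector(model_weights='data/open_nsfw-weights.npy')
scores = detector.predict('sample.jpg')     # {'SFW': ..., 'NSFW': ...}
print('SFW: {:.4f}  NSFW: {:.4f}'.format(scores['SFW'], scores['NSFW']))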
Example #13
def run():
    global BATCH_SIZE
    global testImg
    global LR
    global logs_train_dir
    prepare()
    images = []
    labels = []
    for line in testImg:
        images.append(line)
        filename = os.path.splitext(os.path.split(line)[1])[0]
        true_index = 0
        if filename.split('P**n')[0] == 'v':
            true_index = 1
        labels.append(true_index)

    images = tf.constant(images)
    labels = tf.constant(labels)
    images = tf.random_shuffle(images, seed=0)
    labels = tf.random_shuffle(labels, seed=0)
    data = tf.data.Dataset.from_tensor_slices((images, labels))

    data = data.shuffle(len(testImg))
    data = data.map(process, num_parallel_calls=16)
    data = data.prefetch(buffer_size=BATCH_SIZE * 8)
    batched_data = data.apply(
        tf.contrib.data.batch_and_drop_remainder(BATCH_SIZE)).repeat(
            40)  #num of epoch

    iterator = tf.data.Iterator.from_structure(batched_data.output_types,
                                               batched_data.output_shapes)
    init_op = iterator.make_initializer(batched_data)

    Y = tf.placeholder(tf.float32, shape=[
        BATCH_SIZE,
    ], name='Y')
    model = OpenNsfwModel()
    model.build(weights_path=None, batchsize=BATCH_SIZE, tag='')
    Y_pred = model.logits

    # sup1 = model.sup1
    # sup2 = model.sup2
    # sup3 = model.sup3
    # loss = supervision_loss(Y, Y_pred, sup1, sup2, sup3)
    loss = log_loss(Y, Y_pred)

    accuracy = evaluation(Y, Y_pred)
    train_op = trainning(loss, LR)

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.6
    step = 0
    epoch = 0
    aver_acc = 0

    with tf.Session(config=config) as sess:

        saver = tf.train.Saver(max_to_keep=0)
        sess.run(tf.global_variables_initializer())
        sess.run(init_op)

        images, filenames, labels = iterator.get_next()
        ckpt = tf.train.get_checkpoint_state(logs_train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            print('global_step', global_step)
            if global_step != '0':
                step = int(global_step) + 1
            saver.restore(sess, ckpt.model_checkpoint_path)
        record_loss = 0
        record_acc = 0
        while True:
            try:
                print('step', step)
                name, label_Y, input = sess.run([filenames, labels, images])
                get_loss, get_op, get_acc = sess.run(
                    [loss, train_op, accuracy],
                    feed_dict={
                        model.input: input,
                        Y: label_Y
                    })
                print('loss', get_loss, 'accuracy', get_acc)
                record_loss = record_loss + get_loss
                record_acc = record_acc + get_acc

                aver_acc = aver_acc + get_acc
                if (step + 1) % (len(testImg) / BATCH_SIZE) == 0:
                    epoch = epoch + 1
                    ###save record
                    record_loss = float(record_loss) / (len(testImg) /
                                                        BATCH_SIZE)
                    record_acc = float(record_acc) / (len(testImg) /
                                                      BATCH_SIZE)
                    update_csv(record_loss, record_acc, epoch)
                    ###
                    print('epoch', epoch)
                    print('train_learning_rate', LR)
                    checkpoint_path = os.path.join(logs_train_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
                step = step + 1
            except tf.errors.OutOfRangeError:
                sess.run(init_op)
                break
        print(float(aver_acc) / step)
            f.write(response.content)

# ### Png to Jpg Image

# In[ ]:

pngs = glob('./*.png')
for j in pngs:
    img = cv2.imread(j)
    cv2.imwrite((j[:-3] + 'jpg'), img)

# ### Model Load and Build

# In[ ]:

model = OpenNsfwModel()
model.build(weights_path="data/open_nsfw-weights.npy",
            input_type=InputType.BASE64_JPEG)

# In[ ]:

fn_load_image = None
fn_load_image = lambda filename: np.array(
    [base64.urlsafe_b64encode(open(filename, "rb").read())])

# In[ ]:

with tf.Session() as sess:
    jpgs = glob('./*.jpg')
    for j in range(len(jpgs)):
        if os.stat(jpgs[j]).st_size != 0:
import argparse
import tensorflow as tf
from model import OpenNsfwModel, InputType
from image_utils import create_yahoo_image_loader
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/classify', methods=['GET'])
def classify():
    filename = request.args["image_path"]
    image = create_yahoo_image_loader()(filename)
    predictions = sess.run(model.predictions, feed_dict={model.input: image})
    # print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))

    predictions = predictions[0].tolist()
    return jsonify(dict(sfw=predictions[0], nsfw=predictions[1]))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--port", default=8082, help="server http port")
    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.compat.v1.Session() as sess:
        model.build(weights_path="data/open_nsfw-weights.npy", input_type=InputType["TENSOR"])
        sess.run(tf.compat.v1.global_variables_initializer())

        app.run(port=args.port)
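A client-side sketch (not from the source) for querying the /classify endpoint above with the requests library; host, port and image path are placeholders.

import requests

# The service reads the file from its own filesystem, so the path must be local to it.
resp = requests.get("http://localhost:8082/classify",
                    params={"image_path": "/path/to/image.jpg"})
print(resp.json())  # e.g. {"sfw": 0.97, "nsfw": 0.03}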
def main(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "path",
        help="Path to the input images. Only jpeg images are supported.")

    parser.add_argument("-b",
                        "--bucket",
                        required=True,
                        help="AWS S3 bucket name")

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-o",
                        "--output",
                        required=True,
                        help="Path to output result file")

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-t",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[
                            InputType.TENSOR.name.lower(),
                            InputType.BASE64_JPEG.name.lower()
                        ])

    args = parser.parse_args()

    s3 = boto3.client('s3')
    bucket_name = ''
    for bucket in s3.list_buckets().get('Buckets'):
        if bucket.get('Name') == args.bucket:
            bucket_name = bucket.get('Name')

    if not bucket_name:
        print("Bucket {} not available".format(args.bucket))
        exit(-1)

    images = []
    next_token = ''
    while True:
        if next_token:
            response = s3.list_objects_v2(Bucket=bucket_name,
                                          Delimiter='|',
                                          EncodingType='url',
                                          MaxKeys=1000,
                                          Prefix=args.path,
                                          ContinuationToken=next_token,
                                          FetchOwner=False)
        else:
            response = s3.list_objects_v2(Bucket=bucket_name,
                                          Delimiter='|',
                                          EncodingType='url',
                                          MaxKeys=1000,
                                          Prefix=args.path,
                                          FetchOwner=False)
        content = response.get('Contents')
        next_token = response.get('NextContinuationToken')
        for item in content:
            images.append(item.get('Key'))
        if not next_token:
            break
        print(next_token)
        # if len(images) > 100:
        #   break

    model = OpenNsfwModel()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        output = open(args.output, "a")

        for item in images:
            temp_file = tempfile.mkstemp()
            s3.download_file(bucket_name, item, temp_file[1])
            try:
                image = fn_load_image(temp_file[1])
            except IOError:
                print("Read Image Error")
                os.remove(temp_file[1])
                continue  # skip this item; 'image' would be stale or undefined otherwise
            predictions = sess.run(model.predictions,
                                   feed_dict={model.input: image})
            output.write(
                "https://www.themebeta.com/media/cache/400x225/files/{}, {}\r\n"
                .format(item, predictions[0][0]))
            os.remove(temp_file[1])
            print(
                "Results for https://www.themebeta.com/media/cache/400x225/files/{} : {}"
                .format(item, predictions[0][0]))

        output.close()
def main(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument("-s", "--source", required=True,
                        help="Folder containing the images to classify")

    parser.add_argument("-o", "--output_file", required=True,
                        help="Output file path")

    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-b", "--batch_size", help="Number of images to \
                        classify simultaneously.", type=int, default=64)

    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    args = parser.parse_args()
    batch_size = args.batch_size
    output_file = args.output_file

    input_type = InputType.TENSOR
    model = OpenNsfwModel()

    filenames = glob.glob(args.source + "/*.jpg")
    num_files = len(filenames)

    num_batches = int(num_files / batch_size)

    print("Found", num_files, " files")
    print("Split into", num_batches, " batches")

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    batch_iterator = None

    if args.image_loader == IMAGE_LOADER_TENSORFLOW:
        batch_iterator = create_tf_batch_iterator(filenames, batch_size)
    else:
        fn_load_image = create_yahoo_image_loader(expand_dims=False)
        batch_iterator = create_batch_iterator(filenames, batch_size,
                                               fn_load_image)

    with tf.Session(graph=tf.Graph(), config=config) as session:
        model.build(weights_path=args.model_weights,
                    input_type=input_type)

        session.run(tf.global_variables_initializer())

        with tqdm(total=num_files) as progress_bar:
            with open(output_file, 'w') as o:
                o.write('File\tSFW Score\tNSFW Score\n')

                for batch_num, images in enumerate(batch_iterator):
                    predictions = \
                        session.run(model.predictions,
                                    feed_dict={model.input: images})

                    fi = (batch_num * batch_size)
                    for i, prediction in enumerate(predictions):
                        filename = os.path.basename(filenames[fi + i])
                        o.write('{}\t{}\t{}\n'.format(filename,
                                                      prediction[0],
                                                      prediction[1]))

                    progress_bar.update(len(images))
    parser.add_argument("-o", "--optimize", action='store_true',
                        default=False,
                        help="Optimize graph for inference")

    parser.add_argument("-f", "--freeze", action='store_true',
                        required=False, default=False,
                        help="Freeze graph: convert variables to ops")

    parser.add_argument("-t", "--text", action='store_true',
                        required=False, default=False,
                        help="Write graph as binary (.pb) or text (pbtext)")

    args = parser.parse_args()

    model = OpenNsfwModel()

    export_base_path = args.target
    do_freeze = args.freeze
    do_optimize = args.optimize
    as_binary = not args.text
    input_type = InputType[args.input_type.upper()]

    input_node_name = 'input'
    output_node_name = 'predictions'

    base_name = 'open_nsfw'

    checkpoint_path = os.path.join(export_base_path, base_name + '.ckpt')

    if as_binary:
Example #19
def main(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file", help="Path to the input image.\
                        Only jpeg images are supported.")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-t", "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[InputType.TENSOR.name.lower(),
                                 InputType.BASE64_JPEG.name.lower()])

    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array([base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        image = fn_load_image(args.input_file)

        predictions = \
            sess.run(model.predictions,
                     feed_dict={model.input: image})

        print("Results for '{}'".format(args.input_file))
        print(predictions[0][0])
        print(predictions[0][1])
        print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
        # SFW : Safe For Work , NSFW : Not Safe For Work 
        nude_json = {'SFW' : predictions[0][0] , 'NSFW' : predictions[0][1] }
        result= json.dumps(nude_json, cls=MyEncoder)
        loaded_json = json.loads(result)
        #for x in loaded_json:
        #  print("%s: %f" % (x, loaded_json[x]))
        print(loaded_json)
        # 'w' mode truncates the file, so no separate open/truncate step is needed
        with open('data.txt', 'w') as outfile:
            json.dump(loaded_json, outfile)

        return loaded_json  
Example #20
def main(argv):
    parser = argparse.ArgumentParser()

    args = parser.parse_args()

    # args.input_file = "yyy-1.jpg"
    # args.input_file = "no-sexy.jpg"
    # args.input_file = "zzpic19597.jpg"
    args.input_file = "sexy.jpg"  # 输入图像

    print('[Info] 测试图像: {}'.format(args.input_file))
    args.image_loader = IMAGE_LOADER_YAHOO
    args.input_type = InputType.TENSOR.name.lower()
    args.model_weights = "data/open_nsfw-weights.npy"

    model = OpenNsfwModel()

    fn_load_image = None

    input_type = InputType[args.input_type.upper()]
    if input_type == InputType.TENSOR:
        if args.image_loader == IMAGE_LOADER_TENSORFLOW:
            fn_load_image = create_tensorflow_image_loader(tf.Session(graph=tf.Graph()))
        else:
            fn_load_image = create_yahoo_image_loader()
    elif input_type == InputType.BASE64_JPEG:
        fn_load_image = lambda filename: np.array([base64.urlsafe_b64encode(open(filename, "rb").read())])

    with tf.Session() as sess:
        model.build(weights_path=args.model_weights, input_type=input_type)
        sess.run(tf.global_variables_initializer())

        print('\n[Info] Original version')
        image = fn_load_image(args.input_file)  # source image, processed in the original format
        model_predict(sess, model, image, args.input_file)  # 2nd version

        print('\n[Info] Rewritten OpenCV version')
        img_np = cv2.imread(args.input_file)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
        image_v2 = process_img_opencv(img_np)
        model_predict(sess, model, image_v2, args.input_file)  # 2nd version

        # logic for saving the model
        # print('\n[Info] Saving model')
        # save_model(sess, model)

    print('\n[Info] base64 model version')
    img_np = cv2.imread(args.input_file)
    img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
    img_np = process_img_opencv(img_np)
    print('[Info] Img: {}'.format(img_np.shape))
    img_b64 = base64.urlsafe_b64encode(img_np)  # convert to base64
    img_tf = image_decode(img_b64)
    print('[Info] tf shape: {}'.format(img_tf.shape))
    img_np = tf.Session().run(img_tf)
    print('[Info] tf->np shape: {}'.format(img_np.shape))

    export_path = "data/model-tf"  # saved model files

    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.loader.load(sess, ["serve"], export_path)
        graph = tf.get_default_graph()
        print(graph.get_operations())
        res = sess.run('predictions:0',
                       feed_dict={'input:0': img_np})
        print('[Info] Final result: {}'.format(res))

    print('[Info] Sexiness score: {}'.format(res[0][1] * 100.0))
Example #21
def main(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file",
                        help="Path to the input image.\
                        Only jpeg images are supported.")

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-i",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type")

    args = parser.parse_args()
    model = OpenNsfwModel()
    frameTotal = 0
    frameNsfw = 0
    with tf.compat.v1.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(
                    tf.Session(graph=tf.Graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.compat.v1.global_variables_initializer())

        #image = fn_load_image(args.input_file)
        videoFile = args.input_file

        cap = cv2.VideoCapture(videoFile)
        frameRate = cap.get(5)  #frame rate
        while (cap.isOpened()):
            frameId = cap.get(1)  #current frame number
            ret, frame = cap.read()
            if (ret != True):
                break
            if (frameId % math.floor(frameRate) == 0):
                cv2.imwrite('./images/temp.jpg', frame)
                image = fn_load_image('./images/temp.jpg')
                frameTotal = frameTotal + 1

                predictions = \
                    sess.run(model.predictions,
                        feed_dict={model.input: image})
                if (predictions[0][1] >= 0.50):
                    frameNsfw = frameNsfw + 1


#print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))

        cap.release()
        if (frameNsfw > 0):
            print("NSFW")
        else:
            print("SFW")
    print(str((frameNsfw / frameTotal) * 100))
Example #22
# and initialize the FPS counter
print("[INFO] starting video stream ...")
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)

# tensor flow loader options
IMAGE_LOADER_TENSORFLOW = "tensorflow"
IMAGE_LOADER_YAHOO = "yahoo"
MODEL = "open_nsfw-weights.npy"
INPUT_TYPE = "tensor"

image_loader = IMAGE_LOADER_YAHOO

print("[INFO] loading model ...")
now = time.time()
model = OpenNsfwModel()

start_time = time.time()
total_time = 0

sess = tf.Session()
input_type = InputType[INPUT_TYPE.upper()]
model.build(weights_path=MODEL, input_type=input_type)

fn_load_image = None

if input_type == InputType.TENSOR:
    if image_loader == IMAGE_LOADER_TENSORFLOW:
        fn_load_image = create_tensorflow_image_loader(sess)
    else:
        fn_load_image = create_yahoo_image_loader()
Example #23
        fn_load_image = create_yahoo_image_loader()
    else:
        print("parameter error")
elif INPUT_TYPE == 'BASE64_JPEG':
    if FN_LOAD_IMAGE == IMAGE_LOADER_YAHOO:
        print("parameter mismatch")
    elif FN_LOAD_IMAGE == IMAGE_LOADER_TENSORFLOW:
        import base64
        fn_load_image = lambda filename: np.array(
            [base64.urlsafe_b64encode(open(filename, "rb").read())])
    else:
        print("parameter error")
else:
    print("parameter error")

model = OpenNsfwModel()
input_type = InputType[INPUT_TYPE]
model.build(weights_path="data/open_nsfw-weights.npy", input_type=input_type)
sess = tf.Session()


def process_start(conn, addr):
    logging.info("Connection address:" + str(addr))
    rec_d = bytes([])
    try:
        while True:
            data = conn.recv(BUFSIZ)
            if not data or len(data) == 0:
                break
            else:
                rec_d = rec_d + data
Example #24
import argparse
import tensorflow as tf
import io
from model import OpenNsfwModel, InputType
import flask
from PIL import Image
import numpy as np
import skimage
import skimage.io
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

model_weights_path = 'data/open_nsfw-weights.npy'
model = OpenNsfwModel()

VGG_MEAN = [104, 117, 123]

img_width, img_height = 224, 224

app = flask.Flask(__name__)


# Reorder the RGB channels into BGR order, then subtract a fixed offset from each channel value
def prepare_image(image):
    H, W, _ = image.shape
    h, w = (img_width, img_height)

    h_off = max((H - h) // 2, 0)
    w_off = max((W - w) // 2, 0)
class NSFW:

    def __init__(self):
        self.fn_load_image = None
        self.sess = None
        self.model = OpenNsfwModel()
        self.data_path = None

    def session_run(self, source_type):
        with tf.compat.v1.Session() as self.sess:
            input_type = InputType[InputType.TENSOR.name.lower().upper()]
            self.model.build(
                weights_path='data/open_nsfw-weights.npy', input_type=input_type)

            if input_type == InputType.TENSOR:
                if IMAGE_LOADER_YAHOO == IMAGE_LOADER_TENSORFLOW:
                    self.fn_load_image = create_tensorflow_image_loader(
                        tf.Session(graph=tf.Graph()))
                else:
                    self.fn_load_image = create_yahoo_image_loader()
            elif input_type == InputType.BASE64_JPEG:
                import base64
                self.fn_load_image = lambda filename: np.array(
                    [base64.urlsafe_b64encode(open(filename, "rb").read())])

            self.sess.run(tf.compat.v1.global_variables_initializer())

            # ======
            data_txt = pd.read_table(
                self.data_path, sep='\t', header=None).values.tolist()
            data_result = []

            for d in data_txt:
                urls = get_urls(str(d[0]))
                # print('site =', str(d[0]), 'image count:', len(urls))
                if len(urls) > 0:
                    treu_sum = 0
                    for url in urls:
                        headers = {
                            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'}
                        try:
                            res = requests.get(
                                url, headers=headers, timeout=20)
                        except:
                            continue
                        with open('img/text.jpg', 'wb') as f:
                            f.write(res.content)
                        f.close()

                        try:
                            image = self.fn_load_image('img/text.jpg')
                        except:
                            continue
                        predictions = self.sess.run(self.model.predictions, feed_dict={
                                                    self.model.input: image})
                        # print(float(predictions[0][1]), type(float(predictions[0][1])))
                        if float(predictions[0][1]) >= 0.8:
                            treu_sum += 1

                    if treu_sum / len(urls) >= 0.2:
                        data_result.append(str(d[0]))
                        sday = time.strftime(
                            '%Y-%m-%d', time.localtime(time.time()))
                        # print("INSERT INTO `pornographic_website_detection_zsf` (`sday`, `type`, `url`) VALUES ('" + sday + "', '"+source_type+"', '" + str(d[0]) + "' ,'" + str(d[1]) + "')")
                        # mysql_insert("INSERT INTO `pornographic_website_detection_zsf` (`sday`, `type`, `url`, `sum`) VALUES ('" +
                        #              sday + "', '" + source_type + "', '" + str(d[0]) + "' ,'" + str(d[1]) + "')")

                    print('site =', str(d[0]), 'image count:', len(urls), 'pornographic images:', treu_sum, len(urls) - treu_sum, 'ratio:', treu_sum / len(urls))

        return data_result
    def __init__(self):
        self.fn_load_image = None
        self.sess = None
        self.model = OpenNsfwModel()
        self.data_path = None
Example #27
    parser.add_argument("target", help="output directory")

    parser.add_argument("-v",
                        "--export_version",
                        help="export model version",
                        default="1")

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    args = parser.parse_args()

    model = OpenNsfwModel()

    export_base_path = args.target
    export_version = args.export_version

    export_path = os.path.join(export_base_path, export_version)

    with tf.Session() as sess:
        model.build(weights_path=args.model_weights,
                    input_type=InputType.BASE64_JPEG)

        sess.run(tf.global_variables_initializer())

        builder = saved_model_builder.SavedModelBuilder(export_path)

        builder.add_meta_graph_and_variables(
def classify_nsfw_lambda(imgs):
    model = OpenNsfwModel()

    with tf.Session() as sess:

        def classity_nsfw(url):
            if '/a/' in url:
                return -1
            if not (url.endswith(".gif") or url.endswith(".jpg") or url.endswith(".jpeg") or url.endswith(".png")):
                return -1
            print("Downloading from '{}'".format(url))
            local_filename = url.split('/')[-1]
            try:
                r = requests.get(url, stream=True)
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=1024): 
                        if chunk: # filter out keep-alive new chunks
                            f.write(chunk)
                if '.gif' in local_filename:
                    print("gif found")
                    local_filename_gif = local_filename
                    local_filename = local_filename.replace('.gif', '.jpg')
                    print('calling convert {gif}[0] {jpg}'.format(gif=local_filename_gif, jpg=local_filename))
                    ret = subprocess.call('convert {gif}[0] {jpg}'.format(gif=local_filename_gif, jpg=local_filename), shell=True)
                    print('output: {}'.format(ret))
                elif '.png' in local_filename:
                    print("png found")
                    local_filename_png = local_filename
                    local_filename = local_filename.replace('.png', '.jpg')
                    print('convert {png} {jpg}'.format(png=local_filename_png, jpg=local_filename))
                    ret = subprocess.call('convert {png} {jpg}'.format(png=local_filename_png, jpg=local_filename), shell=True)
                    print('output: {}'.format(ret))
                image = fn_load_image(local_filename)
                predictions = \
                    sess.run(model.predictions,
                            feed_dict={model.input: image})
            
                print("Results for '{}'".format(local_filename))
                print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
                return predictions[0][1]
            except requests.exceptions.ConnectionError:
                print("Connection Err")
                return -1
            except tf.errors.InvalidArgumentError:
                print("Argument Err")
                return -1
            except (OSError, IOError) as e:
                print("FIle Not Found Err")
                return -1
            finally:
                dir_name = os.getcwd()
                test = os.listdir(dir_name)
                for item in test:
                    if item.endswith(".gif") or item.endswith(".jpg") or item.endswith(".png") or item.endswith("jpeg"):
                        os.remove(os.path.join(dir_name, item))
        model.build(weights_path='data/open_nsfw-weights.npy', input_type=InputType[InputType.TENSOR.name.upper()])

        fn_load_image = create_tensorflow_image_loader(sess)

        sess.run(tf.global_variables_initializer())

        outputObj = {'values': []}
        for img in imgs:
            p = classity_nsfw(img)
            outputObj['values'].append(p)
        return str(outputObj)
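An illustrative call of the helper above (not from the source); the URLs are placeholders, and unreachable or unsupported items come back as -1, as in the function.

result = classify_nsfw_lambda([
    "https://example.com/a.jpg",
    "https://example.com/b.jpg",
])
print(result)  # string of the form "{'values': [0.12, 0.87]}"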
Example #29
def main(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file",
                        help="Path to the input image.\
                        Only jpeg images are supported.")
    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-t",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[
                            InputType.TENSOR.name.lower(),
                            InputType.BASE64_JPEG.name.lower()
                        ])

    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())
        images = []
        images_names = []
        for i in os.listdir(args.input_file):
            images_names.append(i)
            image_path = os.path.join(args.input_file, i)
            image = fn_load_image(image_path)
            if images == []:
                images = image
                print(image_path)
            else:
                images = np.concatenate((images, image), axis=0)
        image = images

        predictions = \
            sess.run(model.predictions,
                     feed_dict={model.input: image})

        classify_to_folder(args, images_names, predictions)
Example #30
        default=InputType.TENSOR.name.lower(),
        help=
        "Input type. Warning: base64_jpeg does not work with the standard TFLite runtime since a lot of operations are not supported",
        choices=[
            InputType.TENSOR.name.lower(),
            InputType.BASE64_JPEG.name.lower()
        ])

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    args = parser.parse_args()

    model = OpenNsfwModel()

    export_path = args.target
    input_type = InputType[args.input_type.upper()]

    with tf.Session() as sess:
        model.build(weights_path=args.model_weights, input_type=input_type)

        sess.run(tf.global_variables_initializer())

        converter = tf.contrib.lite.TFLiteConverter.from_session(
            sess, [model.input], [model.predictions])
        tflite_model = converter.convert()

        with open(export_path, "wb") as f:
            f.write(tflite_model)
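Once the converter has written the .tflite file, it can be sanity-checked with the TFLite interpreter. This sketch is not part of the original snippet: it assumes a TensorFlow version that exposes tf.lite.Interpreter, a tensor-type model (1x224x224x3 float input) and an output path of open_nsfw.tflite.

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="open_nsfw.tflite")  # path is an assumption
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Run one dummy inference to confirm the converted graph executes.
dummy = np.zeros(input_details[0]['shape'], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]['index']))  # [[sfw, nsfw]]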
def main(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument("-s",
                        "--source",
                        required=True,
                        help="Folder containing the images to classify")

    parser.add_argument("-o",
                        "--output_file",
                        required=True,
                        help="Output file path")

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-b",
                        "--batch_size",
                        help="Number of images to \
                        classify simultaneously.",
                        type=int,
                        default=64)

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    args = parser.parse_args()
    batch_size = args.batch_size
    output_file = args.output_file

    input_type = InputType.TENSOR
    model = OpenNsfwModel()

    filenames = glob.glob(args.source + "/*.jpg")
    num_files = len(filenames)

    num_batches = int(num_files / batch_size)

    print("Found", num_files, " files")
    print("Split into", num_batches, " batches")

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    batch_iterator = None

    if args.image_loader == IMAGE_LOADER_TENSORFLOW:
        batch_iterator = create_tf_batch_iterator(filenames, batch_size)
    else:
        fn_load_image = create_yahoo_image_loader(expand_dims=False)
        batch_iterator = create_batch_iterator(filenames, batch_size,
                                               fn_load_image)

    with tf.Session(graph=tf.Graph(), config=config) as session:
        model.build(weights_path=args.model_weights, input_type=input_type)

        session.run(tf.global_variables_initializer())

        with tqdm(total=num_files) as progress_bar:
            with open(output_file, 'w') as o:
                o.write('File\tSFW Score\tNSFW Score\n')

                for batch_num, images in enumerate(batch_iterator):
                    predictions = \
                        session.run(model.predictions,
                                    feed_dict={model.input: images})

                    fi = (batch_num * batch_size)
                    for i, prediction in enumerate(predictions):
                        filename = os.path.basename(filenames[fi + i])
                        o.write('{}\t{}\t{}\n'.format(filename, prediction[0],
                                                      prediction[1]))

                    progress_bar.update(len(images))
Example #32
def main(argv):
    # parse inputs
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file", help="Path to the input video.")
    parser.add_argument("id_folder",
                        type=str,
                        nargs="+",
                        help="Folder containing ID folders")
    args = parser.parse_args()

    # initialize NSFW Model
    model = OpenNsfwModel()

    with tf.Graph().as_default():
        with tf.Session() as sess:

            # set variable defaults
            videoFile = args.input_file
            cap = cv2.VideoCapture(videoFile)
            frameRate = cap.get(5)  # get the frame rate
            totalFrameCount = cap.get(7)  # get the total number of frames
            img_size = 64
            margin = 0.4
            frameNsfw = 0
            isMinor = False
            minorDetected = False

            # set weights and initialize SFW model IsSFW
            with tf.variable_scope('IsSFW'):
                model.build(
                    weights_path="pretrained_models/open_nsfw-weights.npy")
                fn_load_image = None
                fn_load_image = create_yahoo_image_loader()
                sess.run(tf.global_variables_initializer())

            # initialize dlib face detector model and set variables
            detector = dlib.get_frontal_face_detector()
            model2 = WideResNet(img_size, 16, 8)()
            model2.load_weights("pretrained_models/weights.29-3.76_utk.hdf5")

            # initialize face identification model
            mtcnn = detect_and_align.create_mtcnn(sess, None)
            load_model("model/20170512-110547.pb")
            threshold = 1.0
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs for face identification model
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings,
                             images_placeholder, phase_train_placeholder,
                             threshold)

            while (cap.isOpened()):
                ret, frame = cap.read()
                frameId = cap.get(1)  # get the current frame number
                if (ret !=
                        True):  # if there is no video frame detected then exit
                    break

                # write video frame to disk and load as an image
                cv2.imwrite('./temp_files/temp.jpg', frame)
                image = fn_load_image('./temp_files/temp.jpg')

                # determine SFW status
                predictions = sess.run(model.predictions,
                                       feed_dict={model.input: image})
                if (predictions[0][1] >= 0.50):
                    frameNsfw = frameNsfw + 1
                    display_lbl = "NSFW"
                    AlertColor = [0, 0, 255]
                else:
                    display_lbl = "SFW"
                    AlertColor = [255, 0, 0]

                # detect faces in dlib face detection model
                image2 = frame
                image2_h, image2_w, _ = np.shape(image2)
                detected = detector(image2, 0)
                faces = np.empty((len(detected), img_size, img_size, 3))
                if len(detected) > 0:  # one or more faces were found in the frame
                    for i, d in enumerate(detected):
                        # extract the coordinates of the face
                        x1, y1 = d.left(), d.top()
                        x2, y2 = d.right() + 1, d.bottom() + 1
                        w, h = d.width(), d.height()
                        xw1 = max(int(x1 - margin * w), 0)
                        yw1 = max(int(y1 - margin * h), 0)
                        xw2 = min(int(x2 + margin * w), image2_w - 1)
                        yw2 = min(int(y2 + margin * h), image2_h - 1)
                        # draw a rectangle around the face
                        cv2.rectangle(image2, (x1, y1), (x2, y2), (255, 0, 0),
                                      2)
                        faces[i, :, :, :] = cv2.resize(
                            image2[yw1:yw2 + 1, xw1:xw2 + 1, :],
                            (img_size, img_size))
                        # record the rectangle height in case the face is near the top of the frame
                        rectangle_height = y2 - y1

                    # predict ages and genders of the detected faces with the WideResNet model
                    results = model2.predict(faces)
                    predicted_genders = results[0]
                    ages = np.arange(0, 101).reshape(101, 1)
                    predicted_ages = results[1].dot(ages).flatten()

                    # draw age/gender predictions next to each detected face
                    for i, d in enumerate(detected):
                        isMinor = False
                        if predicted_ages[i] < 18:  # detect if a minor is present in the video
                            isMinor = True
                            minorDetected = True
                        label = "{},{},{}".format(
                            int(predicted_ages[i]),
                            "M" if predicted_genders[i][0] < 0.5 else "F",
                            "-MINOR" if isMinor else "")
                        draw_label(image2, (d.left(), d.top()), label,
                                   rectangle_height)

                # Locate faces and landmarks in frame for identification
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)
                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)
                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)
                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        font = cv2.FONT_HERSHEY_COMPLEX_SMALL
                        cv2.putText(frame, matching_id,
                                    (bb[0] + 30, bb[3] + 5), font, 1,
                                    (255, 0, 255), 1, cv2.LINE_AA)

                # display whether frame is SFW or not
                percentageComplete = round((frameId) / (totalFrameCount) * 100)
                display_lbl = display_lbl + " " + str(
                    percentageComplete) + "% fps= " + str(round(frameRate, 2))
                size = cv2.getTextSize(display_lbl, cv2.FONT_HERSHEY_SIMPLEX,
                                       0.4, 1)[0]
                cv2.rectangle(image2, (1, 15 - size[1]), (1 + size[0], 20),
                              AlertColor, cv2.FILLED)
                cv2.putText(image2,
                            display_lbl, (1, 19),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.4, (255, 255, 255),
                            1,
                            lineType=cv2.LINE_AA)

                # display the processed frame; waitKey(1) keeps playback as fast as possible
                cv2.imshow('frame2', image2)
                cv2.waitKey(1)

            # end of video
            cap.release()
            cv2.destroyAllWindows()
            if os.path.isfile('./temp_files/temp.jpg'):
                os.remove('./temp_files/temp.jpg')

        # print summary
        if totalFrameCount > 0:
            if (frameNsfw > 0):
                if (minorDetected):
                    print("This video contains minors, and " +
                          str(round((frameNsfw / totalFrameCount * 100), 1)) +
                          "% of the video contains NSFW elements.")
                else:
                    print(
                        str(round((frameNsfw / totalFrameCount * 100), 1)) +
                        "% of the video contains NSFW elements.")
            else:
                print("Video is SFW.")
        else:
            print(
                "No video frames were detected!  Please check the file type or file name."
            )
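# The summary logic above can be pulled into a small standalone helper so the
# NSFW-ratio / minor-detection reporting can be exercised without OpenCV or
# TensorFlow. This is only an illustrative sketch; the helper name
# summarize_video and its signature are assumptions, not part of the example.
def summarize_video(frameNsfw, totalFrameCount, minorDetected):
    if totalFrameCount <= 0:
        return ("No video frames were detected! "
                "Please check the file type or file name.")
    if frameNsfw == 0:
        return "Video is SFW."
    pct = round(frameNsfw / totalFrameCount * 100, 1)
    if minorDetected:
        return ("This video contains minors, and " + str(pct) +
                "% of the video contains NSFW elements.")
    return str(pct) + "% of the video contains NSFW elements."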
def main(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file",
                        help="Path to the input image.\
                        Only jpeg images are supported.")

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-cb", "--callback", default='', help="Callback Url")

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-i",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[
                            InputType.TENSOR.name.lower(),
                            InputType.BASE64_JPEG.name.lower()
                        ])

    args = parser.parse_args()

    model = OpenNsfwModel()
    current_path = os.getcwd()
    dir_path = os.path.join(current_path, 'image_temp')
    if args.input_file.startswith('http'):
        image_file_path = utils.download(args.input_file, dir_path)
        logger.info("image download to: " + image_file_path)
    else:
        image_file_path = args.input_file

    if not image_file_path.lower().endswith('.jpg'):
        jpg_image_file_path = utils.convPNG2JPG(image_file_path)
        if jpg_image_file_path is False:
            logger.error('Failed to convert image to JPEG: ' + image_file_path)
            exit(1)

        os.remove(image_file_path)
        image_file_path = jpg_image_file_path

    with tf.compat.v1.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(
                    tf.compat.v1.Session(graph=tf.Graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64

            def fn_load_image(filename):
                return np.array(
                    [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.compat.v1.global_variables_initializer())

        image = fn_load_image(image_file_path)

        predictions = \
            sess.run(model.predictions,
                     feed_dict={model.input: image})

        logger.info("Results for '{}'".format(args.input_file))
        logger.info(
            "\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
        if args.callback:
            param = {
                'sfw': str(predictions[0][0]),
                'nsfw': str(predictions[0][1])
            }
            ret = utils.get(args.callback, param)
            logger.info(ret)
    if args.input_file.startswith('http'):
        os.remove(image_file_path)
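# The utils.get(...) call above delegates the callback to a helper that is not
# shown in this example. A plausible sketch of such a callback notification,
# written here with the requests library, is given below; the function name
# notify_callback and the use of requests are assumptions, not the example's
# actual implementation.
import requests

def notify_callback(callback_url, predictions):
    # Pass the SFW/NSFW scores to the callback URL as query parameters.
    param = {
        'sfw': str(predictions[0][0]),
        'nsfw': str(predictions[0][1])
    }
    resp = requests.get(callback_url, params=param, timeout=10)
    return resp.text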