def session_run(self, source_type):
    """Scan each site listed in ``self.data_path``, classify its images with
    the Open NSFW model, and record sites whose NSFW-image ratio is >= 0.2.

    Args:
        source_type: Source label used by the (currently disabled) MySQL
            insert statement.
    """
    with tf.compat.v1.Session() as self.sess:
        # The original computed InputType[InputType.TENSOR.name.lower().upper()],
        # which is simply InputType.TENSOR.
        input_type = InputType.TENSOR
        self.model.build(
            weights_path='data/open_nsfw-weights.npy', input_type=input_type)

        if input_type == InputType.TENSOR:
            # BUG FIX: the original tested
            # `IMAGE_LOADER_YAHOO == IMAGE_LOADER_TENSORFLOW` (two constants),
            # which is always False — the Yahoo loader was always selected.
            self.fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            self.fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        self.sess.run(tf.compat.v1.global_variables_initializer())

        # ======
        data_txt = pd.read_table(
            self.data_path, sep='\t', header=None).values.tolist()
        data_result = []

        # Loop-invariant request headers, hoisted out of the loops.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'}

        for d in data_txt:
            urls = get_urls(str(d[0]))
            if not urls:
                continue
            nsfw_count = 0  # was misspelled `treu_sum`
            for url in urls:
                try:
                    res = requests.get(url, headers=headers, timeout=20)
                except Exception:  # network failure: skip this image
                    continue
                with open('img/text.jpg', 'wb') as f:
                    f.write(res.content)
                # NOTE: the stray `f.close()` after the `with` block was removed.

                try:
                    image = self.fn_load_image('img/text.jpg')
                except Exception:  # unreadable / corrupt image: skip
                    continue
                predictions = self.sess.run(
                    self.model.predictions,
                    feed_dict={self.model.input: image})
                if float(predictions[0][1]) >= 0.8:  # NSFW score threshold
                    nsfw_count += 1

            if nsfw_count / len(urls) >= 0.2:
                data_result.append(str(d[0]))
                # SYNTAX FIX: the insert below was half-commented in the
                # original, leaving an orphaned expression; it is now fully
                # commented out.  NOTE(review): if re-enabled, this builds SQL
                # by string concatenation — switch to a parameterized query.
                # sday = time.strftime('%Y-%m-%d', time.localtime(time.time()))
                # mysql_insert("INSERT INTO `pornographic_website_detection_zsf` (`sday`, `type`, `url`, `sum`) VALUES ('" +
                #              sday + "', '" + source_type + "', '" + str(d[0]) + "' ,'" + str(d[1]) + "')")

            print('网站 = ', str(d[0]), '图片数量:', len(urls), '色情图片情况', nsfw_count, len(urls) - nsfw_count, '占比:', nsfw_count / len(urls))
# Пример #2
# 0
def classifier(img, url):
    """Score a single image with the Open NSFW model.

    Args:
        img: Path to a jpeg image file.
        url: Source URL, echoed back in the result.

    Returns:
        dict with keys 'url', 'sfw' and 'nsfw' (scores as strings).
    """
    input_file = img
    model_weights = 'data/open_nsfw-weights.npy'

    model = OpenNsfwModel()

    with tf.Session() as sess:
        # input_type was hard-coded to 'tensor', so this is always TENSOR
        # and the BASE64_JPEG branch of the original was dead code.
        input_type = InputType.TENSOR
        model.build(weights_path=model_weights, input_type=input_type)

        # image_loader was hard-coded to 'yahoo', which never equals
        # IMAGE_LOADER_TENSORFLOW — the Yahoo loader was always used.
        fn_load_image = create_yahoo_image_loader()

        sess.run(tf.global_variables_initializer())
        image = fn_load_image(input_file)
        predictions = sess.run(model.predictions,
                               feed_dict={model.input: image})
        # NOTE: redundant sess.close() removed — the `with` block closes it.
    return {
        'url': url,
        'sfw': str(predictions[0][0]),
        'nsfw': str(predictions[0][1])
    }
# Пример #3
# 0
def main(argv):
    """Classify one jpeg given on the command line and print its scores."""
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file",
                        help="Path to the input image.\
                        Only jpeg images are supported.")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")
    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])
    parser.add_argument("-i", "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[InputType.TENSOR.name.lower(),
                                 InputType.BASE64_JPEG.name.lower()])
    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.Session() as sess:
        resolved_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=resolved_type)

        # Pick the image-loading strategy requested on the command line.
        fn_load_image = None
        if resolved_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(
                    tf.Session(graph=tf.get_default_graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif resolved_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        scores = sess.run(
            model.predictions,
            feed_dict={model.input: fn_load_image(args.input_file)})

        print("Results for '{}'".format(args.input_file))
        print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*scores[0]))
def classify():
    """Request handler: score the image at ``?image_path=...`` using the
    module-level ``sess``/``model`` and return SFW/NSFW scores as JSON."""
    filename = request.args["image_path"]
    loader = create_yahoo_image_loader()
    scores = sess.run(model.predictions,
                      feed_dict={model.input: loader(filename)})
    row = scores[0].tolist()
    return jsonify(dict(sfw=row[0], nsfw=row[1]))
# Пример #5
# 0
def main(argv):
    """Classify one jpeg from the command line and print SFW/NSFW scores.

    Also defines (but never calls) `get_tf_image`, a helper that loads an
    image with a loader matching the CLI options.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file", help="Path to the input image.\
                        Only jpeg images are supported.")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")
    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])
    parser.add_argument("-i", "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[InputType.TENSOR.name.lower(),
                                 InputType.BASE64_JPEG.name.lower()])

    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.Session() as sess:
        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None
        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(
                    tf.Session(graph=tf.Graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        image = fn_load_image(args.input_file)

        predictions = sess.run(model.predictions,
                               feed_dict={model.input: image})

        print("Results for '{}'".format(args.input_file))
        print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))

        def get_tf_image(image_path):
            """Load `image_path` with a loader matching the CLI options.

            BUG FIX: the original referenced an undefined name
            `image_loader` (NameError when input_type is TENSOR) and its
            BASE64 lambda ignored its `filename` argument in favour of the
            enclosing `image_path`.  Both are corrected here.
            """
            if input_type == InputType.TENSOR:
                if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                    loader = create_tensorflow_image_loader(
                        tf.Session(graph=tf.Graph()))
                else:
                    loader = create_yahoo_image_loader()
            elif input_type == InputType.BASE64_JPEG:
                import base64
                loader = lambda filename: np.array(
                    [base64.urlsafe_b64encode(open(filename, "rb").read())])

            print("predicting nsfw for image {}".format(image_path))

            image = loader(image_path)
            return image
# Пример #7
# 0
def predict_nsfw_faster(image_path):
    """Return "sfw" when the image's SFW score exceeds 0.94, else "nsfw"."""
    print("predicting nsfw for the image: ", image_path)

    model = OpenNsfwModel()

    with tf.Session() as sess:
        # InputType[InputType.TENSOR.name.lower().upper()] is simply TENSOR.
        model.build(weights_path="open_nsfw-weights.npy",
                    input_type=InputType.TENSOR)

        # image_loader was hard-coded to IMAGE_LOADER_TENSORFLOW, so the
        # TensorFlow image loader (backed by its own graph/session) is used.
        fn_load_image = create_tensorflow_image_loader(
            tf.Session(graph=tf.Graph()))

        sess.run(tf.global_variables_initializer())

        scores = sess.run(model.predictions,
                          feed_dict={model.input: fn_load_image(image_path)})

        sfw_score = scores[0][0]
        print("\tSFW score:\t{}".format(scores[0][0]))
        print("\tNSFW score:\t{}".format(scores[0][1]))

        return "sfw" if sfw_score > 0.94 else "nsfw"
# Пример #8
# 0
    def __init__(self,
        model_weights = '/home/citao/github/tensorflow-open_nsfw/data/open_nsfw-weights.npy',
        image_loader = 'IMAGE_LOADER_YAHOO',
        input_type = InputType.TENSOR.name.lower()
    ):
        """Build the Open NSFW model and choose an image-loading function.

        NOTE(review): the `image_loader` default is the *string*
        'IMAGE_LOADER_YAHOO' (constant name in quotes).  It never equals
        IMAGE_LOADER_TENSORFLOW, so the Yahoo loader is picked by default —
        presumably the intended outcome; confirm with the class's callers.
        """
        self._sess = tf.Session()
        self._model = OpenNsfwModel()
        resolved_type = InputType[input_type.upper()]
        self._model.build(weights_path=model_weights,
                          input_type=resolved_type)

        self.fn_load_image = None
        if resolved_type == InputType.TENSOR:
            if image_loader == IMAGE_LOADER_TENSORFLOW:
                self.fn_load_image = create_tensorflow_image_loader(
                    tf.Session(graph=tf.Graph()))
            else:
                self.fn_load_image = create_yahoo_image_loader()
        elif resolved_type == InputType.BASE64_JPEG:
            import base64
            self.fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        self._sess.run(tf.global_variables_initializer())
def nsfw_main():
    """Score the hard-coded image "girl.jpg" on CPU and print the result."""
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # hide GPUs: run on CPU
    IMAGE_LOADER_TENSORFLOW = "tensorflow"

    # Poor man's argparse namespace with fixed values.
    class args:
        pass
    args.input_file = "girl.jpg"
    args.model_weights = "data/open_nsfw-weights.npy"
    args.image_loader = IMAGE_LOADER_TENSORFLOW
    args.input_type = InputType.TENSOR.name.lower()

    model = OpenNsfwModel()
    # This is important for reset graph
    tf.reset_default_graph()

    with tf.Session() as sess:
        resolved = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=resolved)

        fn_load_image = None
        if resolved == InputType.TENSOR:
            fn_load_image = (create_tensorflow_image_loader(sess)
                             if args.image_loader == IMAGE_LOADER_TENSORFLOW
                             else create_yahoo_image_loader())
        elif resolved == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())
        image = fn_load_image(args.input_file)
        predictions = sess.run(model.predictions,
                               feed_dict={model.input: image})

        print("Results for '{}'".format(args.input_file))
        print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
def main(argv):
    """Download (if given a URL), convert to jpeg if needed, classify, and
    optionally POST the scores to a callback URL."""
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file",
                        help="Path to the input image.\
                        Only jpeg images are supported.")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")
    parser.add_argument("-cb", "--callback", default='', help="Callback Url")
    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])
    parser.add_argument("-i", "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[InputType.TENSOR.name.lower(),
                                 InputType.BASE64_JPEG.name.lower()])

    args = parser.parse_args()

    model = OpenNsfwModel()
    dir_path = os.path.join(os.getcwd(), 'image_temp')

    # Remote input: download into the temp directory first.
    if 'http' in args.input_file:
        image_file_path = utils.download(args.input_file, dir_path)
        logger.info("image download to: " + image_file_path)
    else:
        image_file_path = args.input_file

    # Non-jpeg input: convert (convPNG2JPG returns False on failure).
    if '.jpg' not in image_file_path:
        jpg_image_file_path = utils.convPNG2JPG(image_file_path)
        if jpg_image_file_path is False:
            logger.error('Conv Image Fail!' + image_file_path)
            exit(1)

        os.remove(image_file_path)
        image_file_path = jpg_image_file_path

    with tf.compat.v1.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                # BUG FIX: was `tf.Session(...)`, which does not exist under
                # TF2; use the compat.v1 API like the rest of this function.
                fn_load_image = create_tensorflow_image_loader(
                    tf.compat.v1.Session(graph=tf.Graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64

            def fn_load_image(filename):
                return np.array(
                    [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.compat.v1.global_variables_initializer())

        image = fn_load_image(image_file_path)

        predictions = sess.run(model.predictions,
                               feed_dict={model.input: image})

        logger.info("Results for '{}'".format(args.input_file))
        logger.info(
            "\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
        if args.callback != '':
            param = {
                'sfw': str(predictions[0][0]),
                'nsfw': str(predictions[0][1])
            }
            ret = utils.get(args.callback, param)
            logger.info(ret)
    # Clean up the downloaded temp file.
    if 'http' in args.input_file:
        os.remove(image_file_path)
def main(argv):
    """Batch-classify every *.jpg under --source and write a TSV of scores."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--source", required=True,
                        help="Folder containing the images to classify")
    parser.add_argument("-o", "--output_file", required=True,
                        help="Output file path")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")
    parser.add_argument("-b", "--batch_size", type=int, default=64,
                        help="Number of images to \
                        classify simultaneously.")
    parser.add_argument("-l", "--image_loader", default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    args = parser.parse_args()
    batch_size = args.batch_size

    model = OpenNsfwModel()

    filenames = glob.glob(args.source + "/*.jpg")
    num_files = len(filenames)
    print("Found", num_files, " files")
    print("Split into", int(num_files / batch_size), " batches")

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # grab GPU memory on demand

    # Choose the batch source matching the requested loader.
    if args.image_loader == IMAGE_LOADER_TENSORFLOW:
        batch_iterator = create_tf_batch_iterator(filenames, batch_size)
    else:
        batch_iterator = create_batch_iterator(
            filenames, batch_size,
            create_yahoo_image_loader(expand_dims=False))

    with tf.Session(graph=tf.Graph(), config=config) as session:
        model.build(weights_path=args.model_weights,
                    input_type=InputType.TENSOR)
        session.run(tf.global_variables_initializer())

        with tqdm(total=num_files) as progress_bar:
            with open(args.output_file, 'w') as o:
                o.write('File\tSFW Score\tNSFW Score\n')

                for batch_num, images in enumerate(batch_iterator):
                    scores = session.run(model.predictions,
                                         feed_dict={model.input: images})
                    base = batch_num * batch_size
                    for offset, prediction in enumerate(scores):
                        name = os.path.basename(filenames[base + offset])
                        o.write('{}\t{}\t{}\n'.format(name, prediction[0],
                                                      prediction[1]))
                    progress_bar.update(len(images))
# Пример #12
# 0
def main(argv):
    """Classify every image in the folder `input_file` as one batch and hand
    the predictions to `classify_to_folder`."""
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file",
                        help="Path to the input image.\
                        Only jpeg images are supported.")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")
    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])
    parser.add_argument("-t", "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[InputType.TENSOR.name.lower(),
                                 InputType.BASE64_JPEG.name.lower()])

    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        # BUG FIX: the original grew one ndarray with repeated np.concatenate
        # (quadratic) and tested `images == []`, which is an ambiguous
        # ndarray-vs-list comparison once the first image is loaded.  Collect
        # the per-image arrays and concatenate once at the end instead.
        loaded = []
        images_names = []
        for entry in os.listdir(args.input_file):
            images_names.append(entry)
            image_path = os.path.join(args.input_file, entry)
            if not loaded:
                print(image_path)  # the original printed only the first path
            loaded.append(fn_load_image(image_path))

        image = np.concatenate(loaded, axis=0) if loaded else []

        predictions = \
            sess.run(model.predictions,
                     feed_dict={model.input: image})

        classify_to_folder(args, images_names, predictions)
def main(argv):
    """Classify every *.jpg under --source with a frozen graph def and write
    a TSV of SFW/NSFW scores to --output_file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--source", required=True,
                        help="Folder containing the images to classify")
    parser.add_argument("-o", "--output_file", required=True,
                        help="Output file path")
    parser.add_argument("-m", "--frozen_graph_file", required=True,
                        help="The frozen graph def file")
    parser.add_argument("-b", "--batch_size", type=int, default=64,
                        help="Number of images to \
                        classify simultaneously.")

    args = parser.parse_args()
    batch_size = args.batch_size
    output_file = args.output_file

    filenames = glob.glob(args.source + "/*.jpg")
    num_files = len(filenames)
    print("Found", num_files, " files")
    print("Split into", int(num_files / batch_size), " batches")

    # (An extra unused ConfigProto in the original was dropped; only the
    # session config below is needed.)
    batch_iterator = create_batch_iterator(
        filenames, batch_size, create_yahoo_image_loader(expand_dims=False))

    trt_graph = get_frozen_graph(args.frozen_graph_file)
    # Create session and load graph
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf_sess = tf.Session(config=tf_config)
    tf.import_graph_def(trt_graph, name='')

    # input and output tensor names.
    input_tensor_name = "input" + ":0"
    output_tensor_name = "predictions" + ":0"
    print("input_tensor_name: {}\noutput_tensor_name: {}\n".format(
        input_tensor_name, output_tensor_name))

    output_tensor = tf_sess.graph.get_tensor_by_name(output_tensor_name)

    with tqdm(total=num_files) as progress_bar:
        with open(output_file, 'w') as o:
            o.write('File\tSFW Score\tNSFW Score\n')

            for batch_num, images in enumerate(batch_iterator):
                scores = tf_sess.run(output_tensor,
                                     feed_dict={input_tensor_name: images})
                base = batch_num * batch_size
                for offset, prediction in enumerate(scores):
                    name = os.path.basename(filenames[base + offset])
                    o.write('{}\t{}\t{}\n'.format(name, prediction[0],
                                                  prediction[1]))
                progress_bar.update(len(images))
# Пример #14
# 0
def main(argv):
    """Strip NSFW frames from an mp4 and write the rest to <name>_Clean.avi."""
    # parse input
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file",
                        help="Path to the input video.\
                        Only mp4 is supported.")
    args = parser.parse_args()

    # initialize NSFW Model
    model = OpenNsfwModel()

    frameTotal = 0  # frames examined in the loop
    frameNsfw = 0   # frames dropped as NSFW

    # load video (argument)
    videoFile = args.input_file
    outfilename = videoFile[:-4] + "_Clean.avi"

    with tf.Session() as sess:

        # set weights and initialize SFW model
        model.build(weights_path="pretrained_models/open_nsfw-weights.npy")
        fn_load_image = create_yahoo_image_loader()
        sess.run(tf.global_variables_initializer())

        cap = cv2.VideoCapture(videoFile)
        frameRate = cap.get(5)  # frame rate
        ret, frame = cap.read()  # first frame: used only for dimensions
        height, width, nchannels = frame.shape
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter(outfilename, fourcc, math.floor(frameRate),
                              (width, height))
        # SYNTAX FIX: the original declared `global flag` / `global frame_skip`
        # here, after frame_skip was already assigned locally — a SyntaxError.
        # Neither global was used, so both declarations (and the unused
        # frame_skip variable) were removed.
        while True:
            ret, frame = cap.read()
            if not ret:  # no more frames: exit
                break

            cv2.imwrite('./temp_files/temp.jpg', frame)
            image = fn_load_image('./temp_files/temp.jpg')
            frameTotal += 1
            predictions = sess.run(model.predictions,
                                   feed_dict={model.input: image})
            if predictions[0][1] <= 0.50:  # NSFW score at most 0.5: keep frame
                out.write(frame)
            else:
                frameNsfw += 1

        # print summary after program runs
        if frameNsfw > 0:
            print("Video contained NSFW content.")
        else:
            print("Video is SFW.")
        # BUG FIX: guard the percentage against division by zero when the
        # video yields no frames beyond the first.
        if frameTotal > 0:
            print((frameNsfw / frameTotal) * 100)
        cap.release()
        out.release()

    print("Done")
# Пример #15
# 0
def extract_nsfw_features(labeled_image_root_dir,
                          image_input_type,
                          image_loader_type,
                          model_dir,
                          has_supplement=False,
                          phase='train',
                          return_image_files=False):
    """Project labeled images through a saved NSFW model into feature vectors.

    Args:
        labeled_image_root_dir: Root directory of the labeled image set.
        image_input_type: InputType name, e.g. 'tensor' or 'base64_jpeg'.
        image_loader_type: Loader selector compared to IMAGE_LOADER_TENSORFLOW.
        model_dir: Directory of the exported SavedModel (tag "serve").
        has_supplement: Also load the four hard-coded supplement test parts.
        phase: 'train' uses train_data_source, anything else test_data_source.
        return_image_files: When True, also return the image file list.

    Returns:
        (X, y) as numpy arrays, plus image_files when return_image_files.
    """
    # load train data set
    with utils.timer('Load image files'):
        if phase == 'train':
            image_files, labels = data_utils.load_files(
                labeled_image_root_dir, train_data_source, sample_rate)
        else:
            image_files, labels = data_utils.load_files(
                labeled_image_root_dir, test_data_source, sample_rate)
        if has_supplement:
            for part in [
                    'test_0819_part1_1', 'test_0819_part1_2',
                    'test_0819_part1_3', 'test_0819_part3_1'
            ]:
                supplement_dir = '{}/{}'.format(
                    '/'.join(labeled_image_root_dir.split('/')[:-1]), part)
                image_files_supplement, labels_supplement = data_utils.load_files(
                    supplement_dir, 'test_0819_part1', sample_rate)
                print('before supplement %s' % len(image_files))
                image_files = np.concatenate(
                    [image_files, image_files_supplement], axis=0)
                print('after supplement %s' % len(image_files))
                labels = np.concatenate([labels, labels_supplement], axis=0)
        print('image files %s' % len(image_files))

    X_train = []
    y_train = []
    # transform original image into nsfw features
    with tf.Session(graph=tf.Graph()) as sess:

        input_type = InputType[image_input_type.upper()]

        # function of loading image
        fn_load_image = None
        if input_type == InputType.TENSOR:
            if image_loader_type == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        # load model
        with utils.timer('Load model'):
            tf.saved_model.loader.load(sess, ["serve"], model_dir)
            graph = tf.get_default_graph()
            # extract tensor from graph
            input_image = graph.get_tensor_by_name("input:0")
            projected_features = graph.get_tensor_by_name('nsfw_features:0')
            # Kept although unused: also validates the tensor exists.
            predict_proba = graph.get_tensor_by_name("predictions:0")

        nsfw_batch_size = 512
        # extract projection features
        with utils.timer('Projection with batching'):
            start = 0
            # BUG FIX: batch bounds computed with min() so `batch_images` is
            # never referenced unbound when image_files is empty.
            while start < len(image_files):
                end = min(start + nsfw_batch_size, len(image_files))
                with utils.timer('batch(%s) prediction' % nsfw_batch_size):
                    batch_images = np.array([
                        fn_load_image(image_files[i]).tolist()
                        for i in range(start, end)
                    ])
                    X_train.extend(
                        sess.run(projected_features,
                                 feed_dict={
                                     input_image: batch_images
                                 }).tolist())
                    y_train.extend(labels[start:end])
                print('projection %s done.' % end)
                start = end
                del batch_images  # release the batch before the next allocation
                gc.collect()
    # NOTE: the redundant `sess.close()` after the `with` block was removed;
    # the context manager already closes the session.

    # sanity check
    assert len(y_train) == len(labels)

    if return_image_files:
        return np.array(X_train), np.array(y_train), image_files
    else:
        return np.array(X_train), np.array(y_train)
def main(argv):
    """Fetch image URLs in batches, score each with the model, and log the
    max probability for every URL."""
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file", default="urls.txt",
                        help="Path to the input image.\
                        Only jpeg images are supported.")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")
    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])
    parser.add_argument("-t", "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[InputType.TENSOR.name.lower(),
                                 InputType.BASE64_JPEG.name.lower()])

    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        # NOTE(review): fn_load_image is built but never called below —
        # images come from read_frame_from_video(); kept for compatibility.
        fn_load_image = None
        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())
        for urls in faked_call_4_urls():
            for url in urls:
                md5 = hashlib.md5(url.encode('utf-8')).hexdigest()
                # `path` is a module-level download directory defined elsewhere.
                name = path + "/" + md5 + '.jpg'
                if not os.path.exists(name):  # idiom fix: was `== False`
                    print("begin to download imgurl=", url)
                    name = safe_download("temp_image", url)
                else:
                    print("exist file name=" + name)
                image = read_frame_from_video(name)
                print("need to detect image size ", len(image))
                predictions = sess.run(model.predictions,
                                       feed_dict={model.input: image})
                write_to_file(url + "\t" + str(max_prob(predictions)))
                print(url + "\t" + str(max_prob(predictions)))
                if os.path.exists(name):  # idiom fix: was `== True`
                    os.remove(name)
                    print("delete file name=" + name)
# Пример #17
# 0
def main(argv):
    """Sample a video at roughly one frame per second and classify each
    sampled frame with the Open NSFW model; prints "NSFW" if any sampled
    frame scores >= 0.5, then the NSFW frame percentage.

    NOTE(review): the code from the second `parser.add_argument("-t", ...)`
    call onwards looks like a fragment of a different script fused onto
    this one; adding a required option after `parse_args()` and re-parsing
    the same argv will normally abort. Confirm against the original source.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file",
                        help="Path to the input image.\
                        Only jpeg images are supported.")

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-i",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type")

    args = parser.parse_args()
    model = OpenNsfwModel()
    frameTotal = 0  # number of frames actually classified
    frameNsfw = 0   # number of classified frames scoring >= 0.5 NSFW
    with tf.compat.v1.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        # Pick the image-loading mechanism matching the input type.
        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(
                    tf.Session(graph=tf.Graph()))
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.compat.v1.global_variables_initializer())

        #image = fn_load_image(args.input_file)
        videoFile = args.input_file

        cap = cv2.VideoCapture(videoFile)
        frameRate = cap.get(5)  #frame rate
        # Classify every floor(frameRate)-th frame, i.e. ~1 per second.
        while (cap.isOpened()):
            frameId = cap.get(1)  #current frame number
            ret, frame = cap.read()
            if (ret != True):
                break
            if (frameId % math.floor(frameRate) == 0):
                # Round-trip through a jpeg on disk because fn_load_image
                # only accepts a file path.
                cv2.imwrite('./images/temp.jpg', frame)
                image = fn_load_image('./images/temp.jpg')
                frameTotal = frameTotal + 1

                predictions = \
                    sess.run(model.predictions,
                        feed_dict={model.input: image})
                if (predictions[0][1] >= 0.50):
                    frameNsfw = frameNsfw + 1


#print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))

        cap.release()
        if (frameNsfw > 0):
            print("NSFW")
        else:
            print("SFW")
    # NOTE(review): raises ZeroDivisionError when no frame was sampled
    # (frameTotal == 0), e.g. for an unreadable video file.
    print(str((frameNsfw / frameTotal) * 100))
    # NOTE(review): everything below re-parses argv with a newly added
    # required option; it appears to come from a separate json-request tool.
    parser.add_argument("-t", "--target", required=True,
                        choices=['ml-engine', 'tf-serving'],
                        help="Create json request for ml-engine or tensorflow-serving")

    args = parser.parse_args()
    target = args.target

    input_type = InputType[args.input_type.upper()]

    image_data = None

    # Build the request payload: a raw tensor or a base64-encoded jpeg.
    if input_type == InputType.TENSOR:
        fn_load_image = None

        if args.image_loader == IMAGE_LOADER_TENSORFLOW:
            with tf.Session() as sess:
                fn_load_image = create_tensorflow_image_loader(sess)
                sess.run(tf.global_variables_initializer())
                image_data = fn_load_image(args.input_file)[0]
        else:
            image_data = create_yahoo_image_loader(tf.Session(graph=tf.Graph()))(args.input_file)[0]
    elif input_type == InputType.BASE64_JPEG:
        import base64
        image_data = base64.urlsafe_b64encode(open(args.input_file, "rb").read()).decode("ascii")

    if target == "ml-engine":
        print(json.dumps({PREDICT_INPUTS: image_data}, cls=NumpyEncoder))
    elif target == "tf-serving":
        print(json.dumps({"instances": [image_data]}, cls=NumpyEncoder))
def main():
    """Classify every image under ``dataset`` with the Open NSFW model.

    Uses module-level globals ``model_weights`` and ``image_loader`` for
    configuration.

    Returns:
        dict with keys ``image_list`` (file paths), ``result_list``
        (NSFW probabilities) and ``remark`` (per-image verdict strings).
    """
    model = OpenNsfwModel()

    with tf.Session() as sess:

        # Input type is hard-wired to TENSOR here (argparse handling
        # was removed from this variant).
        input_type = InputType.TENSOR
        model.build(weights_path=model_weights, input_type=input_type)

        fn_load_image = None

        # Pick the image-loading mechanism matching the input type.
        if input_type == InputType.TENSOR:
            if image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        result_list = []
        remark = []
        from os_utils import get_file_list
        file_list = get_file_list("dataset")

        for file in file_list:
            # PNGs may carry an alpha channel the model cannot take;
            # convert them to a temporary RGB jpeg first.
            made_temp = False
            if file.endswith(".png"):
                from skimage import io
                from skimage.color import rgba2rgb
                im = io.imread(file)
                im_rgb = rgba2rgb(im)
                io.imsave("current_temp.jpeg", im_rgb)
                input_file = "current_temp.jpeg"
                made_temp = True
            else:
                input_file = file

            image = fn_load_image(input_file)

            predictions = \
                sess.run(model.predictions,
                         feed_dict={model.input: image})
            result_list.append(predictions[0][1])
            print("Results for '{}'".format(input_file))
            print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
            if predictions[0][1] >= 0.03:
                print("\t低俗图片")
                remark.append("低俗")
            else:
                print("\t不是低俗图片")
                remark.append("正常")

            print(predictions[0][1])

            # Remove the temp file only when one was actually created.
            # (The original shelled out to `rm -rf current_temp.jpeg`
            # unconditionally on every iteration.)
            if made_temp and os.path.exists("current_temp.jpeg"):
                os.remove("current_temp.jpeg")

        result_dict = {"image_list": file_list,
                       "result_list": result_list,
                       "remark": remark}

    return result_dict
Пример #20
0
def main(argv):
    """Batch-classify every *.jpg in a folder and write a TSV of
    per-file SFW/NSFW scores to the requested output path."""
    parser = argparse.ArgumentParser()

    parser.add_argument("-s", "--source", required=True,
                        help="Folder containing the images to classify")

    parser.add_argument("-o", "--output_file", required=True,
                        help="Output file path")

    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-b", "--batch_size", help="Number of images to \
                        classify simultaneously.", type=int, default=64)

    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    args = parser.parse_args()
    batch_size = args.batch_size
    output_file = args.output_file

    input_type = InputType.TENSOR
    model = OpenNsfwModel()

    filenames = glob.glob(args.source + "/*.jpg")
    num_files = len(filenames)
    num_batches = int(num_files / batch_size)

    print("Found", num_files, " files")
    print("Split into", num_batches, " batches")

    # Let TF grab GPU memory on demand instead of reserving it all.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    if args.image_loader == IMAGE_LOADER_TENSORFLOW:
        batch_iterator = create_tf_batch_iterator(filenames, batch_size)
    else:
        # expand_dims=False: the iterator stacks images into batches itself.
        loader = create_yahoo_image_loader(expand_dims=False)
        batch_iterator = create_batch_iterator(filenames, batch_size, loader)

    with tf.Session(graph=tf.Graph(), config=config) as session:
        model.build(weights_path=args.model_weights,
                    input_type=input_type)
        session.run(tf.global_variables_initializer())

        with tqdm(total=num_files) as progress_bar, \
                open(output_file, 'w') as o:
            o.write('File\tSFW Score\tNSFW Score\n')

            for batch_num, images in enumerate(batch_iterator):
                predictions = session.run(model.predictions,
                                          feed_dict={model.input: images})

                # Map each prediction back to its source filename.
                base_index = batch_num * batch_size
                for offset, prediction in enumerate(predictions):
                    filename = os.path.basename(filenames[base_index + offset])
                    o.write('{}\t{}\t{}\n'.format(filename,
                                                  prediction[0],
                                                  prediction[1]))

                progress_bar.update(len(images))
Пример #21
0
def main(argv):
    """Predict NSFW scores for a directory of images in fixed-size batches
    and save the results as a CSV.

    Relies on module-level globals not visible in this chunk:
    ``data_source``, ``sample_rate``, ``level`` and ``config``, plus the
    project's ``utils``, ``data_utils`` and ``plot_utils`` helpers.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '-i',
        "--input_dir",
        default=config.test_data_set[data_source],
        help="Path to the input image. Only jpeg images are supported.")

    parser.add_argument("-m",
                        "--model_weights",
                        default=config.nsfw_model_weight_file,
                        help="Path to trained model weights file")

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-t",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[
                            InputType.TENSOR.name.lower(),
                            InputType.BASE64_JPEG.name.lower()
                        ])

    args = parser.parse_args()

    with utils.timer('Load image files'):
        image_files, labels = data_utils.load_files(args.input_dir,
                                                    data_source, sample_rate)
        print('image files %s' % len(image_files))

    model = OpenNsfwModel()

    predictions = []

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]

        with utils.timer('Load model weight'):
            model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        # Pick the image-loading mechanism matching the input type.
        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        with utils.timer('Prediction'):
            # Slide a [start, end) window of config.batch_size over the
            # file list; the final window is clipped to the list length.
            start = 0
            end = start + config.batch_size
            while (start < len(image_files)):
                if (end > len(image_files)):
                    end = len(image_files)
                with utils.timer('Batch[%s] prediction' % config.batch_size):
                    # assumes the per-image arrays stack into a valid batch
                    # for model.input — the Yahoo loader adds a leading
                    # batch axis, so verify the resulting feed shape.
                    batch_images = [
                        fn_load_image(image_files[i])
                        for i in range(start, end)
                    ]
                    # Keep only the NSFW-probability column ([:, 1]).
                    predictions.extend(
                        sess.run(model.predictions,
                                 feed_dict={model.input: batch_images})[:, 1])
                print('Prediction %s done.' % end)
                start = end
                end = start + config.batch_size

    # save
    PredictOutputFile = '%s/%s.csv' % (config.TestOutputDir, data_source)
    with utils.timer('Save predictions'):
        data_utils.save_predictions(image_files, labels, predictions,
                                    PredictOutputFile)

    # visualization on threshold for f1/precision/recall
    # NOTE(review): 'hisotry' looks like a typo for 'history' — confirm
    # against the values data_source can actually take before changing it.
    if (data_source == 'hisotry'):
        output_image_file = '%s/%s_vs_threshold.jpg' % (config.TestOutputDir,
                                                        level)
        with utils.timer('Save visualization for threshold'):
            plot_utils.threshold_vs_toxic(labels, predictions, level,
                                          output_image_file)
Пример #22
0
def nsfw_main(setting_file="tmp.csv"):
    """Score every image listed in *setting_file* and write scores back.

    The CSV is read with columns ``location, nsfw``; after scoring, the
    ``nsfw`` column is rewritten with the model's NSFW probability and the
    file is saved in place (no header, as it was read).

    Returns:
        int: 0 on completion.
    """
    data = pd.read_csv(setting_file, names=['location', 'nsfw'])
    tarray = []

    # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    IMAGE_LOADER_TENSORFLOW = "tensorflow"

    # Minimal stand-in for an argparse namespace.
    class args:
        pass

    args.input_file = "girl.jpg"
    args.model_weights = "data/open_nsfw-weights.npy"
    args.image_loader = IMAGE_LOADER_TENSORFLOW
    args.input_type = InputType.TENSOR.name.lower()
    model = OpenNsfwModel()
    # This is important for reset graph: repeated calls must not
    # accumulate nodes in the default graph.
    tf.reset_default_graph()

    config = tf.ConfigProto(gpu_options=tf.GPUOptions(
        visible_device_list="0",  # specify GPU number
        allow_growth=True))
    with tf.Session(config=config) as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        # Pick the image-loading mechanism matching the input type.
        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])
        sess.run(tf.global_variables_initializer())

        for index, row in data.iterrows():
            start = time.time()
            # Label-based access: positional Series indexing (row[0]) is
            # deprecated/removed in modern pandas.
            location = row['location']
            image = fn_load_image(location)
            predictions = \
                sess.run(model.predictions,
                         feed_dict={model.input: image})
            print("Results for '{}'".format(location))
            print(
                "\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
            tarray.append(predictions[0][1])
            print("elapsed_time:{0}".format(time.time() - start) + "[sec]")

    data['nsfw'] = tarray
    data.to_csv(setting_file, index=False, header=False)
    print(data)
    return 0
Пример #23
0
    tf_sess = tf.Session(config=tf_config)
    tf.import_graph_def(frozen_graph, name='')

    # input and output tensor names.
    input_tensor_name = input_node_name + ":0"
    output_tensor_name = output_node_name + ":0"

    print("input_tensor_name: {}\noutput_tensor_name: {}\n".format(
          input_tensor_name, output_tensor_name))

    print("get_tensor_by_name\n")
    output_tensor = tf.get_default_graph().get_tensor_by_name(output_tensor_name)

    print("fn_load_image\n");

    fn_load_image = create_yahoo_image_loader()

    filenames = glob.glob(args.images + "/*.jpg")
    num_files = len(filenames)

    begin = datetime.datetime.now()

    with tqdm(total=num_files) as progress_bar:
        for img_file in filenames:
            img = fn_load_image(img_file)
            # print("load '{}' file\n".format(img_file))

            feed_dict = {
                input_tensor_name: img
            }
            preds = tf_sess.run(output_tensor, feed_dict)
Пример #24
0
def main(argv):
    """Scan a video for NSFW content, estimate age/gender of detected
    faces (flagging apparent minors), and match faces against known
    identities, displaying an annotated preview while processing.

    Prints a summary of the NSFW frame percentage (and whether minors
    were detected) once the video ends.
    """
    # parse inputs
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file", help="Path to the input video.")
    parser.add_argument("id_folder",
                        type=str,
                        nargs="+",
                        help="Folder containing ID folders")
    args = parser.parse_args()

    # initialize NSFW Model
    model = OpenNsfwModel()

    with tf.Graph().as_default():
        with tf.Session() as sess:

            # set variable defaults
            videoFile = args.input_file
            cap = cv2.VideoCapture(videoFile)
            frameRate = cap.get(5)  # get the frame rate
            totalFrameCount = cap.get(7)  # get the total number of frames
            img_size = 64
            # extra border (fraction of face size) around age/gender crops
            margin = 0.4
            frameNsfw = 0
            isMinor = False
            minorDetected = False

            # set weights and initialize SFW model IsSFW
            with tf.variable_scope('IsSFW'):
                model.build(
                    weights_path="pretrained_models/open_nsfw-weights.npy")
                fn_load_image = None
                fn_load_image = create_yahoo_image_loader()
                sess.run(tf.global_variables_initializer())

            # initialize dlib face detector model and set variables
            detector = dlib.get_frontal_face_detector()
            model2 = WideResNet(img_size, 16, 8)()
            model2.load_weights("pretrained_models/weights.29-3.76_utk.hdf5")

            # initialize face identification model
            mtcnn = detect_and_align.create_mtcnn(sess, None)
            load_model("model/20170512-110547.pb")
            # max embedding distance still accepted as an identity match
            threshold = 1.0
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Load anchor IDs for face identification model
            id_data = IdData(args.id_folder[0], mtcnn, sess, embeddings,
                             images_placeholder, phase_train_placeholder,
                             threshold)

            while (cap.isOpened()):
                ret, frame = cap.read()
                frameId = cap.get(1)  # get the current frame number
                if (ret !=
                        True):  # if there is no video frame detected then exit
                    break

                # write video frame to disk and load as an image
                # (fn_load_image only accepts a file path)
                cv2.imwrite('./temp_files/temp.jpg', frame)
                image = fn_load_image('./temp_files/temp.jpg')

                # determine SFW status
                predictions = sess.run(model.predictions,
                                       feed_dict={model.input: image})
                if (predictions[0][1] >= 0.50):
                    frameNsfw = frameNsfw + 1
                    display_lbl = "NSFW"
                    AlertColor = [0, 0, 255]
                else:
                    display_lbl = "SFW"
                    AlertColor = [255, 0, 0]

                # detect faces in dlib face detection model
                image2 = frame
                image2_h, image2_w, _ = np.shape(image2)
                detected = detector(image2, 0)
                faces = np.empty((len(detected), img_size, img_size, 3))
                if len(detected
                       ) > 0:  # one or more faces were found in the frame
                    for i, d in enumerate(detected):
                        # extract the coordinates of the face
                        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right(
                        ) + 1, d.bottom() + 1, d.width(), d.height()
                        # widen the crop by `margin`, clamped to the frame
                        xw1 = max(int(x1 - margin * w), 0)
                        yw1 = max(int(y1 - margin * h), 0)
                        xw2 = min(int(x2 + margin * w), image2_w - 1)
                        yw2 = min(int(y2 + margin * h), image2_h - 1)
                        # draw a rectangle around the face
                        cv2.rectangle(image2, (x1, y1), (x2, y2), (255, 0, 0),
                                      2)
                        faces[i, :, :, :] = cv2.resize(
                            image2[yw1:yw2 + 1, xw1:xw2 + 1, :],
                            (img_size, img_size))
                        # determine the height of the rectangle in case is near top of frame
                        rectangle_height = y2 - y1

                    # predict ages and genders of faces using dlib model
                    results = model2.predict(faces)
                    predicted_genders = results[0]
                    ages = np.arange(0, 101).reshape(101, 1)
                    # expected age = sum(age * P(age)) over the 0..100 bins
                    predicted_ages = results[1].dot(ages).flatten()

                    # draw predictions by faces using dlib model
                    for i, d in enumerate(detected):
                        isMinor = False
                        # NOTE(review): int() around the comparison is a
                        # no-op for truthiness here — the bool alone decides.
                        if (int(predicted_ages[i] < 18)
                            ):  # detect if a minor is present in the video
                            isMinor = True
                            minorDetected = True
                        label = "{},{},{}".format(
                            int(predicted_ages[i]),
                            "M" if predicted_genders[i][0] < 0.5 else "F",
                            "-MINOR" if isMinor else "")
                        draw_label(image2, (d.left(), d.top()), label,
                                   rectangle_height)

                # Locate faces and landmarks in frame for identification
                face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(
                    frame, mtcnn)
                if len(face_patches) > 0:
                    face_patches = np.stack(face_patches)
                    feed_dict = {
                        images_placeholder: face_patches,
                        phase_train_placeholder: False
                    }
                    embs = sess.run(embeddings, feed_dict=feed_dict)
                    matching_ids, matching_distances = id_data.find_matching_ids(
                        embs)
                    for bb, landmark, matching_id, dist in zip(
                            padded_bounding_boxes, landmarks, matching_ids,
                            matching_distances):
                        font = cv2.FONT_HERSHEY_COMPLEX_SMALL
                        cv2.putText(frame, matching_id,
                                    (bb[0] + 30, bb[3] + 5), font, 1,
                                    (255, 0, 255), 1, cv2.LINE_AA)

                # display whether frame is SFW or not
                percentageComplete = round((frameId) / (totalFrameCount) * 100)
                display_lbl = display_lbl + " " + str(
                    percentageComplete) + "% fps= " + str(round(frameRate, 2))
                size = cv2.getTextSize(display_lbl, cv2.FONT_HERSHEY_SIMPLEX,
                                       0.4, 1)[0]
                cv2.rectangle(image2, (1, 15 - size[1]), (1 + size[0], 20),
                              AlertColor, cv2.FILLED)
                cv2.putText(image2,
                            display_lbl, (1, 19),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.4, (255, 255, 255),
                            1,
                            lineType=cv2.LINE_AA)

                # display the frame as processed as quickly as possible
                cv2.imshow('frame2', image2)
                cv2.waitKey(1)

            # end of video
            cap.release()
            cv2.destroyAllWindows()
            if os.path.isfile('temp_files/temp.jpg'):
                os.remove("temp_files/temp.jpg")

        # print summary
        if totalFrameCount > 0:
            if (frameNsfw > 0):
                if (minorDetected):
                    print("This video contains minors, and " +
                          str(round((frameNsfw / totalFrameCount * 100), 1)) +
                          "% of the video contains NSFW elements.")
                else:
                    print(
                        str(round((frameNsfw / totalFrameCount * 100), 1)) +
                        "% of the video contains NSFW elements.")
            else:
                print("Video is SFW.")
        else:
            print(
                "No video frames were detected!  Please check the file type or file name."
            )
Пример #25
0
def main(argv):
    """Classify a single jpeg with the Open NSFW model, dump the scores
    to ``data.txt`` as JSON, and return them.

    Returns:
        dict with keys ``SFW`` and ``NSFW`` mapping to the two scores.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("input_file", help="Path to the input image.\
                        Only jpeg images are supported.")
    parser.add_argument("-m", "--model_weights", required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-l", "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-t", "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[InputType.TENSOR.name.lower(),
                                 InputType.BASE64_JPEG.name.lower()])

    args = parser.parse_args()

    model = OpenNsfwModel()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        # Pick the image-loading mechanism matching the input type.
        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        image = fn_load_image(args.input_file)

        predictions = \
            sess.run(model.predictions,
                     feed_dict={model.input: image})

        print("Results for '{}'".format(args.input_file))
        print(predictions[0][0])
        print(predictions[0][1])
        print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
        # SFW : Safe For Work , NSFW : Not Safe For Work
        nude_json = {'SFW': predictions[0][0], 'NSFW': predictions[0][1]}
        result = json.dumps(nude_json, cls=MyEncoder)
        loaded_json = json.loads(result)
        #for x in loaded_json:
        #  print("%s: %f" % (x, loaded_json[x]))
        print(loaded_json)
        # Mode 'w' truncates (or creates) the file by itself; the previous
        # extra open('data.txt', 'r+') + truncate() leaked a file handle
        # and crashed when data.txt did not exist yet.
        with open('data.txt', 'w') as outfile:
            json.dump(loaded_json, outfile)

        return loaded_json
Пример #26
0
def main(argv):
    """Compare three NSFW scoring pipelines on one hard-coded test image:
    the original loader, an OpenCV-based rewrite, and a base64 input fed
    through an exported SavedModel.
    """
    parser = argparse.ArgumentParser()

    args = parser.parse_args()

    # Hard-coded test inputs (alternatives kept for reference).
    # args.input_file = "yyy-1.jpg"
    # args.input_file = "no-sexy.jpg"
    # args.input_file = "zzpic19597.jpg"
    args.input_file = "sexy.jpg"  # input image

    print('[Info] 测试图像: {}'.format(args.input_file))
    args.image_loader = IMAGE_LOADER_YAHOO
    args.input_type = InputType.TENSOR.name.lower()
    args.model_weights = "data/open_nsfw-weights.npy"

    model = OpenNsfwModel()

    fn_load_image = None

    # Pick the image-loading mechanism matching the input type.
    input_type = InputType[args.input_type.upper()]
    if input_type == InputType.TENSOR:
        if args.image_loader == IMAGE_LOADER_TENSORFLOW:
            fn_load_image = create_tensorflow_image_loader(tf.Session(graph=tf.Graph()))
        else:
            fn_load_image = create_yahoo_image_loader()
    elif input_type == InputType.BASE64_JPEG:
        fn_load_image = lambda filename: np.array([base64.urlsafe_b64encode(open(filename, "rb").read())])

    with tf.Session() as sess:
        model.build(weights_path=args.model_weights, input_type=input_type)
        sess.run(tf.global_variables_initializer())

        print('\n[Info] 原始版本')
        image = fn_load_image(args.input_file)  # original preprocessing path
        model_predict(sess, model, image, args.input_file)

        print('\n[Info] 重写OpenCV版本')
        img_np = cv2.imread(args.input_file)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
        image_v2 = process_img_opencv(img_np)  # OpenCV-based preprocessing
        model_predict(sess, model, image_v2, args.input_file)

        # model-export logic (disabled)
        # print('\n[Info] 存储模型')
        # save_model(sess, model)

    print('\n[Info] base64模型版本')
    img_np = cv2.imread(args.input_file)
    img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
    img_np = process_img_opencv(img_np)
    print('[Info] Img: {}'.format(img_np.shape))
    img_b64 = base64.urlsafe_b64encode(img_np)  # encode to base64
    img_tf = image_decode(img_b64)
    print('[Info] tf shape: {}'.format(img_tf.shape))
    # Use a context-managed session so it is closed afterwards
    # (the original bare `tf.Session().run(...)` leaked the session).
    with tf.Session() as decode_sess:
        img_np = decode_sess.run(img_tf)
    print('[Info] tf->np shape: {}'.format(img_np.shape))

    export_path = "data/model-tf"  # SavedModel directory

    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.loader.load(sess, ["serve"], export_path)
        graph = tf.get_default_graph()
        print(graph.get_operations())
        res = sess.run('predictions:0',
                       feed_dict={'input:0': img_np})
        print('[Info] 最终结果: {}'.format(res))

    print('[Info] 性感值: {}'.format(res[0][1] * 100.0))
Пример #27
0
def _list_image_keys(s3, bucket_name, prefix):
    """Return every object key under *prefix* in *bucket_name*.

    Follows S3 pagination via ``NextContinuationToken``.  Handles the
    empty-result case: ``list_objects_v2`` omits the ``Contents`` key
    entirely when nothing matches the prefix, which the original code
    did not guard against (it iterated ``None``).
    """
    images = []
    next_token = ''
    while True:
        kwargs = dict(Bucket=bucket_name,
                      Delimiter='|',
                      EncodingType='url',
                      MaxKeys=1000,
                      Prefix=prefix,
                      FetchOwner=False)
        if next_token:
            kwargs['ContinuationToken'] = next_token
        response = s3.list_objects_v2(**kwargs)
        # 'Contents' is absent when the prefix matches no objects.
        for item in response.get('Contents') or []:
            images.append(item.get('Key'))
        next_token = response.get('NextContinuationToken')
        if not next_token:
            break
        print(next_token)
    return images


def main(argv):
    """Batch-score all images under an S3 prefix with the OpenNSFW model.

    Downloads each object to a temporary file, classifies it, and appends
    "<url>, <score>" lines to the output file.

    Note: ``argv`` is accepted for signature compatibility, but argparse
    reads ``sys.argv`` directly (original behavior preserved).
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "path",
        help="Path to the input images. Only jpeg images are supported.")

    parser.add_argument("-b",
                        "--bucket",
                        required=True,
                        help="AWS S3 bucket name")

    parser.add_argument("-m",
                        "--model_weights",
                        required=True,
                        help="Path to trained model weights file")

    parser.add_argument("-o",
                        "--output",
                        required=True,
                        help="Path to output result file")

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-t",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[
                            InputType.TENSOR.name.lower(),
                            InputType.BASE64_JPEG.name.lower()
                        ])

    args = parser.parse_args()

    # Verify the requested bucket exists for this account before doing work.
    s3 = boto3.client('s3')
    bucket_name = ''
    for bucket in s3.list_buckets().get('Buckets'):
        if bucket.get('Name') == args.bucket:
            bucket_name = bucket.get('Name')

    if not bucket_name:
        print("Bucket {} not available".format(args.bucket))
        exit(-1)

    images = _list_image_keys(s3, bucket_name, args.path)

    model = OpenNsfwModel()

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]
        model.build(weights_path=args.model_weights, input_type=input_type)

        fn_load_image = None

        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64
            fn_load_image = lambda filename: np.array(
                [base64.urlsafe_b64encode(open(filename, "rb").read())])

        sess.run(tf.global_variables_initializer())

        # 'with' guarantees the output file is closed even on error
        # (the original leaked the handle on any exception).
        with open(args.output, "a") as output:
            for item in images:
                # mkstemp returns (fd, path); the original never closed
                # the fd, leaking one descriptor per image.
                fd, temp_path = tempfile.mkstemp()
                os.close(fd)
                try:
                    s3.download_file(bucket_name, item, temp_path)
                    try:
                        image = fn_load_image(temp_path)
                    except IOError:
                        # BUG FIX: the original printed and fell through,
                        # then scored a stale image from the previous
                        # iteration (NameError on the first). Skip the key.
                        print("Read Image Error")
                        continue
                    predictions = sess.run(model.predictions,
                                           feed_dict={model.input: image})
                finally:
                    # Remove the temp file even when download/load fails
                    # (the original only cleaned up on the success path).
                    if os.path.exists(temp_path):
                        os.remove(temp_path)
                output.write(
                    "https://www.themebeta.com/media/cache/400x225/files/{}, {}\r\n"
                    .format(item, predictions[0][0]))
                print(
                    "Results for https://www.themebeta.com/media/cache/400x225/files/{} : {}"
                    .format(item, predictions[0][0]))
        choices=['ml-engine', 'tf-serving'],
        help="Create json request for ml-engine or tensorflow-serving")

    args = parser.parse_args()
    target = args.target

    input_type = InputType[args.input_type.upper()]

    image_data = None

    if input_type == InputType.TENSOR:
        fn_load_image = None

        if args.image_loader == IMAGE_LOADER_TENSORFLOW:
            with tf.Session() as sess:
                fn_load_image = create_tensorflow_image_loader(sess)
                sess.run(tf.global_variables_initializer())
                image_data = fn_load_image(args.input_file)[0]
        else:
            image_data = create_yahoo_image_loader(
                tf.Session(graph=tf.Graph()))(args.input_file)[0]
    elif input_type == InputType.BASE64_JPEG:
        import base64
        image_data = base64.urlsafe_b64encode(
            open(args.input_file, "rb").read()).decode("ascii")

    if target == "ml-engine":
        print(json.dumps({PREDICT_INPUTS: image_data}, cls=NumpyEncoder))
    elif target == "tf-serving":
        print(json.dumps({"instances": [image_data]}, cls=NumpyEncoder))