Example #1
def cal_fid():
    """
    Compare against the real images: each image is run through the Illustration2Vec feature extractor to obtain a 1x4096 vector,
    then the FID is computed.
    :return:fid
    """

    illust2vec = i2v.make_i2v_with_chainer("illust2vec_ver200.caffemodel")
    # Feature vectors of the real images
    root = "resize/true/"
    paths = os.listdir(root)
    true_list = []
    for path in paths:
        if "jpg" in path:
            true_list.append(Image.open(root + path))
    res_true = []
    for i in range(opt.batch_size):
        result_real = illust2vec.extract_feature([true_list[i]])
        res_true.append(result_real)
        # print(str(i))
    true_vec = np.concatenate(tuple(res_true), axis=0)
    mu1 = true_vec.mean(axis=0)
    sigma1 = np.cov(true_vec, rowvar=False)
    print("true_vec_done")

    fid1 = temp_cal("resize/fake/", 256, mu1, sigma1)
    print("fid1:" + str(fid1))
    fid2 = temp_cal("resize/fake2/40/", 64, mu1, sigma1)
    print("fid2:" + str(fid2))
    fid3 = temp_cal("resize/fake3/40/", 64, mu1, sigma1)
    print("fid2:" + str(fid3))
Example #2
def main(args):
    # Read the command-line arguments
    print(args)
    src_dir = args.input_dir
    out_dir = args.out_dir
    threshold = float(args.threshold)
    vectorize = args.vectorize
    title = os.path.split(src_dir)[0].split('/')[len(os.path.split(src_dir)[0].split('/')) - 2]
    if vectorize == "False":
        out_file = out_dir + '/' +  title + '_tags' + str(threshold) + '_output.csv'
        f = open(out_file, 'w')
        f.close()
        #read model
        print("start reading tag model...")
        illust2vec = i2v.make_i2v_with_chainer(
            "illust2vec_tag_ver200.caffemodel", "tag_list.json")
        print("finished reading model")
    if vectorize == "True":
        out_file = out_dir + '/' +  title + '_vecs' + '_output.csv'
        f = open(out_file, 'w')
        f.close()
        #read model
        print("start reading vec model...")
        #don't have to use tag model and don't have to include tag_list.json
        illust2vec = i2v.make_i2v_with_chainer(
            "illust2vec_ver200.caffemodel")
        print("finished reading model")
    # In the case of caffe, please use i2v.make_i2v_with_caffe instead:
    # illust2vec = i2v.make_i2v_with_caffe(
    #     "illust2vec_tag.prototxt", "illust2vec_tag_ver200.caffemodel",
    #     "tag_list.json")
    files = []
    jpgs = []
    for x in os.listdir(src_dir):
        if os.path.isfile(src_dir + x):
            files.append(x) 
    for y in files:
        if(y[-4:] == '.jpg'):     # if the last four characters of the file name are '.jpg'
            jpgs.append(y)  # add it to the list
    for file_name in jpgs:
        file_path = src_dir + '/' + file_name
        vectorize_f(illust2vec, file_path, out_file, threshold, vectorize)
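
`vectorize_f` is not defined in this snippet. The sketch below shows one plausible implementation, assuming it appends one CSV row per image: the tags above `threshold` when the tag model is loaded, or the raw 4096-dimensional feature vector when the feature model is loaded. The CSV layout is an assumption.

import csv
import os

from PIL import Image


def vectorize_f(illust2vec, file_path, out_file, threshold, vectorize):
    img = Image.open(file_path)
    with open(out_file, 'a', newline='') as f:
        writer = csv.writer(f)
        if vectorize == "True":
            # Feature model: one 4096-dimensional vector per image.
            vec = illust2vec.extract_feature([img])[0]
            writer.writerow([os.path.basename(file_path)] + list(vec))
        else:
            # Tag model: plausible tags whose probability exceeds the threshold.
            tags = illust2vec.estimate_plausible_tags([img], threshold=threshold)[0]
            writer.writerow([os.path.basename(file_path)] +
                            [tag for tag, _ in tags['general']])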
Example #3
def main():

    # Connect to S3
    bucket_name = 'pixiv-image-backet'
    client = boto3.client('s3')
    s3 = boto3.resource('s3')
    sbucket = Session().resource('s3').Bucket(bucket_name)
    keys = [
        obj.key
        for obj in sbucket.objects.filter(Prefix='origin_size_face_images')
    ]

    client.download_file(bucket_name, 'image_tag.csv', './image_tag.csv')
    image_tag_df = pd.read_csv('image_tag.csv').drop('Unnamed: 0', axis=1)

    # Read hair color list
    with open('tag.json', 'r') as f:
        tag_list = json.load(f)
    hair_color_list = tag_list['hair_color']

    # Except completed images
    completed_image_list = [
        'origin_size_face_images/' + img_name
        for img_name in list(image_tag_df['image name'])
    ]
    keys = list(set(keys) - set(completed_image_list))

    # Load illustration2vec
    illust2vec = i2v.make_i2v_with_chainer(
        './i2v/illust2vec_tag_ver200.caffemodel', './i2v/tag_list.json')

    # Tag extraction
    for key in keys:
        print(key)
        image_name = key[len('origin_size_face_images/'):]  # str.lstrip strips a character set, not a prefix
        client.download_file(bucket_name, key, './tmp_image.png')
        image = Image.open('./tmp_image.png')
        result = illust2vec.estimate_plausible_tags([image], threshold=0.2)

        image_tag_df = image_tag_df.append(pd.Series(
            [image_name, None], index=image_tag_df.columns),
                                           ignore_index=True)
        for tag, _ in result[0]['general']:
            if tag in hair_color_list:
                image_tag_df.loc[image_tag_df['image name'] == image_name,
                                 'hair color'] = tag
                break

    # Upload to S3
    image_tag_df.to_csv('image_tag.csv')
    sbucket.upload_file('./image_tag.csv', 'image_tag.csv')
Example #4
def data_import(width, height):
    image = np.empty((0, height, width, DIM), dtype=np.uint8)
    paths = glob.glob(TRAIN_IMAGE_PATH)
    for path in paths:
        im_reading = Image.open(path).resize((width, height))
        im_reading = im_reading.convert("RGB")
        im_reading = np.array(im_reading)
        print(im_reading.shape)
        #im_reading = im_reading.transpose(1,0,2)
        print(path)
        image = np.append(image, [im_reading], axis=0)

    try:
        with open('tags.pickle', 'rb') as f:
            estimated_tags = pickle.load(f)
            print("load from pickle")

    except:

        try:
            with open('illust2vec.pickle', 'rb') as f:
                illust2vec = pickle.load(f)
                print("pickle i2v")
        except:
            illust2vec = i2v.make_i2v_with_chainer(
                "./i2v/illust2vec_tag_ver200.caffemodel",
                "./i2v/tag_list.json")
            with open('illust2vec.pickle', 'wb') as f:
                pickle.dump(illust2vec, f)
            print("new i2v")

        batch_size = 5
        estimated_tags = np.zeros((0, NUMBER_OF_TAG))
        for i in range(math.floor(len(image) / batch_size + 1)):
            print(str(i) + "/" + str(math.floor(len(image) / batch_size + 1)))
            if len(image) < batch_size * (i + 1):
                batch = np.array(image[batch_size * i:])
            else:
                batch = np.array(image[batch_size * i:batch_size * (i + 1)])
            print(estimated_tags.shape)

            estimated_tags = np.append(estimated_tags,
                                       illust2vec.extract_feature(batch),
                                       axis=0)

        with open('tags.pickle', 'wb') as f:
            pickle.dump(estimated_tags, f)

    return image, estimated_tags
Example #5
def get_main_tag(img: Image) -> list:
    """
    Estimate plausible tags for an image and keep only those present in utils.tag_map.

    :param img: PIL.Image
    :return: list of tag names
    """
    illust2vec_tag = i2v.make_i2v_with_chainer(config.tag_model_path,
                                               config.tag_list_json)

    result = illust2vec_tag.estimate_plausible_tags(
        [img], threshold=config.tag_threshold)[-1]['general']

    feat = []
    for each in result:
        if each[0] in utils.tag_map.keys():
            feat.append(each[0])
    return feat
Example #6
def get_model():
    illust2vec = []

    p = os.path.abspath(os.path.join("../", os.pardir))

    with open(p + "/config.json") as json_data:
        config = json.load(json_data)

    try:
        illust2vec_f = open(config['directories']['ill2vec_ser'], "rb")
        illust2vec = pickle.load(illust2vec_f)
        illust2vec_f.close()
    except IOError:
        illust2vec = i2v.make_i2v_with_chainer(
            config['directories']['ill2vec_model'],
            config['directories']['ill2vec_tag_list'])
        save_model = open(config['directories']['ill2vec_ser'], "wb")
        pickle.dump(illust2vec, save_model)
        save_model.close()
    return illust2vec
Example #7
#!/usr/bin/env python2

import time

import i2v
from PIL import Image

HANDLER_NAME = "i2v_offline"
MAXSIZE = (2500, 2500)

print "Loading model..."
start = time.time()

illust2vec = i2v.make_i2v_with_chainer("data/i2v_model.caffemodel", "data/i2v_tag_list.json")

end   = time.time()
print "Done. Took " + str(end - start) + "s"

def run(fname):
    def fst(p):
        return p[0]

    print "Doing inference (i2v - offline)..."
    start = time.time()

    img = Image.open(fname)
    if img.size[0] > MAXSIZE[0] or img.size[1] > MAXSIZE[1]:
        print "Image too large. Resizing."
        img.thumbnail(MAXSIZE, Image.ANTIALIAS)

    res = fst(illust2vec.estimate_plausible_tags([img], threshold=0.5))
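    # estimate_plausible_tags returns one dict per input image; with the tag
    # model the keys are typically 'general', 'character', 'copyright' and
    # 'rating', each mapping to (tag, probability) pairs (see Example #3).
    # A hedged sketch of how run() might continue and report the result:
    for tag, prob in res["general"]:
        print "%s: %.3f" % (tag, prob)

    end = time.time()
    print "Done. Took " + str(end - start) + "s"
    return res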
Example #8
import sys
import pickle
sys.path.append("/Users/KOKI/PerfectMakeGirls/python/i2v")
import i2v
from keras.models import Sequential
from keras.layers import Dense, Activation, BatchNormalization

TRAIN_IMAGE_PATH = "/Users/KOKI/Documents/TrainData3/*"
GENERATED_IMAGE_PATH = "/Users/KOKI/Documents/Generated/"
BATCH_SIZE = 200
NUM_EPOCH = 10
DIM = 3
NUMBER_OF_TAG = 1539

try:
    with open('illust2vec.pickle', 'rb') as f:
        illust2vec = pickle.load(f)
except:
    illust2vec = i2v.make_i2v_with_chainer(
        "./i2v/illust2vec_tag_ver200.caffemodel", "./i2v/tag_list.json")
    with open('illust2vec.pickle', 'wb') as f:
        pickle.dump(illust2vec, f)


def generator_model(width, height):
    model = Sequential()
    model.add(
        Dense(input_dim=100, output_dim=128, bias_initializer='he_uniform'))
    model.add(Activation('tanh'))
    model.add(Dense(width * height))
    model.add(Activation('tanh'))
    model.add(Dense(width * height * 4))
    model.add(Activation('tanh'))
    model.add(Dense(width * 4 * height * 4 * 4))
    model.add(BatchNormalization())
Example #9
def get_model():
    return i2v.make_i2v_with_chainer(
        '/usr/src/data/illustration2vec/illust2vec_tag_ver200.caffemodel',
        '/usr/src/data/illustration2vec/tag_list.json'
    )
Example #10
import pickle
import codecs
import glob
import i2v
import os
from PIL import Image


def getFileList(base):
    searchPath = "./" + base + "/*/*.jpg"
    flist = glob.glob(searchPath)
    flist.sort()
    return flist


print("load i2v")
clf = i2v.make_i2v_with_chainer("illust2vec_tag_ver200.caffemodel",
                                "tag_list.json")
print("load finish")


def I2vTagging(fname, output):
    img = Image.open(fname)
    data = clf.estimate_plausible_tags([img], threshold=0.0)
    fout = codecs.open(output + "pkl", "wb")
    pickle.dump(data, fout)
    fout.close()
    fout = codecs.open(output, "w", encoding="utf-8")
    fout.write(str(data))
    fout.close()


#    print(data)
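
A minimal driver sketch for the two helpers above; the directory name "images" and the output naming are assumptions.

for fname in getFileList("images"):
    # Writes <name>.txt (human-readable tags) and <name>.txt.pkl (pickled result).
    I2vTagging(fname, fname + ".txt")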
Example #11
    def __init__(self, model, list):
        self.evaluator = i2v.make_i2v_with_chainer(model, list)
Example #12
def data_import(width, height):
    try:
        with open('images.pickle', 'rb') as f:
            image = pickle.load(f)
            print("image load from pickle")
    except:
        image = np.empty((0, height, width, DIM), dtype=np.uint8)
        paths = glob.glob(TRAIN_IMAGE_PATH)
        for path in paths:
            im_reading = Image.open(path).resize((width, height))
            if im_reading.mode == "RGB":
                im_reading = np.array(im_reading)

            else:
                im_reading = im_reading.convert("RGB")
                im_reading = np.array(im_reading)
                for j in range(len(im_reading)):
                    for k in range(len(im_reading[0])):
                        if np.all(im_reading[j][k] == [71, 112, 76]) or np.all(
                                im_reading[j][k] == [0, 0, 0]) or np.all(
                                    im_reading[j][k] == [76, 105, 113]):
                            im_reading[j][k] = [255, 255, 255]

                            # Special-case handling for RGB (71,112,76) and (75,105,113); let me know if there is a better way.
            print(path)

            #im_reading = im_reading.transpose(1,0,2)
            image = np.append(image, [im_reading], axis=0)

            with open('images.pickle', 'wb') as f:
                pickle.dump(image, f)
        print("new image")
    try:
        with open('tags.pickle', 'rb') as f:
            estimated_tags = pickle.load(f)
            print("load from pickle")

    except:

        try:
            with open('illust2vec.pickle', 'rb') as f:
                illust2vec = pickle.load(f)
                print("pickle i2v")
        except:
            illust2vec = i2v.make_i2v_with_chainer(
                "./i2v/illust2vec_tag_ver200.caffemodel",
                "./i2v/tag_list.json")
            with open('illust2vec.pickle', 'wb') as f:
                pickle.dump(illust2vec, f)
            print("new i2v")

        batch_size = 5
        estimated_tags = np.zeros((0, NUMBER_OF_TAG))
        for i in range(math.floor(len(image) / batch_size + 1)):
            print(str(i) + "/" + str(math.floor(len(image) / batch_size + 1)))
            if len(image) < batch_size * (i + 1):
                batch = np.array(image[batch_size * i:])
            else:
                batch = np.array(image[batch_size * i:batch_size * (i + 1)])
            print(estimated_tags.shape)

            estimated_tags = np.append(estimated_tags,
                                       illust2vec.extract_feature(batch),
                                       axis=0)

        with open('tags.pickle', 'wb') as f:
            pickle.dump(estimated_tags, f)

    return image, estimated_tags
Example #13
    except:
        pass


if __name__ == '__main__':

    try:
        # Tries to load the illustration2vec model
        illust2vec_f = open(
            "Illustration2Vector/illustration2vec_master/illust2vec.pickle",
            "rb")
        illust2vec = pickle.load(illust2vec_f)
        illust2vec_f.close()
    except IOError:
        illust2vec = i2v.make_i2v_with_chainer(
            "Illustration2Vector/illustration2vec-master/illust2vec_tag_ver200.caffemodel",
            "Illustration2Vector/illustration2vec-master/tag_list.json")
        save_model = open(
            "Illustration2Vector/illustration2vec_master/illust2vec.pickle",
            "wb")
        pickle.dump(illust2vec, save_model)
        save_model.close()

    icons = []
    screenshots = []
    description = ""
    # Gets all the content from database
    db = sqlite3.connect('./API to download database/app_info_big.db')
    c = db.cursor()

    explicit_content = []
Example #15
import i2v
import cv2
import glob
import os
from imageio import imread
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pickle

# Collect the image paths
images = glob.glob('characters/*.jpg')
print(len(images))

# Load the two models
illust2vec = i2v.make_i2v_with_chainer('illust2vec_tag_ver200.caffemodel', 'tag_list.json')
cascade = cv2.CascadeClassifier('lbpcascade_animeface.xml')
OUTPUT_DIR = 'faces/'
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)

# Extract all the face crops; 27,772 were detected in total
num = 0
for x in tqdm(range(len(images))):
    img_path = images[x]
    image = cv2.imread(img_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(64, 64))
    for (x, y, w, h) in faces:
        cx = x + w // 2
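        # The snippet is cut off here; a hedged sketch of how the crop-and-save
        # step might continue (the enlargement factor, output size and file
        # naming are assumptions): take a roughly square crop centred on the
        # detected face and write it into OUTPUT_DIR.
        cy = y + h // 2
        half = int(min(w, h) * 0.75)
        x0, y0 = max(cx - half, 0), max(cy - half, 0)
        x1, y1 = min(cx + half, image.shape[1]), min(cy + half, image.shape[0])
        face = cv2.resize(image[y0:y1, x0:x1], (128, 128))
        cv2.imwrite(os.path.join(OUTPUT_DIR, '%d.jpg' % num), face)
        num += 1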