# Train an InfoGAN on MNIST: generator with a 10-way categorical latent code,
# a mirrored discriminator, and a Q head that recovers the code from the
# discriminator's penultimate features.
nbatch = 128
x, y, stream = get_mnist(nbatch)

g = InfoGenerator(
    g_size=(1, 28, 28),
    g_nb_filters=64,
    g_nb_noise=100,  # not coding but noise
    g_scales=2,
    g_FC=[1024],
    g_info=[
        CategoryDist(n=10, lmbd=1e-3),
        # UniformDist(min=-1.0,max=+1.0,lmbd=1e-3,stddev_fix=True),
        # UniformDist(min=-1.0,max=+1.0,lmbd=1e-3,stddev_fix=True),
        # UniformDist(min=-1.0,max=+1.0,lmbd=1e-3,stddev_fix=True),
    ],
    g_init=InitNormal(scale=0.02),
)

d = Discriminator(
    d_size=g.g_size,
    d_nb_filters=64,
    d_scales=2,
    d_FC=[1024],
    d_init=InitNormal(scale=0.02),
)

# Q network: maps the discriminator's second-to-last layer output to the
# Qdim-dimensional latent-code reconstruction.
Q = Sequential([
    Dense(200, batch_input_shape=d.layers[-2].output_shape),
    BN(),
    Activation('relu'),
    Dense(g.g_info.Qdim),
])

gan = InfoGAN(generator=g, discriminator=d, Qdist=Q)

from keras.optimizers import Adam, SGD, RMSprop
gan.fit(stream,
        save_dir='./samples/mnist_info',
        k=2,
        nmax=nbatch*100,
        nbatch=nbatch,
        # opt=RMSprop(lr=0.0005))
        opt=Adam(lr=0.0001))
from GAN.utils.data import transform_skeleton, inverse_transform_skeleton
from GAN.utils.init import InitNormal
from keras.optimizers import Adam, SGD, RMSprop

if __name__ == '__main__':
    # Train a vanilla GAN mapping 500-d noise to 8-channel skeleton/parsing
    # maps of size (npxh, npxw); data comes from the project-local loader.
    nbatch = 128
    nmax = nbatch * 100
    npxw, npxh = 64, 128

    from load import people, load_all
    va_data, tr_stream, _ = people(pathfile='protocol/PPPS.txt',
                                   size=(npxw, npxh),
                                   batch_size=nbatch)

    g = Generator(g_size=(8, npxh, npxw),
                  g_nb_filters=128,
                  g_nb_coding=500,
                  g_scales=4,
                  g_init=InitNormal(scale=0.002))  # , g_FC=[5000]
    d = Discriminator(d_size=g.g_size,
                      d_nb_filters=128,
                      d_scales=4,
                      d_init=InitNormal(scale=0.002))  # , d_FC=[5000]
    gan = GAN(g, d)

    # FIX: removed a duplicated `from keras.optimizers import ...` here —
    # the optimizers are already imported at the top of this file.
    gan.fit(tr_stream,
            save_dir='./samples/parsing_skeleton/',
            k=1,
            nbatch=nbatch,
            nmax=nmax,
            opt=Adam(lr=0.0002, beta_1=0.5, decay=1e-5),
            # opt=RMSprop(lr=0.01),
            transform=transform_skeleton,
            inverse_transform=inverse_transform_skeleton)
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from facenet_pytorch import InceptionResnetV1

from GAN.models import Discriminator

# Face-recognition inference: FaceNet embedding -> cosine k-NN over an
# enrolled gallery for identity, plus a GAN-discriminator score on the image.
# FIX: facenet-pytorch's pretrained weight set is named 'casia-webface'
# (hyphen, single 's'); 'cassia_webface' is not a valid option and fails.
# FIX: .eval() puts batch-norm/dropout into inference mode for embeddings.
resnet = InceptionResnetV1(pretrained='casia-webface').eval()

# Gallery CSV: one row per enrolled face — embedding columns plus a 'names'
# label column (schema assumed from the fit() call below; verify the file).
gallery = pd.read_csv('path/to/gallery')

disc = Discriminator()

knn_clf = KNeighborsClassifier(weights='distance', metric='cosine')
knn_clf.fit(gallery.drop('names', axis=1), gallery['names'])


def get_prediction(image_bytes):
    """Return (predicted_name, discriminator_score) for a raw image payload.

    Parameters
    ----------
    image_bytes : bytes
        Raw encoded image. `prepare_image` (defined elsewhere in the
        project) is assumed to convert it into the batched tensor both
        networks expect — TODO confirm its output shape.
    """
    tensor = prepare_image(image_bytes=image_bytes)
    embedding = resnet(tensor).detach().cpu().numpy()
    name = knn_clf.predict(embedding)
    score = disc(tensor).item()
    return name, score
if __name__ == '__main__':
    # Initialize generator/discriminator from autoencoder pretraining,
    # then continue training as an AEGAN on MNIST.
    nbatch = 128
    x, y, stream = get_mnist(nbatch)

    g = Generator(g_size=(1, 28, 28),
                  g_nb_filters=64,
                  g_nb_coding=200,
                  g_scales=2,
                  g_FC=[1024],
                  g_init=InitNormal(scale=0.05))
    d = Discriminator(d_size=g.g_size,
                      d_nb_filters=64,
                      d_scales=2,
                      d_FC=[1024],
                      d_init=InitNormal(scale=0.05))

    # Warm-start both nets from the autoencoder checkpoints; printing the
    # first weight tensor's sum is a cheap sanity check that loading worked.
    # FIX: print as a function call (same single-arg output on Python 2,
    # valid on Python 3) instead of the py2-only print statement.
    g.load_weights('models/mnist_ae_g.h5')
    print(g.get_weights()[0].sum())
    d.load_weights('models/mnist_ae_d.h5')
    print(d.get_weights()[0].sum())

    # gan = GAN(g, d)
    gan = AEGAN(g, d)

    from keras.optimizers import Adam, SGD, RMSprop
    # TODO(review): the source chunk is truncated mid-call; the original
    # fit(...) presumably passed further kwargs (e.g. nmax, opt=...) —
    # the call is closed minimally here. Confirm against the full file.
    gan.fit(stream,
            save_dir='./samples/mnist',
            k=1,
            nbatch=nbatch)
# InfoGAN on CUHK01 pedestrian crops with a single continuous latent code.
# NOTE(review): `nbatch`, `npxw`, `npxh` and the `people` loader come from
# earlier in the file — this chunk is an interior fragment.
va_data, tr_stream, _ = people(pathfile='protocol/cuhk01-train.txt',
                               size=(npxw, npxh),
                               batch_size=nbatch)

g = InfoGenerator(g_size=(3, npxh, npxw),
                  g_nb_filters=128,
                  g_nb_noise=500,  # not coding but noise
                  g_scales=3,
                  g_FC=None,
                  g_info=[
                      # CategoryDist(n=10, lmbd=1e-3),
                      UniformDist(min=-1, max=+1, lmbd=1e-3),
                  ],
                  g_init=InitNormal(scale=0.002))
d = Discriminator(d_size=g.g_size,
                  d_nb_filters=128,
                  d_scales=3,
                  d_FC=None,
                  d_init=InitNormal(scale=0.002))

# Q head: maps the discriminator's penultimate features back to the
# Qdim-dimensional latent-code reconstruction.
Q = Sequential([
    Dense(500, batch_input_shape=d.layers[-2].output_shape),
    BN(),
    Activation('relu'),
    Dense(g.g_info.Qdim),
])

gan = InfoGAN(generator=g, discriminator=d, Qdist=Q)

from keras.optimizers import Adam, SGD, RMSprop
# TODO(review): the source chunk is truncated mid-call; the original
# fit(...) presumably passed further kwargs (e.g. nmax, opt=...) —
# the call is closed minimally here. Confirm against the full file.
gan.fit(tr_stream,
        save_dir='./samples/cuhk01_info_small/',
        k=1,
        nbatch=nbatch)