def test(generated_images_dir, annotations_file_test):
    """Run the full evaluation suite over images in *generated_images_dir*:
    inception score, SSIM, L1, and the masked variants of each (masks built
    from *annotations_file_test*). Results are printed, not returned."""
    print(generated_images_dir, annotations_file_test)
    print("Loading images...")
    originals, references, fakes, img_names = load_generated_images(generated_images_dir)

    print("Compute inception score...")
    is_full = get_inception_score(fakes)
    print("Inception score ", is_full)
    print(generated_images_dir)

    print("Compute structured similarity score (SSIM)...")
    ssim_full = ssim_score(fakes, references)
    print("SSIM score ", ssim_full)
    print(generated_images_dir)

    print("Compute l1 score...")
    l1_full = l1_score(fakes, references)
    print("L1 score ", l1_full)

    print("Compute masked inception score...")
    fakes_masked = create_masked_image(img_names, fakes, annotations_file_test)
    refs_masked = create_masked_image(img_names, references, annotations_file_test)
    is_masked = get_inception_score(fakes_masked)
    print("Inception score masked ", is_masked)
    print(generated_images_dir)

    print("Compute masked SSIM...")
    ssim_masked = ssim_score(fakes_masked, refs_masked)
    print("SSIM score masked ", ssim_masked)

    print("Compute masked l1 score...")
    l1_masked = l1_score(fakes_masked, refs_masked)
    print("L1 score masked ", l1_masked)

    # one-line summary of everything computed above
    print("IS", is_full, " masked IS ", is_masked, " SSIM ", ssim_full,
          " masked SSIM ", ssim_masked, " l1 ", l1_full, " masked l1 ", l1_masked)
def test(generated_images_dir, annotations_file_test):
    """Compute IS/SSIM/L1 on the generated images plus masked IS/SSIM,
    printing each score as it is produced."""
    print(generated_images_dir, annotations_file_test)
    print("Loading images...")
    (originals, references, fakes,
     img_names) = load_generated_images(generated_images_dir)

    print("Compute inception score...")
    is_full = get_inception_score(fakes)
    print("Inception score %s" % is_full[0])

    print("Compute structured similarity score (SSIM)...")
    ssim_full = ssim_score(fakes, references)
    print("SSIM score %s" % ssim_full)

    print("Compute l1 score...")
    l1_full = l1_score(fakes, references)
    print("L1 score %s" % l1_full)

    print("Compute masked inception score...")
    fakes_masked = create_masked_image(img_names, fakes, annotations_file_test)
    refs_masked = create_masked_image(img_names, references, annotations_file_test)
    is_masked = get_inception_score(fakes_masked)
    print("Inception score masked %s" % is_masked[0])

    print("Compute masked SSIM...")
    ssim_masked = ssim_score(fakes_masked, refs_masked)
    print("SSIM score masked %s" % ssim_masked)

    # summary line (note: masked L1 is not computed in this variant)
    print(
        "Inception score = %s, masked = %s; SSIM score = %s, masked = %s; l1 score = %s"
        % (is_full, is_masked, ssim_full, ssim_masked, l1_full))
def main():
    """CLI entry point: load a folder of images and print their
    inception score mean and standard deviation."""
    cli = parse_args()
    image_array = images_to_array(cli.images_folder)
    mean_val, std_val = inception_score.get_inception_score(image_array)
    print('Inception score mean: {}'.format(mean_val))
    print('Inception score std : {}'.format(std_val))
def score(path):
    """Compute the inception score for every .tif image under *path*.

    Each image is loaded, given a trailing channel axis, min/max-normalised
    into [0, 255], then (best-effort) converted to RGB. The mean/std are
    written to <path>/inception_score.txt and printed.
    """
    print("--------------------------------")
    print('path:', path)
    print("--------------------------------")
    imgs = []
    for filename in os.listdir(path):
        # original behaviour kept: substring match, not an extension check
        if '.tif' in filename:
            img = numpy.array(imread(os.path.join(path, filename)))
            img = img.reshape(img.shape + (1, ))
            # per-image min/max normalisation into [0, 255]
            # NOTE(review): divides by zero for a constant image — confirm inputs vary
            img = ((img - img.min()) * 255.0 / (img.max() - img.min()))
            imgs.append(img)
    try:
        imgs = grayscale_to_rgb(imgs)
    except Exception:
        # FIX: was a bare `except:` — keep the best-effort fallback but stop
        # swallowing SystemExit/KeyboardInterrupt.
        print('Not able to convert to rgb')
    print('min : ', numpy.min(imgs[0]))
    print('max : ', numpy.max(imgs[0]))
    result = get_inception_score(imgs)
    # FIX: redundant f.close() removed — the with-block already closes the file
    with open('%s/inception_score.txt' % path, 'w') as f:
        f.write('inception score...\n')
        f.write('mean : %s\n' % result[0])
        f.write('std : %s\n' % result[1])
    print('scoring finished!')
    print('mean : %s\n' % result[0])
    print('std : %s\n' % result[1])
def score(path):
    """Compute the inception score for every .jpg image under *path*.

    Writes the mean/std to <path>/inception_score.txt and prints them.
    """
    print("--------------------------------")
    print('path:', path)
    print("--------------------------------")
    imgs = []
    for filename in os.listdir(path):
        # FIX: idiomatic extension check (equivalent to filename[-4:] == '.jpg')
        if filename.endswith('.jpg'):
            imgs.append(scipy.misc.imread(os.path.join(path, filename)))
    print('min : ', np.min(imgs[0]))
    print('max : ', np.max(imgs[0]))
    result = get_inception_score(imgs)
    # FIX: redundant f.close() removed — the with-block already closes the file
    with open('%s/inception_score.txt' % path, 'w') as f:
        f.write('inception score...\n')
        f.write('mean : %s\n' % result[0])
        f.write('std : %s\n' % result[1])
    print('scoring finished!')
    print('mean : %s\n' % result[0])
    print('std : %s\n' % result[1])
def test(generated_images_dir):
    """Evaluate generated images (IS, SSIM, FID, LPIPS) and append a
    summary line to score.txt."""
    # load images
    print("Loading images...")
    input_images, target_images, generated_images, names = load_generated_images(
        generated_images_dir)

    print("Compute inception score...")
    inception_score = get_inception_score(generated_images)
    print("Inception score %s" % inception_score[0])

    print("Compute structured similarity score (SSIM)...")
    structured_score = ssim_score(generated_images, target_images)
    print("SSIM score %s" % structured_score)

    print("Compute FID score...")
    FID_score = fid_score.calculate_fid_given_paths(
        [generated_images, target_images], 1, dims=2048)
    print("FID score %s" % FID_score)

    print("Compute LPIPS score...")
    LPIPS_score = lpips_score(generated_images, target_images)
    print("LPIPS score %s" % LPIPS_score)

    msg = "Inception score = %s; SSIM score = %s; FID score = %s; LPIPS score = %s" % (
        inception_score, structured_score, FID_score, LPIPS_score)
    print(msg)
    # FIX: the original opened score.txt eagerly with open()/close(), leaking
    # the handle if any metric raised; a with-block opened at write time
    # guarantees the file is closed.
    with open('score.txt', 'a+') as f:
        f.writelines('\nTarget image dir %s\n' % generated_images_dir)
        f.writelines("%s\n\n" % msg)
def evaluate(self, num_samples):
    """Evaluates probability of test set via Gaussian parzen window
    Currently assumes MNIST
    """
    samples = []
    # draw num_samples images in batches of 64 with random one-hot labels
    for _ in range(num_samples // 64):
        sample_z = np.random.uniform(-1, 1, size=(64, self.z_dim))
        labels = np.zeros([64, 10])
        for row in range(64):
            labels[row, np.random.randint(0, 10)] = 1
        batch_out = self.sess.run(
            [self.sampler],
            feed_dict={self.z: sample_z, self.y: labels})[0]
        samples.extend(batch_out)
    # convert grayscale samples to uint8 RGB for the inception model
    images = [skimage.img_as_ubyte(np.squeeze(gray2rgb(s))) for s in samples]
    return get_inception_score(list(images))
def get_inception_score(self, n, resize=0, ret_samples=False):
    """Generate ~n samples from the GAN and return their inception score.

    Args:
        n: total number of samples to draw (rounded down to whole batches).
        resize: if nonzero, resize each sample to (resize, resize) first.
        ret_samples: if True, return the raw [0, 255] samples instead of
            the inception score.
    """
    # NOTE: Python 2 code — uses xrange and generator .next().
    all_samples = []
    if self.dcgan.has_labels:
        # conditional model: pull real label batches from the dataset so the
        # generated class distribution matches it
        train_gen, _ = hdf5_images.load(
            batch_size=self.dcgan.batch_size,
            data_file=self.dcgan.data_dir,
            resolution=self.dcgan.output_height,
            label_name=self.dcgan.label_file)

        def inf_train_gen():
            # endlessly cycle label batches from the training generator
            while True:
                for _, labels in train_gen():
                    yield labels

    for i in tqdm(xrange(n // self.dcgan.batch_size)):
        if self.dcgan.has_labels:
            # NOTE(review): a fresh inf_train_gen() generator is created on
            # every iteration, so .next() always yields the FIRST label batch
            # — possibly unintended; confirm against the original repo.
            all_samples.append(
                self.sample_z(
                    z=self.gen_z(),
                    y=np.eye(self.dcgan.y_dim)[inf_train_gen().next()]))
        else:
            all_samples.append(self.sample_z(z=self.gen_z()))
    all_samples = np.concatenate(all_samples, axis=0)
    if resize:
        all_samples = np.array([
            scipy.misc.imresize(sample, (resize, resize))
            for sample in tqdm(all_samples)
        ])
    # map generator output from [-1, 1] to [0, 255]
    all_samples = (all_samples + 1) * 127.5
    if ret_samples:
        return all_samples
    return inception_score.get_inception_score(list(all_samples))
def compute_inception_score(self, epoch, idx):
    # Generates images and their inception score
    # Returns np.nan if sampling or scoring fails for any reason.
    try:
        # sample a batch from the generator and save it as an image grid
        sample_op = self.model.generator(self.model.z, train=False)
        imgs = self.sess.run(
            sample_op,
            feed_dict={self.model.z: self.model.sample_z},
        )
        save_images(
            imgs, image_manifold_size(imgs.shape[0]),
            './{}/train_{:02d}_{:04d}.png'.format(self.model.sample_dir,
                                                  epoch, idx))
        # map generator output from [-1, 1] into [0, 255] before scoring
        rescaled = [(im + 1) * 255 / 2 for im in imgs]
        return get_inception_score(rescaled, self.sess, splits=5)
    except Exception as e:
        print("Sampling error:", e)
        return np.nan
def get_inception_score(sess):
    """Draw 10 batches of 100 samples from the generator graph and return
    their inception score."""
    batches = []
    for _ in range(10):
        noise_z = np.random.normal(size=[100, FLAGS.z_dim])
        noise_c = np.random.uniform(0., 1., size=[100, 2])
        batch, = sess.run([fake_images], feed_dict={z: noise_z, c: noise_c})
        batches.append(batch)
    samples = np.concatenate(batches, axis=0)
    samples = (samples + 1.) * (255. / 2.)  # change to range [0, 255]
    return inception_score.get_inception_score(list(samples))
def test():
    """Load generated images (directory from CLI args), print and return
    their inception score tuple."""
    cli = cmd.args()
    print("Loading images...")
    fakes = load_generated_images(cli.generated_images_dir)
    print("Compute inception score...")
    inception_score = get_inception_score(fakes)
    print("Inception score %s" % inception_score[0])
    return inception_score
def get_inception_score(n=N_IS, softmax=softmax):
    """Sample ~n images (batches of 100) from the generator graph and
    return their inception score."""
    # NOTE: Python 2 code — xrange and integer '/' division; under Python 3
    # `n / 100` would be a float and xrange would not exist.
    all_samples = []
    for i in xrange(n / 100):
        z_input = np.random.randn(100, input_dim)
        all_samples.append(session.run(x_generated, feed_dict={z: z_input}))
    all_samples = np.concatenate(all_samples, axis=0)
    # map [-1, 1] generator output to integer [0, 255]
    all_samples = ((all_samples + 1.) * (255. / 2)).astype('int32')
    all_samples = all_samples.reshape((-1, 32, 32, 3))
    return inception_score.get_inception_score(list(all_samples), softmax=softmax)
def batch_inception(numpy_arrays):
    '''
    numpy_arrays should be shape (N, C, W, H) with (min, max) values of (0, 1).
    '''
    # shift [0, 1] inputs into the [-1, 1] range the scorer expects
    rescaled = (numpy_arrays - 0.5) * 2
    # returns (mean, standard deviation).
    return inception_score.get_inception_score(rescaled)
def get_inception_score_origin(generator_out, data_format, session, n):
    """Run the generator op n//100 times, convert samples to integer
    [0, 255] NHWC images, and return their inception score."""
    img_dim = 64
    chunks = [session.run(generator_out) for _ in range(n // 100)]
    samples = np.concatenate(chunks, axis=0)
    samples = ((samples + 1.) * (255. / 2)).astype('int32')
    samples = samples.reshape((-1, 3, img_dim, img_dim))
    if data_format == 'NCHW':
        # scorer wants channel-last images
        samples = samples.transpose(0, 2, 3, 1)
    return inception_score.get_inception_score(list(samples), session)
def test(generated_IMG_dir):
    """Load every image in *generated_IMG_dir* and print its inception score."""
    # FIX: the original printed `generated_images_dir`, a name that does not
    # exist in this scope (NameError at call time); the parameter is
    # `generated_IMG_dir`.
    print(generated_IMG_dir)
    print("Loading image Pairs...")
    generated_images = []
    for img_nameG in os.listdir(generated_IMG_dir):
        imgG = imread(os.path.join(generated_IMG_dir, img_nameG))
        generated_images.append(imgG)
    print("#######IS########")
    print("Compute inception score...")
    inception_score = get_inception_score(generated_images)
    print("Inception score %s" % inception_score[0])
def tf_inception_score(netG, z_dim=128, n_samples=5000):
    """Sample n_samples images (batches of 100) from *netG* on the GPU and
    return their inception score; restores netG to train mode afterwards."""
    from inception_score import get_inception_score
    netG.eval()
    with torch.no_grad():
        chunks = []
        for _ in tqdm(range(n_samples // 100)):
            latent = torch.randn(100, z_dim).cuda()
            chunks.append(netG(latent))
        images = torch.cat(chunks, 0).cpu().numpy()
    netG.train()
    return get_inception_score(images)
def test(generated_images_dir, annotations_file_test):
    """Report inception score and SSIM for the generated images."""
    # load images
    print("Loading images...")
    (originals, references, fakes,
     img_names) = load_generated_images(generated_images_dir)

    print("Compute inception score...")
    is_val = get_inception_score(fakes)
    print("Inception score %s" % is_val[0])

    print("Compute structured similarity score (SSIM)...")
    ssim_val = ssim_score(fakes, references)
    print("SSIM score %s" % ssim_val)

    print("Inception score = %s; SSIM score = %s" % (is_val, ssim_val))
def get_inception_score(self):
    """Sample a latent batch, run the generator in chunks of 5000, and
    return the inception score of the resulting images."""
    z_batch = self.sess.run(self.inception_z_batch_op)
    chunk = 5000
    n_chunks = int(self.inc_batch_size / chunk)
    generated = []
    for i in range(n_chunks):
        batch = self.sess.run(
            ops.inverse_transform(self.G),
            feed_dict={self.Z: z_batch[i * chunk:(i + 1) * chunk]})
        generated.append(batch)
    generated = np.concatenate(generated, axis=0).astype('int32')
    print('Calculating inception score...')
    score = inception_score.get_inception_score(self.sess, list(generated))
    print(score)
    return score
def get_inception_score(self, iter_time):
    """Every `inception_freq` iterations: sample images, score them, and
    plot/log the mean inception score. No-op on other iterations."""
    if np.mod(iter_time, self.flags.inception_freq) != 0:
        return
    sample_size = 100
    collected = []
    for _ in range(int(self.num_examples_IS / sample_size)):
        imgs = self.model.sample_imgs(sample_size=sample_size)
        collected.append(imgs[0])
    collected = np.concatenate(collected, axis=0)
    # map [-1, 1] samples to uint8 [0, 255]
    collected = ((collected + 1.) * 255. / 2.).astype(np.uint8)
    mean_IS, std_IS = get_inception_score(list(collected), self.flags)
    plot.plot('inception score', mean_IS)
    plot.flush(self.log_out_dir)  # write logs
    plot.tick()
def get_inception_score(netG, arc, args, n_iter=500):
    """Sample n_iter batches of 100 latents, generate 32x32 uint8 images
    (with or without the `arc` argument depending on args.trainG), and
    return their inception score."""
    collected = []
    for _ in range(n_iter):
        with torch.no_grad():
            latent = Variable(torch.randn(100, args.latent_dim, 1, 1)).cuda()
            # same post-processing either way; only the generator call differs
            out = netG(latent, arc) if args.trainG else netG(latent)
            fake = out.mul_(0.5).add_(0.5).mul_(255).clamp_(0, 255).to(
                'cpu', torch.uint8).numpy()
        collected.append(fake)
    samples = np.concatenate(collected, axis=0).reshape((-1, 3, 32, 32))
    return inception_score.get_inception_score(list(samples), 100)
def get_inception_score(self, z_batch=None):
    """Score generator output on *z_batch* (sampled internally when None),
    running the generator in chunks of 5000; prints timing and the score."""
    if z_batch is None:
        z_batch = self.sess.run(self.inception_z_batch_op)
    chunk = 5000
    collected = []
    for i in range(int(len(z_batch) / chunk)):
        g = self.sess.run(
            ops.inverse_transform(self.G),
            feed_dict={self.Z: z_batch[i * chunk:(i + 1) * chunk]})
        collected.append(g)
    samples = np.concatenate(collected, axis=0).astype('int32')
    print('Calculating inception score...')
    start = time.time()
    score = inception_score.get_inception_score(self.sess, list(samples))
    print(score, ' ...', str(time.time() - start), 's')
    return score
def test(generated_images_dir, annotations_file_test):
    """Print SSIM, L1, and inception score for the generated images."""
    # load images
    print("Loading images...")
    (originals, references, fakes,
     img_names) = load_generated_images(generated_images_dir)

    print("Compute structured similarity score (SSIM)...")
    ssim_val = ssim_score(fakes, references)
    print("SSIM score ", ssim_val)
    print(generated_images_dir)

    print("Compute l1 score...")
    l1_val = l1_score(fakes, references)
    print("L1 score ", l1_val)

    print("Compute inception score...")
    is_val = get_inception_score(fakes)
    print("Inception score ", is_val)

    print("Inception score ", is_val, " SSIM score ", ssim_val,
          " L1 score ", l1_val)
def incep_score(self, checkpoint=None, n_samples=1000):
    """Restore the decoder from *checkpoint* (default: self.check_point_file),
    sample n_samples images, and return (mean, std) of the inception score.

    FIX: the original assigned `checkpoint = self.check_point_file` twice,
    unconditionally overwriting the caller-supplied argument; the parameter
    is now honoured and only defaulted when None.
    """
    if checkpoint is None:
        checkpoint = self.check_point_file
    # fixed seeds so repeated evaluations are comparable
    np.random.seed(1)
    tf.set_random_seed(1)
    z = tf.random_normal([n_samples, self.LATENTDIM], 0, 1, dtype=tf.float32)
    if self.dpp_weight == 0:
        samples = self._decoder_network(z, reuse=True)
    else:
        samples = self._decoder_network(z, reuse=False)
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver(self.parameters)
    saver.restore(session, checkpoint)
    generated_images = np.array(session.run(samples))
    # min/max-normalise the whole batch into integer [0, 255] for the scorer
    all_samples = (((generated_images - generated_images.min()) /
                    (generated_images.max() - generated_images.min())) *
                   255.).astype('int32')
    incep_score = get_inception_score(list(all_samples))
    print('Inception score: {:.5f} +/- {:.5f}'.format(incep_score[0],
                                                      incep_score[1]))
    return incep_score[0], incep_score[1]
def main():
    """CLI entry point: compute the inception score over a folder of images."""
    parser = argparse.ArgumentParser()
    # the three required string options share one shape; add them in a loop
    for flag, help_txt in (('--dataset', 'name of dataset'),
                           ('--dataroot', 'path to images'),
                           ('--log_name', 'name of log file')):
        parser.add_argument(flag, required=True, help=help_txt)
    parser.add_argument(
        '--imageSize',
        type=int,
        default=64,
        help='the height / width of the input image to network')
    opt = parser.parse_args()
    print(opt)

    # get the list of images as inputs of calculating inception score
    images_list = get_images(opt.dataroot, opt.imageSize)
    # calculate the inception score
    mean_score, std_score = inception_score.get_inception_score(
        images=images_list, log_file=opt.log_name)
    print("mean score : {}".format(mean_score))
    print("std score : {}".format(std_score))
def compute_inception_score(self):
    """ Inception score """
    # re-initialise the latent dataset iterator, then drain it completely
    self.sess.run([self.ds_inception_z_init_op])
    if self.verbosity >= 3:
        print('[!] Computing inception score. '
              'This may take a while...')
    collected = []
    while True:
        try:
            batch = self.sess.run([self.inception_images])
            collected.extend(batch)
        except tf.errors.OutOfRangeError:
            # iterator exhausted — all batches gathered
            break
    stacked = np.concatenate(collected, axis=0)
    return inception_score.get_inception_score(stacked, gpu_id=self.gpu_id)
def main():
    """Entry point to dcgan"""
    print("|------- new changes!!!!!!!!!")
    # to get the dataset and net configuration
    train_data, val_data = get_dataset(dataset)
    netG = get_netG()
    netD = get_netD()
    loss, trainerG, trainerD = get_configurations(netG, netD)

    # set labels
    real_label = mx.nd.ones((opt.batch_size, ), ctx=ctx)
    fake_label = mx.nd.zeros((opt.batch_size, ), ctx=ctx)

    metric = mx.metric.Accuracy()
    print('Training... ')
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')

    iter = 0

    # to metric the network
    loss_d = []
    loss_g = []
    inception_score = []

    for epoch in range(opt.nepoch):
        tic = time.time()
        btic = time.time()
        for data, _ in train_data:
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real_t
            data = data.as_in_context(ctx)
            noise = mx.nd.random.normal(0,
                                        1,
                                        shape=(opt.batch_size, nz, 1, 1),
                                        ctx=ctx)

            with autograd.record():
                output = netD(data)
                # reshape output from (opt.batch_size, 2, 1, 1) to (opt.batch_size, 2)
                output = output.reshape((opt.batch_size, 2))
                errD_real = loss(output, real_label)
            metric.update([
                real_label,
            ], [
                output,
            ])

            with autograd.record():
                fake = netG(noise)
                output = netD(fake.detach())
                output = output.reshape((opt.batch_size, 2))
                errD_fake = loss(output, fake_label)
                errD = errD_real + errD_fake
                errD.backward()
            metric.update([
                fake_label,
            ], [
                output,
            ])
            trainerD.step(opt.batch_size)

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            with autograd.record():
                output = netD(fake)
                output = output.reshape((-1, 2))
                errG = loss(output, real_label)
                errG.backward()
            trainerG.step(opt.batch_size)

            name, acc = metric.get()
            logging.info(
                'discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d',
                mx.nd.mean(errD).asscalar(),
                mx.nd.mean(errG).asscalar(), acc, iter, epoch)
            # every `niter` iterations: dump sample grids and record metrics
            if iter % niter == 0:
                visual('gout',
                       fake.asnumpy(),
                       name=os.path.join(outf, 'fake_img_iter_%d.png' % iter))
                visual('data',
                       data.asnumpy(),
                       name=os.path.join(outf, 'real_img_iter_%d.png' % iter))
                # record the metric data
                loss_d.append(errD)
                loss_g.append(errG)
                if opt.inception_score:
                    score, _ = get_inception_score(fake)
                    inception_score.append(score)

            iter = iter + 1
            btic = time.time()

        name, acc = metric.get()
        metric.reset()
        logging.info('\nbinary training acc at epoch %d: %s=%f', epoch, name,
                     acc)
        logging.info('time: %f', time.time() - tic)

        # save check_point
        if check_point:
            netG.save_parameters(
                os.path.join(outf, 'generator_epoch_%d.params' % epoch))
            netD.save_parameters(
                os.path.join(outf, 'discriminator_epoch_%d.params' % epoch))

    # save parameter
    netG.save_parameters(os.path.join(outf, 'generator.params'))
    netD.save_parameters(os.path.join(outf, 'discriminator.params'))
    # visualization the inception_score as a picture
    if opt.inception_score:
        ins_save(inception_score)
# Score every PNG under ./test/GibbsNet with the Inception model.
import inception_score
import glob
import os
import numpy as np
from scipy.misc import imread

image_path = './test/GibbsNet'
image_list = glob.glob(os.path.join(image_path, '*.png'))
# load each file as float32 before scoring
images = [imread(str(fn)).astype(np.float32) for fn in image_list]
print(inception_score.get_inception_score(images, splits=10))
def calc_inception_score(images):
    """Score a (N, C, H, W) image batch; single-channel inputs are tiled
    to three channels, then images are passed channel-last to the scorer."""
    if images.shape[1] == 1:
        images = images.repeat(3, axis=1)
    channel_last = list(images.transpose(0, 2, 3, 1))
    return inception_score.get_inception_score(channel_last)
# This way Inception score is more correct since there are different generated examples from every class of Inception model sample_list = [] for i in range(10): z = Variable( Tensor( np.random.normal(0, 1, (800, opt.latent_dim)))) samples = generator(z) sample_list.append(samples.data.cpu().numpy()) # Flattening list of list into one list new_sample_list = list(chain.from_iterable(sample_list)) #print("Calculating Inception Score over 8k generated images") # Feeding list of numpy arrays inception_score = get_inception_score(new_sample_list, cuda=True, batch_size=32, resize=True, splits=10) msg += ' [IS: %.4f]' % inception_score[0] writer.add_scalar('G/inception_score_mean', inception_score[0], global_step=step) writer.add_scalar('G/inception_score_std', inception_score[1], global_step=step) #print(msg) tdl.set_description(msg) generator.eval() generator_sample = generator(z_sample)
def main():
    """Entry point to dcgan"""
    print("|------- new changes!!!!!!!!!")
    # to get the dataset and net configuration
    train_data, val_data = get_dataset(dataset)
    netG = get_netG()
    netD = get_netD()
    loss, trainerG, trainerD = get_configurations(netG, netD)

    # set labels
    real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
    fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)

    metric = mx.metric.Accuracy()
    print('Training... ')
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')

    iter = 0

    # to metric the network
    loss_d = []
    loss_g = []
    inception_score = []

    for epoch in range(opt.nepoch):
        tic = time.time()
        btic = time.time()
        for data, _ in train_data:
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real_t
            data = data.as_in_context(ctx)
            noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)

            with autograd.record():
                output = netD(data)
                # reshape output from (opt.batch_size, 2, 1, 1) to (opt.batch_size, 2)
                output = output.reshape((opt.batch_size, 2))
                errD_real = loss(output, real_label)
            metric.update([real_label, ], [output, ])

            with autograd.record():
                fake = netG(noise)
                output = netD(fake.detach())
                output = output.reshape((opt.batch_size, 2))
                errD_fake = loss(output, fake_label)
                errD = errD_real + errD_fake
                errD.backward()
            metric.update([fake_label,], [output,])
            trainerD.step(opt.batch_size)

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            with autograd.record():
                output = netD(fake)
                output = output.reshape((-1, 2))
                errG = loss(output, real_label)
                errG.backward()
            trainerG.step(opt.batch_size)

            name, acc = metric.get()
            logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d'
                         , mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch)
            # every `niter` iterations: dump sample grids and record metrics
            if iter % niter == 0:
                visual('gout', fake.asnumpy(), name=os.path.join(outf, 'fake_img_iter_%d.png' % iter))
                visual('data', data.asnumpy(), name=os.path.join(outf, 'real_img_iter_%d.png' % iter))
                # record the metric data
                loss_d.append(errD)
                loss_g.append(errG)
                if opt.inception_score:
                    score, _ = get_inception_score(fake)
                    inception_score.append(score)

            iter = iter + 1
            btic = time.time()

        name, acc = metric.get()
        metric.reset()
        logging.info('\nbinary training acc at epoch %d: %s=%f', epoch, name, acc)
        logging.info('time: %f', time.time() - tic)

        # save check_point
        if check_point:
            netG.save_parameters(os.path.join(outf, 'generator_epoch_%d.params' % epoch))
            netD.save_parameters(os.path.join(outf, 'discriminator_epoch_%d.params' % epoch))

    # save parameter
    netG.save_parameters(os.path.join(outf, 'generator.params'))
    netD.save_parameters(os.path.join(outf, 'discriminator.params'))
    # visualization the inception_score as a picture
    if opt.inception_score:
        ins_save(inception_score)
# Score pre-sampled DCGAN images saved every 5 epochs.
import sys
import os
import shutil
import argparse
import numpy as np
import time

import inception_score as inception

for epoch in range(0, 106, 5):
    imgs = np.load('sampled_imgs/baseline_DCGAN_samples_for_inception_%d.npy' % epoch)
    imgs = (imgs + 1.) * 127.5  # map [-1, 1] samples into [0, 255]
    mean_val, std_val = inception.get_inception_score(list(imgs), splits=10)
    print("------------")
    print(mean_val)
    print(std_val)