import numpy as np

import func  # project-local module assumed to provide img_preprocess()


def updater(noiser,
            sess=sess,
            update=update,
            in_im=in_im,
            batch_size=batch_size,
            size=size):
    """Fill the batch with augmented copies of a Gaussian-noise image and run one update step."""
    image_i = 'data/gaussian_noise.png'
    for j in range(batch_size):
        noiser[j:j + 1] = np.copy(
            func.img_preprocess(image_i, size=size, augment=True))
    sess.run(update, feed_dict={in_im: noiser})
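# `func.img_preprocess` is a project-local helper that is not shown in this
# listing. The sketch below is only an assumption about what such a routine
# commonly does (load, resize, optional flip augmentation, scale to [-1, 1]);
# the actual implementation in the repo may differ.
from PIL import Image

def _img_preprocess_sketch(path, size=224, augment=False):
    img = Image.open(path).convert('RGB').resize((size, size))
    arr = np.asarray(img, dtype=np.float32)
    if augment and np.random.rand() < 0.5:
        arr = arr[:, ::-1, :]        # random horizontal flip
    arr = arr / 127.5 - 1.0          # scale pixels to [-1, 1]
    return arr[np.newaxis, ...]      # shape (1, size, size, 3), matches noiser[j:j+1]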
def updater(noiser,
            sess=sess,
            update=update,
            in_im=in_im,
            batch_size=batch_size,
            size=size,
            img_list=img_list):
    """Fill the batch with a random contiguous window of real images and run one update step."""
    # Random starting index so that a full batch of consecutive file names fits in img_list.
    rander = np.random.randint(low=0,
                               high=(len(img_list) - batch_size))
    for j in range(batch_size):
        noiser[j:j + 1] = np.copy(
            func.img_preprocess(img_list[rander + j].strip(),
                                size=size,
                                augment=False))
    sess.run(update, feed_dict={in_im: noiser})
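# Usage sketch (an assumption, not part of the original listing): the caller
# preallocates the batch buffer once and then calls updater() each iteration,
# relying on the session/ops bound as default arguments at definition time.
noiser = np.zeros((batch_size, size, size, 3))
for _ in range(max_iter):
    updater(noiser)  # fills the buffer and runs one optimisation step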
Example #3
# Shuffled pool of training indices and a reusable buffer for the range-image batch
train_idxs_all = np.arange(no_samples)
np.random.shuffle(train_idxs_all)
train_range_batch = np.zeros((batch_size, size, size, 3))
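# The training loop below fetches and feeds several graph tensors (input_image, zn,
# range_image, fooling_acc, gen_loss, true_softmax, adv_softmax, update, merge_op,
# train_writer) that are built earlier in the script. A rough, assumed sketch of
# those definitions in TF1 style (the exact construction is not shown here):
#
#   input_image = tf.placeholder(tf.float32, [batch_size, size, size, 3])
#   range_image = tf.placeholder(tf.float32, [batch_size, size, size, 3])
#   zn          = tf.placeholder(tf.float32, [batch_size, z_dim])
#   fooling_acc = tf.placeholder(tf.float32, [])
#   # gen_loss / true_softmax / adv_softmax come from the generator and classifier
#   # networks; update = tf.train.AdamOptimizer(learning_rate).minimize(gen_loss)
#   merge_op     = tf.summary.merge_all()
#   train_writer = tf.summary.FileWriter(log_dir, sess.graph)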



while iteration_no < max_iter:
    np.random.shuffle(train_idxs_all)
    ## one epoch of training
    for i in range(no_samples // batch_size):
        iteration_no += 1
        train_idxs = train_idxs_all[i * batch_size:(i + 1) * batch_size]
        train_images = img_list[train_idxs, ...]
        train_zn = np.random.uniform(low=-1., high=1., size=(batch_size, z_dim))
        for j in range(batch_size):
            # noise_image (set earlier in the script) is re-augmented for every slot of the range batch
            train_range_batch[j:j + 1] = np.copy(func.img_preprocess(noise_image, size=size, augment=True))
        ## Training step
        _, train_gen_loss, train_true_softmax, train_adv_softmax = sess.run(
            [update, gen_loss, true_softmax, adv_softmax],
            feed_dict={input_image: train_images, zn: train_zn, range_image: train_range_batch})
        ## Current summary: how often the predicted label flips between the clean and adversarial pass
        train_true_pred = np.argmax(train_true_softmax, axis=1)
        train_adv_pred = np.argmax(train_adv_softmax, axis=1)
        flip = np.sum([train_true_pred[j] != train_adv_pred[j] for j in range(batch_size)]) * (100.0 / batch_size)
        print('iter', iteration_no, 'train loss', train_gen_loss, 'fool rate', flip)
        ## Writing summary
        if iteration_no % 100 == 0:
            train_summary = sess.run(merge_op, feed_dict={input_image: train_images,
                                                          zn: train_zn, fooling_acc: flip, range_image: train_range_batch})
            train_writer.add_summary(train_summary, iteration_no)
        ## Validation
        if iteration_no % 100 == 0: