def mnist_data_providers(batch_size, crop_size=(), use_test_set=False):
    """Build train/val/test MemoryDataProvider objects for MNIST.

    Args:
        batch_size: number of examples per minibatch.
        crop_size: int or sequence of ints forwarded to the providers;
            an int is normalized to a one-element list. Defaults to no crop.
        use_test_set: when True, fold the validation split into the training
            set and use the test split as the validation split (final-run
            evaluation mode).

    Returns:
        dict with 'train', 'val' and 'test' MemoryDataProvider entries.
    """
    # Local import keeps the module importable even when the dataset
    # loader (and its data files) are absent.
    from load import mnist_with_valid_set

    # NOTE(review): default changed from the mutable `[]` to `()` — the
    # original empty-list default was never mutated here, but a shared
    # mutable default is a well-known Python pitfall. Normalizing to a
    # fresh list also keeps callers' sequences unaliased.
    if isinstance(crop_size, int):
        crop_size = [crop_size]
    else:
        crop_size = list(crop_size)

    trX, vaX, teX, trY, vaY, teY = mnist_with_valid_set()
    if use_test_set:
        # Train on train+valid and report "val" metrics on the test split.
        trX = np.concatenate([trX, vaX], axis=0)
        trY = np.concatenate([trY, vaY], axis=0)
        vaX = teX
        vaY = teY

    # MNIST digits as (channels, height, width).
    shape = 1, 28, 28
    return {
        'train': MemoryDataProvider(trX, trY, batch_size,
                                    crop_size=crop_size, image_shape=shape),
        'val': MemoryDataProvider(vaX, vaY, batch_size,
                                  crop_size=crop_size, image_shape=shape),
        'test': MemoryDataProvider(teX, teY, batch_size,
                                   crop_size=crop_size, image_shape=shape),
    }
# --- Hyperparameters for the MNIST DCGAN run ---
learning_rate = 0.0002     # optimizer step size
batch_size = 128           # examples per minibatch
image_shape = [28, 28, 1]  # MNIST digits, height x width x channels
dim_z = 100                # latent noise vector dimensionality
dim_W1 = 1024              # DCGAN layer-width parameters (see DCGAN class)
dim_W2 = 128
dim_W3 = 64
dim_channel = 1            # grayscale output
visualize_dim = 196        # number of samples in the visualization batch

# SMITH: Restore model option — resume from a saved checkpoint when True.
restore = True
end_point = 20000          # presumably the checkpoint/iteration to restore or
                           # stop at — TODO confirm against the training loop

# Load MNIST with a held-out validation split.
trX, vaX, teX, trY, vaY, teY = mnist_with_valid_set()

# Build the DCGAN graph; build_model returns the input placeholders
# (Z, labels Y, images), both costs, and the discriminator outputs on
# real and generated images.
dcgan_model = DCGAN(
    batch_size=batch_size,
    image_shape=image_shape,
    dim_z=dim_z,
    dim_W1=dim_W1,
    dim_W2=dim_W2,
    dim_W3=dim_W3,
)
Z_tf, Y_tf, image_tf, d_cost_tf, g_cost_tf, p_real, p_gen = dcgan_model.build_model()

sess = tf.InteractiveSession()
# Keep up to 10 checkpoints around for restoring.
saver = tf.train.Saver(max_to_keep=10)
# Shared utilities: RNGs, Theano ops, casting helpers, and data iteration.
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, deconv, dropout
from lib.theano_utils import floatX, sharedX
from lib.data_utils import shuffle, iter_data
from load import mnist_with_valid_set

#
# Phil's business
#
from MatryoshkaModules import DiscConvModule, DiscFCModule, GenConvModule, \
    GenFCModule, BasicConvModule

# path for dumping experiment info and fetching dataset
EXP_DIR = "./mnist"

# Load MNIST splits from the experiment's data directory.
trX, vaX, teX, trY, vaY, teY = mnist_with_valid_set("{}/data".format(EXP_DIR))
# Rescale validation images to floats in [0, 1].
vaX = floatX(vaX) / 255.

# --- Experiment hyperparameters ---
k = 1            # # of discrim updates for each gen update
l2 = 1.5e-5      # l2 weight decay
b1 = 0.5         # momentum term of adam
nc = 1           # # of channels in image
nbatch = 128     # # of examples in batch
npx = 28         # # of pixels width/height of images
nz0 = 32         # # of dim for Z0
nz1 = 8          # # of dim for Z1
ngfc = 256       # # of gen units for fully connected layers
ndfc = 256       # # of discrim units for fully connected layers
ngf = 64         # # of gen filters in first conv layer
ndf = 64         # # of discrim filters in first conv layer
# Shared utilities: RNGs, Theano ops, casting helpers, and data iteration.
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, deconv, dropout
from lib.theano_utils import floatX, sharedX
from lib.data_utils import shuffle, iter_data
from load import mnist_with_valid_set

#
# Phil's business
#
from MatryoshkaModules import DiscConvModule, DiscFCModule, GenConvModule, \
    GenFCModule, BasicConvModule

# path for dumping experiment info and fetching dataset
EXP_DIR = "./mnist"

# Load MNIST splits from the experiment's data directory.
trX, vaX, teX, trY, vaY, teY = mnist_with_valid_set("{}/data".format(EXP_DIR))
# Rescale validation images to floats in [0, 1].
vaX = floatX(vaX) / 255.

# --- Experiment hyperparameters ---
k = 1            # # of discrim updates for each gen update
l2 = 1.5e-5      # l2 weight decay
b1 = 0.5         # momentum term of adam
nc = 1           # # of channels in image
nbatch = 128     # # of examples in batch
npx = 28         # # of pixels width/height of images
nz0 = 32         # # of dim for Z0
nz1 = 8          # # of dim for Z1
ngfc = 256       # # of gen units for fully connected layers
ndfc = 256       # # of discrim units for fully connected layers
ngf = 64         # # of gen filters in first conv layer
ndf = 64         # # of discrim filters in first conv layer
from util import *
from load import mnist_with_valid_set

# --- Hyperparameters for the MNIST DCGAN run ---
n_epochs = 10000           # maximum training epochs
learning_rate = 0.0001     # optimizer step size
batch_size = 128           # examples per minibatch
image_shape = [28, 28, 1]  # MNIST digits, height x width x channels
dim_z = 100                # latent noise vector dimensionality
dim_W1 = 1024              # DCGAN layer-width parameters (see DCGAN class)
dim_W2 = 128
dim_W3 = 64
dim_channel = 1            # grayscale output
visualize_dim = 196        # number of samples in the visualization batch

# Load MNIST with a held-out validation split.
trX, vaX, teX, trY, vaY, teY = mnist_with_valid_set()

# Build the DCGAN graph; build_model returns the input placeholders
# (Z, labels Y, images), both costs, and the discriminator outputs on
# real and generated images.
dcgan_model = DCGAN(
    batch_size=batch_size,
    image_shape=image_shape,
    dim_z=dim_z,
    dim_W1=dim_W1,
    dim_W2=dim_W2,
    dim_W3=dim_W3,
)
Z_tf, Y_tf, image_tf, d_cost_tf, g_cost_tf, p_real, p_gen = dcgan_model.build_model()

sess = tf.InteractiveSession()
# Keep up to 10 checkpoints around for restoring.
saver = tf.train.Saver(max_to_keep=10)

# Select the discriminator's trainable variables by name prefix.
# NOTE(review): on Python 3 `filter` yields a one-shot iterator, not a
# list — if discrim_vars is iterated more than once downstream, it must
# be wrapped in list(...); verify which Python version this targets.
discrim_vars = filter(lambda x: x.name.startswith('discrim'), tf.trainable_variables())