def model(data, train=False, prefix=""):
    """The Model definition.

    Small encoder/decoder convolutional network: two 5x5 conv stages
    (with one 2x2 max-pool after the first), optional dropout during
    training, a nearest-neighbor upsample back to 28x28, and a final
    3x3 conv emitting 2 output channels (per-pixel two-class logits).

    Args:
        data: input image batch; assumed NHWC with 28x28 spatial size
            (see the hard-coded upsample target) -- TODO confirm.
        train: when True, applies dropout (keep_prob 0.5) after conv2.
        prefix: string prepended to every layer name for scoping.

    Returns:
        A 2-D tensor of shape [batch * height * width, 2]: one row of
        2 logits per output pixel.
    """
    # 2D convolution with 'SAME' padding (the output feature map has
    # the same spatial size as the input). {strides} inside the helper
    # follows the data layout: [image index, y, x, depth].
    #
    # NOTE(review): helpers.conv2d returns a (tensor, nr_params) tuple
    # (see inference() in this file, which unpacks it at every call
    # site). The original bound the whole tuple to `conv`, which would
    # fail on conv.get_shape(); unpack and discard the param count.
    # 28x28
    conv, _ = helpers.conv2d(data,
                             name=prefix + "conv1",
                             kernel_width=5,
                             num_filters=32,
                             transfer=tf.nn.relu,
                             decay_rate=DECAY_RATE)
    print("After first conv: " + str(conv.get_shape()))

    # Max pooling: window of 2, stride of 2 -> halves spatial size.
    pool = helpers.pool(conv, name=prefix + "pool1", kernel_width=2)
    print("After first pool: " + str(pool.get_shape()))

    # 14x14
    conv, _ = helpers.conv2d(pool,
                             name=prefix + "conv2",
                             kernel_width=5,
                             num_filters=64,
                             transfer=tf.nn.relu,
                             decay_rate=DECAY_RATE)
    print("After second conv: " + str(conv.get_shape()))

    if train:
        # TF1 dropout API: second positional arg is keep_prob.
        # Seeded so training runs are reproducible.
        conv = tf.nn.dropout(conv, 0.5, seed=SEED)

    # Upsample back to the (assumed) input resolution.
    size = tf.constant([28, 28])
    unpool = tf.image.resize_nearest_neighbor(conv,
                                              size,
                                              align_corners=None,
                                              name=prefix + "unpool3")
    print("After third unpool: " + str(unpool.get_shape()))

    # Final conv produces 2 channels: per-pixel two-class logits.
    conv, _ = helpers.conv2d(unpool,
                             name=prefix + "deconv3",
                             kernel_width=3,
                             num_filters=2,
                             transfer=tf.nn.relu,
                             decay_rate=DECAY_RATE)
    print("After third deconv: " + str(conv.get_shape()))

    conv_shape = conv.get_shape().as_list()
    # Flatten batch and spatial dims so each output pixel becomes one
    # row of logits, ready for a per-pixel softmax loss.
    reshape = tf.reshape(conv,
                         [conv_shape[0] * conv_shape[1] * conv_shape[2],
                          conv_shape[3]])
    return reshape
def inference(images):
    """Build the CIFAR-10 model.

    Args:
        images: Images returned from distorted_inputs() or inputs().

    Returns:
        Logits.
    """
    # Variables are created via tf.get_variable() (inside the helpers)
    # rather than tf.Variable() so they can be shared across multiple
    # GPU training runs.
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    print(images.get_shape())

    nr_params = 0
    # Remember the input spatial size; the decoder upsamples back to it.
    full_size = tf.constant(images.get_shape().as_list()[1:3])

    # --- encoder ---
    conv1, n = helpers.conv2d(images,
                              name="conv1",
                              kernel_width=5,
                              num_filters=64,
                              transfer=tf.nn.relu,
                              decay_rate=DECAY_RATE)
    print(f"conv1: {conv1.get_shape()}")
    nr_params += n

    pool1 = helpers.pool(conv1, name="pool1", kernel_width=2, stride=2)
    print(f"pool1: {pool1.get_shape()}")

    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm1')
    # Spatial size after the first pool; target of the first unpool.
    half_size = tf.constant(norm1.get_shape().as_list()[1:3])

    conv2, n = helpers.conv2d(norm1,
                              name="conv2",
                              kernel_width=5,
                              num_filters=64,
                              transfer=tf.nn.relu,
                              decay_rate=DECAY_RATE)
    nr_params += n
    print(f"conv2: {conv2.get_shape()}")

    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm2')
    pool2 = helpers.pool(norm2, name="pool2", kernel_width=3, stride=2)
    print(f"pool2: {pool2.get_shape()}")
    # Shape the transposed conv must restore to.
    bottleneck_shape = pool2.get_shape()

    conv3, n = helpers.conv2d(pool2,
                              name="conv3",
                              kernel_width=6,
                              num_filters=128,
                              transfer=tf.nn.relu,
                              padding="VALID",
                              decay_rate=DECAY_RATE)
    print(f"conv3: {conv3.get_shape()}")
    nr_params += n

    # 1x1 conv mixes channels without changing spatial size.
    conv4, n = helpers.conv2d(conv3,
                              name="conv4-1x1",
                              kernel_width=1,
                              num_filters=128,
                              transfer=tf.nn.relu,
                              decay_rate=DECAY_RATE)
    print(f"conv4: {conv4.get_shape()}")
    nr_params += n

    # --- decoder ---
    deconv1, n = helpers.conv2d_transpose(conv4,
                                          name="deconv1",
                                          kernel_width=6,
                                          num_filters=64,
                                          transfer=tf.nn.relu,
                                          padding="VALID",
                                          output_shape=bottleneck_shape,
                                          decay_rate=DECAY_RATE)
    print(f"deconv1: {deconv1.get_shape()}")
    nr_params += n

    unpool1 = tf.image.resize_nearest_neighbor(deconv1, half_size,
                                               align_corners=None,
                                               name="unpool1")
    print(f"unpool1: {unpool1.get_shape()}")

    conv5, n = helpers.conv2d(unpool1,
                              name="conv5",
                              kernel_width=5,
                              num_filters=64,
                              transfer=tf.nn.relu,
                              decay_rate=DECAY_RATE)
    print(f"conv5: {conv5.get_shape()}")
    nr_params += n

    unpool2 = tf.image.resize_nearest_neighbor(conv5, full_size,
                                               align_corners=None,
                                               name="unpool2")
    print(f"unpool2: {unpool2.get_shape()}")

    conv6, n = helpers.conv2d(unpool2,
                              name="conv6",
                              kernel_width=5,
                              num_filters=NUM_CLASSES,
                              transfer=tf.nn.relu,
                              decay_rate=DECAY_RATE)
    print(f"conv6 : {conv6.get_shape()}")
    nr_params += n

    return conv6, nr_params