Example #1
    def G(self, inputs, labels, reuse=False, do_reshape=False):
        # Conditional generator: upsamples a latent/feature tensor into a
        # (batch_size, 32, 32, 3) image, re-injecting the one-hot labels at
        # every layer via conv_concat.
        labels = tf.reshape(labels, [-1, 1, 1, 10])

        #~ inputs = slim.fully_connected(inputs, 1024, scope='qwe')
        #~ inputs = slim.fully_connected(inputs, 1024, scope='asd')
        if inputs.get_shape()[1] != 1:
            # Turn a rank-2 (batch_size, dim) input into a 1x1 "image"
            # so it can be fed to conv2d_transpose.
            inputs = tf.expand_dims(inputs, 1)
            inputs = tf.expand_dims(inputs, 1)
        inputs = conv_concat(inputs, labels, self.mode)

        with tf.variable_scope('generator', reuse=reuse):
            with slim.arg_scope([slim.conv2d_transpose], padding='SAME', activation_fn=None,
                                stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
                with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
                                    activation_fn=tf.nn.relu, is_training=(self.mode == 'train_dsn')):

                    net = slim.conv2d_transpose(inputs, 512, [4, 4], padding='VALID', scope='conv_transpose1')  # (batch_size, 4, 4, 512)
                    net = slim.batch_norm(net, scope='bn1')
                    net = conv_concat(net, labels, self.mode)
                    net = slim.conv2d_transpose(net, 256, [3, 3], scope='conv_transpose2')  # (batch_size, 8, 8, 256)
                    net = slim.batch_norm(net, scope='bn2')
                    net = conv_concat(net, labels, self.mode)
                    net = slim.conv2d_transpose(net, 128, [3, 3], scope='conv_transpose3')  # (batch_size, 16, 16, 128)
                    net = slim.batch_norm(net, scope='bn3')
                    net = conv_concat(net, labels, self.mode)
                    net = slim.conv2d_transpose(net, 3, [3, 3], activation_fn=tf.tanh, scope='conv_transpose4')  # (batch_size, 32, 32, 3)
                    return net
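
Both examples call a conv_concat helper that is not shown on this page. Below is a minimal sketch, assuming it follows the usual conditional-GAN pattern of tiling the (batch, 1, 1, n_classes) label tensor over the feature map's spatial grid and concatenating along the channel axis; the mode argument is assumed to exist only to match the call sites above:

import tensorflow as tf

def conv_concat(x, y, mode):
    # Sketch under assumptions; `mode` is accepted but unused here.
    # Tile y from (batch, 1, 1, n_classes) to (batch, H, W, n_classes),
    # using dynamic shapes so an unknown batch size also works.
    x_shape = tf.shape(x)
    tiled = tf.tile(y, [1, x_shape[1], x_shape[2], 1])
    # Append the tiled labels as extra channels.
    return tf.concat([x, tiled], axis=3)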
Example #2
    def D_g(self, images, labels, reuse=False):
        # Conditional discriminator: scores 32x32 images as real vs. fake,
        # re-injecting the one-hot labels at every layer via conv_concat.
        labels = tf.reshape(labels, [self.batch_size, 1, 1, 10])

        if images.get_shape()[3] == 3:
            images = tf.image.rgb_to_grayscale(images)

        images = conv_concat(images, labels, self.mode)  # (batch_size, 32, 32, 1 + 10)

        with tf.variable_scope('disc_g', reuse=reuse):
            with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
                                stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
                with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
                                    activation_fn=lrelu, is_training=(self.mode == 'train_dsn')):

                    net = slim.conv2d(images, 128, [3, 3], activation_fn=tf.nn.relu, scope='conv1')  # (batch_size, 16, 16, 128)
                    net = slim.batch_norm(net, scope='bn1')
                    net = conv_concat(net, labels, self.mode)
                    net = slim.conv2d(net, 256, [3, 3], scope='conv2')  # (batch_size, 8, 8, 256)
                    net = slim.batch_norm(net, scope='bn2')
                    net = conv_concat(net, labels, self.mode)
                    #~ net = slim.conv2d(net, 512, [3, 3], scope='conv3')  # (batch_size, 4, 4, 512)
                    #~ net = slim.batch_norm(net, scope='bn3')
                    #~ net = conv_concat(net, labels, self.mode)
                    net = slim.flatten(net)
                    net = slim.fully_connected(net, 1, activation_fn=tf.sigmoid, scope='fc1')  # (batch_size, 1)
                    return net
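
Two more assumed pieces for context. First, lrelu is not defined on this page either; a minimal sketch is the DCGAN-style leaky ReLU:

def lrelu(x, leak=0.2):
    # Leaky ReLU with the conventional 0.2 slope; an assumption, not
    # taken from the code above.
    return tf.maximum(x, leak * x)

Second, a hedged sketch of how G and D_g would typically be wired into a conditional-GAN objective. The model instance, the noise/real_images/labels tensors, and the non-saturating log loss are illustrative assumptions, not taken from the examples above:

fake_images = model.G(noise, labels)                 # (batch_size, 32, 32, 3)
d_real = model.D_g(real_images, labels)              # (batch_size, 1) sigmoid scores
d_fake = model.D_g(fake_images, labels, reuse=True)  # reuse discriminator variables

eps = 1e-8  # guard the logs against exact 0/1 outputs
d_loss = -tf.reduce_mean(tf.log(d_real + eps) + tf.log(1.0 - d_fake + eps))
g_loss = -tf.reduce_mean(tf.log(d_fake + eps))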