def face_parse(image, label):
    conv = dict(kernel=3, padding='SAME')
    deconv = dict(kernel=3, padding='SAME', bias=True)
    subpixel = dict(kernel=3, factor=2, padding='SAME')
    with tf.default_args(conv=conv, deconv=deconv, subpixel=subpixel):
        net = image
        # encoder: stacked conv blocks, each pair followed by max-pooling
        net = net.conv(16).bn().relu().conv(16).bn().relu().maxpool()
        net = net.conv(32).bn().relu().conv(32).bn().relu().maxpool()
        net = net.conv(64).bn().relu().conv(64).bn().relu().maxpool()
        net = net.conv(64).bn().relu().conv(64).bn().relu().maxpool()
        net = net.conv(64).bn().relu()
        # decoder: deconv blocks with subpixel upsampling, ending in 11-channel logits
        net = net.deconv(64).bn().relu()
        net = net.subpixel().deconv(64).bn().relu().deconv(64).bn().relu()
        net = net.subpixel().deconv(64).bn().relu().deconv(64).bn().relu()
        net = net.subpixel().deconv(32).bn().relu().deconv(32).bn().relu()
        net = net.subpixel().deconv(16).bn().relu().deconv(11, bias=True)
        prob = net.softmax()
        summary_parse(prob)
        losses = tf.softmax_cross_entropy(net, label, name='losses')
        loss = losses.mean()
        return tf.dic(losses=losses, loss=loss, outputs=[prob])

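# Hedged usage sketch, not part of the original source: assuming the tf.dic returned by
# face_parse() can be indexed like a plain dict and that stock TensorFlow 1.x optimizers
# are available alongside the chaining wrapper, a minimal training op could look like this.
# `image_ph`, `label_ph`, and the learning rate are illustrative names, not defined elsewhere.
def face_parse_train_sketch(image_ph, label_ph, learning_rate=1e-3):
    out = face_parse(image_ph, label_ph)           # {'losses': ..., 'loss': ..., 'outputs': [prob]}
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(out['loss'])
    return train_op, out['outputs'][0]             # training op and per-pixel class probabilities
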
def generator(z):
    deconv = dict(kernel=4, stride=2, padding='SAME')
    with tf.default_args(deconv=deconv):
        net = z
        net = net.dense(1024).bn().pleaky()
        net = net.dense(7 * 7 * 128).bn().pleaky()
        net = net.reshape((-1, 7, 7, 128))      # project the latent vector to a 7x7x128 feature map
        net = net.deconv(64).bn().pleaky()      # two stride-2 deconvs upsample 7 -> 14 -> 28
        net = net.deconv(1).sigmoid()
        return tf.summary_image(net, name='fake')

def discriminator(imgz):
    conv = dict(kernel=4, stride=2, padding='SAME')
    with tf.default_args(conv=conv):
        net = imgz
        net = net.conv(64).leaky()
        net = net.conv(128).leaky().flat2d()
        net = net.dense(1024).leaky()
        net = net.dense(1).squeeze()
        # prob = net.sigmoid()  # must return logits
        return tf.identity(net, name='disc')

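# Hedged sketch, not taken from this repo: one way the generator()/discriminator() pair
# above could be wired into a vanilla GAN objective. Because discriminator() returns raw
# logits, sigmoid cross-entropy is applied explicitly with stock TensorFlow 1.x ops.
# `real_images` and `z` are assumed caller-provided tensors, and variable sharing between
# the two discriminator calls is assumed to be handled by the wrapper (or a reuse scope).
def gan_losses_sketch(real_images, z):
    fake_images = generator(z)
    d_real = discriminator(real_images)
    d_fake = discriminator(fake_images)
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=tf.ones_like(d_real)) +
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.zeros_like(d_fake)))
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.ones_like(d_fake)))
    return d_loss, g_loss
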
def discriminator(xx, num_cont, batch):
    conv = dict(kernel=4, stride=2, bias=True, padding='SAME')
    with tf.default_args(conv=conv):
        net = xx
        net = net.conv(64).leaky()
        net = net.conv(128).leaky().flat2d()
        net = net.dense(1024).leaky()
        disc = net.dense(1).squeeze()                 # real/fake logit
        net = net.dense(128).leaky()
        klass = net.dense(10)                         # 10-way classification logits
        cont = net[batch:].dense(num_cont).sigmoid()  # continuous code, skipping the first `batch` rows
        return disc, klass, cont

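# Hedged sketch (assumption): the three heads above look like an InfoGAN-style
# discriminator, so a natural split is an adversarial loss on `disc`, a categorical
# cross-entropy on `klass` against the sampled discrete code, and an MSE on `cont`
# against the sampled continuous code. All names and label tensors below are
# illustrative; only stock TensorFlow 1.x ops are used.
def infogan_head_losses_sketch(disc, klass, cont, real_fake_labels, cat_code, cont_code):
    adv_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=disc, labels=real_fake_labels))
    cat_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=klass, labels=cat_code))
    cont_loss = tf.reduce_mean(tf.square(cont - cont_code))  # reconstruction of the continuous code
    return adv_loss, cat_loss, cont_loss
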
def face_parse2(image, label):
    conv = dict(kernel=3, padding='SAME')
    deconv = dict(kernel=3, padding='SAME', bias=True)
    # subpixel = dict(kernel=3, factor=2, padding='SAME')
    maxpool_where = dict(kernel=2)
    unpool_where = dict(kernel=2)
    with tf.default_args(conv=conv, deconv=deconv,
                         maxpool_where=maxpool_where, unpool_where=unpool_where):
        net = image
        wheres = []
        # encoder: keep the max-pooling indices so the decoder can unpool with them
        net, where = net.conv(16).bn().relu().conv(16).bn().relu().maxpool_where()
        wheres.append(where)
        net, where = net.conv(32).bn().relu().conv(32).bn().relu().maxpool_where()
        wheres.append(where)
        net, where = net.conv(64).bn().relu().conv(64).bn().relu().maxpool_where()
        wheres.append(where)
        net, where = net.conv(64).bn().relu().conv(64).bn().relu().maxpool_where()
        wheres.append(where)
        net = net.conv(64).bn().relu()
        # decoder: unpool with the stored indices (innermost first), ending in 11-channel logits
        net = net.deconv(64).bn().relu()
        net = net.unpool_where(wheres.pop()).deconv(64).bn().relu().deconv(64).bn().relu()
        net = net.unpool_where(wheres.pop()).deconv(64).bn().relu().deconv(32).bn().relu()
        net = net.unpool_where(wheres.pop()).deconv(32).bn().relu().deconv(16).bn().relu()
        net = net.unpool_where(wheres.pop()).deconv(16).bn().relu().deconv(11, bias=True)
        net = tf.summary_activation(net, name='logits')
        prob = net.softmax()
        summary_parse(prob)
        # losses = tf.nn.sigmoid_cross_entropy_with_logits(net, label)
        losses = tf.softmax_cross_entropy(net, label, name='losses')
        loss = losses.mean()
        return tf.dic(loss=loss, logits=net, label=label, image=image)

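# Hedged note and inference sketch, not from the original source: face_parse2() differs
# from face_parse() by recording max-pooling indices (maxpool_where) and reusing them for
# SegNet-style unpooling instead of subpixel upsampling. Assuming the returned tf.dic can
# be indexed like a dict, per-pixel predictions follow from an argmax over the 11 logit
# channels with a stock TensorFlow op; `image_ph` and `label_ph` are illustrative names.
def face_parse2_predict_sketch(image_ph, label_ph):
    out = face_parse2(image_ph, label_ph)         # {'loss': ..., 'logits': ..., 'label': ..., 'image': ...}
    preds = tf.argmax(out['logits'], axis=-1)     # one of the 11 face-parsing classes per pixel
    return preds, out['loss']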