Example #1
def main(_):
    if not tf.gfile.Exists(FLAGS.logdir):
        tf.gfile.MakeDirs(FLAGS.logdir)
    logging.set_verbosity(logging.INFO)
    with tf.Session() as sess:
        if FLAGS.similarity:
            dataset = zap_data(FLAGS, False)
            all_features, all_paths = export_intermediate(FLAGS, sess, dataset)
            similarity(FLAGS, sess, all_features, all_paths)
        else:
            dataset = zap_data(FLAGS, True)
            gan(dataset, sess)
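The main() above relies on tf.app flags defined at module level and on tf.app.run() dispatching to it. A minimal sketch of that wiring, assuming illustrative defaults (the actual definitions are not shown in the original module):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('logdir', '/tmp/gan_logs', 'Directory for checkpoints and summaries.')
flags.DEFINE_boolean('similarity', False,
                     'Export intermediate features and compute image similarity instead of training.')
flags.DEFINE_integer('batch_size', 64, 'Mini-batch size.')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses the flags above, then calls main(_)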
Example #2
def main(_):
    if not tf.gfile.Exists(FLAGS.logdir):
        tf.gfile.MakeDirs(FLAGS.logdir)
    if FLAGS.sampledir and not tf.gfile.Exists(FLAGS.sampledir):
        tf.gfile.MakeDirs(FLAGS.sampledir)

    with tf.Session() as sess:
        if FLAGS.sampledir:
            sample(FLAGS, sess)
        elif FLAGS.similarity:
            dataset = zap_data(FLAGS, False)
            all_features, all_paths = export_intermediate(FLAGS, sess, dataset)
            data_json = similarity(FLAGS, sess, all_features, all_paths)
            similarity_view(data_json)
        else:
            dataset = zap_data(FLAGS, True)
            gan(dataset, sess)
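This variant adds a sampling branch controlled by FLAGS.sampledir and a web view built from the similarity JSON. The extra flag it assumes, sketched in the same style as above (default illustrative):

flags.DEFINE_string('sampledir', '',
                    'If set, write generated sample images here instead of training.')

The three modes are then selected on the command line by passing --sampledir, --similarity, or neither.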
Example #3
def gan(cluster):
    # Model
    is_chief = (FLAGS.task == 0)
    local_step = 0
    server = tf.train.Server(cluster, job_name='worker', task_index=FLAGS.task)
    worker_device = '/job:worker/task:%d' % (FLAGS.task)
    with tf.device(
            tf.train.replica_device_setter(worker_device=worker_device,
                                           ps_device='/job:ps',
                                           cluster=cluster)), tf.Session():

        dataset = zap_data(FLAGS, True)
        num_global = (dataset['size'] // FLAGS.batch_size) * FLAGS.epochs  # total training steps
        x = tf.placeholder(tf.float32,
                           shape=[
                               None, IMAGE_SIZE['resized'][0],
                               IMAGE_SIZE['resized'][1], 3
                           ])
        dropout = tf.placeholder(tf.float32)
        d_model = discriminator(x, reuse=False, dropout=dropout)

        z = tf.placeholder(tf.float32, shape=[None, Z_DIM])
        latent_c = tf.placeholder(shape=[None, C_DIM], dtype=tf.float32)
        g_model = generator(z, latent_c)
        dg_model, q_model = discriminator(g_model,
                                          reuse=True,
                                          dropout=dropout,
                                          c_dim=C_DIM)

        d_trainer, d_loss, g_trainer, g_loss, global_step = loss(
            d_model, g_model, dg_model, q_model, latent_c)

        # Stats
        t_vars = tf.trainable_variables()
        count_params(t_vars, ['discr/', 'gen/', 'latent_c/'])
        # Optionally add a histogram summary for each trainable variable:
        # for v in t_vars:
        #     tf.summary.histogram(v.name, v)

        # Init
        summary = tf.summary.merge_all()

        init_op = tf.global_variables_initializer()

        sv = tf.train.Supervisor(is_chief=is_chief,
                                 logdir=FLAGS.logdir,
                                 init_op=init_op,
                                 recovery_wait_secs=1,
                                 summary_op=None,
                                 global_step=global_step)

        sess_config = tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False,
            device_filters=['/job:ps',
                            '/job:worker/task:%d' % FLAGS.task])

        step = 0
        with sv.managed_session(server.target, config=sess_config) as sess:
            # Dataset queue
            #coord = tf.train.Coordinator()
            #threads = tf.train.start_queue_runners(coord=coord)
            #tf.train.start_queue_runners(sess=sess)
            while step < num_global and not sv.should_stop():
                z_batch = np.random.uniform(
                    -1, 1, [FLAGS.batch_size, Z_DIM]).astype(np.float32)
                c_batch = np.random.uniform(-1, 1, [FLAGS.batch_size, C_DIM])
                images, _ = sess.run(dataset['batch'])
                feed_dict = {
                    z: z_batch,
                    latent_c: c_batch,
                    x: images,
                    dropout: .5,
                }

                # Update discriminator
                start = time.time()
                _, d_loss_val = sess.run([d_trainer, d_loss],
                                         feed_dict=feed_dict)
                d_time = time.time() - start

                # Update generator
                start = time.time()
                _, g_loss_val, summary_str, step = sess.run(
                    [g_trainer, g_loss, summary, global_step],
                    feed_dict=feed_dict)
                g_time = time.time() - start

                # Log details
                if local_step % 10 == 0:
                    print(
                        "[%s, %s] Disc loss: %.3f (%.2fs), Gen Loss: %.3f (%.2fs)"
                        % (
                            step,
                            step * FLAGS.batch_size / dataset['size'],
                            d_loss_val,
                            d_time,
                            g_loss_val,
                            g_time,
                        ))
                    if is_chief:
                        sv.summary_computed(sess, summary_str)

                local_step += 1
                # Abort if either loss has diverged to NaN
                if np.isnan(g_loss_val) or np.isnan(d_loss_val):
                    print('Early stopping', g_loss_val, d_loss_val)
                    break

            # Finish off the filename queue coordinator.
            # coord.request_stop()
            # coord.join(threads)
            sv.request_stop()
            return
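gan(cluster) builds a between-graph replicated setup: each worker process creates a tf.train.Server, places variables on the parameter servers via tf.train.replica_device_setter, and runs its own training loop under a tf.train.Supervisor. A minimal sketch of how the cluster could be assembled and the processes dispatched; the host:port addresses and the job_name flag are assumptions, only FLAGS.task appears in the original:

import tensorflow as tf

cluster = tf.train.ClusterSpec({
    'ps': ['localhost:2222'],
    'worker': ['localhost:2223', 'localhost:2224'],
})

if FLAGS.job_name == 'ps':
    # Parameter servers host the variables and block until the job is killed.
    server = tf.train.Server(cluster, job_name='ps', task_index=FLAGS.task)
    server.join()
else:
    # Workers build the replicated graph and run the training loop above.
    gan(cluster)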
Example #4
import tensorflow as tf
import numpy as np
from main import discriminator
from zap50k import zap_data, IMAGE_SIZE
import itertools
import scipy.misc
import pickle
import flask

flags = tf.app.flags
FLAGS = flags.FLAGS

app = flask.Flask(__name__)

sess = tf.Session()
with sess.as_default():
    dataset = zap_data(FLAGS, False)
    x = tf.placeholder(
        tf.float32,
        shape=[None, IMAGE_SIZE['resized'][0], IMAGE_SIZE['resized'][1], 3])
    dropout = tf.placeholder(tf.float32)
    feat_model = discriminator(x, reuse=False, dropout=dropout, int_feats=True)

    all_features = np.zeros((dataset['size'], feat_model.get_shape()[1]))
    x1 = tf.placeholder(tf.float32, shape=[None, all_features.shape[1]])
    x2 = tf.placeholder(tf.float32, shape=[None, all_features.shape[1]])
    l2diff = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x1, x2)), axis=1))

    # Init
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
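The l2diff op above gives the per-row Euclidean distance between two batches of feature vectors, which is enough for a nearest-neighbour lookup once all_features has been filled by running feat_model over the dataset. A small sketch, where query_index is an illustrative value:

    # Rank all images by L2 distance to a single query feature vector.
    query_index = 0
    query = all_features[query_index:query_index + 1]  # shape (1, feat_dim)
    distances = sess.run(l2diff, feed_dict={
        x1: np.repeat(query, all_features.shape[0], axis=0),
        x2: all_features,
    })
    nearest = np.argsort(distances)[:10]  # indices of the 10 closest images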