Example #1
File: gan.py Project: tzthink/DCGAN
    init = tf.initialize_all_variables()

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(FLAGS.max_epoch):

            discriminator_loss = 0.0
            generator_loss = 0.0

            widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
            pbar = ProgressBar(maxval=FLAGS.updates_per_epoch, widgets=widgets)
            pbar.start()
            for i in range(FLAGS.updates_per_epoch):
                pbar.update(i)
                x = rd.get_next_batch(batch_size=FLAGS.batch_size)

                _, loss_value = sess.run([train_discrimator, D_loss], {
                    input_tensor: x,
                    learning_rate: FLAGS.d_learning_rate
                })
                discriminator_loss += loss_value

                # We still need input for moving averages.
                # Need to find how to fix it.
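                # (Likely explanation, not confirmed by the excerpt: batch-norm
                # moving-average update ops are attached to train_generator, so the
                # graph still needs a value for input_tensor on the generator step.)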
                _, loss_value = sess.run([train_generator, G_loss], {
                    input_tensor: x,
                    learning_rate: FLAGS.g_learning_rate
                })
                generator_loss += loss_value
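
The two update ops used above, train_discrimator and train_generator, are defined elsewhere in gan.py. As a rough sketch of how such ops are usually built in this style of TF1 GAN code (the variable-scope names and the choice of Adam are assumptions, not taken from the project; D_loss, G_loss and the learning_rate placeholder are the ones fed in the loop above):

    # Split the trainable variables by the scope each sub-network was built under.
    d_vars = [v for v in tf.trainable_variables() if v.name.startswith("discriminator")]
    g_vars = [v for v in tf.trainable_variables() if v.name.startswith("generator")]

    # Each optimizer only updates its own sub-network's weights.
    train_discrimator = tf.train.AdamOptimizer(learning_rate).minimize(
        D_loss, var_list=d_vars)
    train_generator = tf.train.AdamOptimizer(learning_rate).minimize(
        G_loss, var_list=g_vars)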
Example #2
        no_batches = int(reader.train_size / args.batch_size)

        loss = []
        acc = []
        updation_step = args.lr_change * 2
        with tqdm(total=no_batches,
                  desc="Epoch {}/{}: loss: {} acc: {}".format(
                      epoch + 1, args.epochs, loss, acc)) as pbar:
            for batch_num in range(no_batches):
                # `i` (initialised before this loop, outside the excerpt) is the
                # sliding start of the current window; successive batches overlap
                # by a fraction args.data_overlap of the batch size.
                start = i
                end = i + args.batch_size
                i = start + int(args.batch_size * (1 - args.data_overlap))

                step = epoch * no_batches + batch_num

                epoch_x, epoch_y = reader.get_next_batch(start, end)
                if args.model.startswith('cnn'):
                    epoch_x = np.reshape(
                        epoch_x, (epoch_x.shape[0], timesteps, embed_size, 1))

                _, c, train_summary = sess.run(
                    [optimizer, cost_func, merged_summary_op],
                    feed_dict={
                        lr: args.learning_rate,
                        x: epoch_x,
                        y: epoch_y
                    })
                train_summary_writer.add_summary(train_summary, step)

                val_loss, val_acc, val_summary = sess.run(
                    [cost_func, accuracy, merged_summary_op],
Example #3
        steps_per_epoch=int(reader.train_size / args.batch_size),
        validation_data=(val_x, val_y),
        epochs=args.epochs,
        callbacks=[logging, checkpoint, reduce_lr, early_stopping])
else:
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    for epoch in range(args.epochs):
        num_batches = int(reader.train_size / args.batch_size)

        start_index = 0
        epoch_loss, epoch_acc = [], []
        for i in range(num_batches):
            start_index = i * args.batch_size
            epoch_x, epoch_y = reader.get_next_batch(start_index,
                                                     args.batch_size)
            [loss, acc] = model.train_on_batch(epoch_x, epoch_y)

            epoch_loss.append(loss)
            epoch_acc.append(acc)

        num_batches = int(reader.val_size / args.batch_size)
        start_index = 0
        val_loss, val_acc = [], []
        for i in range(num_batches):
            start_index = i * args.batch_size
            epoch_x, epoch_y = reader.get_next_val_batch(
                start_index, args.batch_size)
            [loss, acc] = model.test_on_batch(epoch_x, epoch_y)

            val_loss.append(loss)
            val_acc.append(acc)
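
The snippet is cut off before the per-batch numbers collected in epoch_loss/epoch_acc and val_loss/val_acc are used. A minimal way to reduce them to per-epoch figures at the end of the epoch loop, assuming numpy is imported as np (the print format is illustrative, not from the project):

        # Average the per-batch metrics to report one number per epoch.
        print("Epoch {}/{}: loss {:.4f} acc {:.4f} | val_loss {:.4f} val_acc {:.4f}".format(
            epoch + 1, args.epochs,
            np.mean(epoch_loss), np.mean(epoch_acc),
            np.mean(val_loss), np.mean(val_acc)))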
Example #4
with tf.Session() as sess:
    summary_writer = tf.train.SummaryWriter('experiment', graph=sess.graph)

    if os.path.isfile("save/model.ckpt"):
        print("Restoring saved parameters")
        saver.restore(sess, "save/model.ckpt")
    else:
        print("Initializing parameters")
        sess.run(tf.initialize_all_variables())

    print("Initializing parameters")
    sess.run(tf.initialize_all_variables())

    for step in xrange(1, n_steps):
        batch = rd.get_next_batch(batch_size=batch_size)
        feed_dict = {x: batch}
        _, cur_loss, summary_str = sess.run([train_step, loss, summary_op],
                                            feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)

        if step % 50 == 0:
            print "Step {0} | Loss: {1}".format(step, cur_loss)

    # check if the model works
    for i in xrange(5):
        image = rd.get_one_image()
        rd.show_a_image(image)
        image = np.reshape(image, newshape=[1, 1024])

        feed_dict = {x: image}