Code example #1
0
def get_train_params(data_dir,
                     batch_size,
                     epochs=20,
                     test_in_each_epoch=1,
                     one_hot=False,
                     networktype='GAN_MNIST'):
    """Load a dataset and derive the iteration counts for a training run.

    Args:
        data_dir: Root directory containing one sub-folder per network type.
        batch_size: Number of samples per training batch.
        epochs: Total number of passes over the training set.
        test_in_each_epoch: How many test evaluations to run per epoch.
        one_hot: Whether labels are one-hot encoded (MNIST-style reader only).
        networktype: Dataset/network identifier; names containing 'img2img'
            use the custom paired-image loader, everything else the
            MNIST-style reader.

    Returns:
        Tuple ``(data, max_iter, test_iter, test_interval, disp_interval)``
        where ``max_iter`` is the total training iterations, ``test_iter``
        the batches per test pass, and the two intervals are measured in
        training iterations.
    """
    if 'img2img' in networktype:
        # Both translation directions (A2B / B2A) share one dataset folder.
        data_dir = data_dir + '/' + networktype.replace('_A2B', '').replace(
            '_B2A', '')
        data = custom_input_data.load_dataset(data_dir,
                                              networktype=networktype)
    else:
        data = input_data.read_data_sets(data_dir + '/' + networktype,
                                         one_hot=one_hot,
                                         reshape=False)

    train_num = data.train.num_examples  # total number of training images
    test_num = data.test.num_examples  # total number of validation images

    print('Trainset size:', train_num, 'Testset_size:', test_num)
    max_iter = int(np.ceil(epochs * train_num / batch_size))
    test_iter = int(np.ceil(test_num / batch_size))
    # Evaluate the test set `test_in_each_epoch` times per epoch.
    test_interval = int(train_num / (test_in_each_epoch * batch_size))
    if test_interval == 0:
        # Tiny train sets would otherwise yield 0 and crash callers that
        # compute `it % test_interval`.
        test_interval = 1
    disp_interval = int(test_interval * 2)
    if disp_interval == 0:
        disp_interval = 1

    return data, max_iter, test_iter, test_interval, disp_interval
Code example #2
0
    # --- WGAN-on-MNIST training setup (fragment: the enclosing function and
    # the definitions of expr_dir, data_dir and create_dcgan_trainer are
    # outside this excerpt) ---
    networktype = 'WGAN_MNIST'

    batch_size = 128
    base_lr = 5e-5  # base learning rate passed to the trainer builder
    epochs = 500
    latentDim = 100  # size of the generator's latent noise vector
    disp_every_epoch = 5

    # Results go under <expr_dir>/<networktype>/<YYYYMMDD>/ — one folder per day.
    work_dir = expr_dir + '%s/%s/' % (
        networktype, datetime.strftime(datetime.today(), '%Y%m%d'))
    if not os.path.exists(work_dir): os.makedirs(work_dir)

    data = input_data.read_data_sets(data_dir + '/' + networktype,
                                     reshape=False)
    # Iterations between displays: `disp_every_epoch` full passes over
    # the training set (batches-per-epoch rounded up).
    disp_int = disp_every_epoch * int(
        np.ceil(data.train.num_examples / batch_size))

    # TF1-style session setup: rebuild the graph from scratch.
    tf.reset_default_graph()
    sess = tf.InteractiveSession()

    # Build the training graph: separate generator/discriminator train ops,
    # plus a discriminator weight-clipping op (WGAN), loss tensors, the
    # is_training flag and the Z/X placeholders.
    Gtrain_op, Dtrain_op, Dweights_clip_op, Gloss, Dloss, is_training, Zph, Xph, Gout_op = create_dcgan_trainer(
        base_lr, networktype, latentDim)
    tf.global_variables_initializer().run()

    # Checkpoint only this network's own variables; skip Adam optimizer slots.
    var_list = [
        var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        if (networktype.lower() in var.name.lower()) and (
            'adam' not in var.name.lower())
    ]
    saver = tf.train.Saver(var_list=var_list, max_to_keep=int(epochs * 0.1))
    # saver.restore(sess, expr_dir + 'ganMNIST/20170707/214_model.ckpt')
Code example #3
0
    # --- CDAE training fragment (the enclosing function and the definitions
    # of base_lr, latentD, networktype, max_iter, test_int, test_iter, data,
    # batch_size and sess lie outside this excerpt; the loop body also
    # continues past it) ---
    train_step_op, rec_loss_op, is_training, Xph, Xrec_op = create_cdae_trainer(
        base_lr, latentD, networktype)
    tf.global_variables_initializer().run()

    # Checkpoint only this network's own variables; skip Adam optimizer slots.
    var_list = [
        var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        if (networktype.lower() in var.name.lower()) and (
            'adam' not in var.name.lower())
    ]
    saver = tf.train.Saver(var_list=var_list, max_to_keep=int(epochs * .1))
    # saver.restore(sess, expr_dir + 'ganMNIST/20170707/214_model.ckpt')

    # Best (lowest) test reconstruction loss seen so far.
    best_test_rec_loss = np.inf

    # Per-iteration training loss; test loss is sampled once every
    # `test_int` iterations, hence the ceil-divided length.
    train_loss = np.zeros([max_iter, 1])
    test_loss = np.zeros([int(np.ceil(max_iter / test_int)), 1])

    for it in range(max_iter):

        if it % test_int == 0:  # periodic evaluation over the whole test set
            acc_loss = np.zeros([1, 1])
            for i_test in range(test_iter):
                X, _ = data.test.next_batch(batch_size)
                # Reconstruction loss only — no train step, and
                # is_training=False for inference-mode behavior.
                resloss = sess.run([
                    rec_loss_op,
                ],
                                   feed_dict={
                                       Xph: X,
                                       is_training: False
                                   })
                acc_loss = np.add(acc_loss, resloss)