Example #1
}

# Create Tensorboard Filewriter & Save Results
[fw, log_path_dt] = utils.create_tensorboard(sess, log_path)
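# (utils here is project-specific: fw is used as a summary writer, as the
#  fw.add_summary calls in Example #2 show, and log_path_dt is presumably a
#  timestamped variant of log_path returned alongside it.)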

# Start Sequential Learning
acc_pre = []
acc_curr = []
for task in range(5):
    # Reinitialize optimizers
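    # (tf.variables_initializer on the optimizers' variable lists resets their
    #  internal state, e.g. accumulated moments, so every task starts from a
    #  fresh optimizer instead of carrying over statistics from the last task.)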
    sess.run(
        tf.variables_initializer(model.opt_disc.variables() +
                                 model.opt_recon.variables() +
                                 model.opt_fool.variables()))
    # Load data for training
    data = datasets.split_mnist([2 * task], [2 * task + 1])
    # data = datasets.split_fashion_mnist([2 * task], [2 * task + 1])
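    # (Here each task trains only on its own class pair, 2*task and 2*task + 1;
    #  Example #2 below instead passes np.arange(2 * (task + 1)), i.e. every
    #  class seen up to and including the current task.)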
    [train_data, train_labels] = data.get_train_samples()
    train_data = train_data / 255.0
    sess.run(iterator.initializer,
             feed_dict={
                 data_ph: train_data,
                 labels_ph: train_labels,
                 batch_size_ph: batch_size,
                 shufflebuffer_ph: train_data.shape[0],
                 epochs_ph: epochs
             })
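    # (The iterator is re-initialized for every task: the feed_dict supplies the
    #  task's training split, a shuffle buffer covering the whole split, the
    #  batch size and the number of training epochs to the input pipeline.)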
    # Train model
    i = 0
    while True:
        try:
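
Example #1 stops at the try: statement; the training step that belongs inside it is the one shown in Example #2 below. For reference, here is a minimal, self-contained sketch of the TF 1.x initializable-iterator idiom both snippets build on. The toy arrays, the shuffle/batch/repeat composition and the OutOfRangeError handler are assumptions made for illustration, not code from the project; only the placeholder names mirror the ones used above.

import numpy as np
import tensorflow as tf  # TF 1.x graph mode, as in the snippets

# Toy stand-ins for the flattened, rescaled image data and the labels.
toy_data = np.random.rand(256, 784).astype(np.float32)
toy_labels = np.random.randint(0, 10, size=256).astype(np.int64)

# Placeholders mirroring data_ph, labels_ph, batch_size_ph, shufflebuffer_ph, epochs_ph.
data_ph = tf.placeholder(tf.float32, shape=[None, 784])
labels_ph = tf.placeholder(tf.int64, shape=[None])
batch_size_ph = tf.placeholder(tf.int64, shape=[])
shufflebuffer_ph = tf.placeholder(tf.int64, shape=[])
epochs_ph = tf.placeholder(tf.int64, shape=[])

dataset = (tf.data.Dataset.from_tensor_slices((data_ph, labels_ph))
           .shuffle(buffer_size=shufflebuffer_ph)
           .batch(batch_size_ph)
           .repeat(epochs_ph))
iterator = dataset.make_initializable_iterator()
next_batch = iterator.get_next()

with tf.Session() as sess:
    sess.run(iterator.initializer,
             feed_dict={data_ph: toy_data, labels_ph: toy_labels,
                        batch_size_ph: 32, shufflebuffer_ph: toy_data.shape[0],
                        epochs_ph: 2})
    while True:
        try:
            images, labels = sess.run(next_batch)  # a training step would run here
        except tf.errors.OutOfRangeError:  # raised once all epochs are consumed
            break

Catching tf.errors.OutOfRangeError is the standard way such a while True / try loop terminates once a Dataset built with a fixed number of epochs is exhausted; the project code presumably does the same after the lines shown.
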
Example #2
"learning_rate": learning_rate,
"num_classes": num_classes,
"N_plot": N_plot,
"log_path": log_path}

# Create Tensorboard Filewriter & Save Results
[fw, log_path_dt] = utils.create_tensorboard(sess, log_path)

# Start Sequential Learning
acc_pre = []
acc_curr = []
for task in range(5):
    # Reinitialize optimizers
    sess.run(
        tf.variables_initializer(model.opt_disc.variables() +
                                 model.opt_recon.variables() +
                                 model.opt_fool.variables()))
    # Load data for training
    data = datasets.split_mnist(np.arange(2 * (task + 1)), [])
    # data = datasets.split_mnist(list(range(0, 2 * (task + 1))), [])
    # data = datasets.split_fashion_mnist(list(range(0, 2 * (task + 1))), [])
    [train_data, train_labels] = data.get_train_samples()
    train_data = train_data / 255.0
    sess.run(iterator.initializer,
             feed_dict={
                 data_ph: train_data,
                 labels_ph: train_labels,
                 batch_size_ph: batch_size,
                 shufflebuffer_ph: train_data.shape[0],
                 epochs_ph: epochs
             })
    # Train model
    i = 0
    while True:
        try:
            [_, _, _, loss, summaries] = sess.run(
                [model.update_disc, model.update_fool, model.update_recon,
                 model.loss, model.summaries],
                feed_dict={
                    model.batch_size: batch_size,
                    model.learning_rate: learning_rate,
                    model.b_replay: False,
                    model.repl_batch_size: batch_size
                })
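            # (One step applies the discriminator, fool and reconstruction updates
            #  together; b_replay is fed as False, which presumably disables the
            #  model's generative-replay path during this run.)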
            i += 1
            fw.add_summary(summaries, i)
            if i % 100 == 0:
                print("Task{}\tIteration: {}\tloss: {:.5}".format(task, i, loss))