# data preprocessing
x, y, x_, y_, xyc_info = preprocess.XycPackage()

start = time.time()
with tf.Session(config=tf.ConfigProto(
        gpu_options=tf.GPUOptions(allow_growth=True))) as sess:

    # A negative drop_rate falls back to the default keep probabilities.
    if drop_rate < 0:
        keep_prob_info = [0.8, 0.5, 0.5]
    else:
        keep_prob_info = [1.0 - drop_rate / 2, 1.0 - drop_rate, 1.0 - drop_rate]

    mlp = imm.TransferNN([784, 800, 800, 10], (optimizer, learning_rate),
                         keep_prob_info=keep_prob_info)
    sess.run(tf.global_variables_initializer())

    L_copy = []
    FM = []
    for i in range(no_of_task):
        print("")
        print("================= Train task #%d (%s) ================"
              % (i + 1, optimizer))

        if i > 0:
            # Dropout from weight of pre-task (drop-transfer initialization)
            model_utils.CopyLayers(sess, mlp.Layers, mlp.Layers_dropbase)
            model_utils.ZeroLayers(sess, mlp.Layers)
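# --- Illustrative sketch (not part of the repository's code) ---------------
# After all tasks are trained, the per-task weight snapshots in L_copy (and,
# for mode-IMM, the Fisher matrices in FM) are merged into a single network.
# The helper below is a hypothetical NumPy stand-in for that merging step:
# mean-IMM averages the snapshots with weights alpha_k, while mode-IMM weights
# each parameter element-wise by its diagonal Fisher information. It assumes
# each snapshot is a list of arrays with matching shapes; the actual merging
# in this codebase is done through its own utilities.
import numpy as np

def imm_merge(snapshots, alphas, fishers=None, eps=1e-8):
    """Merge per-task parameter snapshots with mean-IMM (fishers=None) or mode-IMM."""
    merged = []
    for l in range(len(snapshots[0])):
        if fishers is None:
            # mean-IMM: w* = sum_k alpha_k * w_k
            merged.append(sum(a * s[l] for a, s in zip(alphas, snapshots)))
        else:
            # mode-IMM: w* = (sum_k alpha_k F_k w_k) / (sum_k alpha_k F_k)
            num = sum(a * f[l] * s[l] for a, f, s in zip(alphas, fishers, snapshots))
            den = sum(a * f[l] for a, f in zip(alphas, fishers)) + eps
            merged.append(num / den)
    return merged

# Example: merge two snapshots of a single 784x800 weight matrix with equal alphas.
w1, w2 = [np.random.randn(784, 800)], [np.random.randn(784, 800)]
w_mean = imm_merge([w1, w2], alphas=[0.5, 0.5])
# ---------------------------------------------------------------------------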
learning_rate = FLAGS.learning_rate
epoch = int(FLAGS.epoch)
batch_size = FLAGS.batch_size
no_of_task = 3
no_of_node = [784, 800, 800, 10]
keep_prob_info = [0.8, 0.5, 0.5]

# data preprocessing
x, y, x_, y_, xyc_info = preprocess.XycPackage()

start = time.time()
with tf.Session(config=tf.ConfigProto(
        gpu_options=tf.GPUOptions(allow_growth=True))) as sess:

    mlp = imm.TransferNN(no_of_node, (optimizer, learning_rate),
                         keep_prob_info=keep_prob_info)
    sess.run(tf.global_variables_initializer())

    L_copy = []
    FM = []
    for i in range(no_of_task):
        print("")
        print("================= Train task #%d (%s) ================"
              % (i + 1, optimizer))

        mlp.Train(sess, x[i], y[i], x_[i], y_[i], epoch, mb=batch_size)
        mlp.Test(sess, [[x[i], y[i], " train"], [x_[i], y_[i], " test"]])

        # Keep a snapshot of this task's weights for the IMM merging step.
        if mean_imm or mode_imm:
            L_copy.append(model_utils.CopyLayerValues(sess, mlp.Layers))
        if mode_imm: