def build_graph(FLAGS, pre_structure, ext_structure, input_data, image_size,
                output_num, keep_prob, input_graph):
    with input_graph.as_default():
        #------------------------------------------------
        # define local variables
        #------------------------------------------------
        pre_FR = pre_structure.feature_layer_num
        pre_FC = pre_structure.fc_layer_num
        pre_FA = pre_structure.feature_layer_array
        pre_MA = pre_structure.module_num_array
        pre_F = pre_structure.filter_num * 2  # doubled so the filter count is always even
        ext_FR = ext_structure.feature_layer_num
        ext_FC = ext_structure.fc_layer_num
        ext_FA = ext_structure.feature_layer_array
        ext_MA = ext_structure.module_num_array

        # results
        var_list_to_learn = []
        var_list_to_restore = []
        var_list_to_save = []
        out = None

        #------------------------------------------------
        # define input
        #------------------------------------------------
        with tf.name_scope('input'):
            x = input_data

        #------------------------------------------------
        # define graph
        #------------------------------------------------
        # first conv layer
        # NOTE: the keyword is spelled "moduel_type" to match build_layer's signature.
        i = 0
        _type = "conv"
        out, _restore, _learn, _save = build_layer(
            moduel_type=_type,
            input_data=x,
            pre_module_number=pre_MA[i],
            ext_module_number=ext_MA[i],
            filter_num=pre_F,
            keep_prob=keep_prob,
            layer_name="conv_layer" + str(i),
            input_graph=input_graph)
        var_list_to_restore += _restore
        var_list_to_learn += _learn
        var_list_to_save += _save

        # feature-abstraction layers shared by both structures
        # (encoding: 1 = dimensionality-reduction (DR) module, 0 = fire module)
        _length = min(pre_FR, ext_FR)
        for i in range(_length):
            if pre_FA[i] == 1:
                _type = "DR"
            elif pre_FA[i] == 0:
                _type = "fire"
            else:
                raise ValueError("Unknown module encoding")
            out, _restore, _learn, _save = build_layer(
                moduel_type=_type,
                input_data=out,
                pre_module_number=pre_MA[i + 1],
                ext_module_number=ext_MA[i + 1],
                filter_num=pre_F,
                keep_prob=keep_prob,
                layer_name="feature_layer_" + str(i + 1),
                input_graph=input_graph)
            var_list_to_restore += _restore
            var_list_to_learn += _learn
            var_list_to_save += _save

        # Danger zone: do not change unless you fully understand this logic.
        # If the previous structure is deeper, the surplus layers are built only
        # so their variables can be restored/saved; their output is discarded.
        if pre_FR > ext_FR:
            _abort_out = out
            for i in range(ext_FR, pre_FR):
                if pre_FA[i] == 1:
                    _type = "DR"
                elif pre_FA[i] == 0:
                    _type = "fire"
                else:
                    raise ValueError("Unknown module encoding")
                _abort_out, _restore, _learn, _save = build_layer(
                    moduel_type=_type,
                    input_data=_abort_out,
                    pre_module_number=pre_MA[i + 1],
                    ext_module_number=0,
                    filter_num=pre_F,
                    keep_prob=keep_prob,
                    layer_name="feature_layer_" + str(i + 1),
                    input_graph=input_graph)
                var_list_to_restore += _restore
                var_list_to_save += _save
        # If the extended structure is deeper, the extra layers are appended
        # to the live output.
        if pre_FR < ext_FR:
            for i in range(pre_FR, ext_FR):
                if ext_FA[i] == 1:
                    _type = "DR"
                elif ext_FA[i] == 0:
                    _type = "fire"
                else:
                    raise ValueError("Unknown module encoding")
                out, _restore, _learn, _save = build_layer(
                    moduel_type=_type,
                    input_data=out,
                    pre_module_number=0,
                    ext_module_number=ext_MA[i + 1],
                    filter_num=pre_F,
                    keep_prob=keep_prob,
                    layer_name="feature_layer_" + str(i + 1),
                    input_graph=input_graph)
                var_list_to_restore += _restore
                var_list_to_learn += _learn
                var_list_to_save += _save

        # fully connected layers
        # flatten the feature map first
        _shape = out.shape[1:]
        _length = 1
        for _i in _shape:
            _length *= int(_i)
        out = tf.reshape(out, [-1, _length])
        for i in range(ext_FC):
            tmp_out = []
            _M = ext_MA[i + ext_FR + 1]
            for j in range(_M):
                _out, _weights, _biases = modules.fc_layer(
                    input_tensor=out,
                    filt_num=pre_F,
                    layer_name="fc_layer_" + str(i) + "_" + str(j),
                    keep_prob=keep_prob)
                tmp_out.append(_out)
                var_list_to_learn += _weights
                var_list_to_learn += _biases
            out = sum(tmp_out) / _M

        # output layer
        y, output_weights, output_biases = modules.nn_layer(
            out, output_num, 'output_layer')
        var_list_to_learn += output_weights
        var_list_to_learn += output_biases
        return y, var_list_to_restore, var_list_to_learn, var_list_to_save
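
# --------------------------------------------------------------------------
# Minimal usage sketch for build_graph (illustrative only, not part of the
# original code). `pre_structure` and `ext_structure` are assumed to be the
# structure objects whose attributes build_graph reads above
# (feature_layer_num, fc_layer_num, feature_layer_array, module_num_array,
# filter_num). The placeholder shape is an assumption; the real input layout
# depends on build_layer's conv module.
def _example_build_graph_usage(FLAGS, pre_structure, ext_structure,
                               image_size=224, output_num=10):
    graph = tf.Graph()
    with graph.as_default():
        images = tf.placeholder(
            tf.float32, [None, image_size, image_size, 3], name='images')
        keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    # build_graph enters graph.as_default() itself, so the placeholders above
    # live in the same graph it populates.
    y, to_restore, to_learn, to_save = build_graph(
        FLAGS, pre_structure, ext_structure, images, image_size,
        output_num, keep_prob, graph)
    return y, to_restore, to_learn, to_save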
def train():
    ## Get ImageNet dataset file queues for task1 and task2
    tr_data1, tr_label1 = imagenet_data.create_file_queue(
        FLAGS.imagenet_data_dir1)
    tr_data2, tr_label2 = imagenet_data.create_file_queue(
        FLAGS.imagenet_data_dir2)

    ## TASK 1
    sess = tf.InteractiveSession()

    # Input placeholders
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 224 * 224 * 3], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 224, 224, 3])
        tf.summary.image('input', image_shaped_input, 2)

    # geopath examples
    geopath = modules.geopath_initializer(FLAGS.L, FLAGS.M)

    # fixed-weights list: '1' marks a module frozen after task1
    fixed_list = np.ones((FLAGS.L, FLAGS.M), dtype=str)
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            fixed_list[i, j] = '0'

    # hidden layers
    weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    biases_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)

    # model definition
    layer_modules_list = np.zeros(FLAGS.M, dtype=object)

    # conv layer
    i = 0
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.conv_module(image_shaped_input, FLAGS.filt, [11, 11],
                                geopath[i, j], 1,
                                'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M

    # dimensionality-reduction layer
    i = 1
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.Dimensionality_reduction_module(
                net, FLAGS.filt // 2, geopath[i, j],
                'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M

    # res-fire layer
    i = 2
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.res_fire_layer(net, FLAGS.filt // 2, geopath[i, j],
                                   'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M

    # dimensionality-reduction layer
    i = 3
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.Dimensionality_reduction_module(
                net, FLAGS.filt // 2, geopath[i, j],
                'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M

    # flatten before the fully connected layer
    _shape = net.shape[1:]
    _length = 1
    for _i in _shape:
        _length *= int(_i)
    net = tf.reshape(net, [-1, _length])

    # fully connected module layer
    i = 4
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.module(net, FLAGS.full_connection_filt, geopath[i, j],
                           'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M

    # output layer
    y, output_weights, output_biases = modules.nn_layer(net, 10,
                                                        'output_layer')

    # cross entropy
    with tf.name_scope('cross_entropy'):
        diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
        with tf.name_scope('total'):
            cross_entropy = tf.reduce_mean(diff)
    tf.summary.scalar('cross_entropy', cross_entropy)

    # variables that still need to be learned
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '0':
                var_list_to_learn += weights_list[i, j] + biases_list[i, j]

    # gradient descent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # accuracy
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

    # Merge all the summaries and write them out to
    # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default).
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train1', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test1')

    # init
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()

    # start the data-reading queue
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # generate random geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

    # parameter placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    # geopath placeholders and ops
    geopath_update_ops = np.zeros((len(geopath), len(geopath[0])),
                                  dtype=object)
    geopath_update_placeholders = np.zeros((len(geopath), len(geopath[0])),
                                           dtype=object)
    for i in range(len(geopath)):
        for j in range(len(geopath[0])):
            geopath_update_placeholders[i, j] = tf.placeholder(
                geopath[i, j].dtype, shape=geopath[i, j].get_shape())
            geopath_update_ops[i, j] = geopath[i, j].assign(
                geopath_update_placeholders[i, j])

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)
    for i in range(FLAGS.max_steps):
        # select candidates for the tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # learning & evaluating
        for j in range(len(compet_idx)):
            # insert candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops,
                                   geopath_set[compet_idx[j]],
                                   FLAGS.L, FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                tr_data1_val, tr_label1_val = imagenet_data.read_batch(
                    sess, tr_data1, tr_label1, FLAGS.batch_num,
                    FLAGS.imagenet_data_dir1)
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={x: tr_data1_val, y_: tr_label1_val})
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # copy and mutate the losers
        for j in range(len(compet_idx)):
            if j != winner_idx:
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))
        if acc >= 0.5:
            step_task1 = i
            task1_optimal_path = geopath_set[compet_idx[winner_idx]]
            print('Task1 Optimal Path is as follows.')
            print(task1_optimal_path)
            break

    # fix the task1 optimal path
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if task1_optimal_path[i, j] == 1.0:
                fixed_list[i, j] = '1'

    # gather the variables of the fixed list
    var_list_to_fix = []
    #var_list_to_fix = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '1':
                var_list_to_fix += weights_list[i, j] + biases_list[i, j]
    var_list_fix = modules.parameters_backup(var_list_to_fix)

    # parameter placeholders and ops
    var_fix_ops = np.zeros(len(var_list_to_fix), dtype=object)
    var_fix_placeholders = np.zeros(len(var_list_to_fix), dtype=object)
    for i in range(len(var_list_to_fix)):
        var_fix_placeholders[i] = tf.placeholder(
            var_list_to_fix[i].dtype, shape=var_list_to_fix[i].get_shape())
        var_fix_ops[i] = var_list_to_fix[i].assign(var_fix_placeholders[i])

    ## TASK 2
    # variables that still need to be learned
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '0':
                var_list_to_learn += weights_list[i, j] + biases_list[i, j]

    # probe the first fixed bias; `tmp` is unused afterwards
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '1':
                tmp = biases_list[i, j][0]
                break
        break

    # initialization
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train2', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test2')
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()

    # restore the fixed values
    modules.parameters_update(sess, var_fix_placeholders, var_fix_ops,
                              var_list_fix)

    # gradient descent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # generate random geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

    # parameter placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)
    for i in range(FLAGS.max_steps):
        # select candidates for the tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # learning & evaluating
        for j in range(len(compet_idx)):
            geopath_insert = np.copy(geopath_set[compet_idx[j]])
            # modules on the fixed path are always active
            for l in range(FLAGS.L):
                for m in range(FLAGS.M):
                    if fixed_list[l, m] == '1':
                        geopath_insert[l, m] = 1.0
            # insert candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops, geopath_insert,
                                   FLAGS.L, FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                tr_data2_val, tr_label2_val = imagenet_data.read_batch(
                    sess, tr_data2, tr_label2, FLAGS.batch_num,
                    FLAGS.imagenet_data_dir2)
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={x: tr_data2_val, y_: tr_label2_val})
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # copy and mutate the losers
        for j in range(len(compet_idx)):
            if j != winner_idx:
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))
        if acc >= 0.5:
            step_task2 = i
            task2_optimal_path = geopath_set[compet_idx[winner_idx]]
            print('Task2 Optimal Path is as follows.')
            print(task2_optimal_path)
            break

    # close the data-reading queue
    coord.request_stop()
    coord.join(threads)

    # count the modules shared by both optimal paths
    overlap = 0
    for i in range(len(task1_optimal_path)):
        for j in range(len(task1_optimal_path[0])):
            if (task1_optimal_path[i, j] == task2_optimal_path[i, j]
                    and task1_optimal_path[i, j] == 1.0):
                overlap += 1
    print("ImageNet,TASK1:" + str(step_task1) + ",TASK2:" + str(step_task2) +
          ", Overlap:" + str(overlap))
    train_writer.close()
    test_writer.close()
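
# --------------------------------------------------------------------------
# Hedged helper (an addition, not in the original script): a vectorized
# equivalent of the nested-loop overlap count above. Since both path matrices
# are binary, counting positions where both equal 1.0 matches the loop's
# "equal and active" test.
def overlap_count(path_a, path_b):
    path_a = np.asarray(path_a)
    path_b = np.asarray(path_b)
    return int(np.sum((path_a == 1.0) & (path_b == 1.0)))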
def train():
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True,
                                      fake_data=FLAGS.fake_data)
    total_tr_data, total_tr_label = mnist.train.next_batch(
        mnist.train._num_examples)

    # Gather a1 data and add random noise: for each pixel draw r ~ U(0, 1);
    # where r >= 0.5 the pixel becomes min(pixel + r, 1.0)
    tr_data_a1 = total_tr_data[(total_tr_label[:, FLAGS.a1] == 1.0)]
    for i in range(len(tr_data_a1)):
        for j in range(len(tr_data_a1[0])):
            rand_num = np.random.rand()
            if rand_num >= 0.5:
                tr_data_a1[i, j] = np.minimum(tr_data_a1[i, j] + rand_num, 1.0)

    # Gather a2 data
    tr_data_a2 = total_tr_data[(total_tr_label[:, FLAGS.a2] == 1.0)]
    for i in range(len(tr_data_a2)):
        for j in range(len(tr_data_a2[0])):
            rand_num = np.random.rand()
            if rand_num >= 0.5:
                tr_data_a2[i, j] = np.minimum(tr_data_a2[i, j] + rand_num, 1.0)

    # Gather b1 data
    tr_data_b1 = total_tr_data[(total_tr_label[:, FLAGS.b1] == 1.0)]
    for i in range(len(tr_data_b1)):
        for j in range(len(tr_data_b1[0])):
            rand_num = np.random.rand()
            if rand_num >= 0.5:
                tr_data_b1[i, j] = np.minimum(tr_data_b1[i, j] + rand_num, 1.0)

    # Gather b2 data
    tr_data_b2 = total_tr_data[(total_tr_label[:, FLAGS.b2] == 1.0)]
    for i in range(len(tr_data_b2)):
        for j in range(len(tr_data_b2[0])):
            rand_num = np.random.rand()
            if rand_num >= 0.5:
                tr_data_b2[i, j] = np.minimum(tr_data_b2[i, j] + rand_num, 1.0)

    # task1: binary classification between a1 and a2
    tr_data1 = np.append(tr_data_a1, tr_data_a2, axis=0)
    tr_label1 = np.zeros((len(tr_data1), 2), dtype=float)
    for i in range(len(tr_data1)):
        if i < len(tr_data_a1):
            tr_label1[i, 0] = 1.0
        else:
            tr_label1[i, 1] = 1.0

    # task2: binary classification between b1 and b2
    tr_data2 = np.append(tr_data_b1, tr_data_b2, axis=0)
    tr_label2 = np.zeros((len(tr_data2), 2), dtype=float)
    for i in range(len(tr_data2)):
        if i < len(tr_data_b1):
            tr_label2[i, 0] = 1.0
        else:
            tr_label2[i, 1] = 1.0

    ## TASK 1
    sess = tf.InteractiveSession()

    # Input placeholders
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 784], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, 2], name='y-input')
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        tf.summary.image('input', image_shaped_input, 2)

    # geopath examples
    geopath = modules.geopath_initializer(FLAGS.L, FLAGS.M)

    # fixed-weights list: '1' marks a module frozen after task1
    fixed_list = np.ones((FLAGS.L, FLAGS.M), dtype=str)
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            fixed_list[i, j] = '0'

    # hidden layers
    # weights_list also records the conv kernels
    weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    biases_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    # per-module learned scalar pairs: element [1] scales the module input,
    # element [0] scales the module output in the layer sum
    sum_weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            _initial1 = tf.truncated_normal(shape=[1], mean=1, stddev=0.1)
            _initial2 = tf.truncated_normal(shape=[1], mean=1, stddev=0.1)
            sum_weights_list[i, j] = [tf.Variable(_initial1),
                                      tf.Variable(_initial2)]

    # model definition
    layer_modules_list = np.zeros(FLAGS.M, dtype=object)

    # conv layer
    i = 0
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.conv_module(sum_weights_list[i, j][1] * image_shaped_input,
                                FLAGS.filt, [5, 5], geopath[i, j], 1,
                                'layer' + str(i + 1) + "_" + str(j + 1))
    net = sum(a * b[0] for a, b in zip(layer_modules_list,
                                       sum_weights_list[i])) / FLAGS.M

    # res-fire layer
    i = 1
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.res_fire_layer(sum_weights_list[i, j][1] * net,
                                   FLAGS.filt // 2, geopath[i, j],
                                   'layer' + str(i + 1) + "_" + str(j + 1))
    net = sum(a * b[0] for a, b in zip(layer_modules_list,
                                       sum_weights_list[i])) / FLAGS.M

    # dimensionality-reduction layer
    i = 2
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.Dimensionality_reduction_module(
                sum_weights_list[i, j][1] * net, FLAGS.filt // 2,
                geopath[i, j], 'layer' + str(i + 1) + "_" + str(j + 1))
    net = sum(a * b[0] for a, b in zip(layer_modules_list,
                                       sum_weights_list[i])) / FLAGS.M

    # conv layer
    i = 3
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.conv_module(sum_weights_list[i, j][1] * net, FLAGS.filt,
                                [5, 5], geopath[i, j], 1,
                                'layer' + str(i + 1) + "_" + str(j + 1))
    net = sum(a * b[0] for a, b in zip(layer_modules_list,
                                       sum_weights_list[i])) / FLAGS.M

    # output layer
    # flatten first
    _shape = net.shape[1:]
    _length = 1
    for _i in _shape:
        _length *= int(_i)
    net = tf.reshape(net, [-1, _length])
    y, output_weights, output_biases = modules.nn_layer(net, 2,
                                                        'output_layer')

    # cross entropy
    with tf.name_scope('cross_entropy'):
        diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
        with tf.name_scope('total'):
            cross_entropy = tf.reduce_mean(diff)
    tf.summary.scalar('cross_entropy', cross_entropy)

    # variables that need to be learned
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '0':
                var_list_to_learn += (weights_list[i, j] + biases_list[i, j]
                                      + sum_weights_list[i, j])

    # gradient descent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # accuracy
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

    # Merge all the summaries and write them out to
    # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default).
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train1', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test1')
    tf.global_variables_initializer().run()

    # generate random geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

    # parameter placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    # geopath placeholders and ops
    geopath_update_ops = np.zeros((len(geopath), len(geopath[0])),
                                  dtype=object)
    geopath_update_placeholders = np.zeros((len(geopath), len(geopath[0])),
                                           dtype=object)
    for i in range(len(geopath)):
        for j in range(len(geopath[0])):
            geopath_update_placeholders[i, j] = tf.placeholder(
                geopath[i, j].dtype, shape=geopath[i, j].get_shape())
            geopath_update_ops[i, j] = geopath[i, j].assign(
                geopath_update_placeholders[i, j])

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)
    for i in range(FLAGS.max_steps):
        # select candidates for the tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # learning & evaluating
        for j in range(len(compet_idx)):
            # shuffle the data
            idx = np.random.permutation(len(tr_data1))
            tr_data1 = tr_data1[idx]
            tr_label1 = tr_label1[idx]
            # insert candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops,
                                   geopath_set[compet_idx[j]],
                                   FLAGS.L, FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={
                        x: tr_data1[k * FLAGS.batch_num:
                                    (k + 1) * FLAGS.batch_num, :],
                        y_: tr_label1[k * FLAGS.batch_num:
                                      (k + 1) * FLAGS.batch_num, :]
                    })
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # copy and mutate the losers
        for j in range(len(compet_idx)):
            if j != winner_idx:
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))
        if acc >= 0.99:
            print('Learning Done!!')
            print('Optimal Path is as follows.')
            print(geopath_set[compet_idx[winner_idx]])
            task1_optimal_path = geopath_set[compet_idx[winner_idx]]
            break

    # record the number of steps needed to find the task1 optimal path
    iter_task1 = i

    # fix the task1 optimal path
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if task1_optimal_path[i, j] == 1.0:
                fixed_list[i, j] = '1'

    # gather the variables of the fixed list
    var_list_to_fix = []
    #var_list_to_fix = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '1':
                var_list_to_fix += weights_list[i, j] + biases_list[i, j]
    var_list_fix = modules.parameters_backup(var_list_to_fix)

    # parameter placeholders and ops
    var_fix_ops = np.zeros(len(var_list_to_fix), dtype=object)
    var_fix_placeholders = np.zeros(len(var_list_to_fix), dtype=object)
    for i in range(len(var_list_to_fix)):
        var_fix_placeholders[i] = tf.placeholder(
            var_list_to_fix[i].dtype, shape=var_list_to_fix[i].get_shape())
        var_fix_ops[i] = var_list_to_fix[i].assign(var_fix_placeholders[i])

    ## TASK 2
    # variables that need to be learned
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '0':
                var_list_to_learn += weights_list[i, j] + biases_list[i, j]
                var_list_to_learn += sum_weights_list[i, j]

    # initialization
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train2', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test2')
    tf.global_variables_initializer().run()

    # restore the fixed values
    modules.parameters_update(sess, var_fix_placeholders, var_fix_ops,
                              var_list_fix)

    # gradient descent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # generate random geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

    # parameter placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)
    for i in range(FLAGS.max_steps):
        # select candidates for the tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # learning & evaluating
        for j in range(len(compet_idx)):
            # shuffle the data
            idx = np.random.permutation(len(tr_data2))
            tr_data2 = tr_data2[idx]
            tr_label2 = tr_label2[idx]
            geopath_insert = np.copy(geopath_set[compet_idx[j]])
            # modules on the fixed path are always active
            for l in range(FLAGS.L):
                for m in range(FLAGS.M):
                    if fixed_list[l, m] == '1':
                        geopath_insert[l, m] = 1.0
            # insert candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops, geopath_insert,
                                   FLAGS.L, FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={
                        x: tr_data2[k * FLAGS.batch_num:
                                    (k + 1) * FLAGS.batch_num, :],
                        y_: tr_label2[k * FLAGS.batch_num:
                                      (k + 1) * FLAGS.batch_num, :]
                    })
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # copy and mutate the losers
        for j in range(len(compet_idx)):
            if j != winner_idx:
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))
        if acc >= 0.99:
            print('Learning Done!!')
            print('Optimal Path is as follows.')
            print(geopath_set[compet_idx[winner_idx]])
            task2_optimal_path = geopath_set[compet_idx[winner_idx]]
            break

    iter_task2 = i

    # count the modules shared by both optimal paths
    overlap = 0
    for i in range(len(task1_optimal_path)):
        for j in range(len(task1_optimal_path[0])):
            if (task1_optimal_path[i, j] == task2_optimal_path[i, j]
                    and task1_optimal_path[i, j] == 1.0):
                overlap += 1
    print("Entire Iter:" + str(iter_task1 + iter_task2) +
          ",TASK1:" + str(iter_task1) + ",TASK2:" + str(iter_task2) +
          ",Overlap:" + str(overlap))
    train_writer.close()
    test_writer.close()
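
# --------------------------------------------------------------------------
# Hedged helper (an addition, not in the original script): a vectorized
# version of the per-pixel noising loops used above when gathering the
# a1/a2/b1/b2 data. Each pixel gets an independent uniform draw r; where
# r >= 0.5 the pixel becomes min(pixel + r, 1.0), otherwise it is unchanged.
def add_random_noise(data):
    data = np.asarray(data, dtype=float)
    r = np.random.rand(*data.shape)
    return np.where(r >= 0.5, np.minimum(data + r, 1.0), data)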
def train():
    # Get the SVHN dataset
    svhn_maybe_download_and_extract()
    file_name = os.path.join(FLAGS.svhn_data_dir, "train_32x32.mat")
    train = sio.loadmat(file_name)
    tr_data_svhn = np.zeros((len(train['y']), 32 * 32 * 3), dtype=float)
    tr_label_svhn = np.zeros((len(train['y']), 10), dtype=float)
    for i in range(len(train['y'])):
        tr_data_svhn[i] = np.reshape(train['X'][:, :, :, i], [1, 32 * 32 * 3])
        tr_label_svhn[i, train['y'][i][0] - 1] = 1.0  # SVHN labels run 1..10
    tr_data_svhn = tr_data_svhn / 255.0

    file_name = os.path.join(FLAGS.svhn_data_dir, "test_32x32.mat")
    test = sio.loadmat(file_name)
    ts_data_svhn = np.zeros((len(test['y']), 32 * 32 * 3), dtype=float)
    ts_label_svhn = np.zeros((len(test['y']), 10), dtype=float)
    for i in range(len(test['y'])):
        ts_data_svhn[i] = np.reshape(test['X'][:, :, :, i], [1, 32 * 32 * 3])
        ts_label_svhn[i, test['y'][i][0] - 1] = 1.0
    ts_data_svhn = ts_data_svhn / 255.0
    data_num_len_svhn = len(tr_label_svhn)
    print("svhn done")

    # Get the CIFAR-10 dataset; each binary record is 1 label byte followed
    # by 3072 image bytes
    cifar10.maybe_download_and_extract()
    tr_label_cifar10 = np.zeros((50000, 10), dtype=float)
    ts_label_cifar10 = np.zeros((10000, 10), dtype=float)
    for i in range(1, 6):
        file_name = os.path.join(FLAGS.cifar_data_dir,
                                 "data_batch_" + str(i) + ".bin")
        with open(file_name, "rb") as f:
            data = np.reshape(bytearray(f.read()), [10000, 3073])
        if i == 1:
            tr_data_cifar10 = data[:, 1:] / 255.0
        else:
            tr_data_cifar10 = np.append(tr_data_cifar10, data[:, 1:] / 255.0,
                                        axis=0)
        for j in range(len(data)):
            tr_label_cifar10[(i - 1) * 10000 + j, data[j, 0]] = 1.0
    file_name = os.path.join(FLAGS.cifar_data_dir, "test_batch.bin")
    with open(file_name, "rb") as f:
        data = np.reshape(bytearray(f.read()), [10000, 3073])
    for i in range(len(data)):
        ts_label_cifar10[i, data[i, 0]] = 1.0
    ts_data_cifar10 = data[:, 1:] / 255.0
    data_num_len_cifar10 = len(tr_label_cifar10)
    print(ts_label_cifar10.shape)
    print(ts_label_cifar10[0])

    # task order is controlled by FLAGS.cifar_first
    if FLAGS.cifar_first:
        tr_data1, tr_label1 = tr_data_cifar10, tr_label_cifar10
        ts_data1, ts_label1 = ts_data_cifar10, ts_label_cifar10
        data_num_len1 = data_num_len_cifar10
        tr_data2, tr_label2 = tr_data_svhn, tr_label_svhn
        ts_data2, ts_label2 = ts_data_svhn, ts_label_svhn
        data_num_len2 = data_num_len_svhn
    else:
        tr_data1, tr_label1 = tr_data_svhn, tr_label_svhn
        ts_data1, ts_label1 = ts_data_svhn, ts_label_svhn
        data_num_len1 = data_num_len_svhn
        tr_data2, tr_label2 = tr_data_cifar10, tr_label_cifar10
        ts_data2, ts_label2 = ts_data_cifar10, ts_label_cifar10
        data_num_len2 = data_num_len_cifar10

    ## TASK 1
    sess = tf.InteractiveSession()

    # Input placeholders
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 32 * 32 * 3], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 32, 32, 3])
        tf.summary.image('input', image_shaped_input, 2)

    # geopath examples
    geopath = modules.geopath_initializer(FLAGS.L, FLAGS.M)

    # fixed-weights list: '1' marks a module frozen after task1
    fixed_list = np.ones((FLAGS.L, FLAGS.M), dtype=str)
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            fixed_list[i, j] = '0'

    # hidden layers
    weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    biases_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    # per-module learned scalar pairs used when summing module outputs
    sum_weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            _initial1 = tf.truncated_normal(shape=[1], mean=1, stddev=0.1)
            _initial2 = tf.truncated_normal(shape=[1], mean=1, stddev=0.1)
            sum_weights_list[i, j] = [tf.Variable(_initial1),
                                      tf.Variable(_initial2)]

    # model definition
    layer_modules_list = np.zeros(FLAGS.M, dtype=object)

    # conv layer
    i = 0
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.conv_module(image_shaped_input, FLAGS.filt, [5, 5],
                                geopath[i, j], 1,
                                'layer' + str(i + 1) + "_" + str(j + 1))
    net = sum(a * b[0] for a, b in zip(layer_modules_list,
                                       sum_weights_list[0])) / FLAGS.M

    # res-fire layer
    i = 1
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.res_fire_layer(net, FLAGS.filt // 2, geopath[i, j],
                                   'layer' + str(i + 1) + "_" + str(j + 1))
    net = sum(a * b[0] for a, b in zip(layer_modules_list,
                                       sum_weights_list[1])) / FLAGS.M

    # dimensionality-reduction layer
    i = 2
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.Dimensionality_reduction_module(
                net, 10, geopath[i, j],
                'layer' + str(i + 1) + "_" + str(j + 1))
    net = sum(a * b[0] for a, b in zip(layer_modules_list,
                                       sum_weights_list[2])) / FLAGS.M

    # conv layer
    # NOTE: this layer reads the raw image_shaped_input again rather than
    # `net`, unlike the MNIST variant of this model.
    i = 3
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[i, j] = \
            modules.conv_module(image_shaped_input, FLAGS.filt, [5, 5],
                                geopath[i, j], 1,
                                'layer' + str(i + 1) + "_" + str(j + 1))
    net = sum(a * b[0] for a, b in zip(layer_modules_list,
                                       sum_weights_list[3])) / FLAGS.M

    # output layer
    # flatten first
    _shape = net.shape[1:]
    _length = 1
    for _i in _shape:
        _length *= int(_i)
    net = tf.reshape(net, [-1, _length])
    # fully connected layer
    y, output_weights, output_biases = modules.nn_layer(net, 10,
                                                        'output_layer')

    # cross entropy
    with tf.name_scope('cross_entropy'):
        diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
        with tf.name_scope('total'):
            cross_entropy = tf.reduce_mean(diff)
    tf.summary.scalar('cross_entropy', cross_entropy)

    # variables that need to be learned
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '0':
                var_list_to_learn += weights_list[i, j] + biases_list[i, j]

    # gradient descent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # accuracy
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

    # Merge all the summaries and write them out to
    # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default).
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train1', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test1')
    tf.global_variables_initializer().run()

    # generate random geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

    # parameter placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    # geopath placeholders and ops
    geopath_update_ops = np.zeros((len(geopath), len(geopath[0])),
                                  dtype=object)
    geopath_update_placeholders = np.zeros((len(geopath), len(geopath[0])),
                                           dtype=object)
    for i in range(len(geopath)):
        for j in range(len(geopath[0])):
            geopath_update_placeholders[i, j] = tf.placeholder(
                geopath[i, j].dtype, shape=geopath[i, j].get_shape())
            geopath_update_ops[i, j] = geopath[i, j].assign(
                geopath_update_placeholders[i, j])

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)
    for i in range(FLAGS.max_steps):
        # select candidates for the tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # learning & evaluating
        for j in range(len(compet_idx)):
            # shuffle the data
            idx = np.random.permutation(len(tr_data1))
            tr_data1 = tr_data1[idx]
            tr_label1 = tr_label1[idx]
            # insert candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops,
                                   geopath_set[compet_idx[j]],
                                   FLAGS.L, FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={
                        x: tr_data1[k * FLAGS.batch_num:
                                    (k + 1) * FLAGS.batch_num, :],
                        y_: tr_label1[k * FLAGS.batch_num:
                                      (k + 1) * FLAGS.batch_num, :]
                    })
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # copy and mutate the losers
        for j in range(len(compet_idx)):
            if j != winner_idx:
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))
        acc_task1 = acc
        task1_optimal_path = geopath_set[compet_idx[winner_idx]]

    # fix the task1 optimal path
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if task1_optimal_path[i, j] == 1.0:
                fixed_list[i, j] = '1'

    # gather the variables of the fixed list
    var_list_to_fix = []
    #var_list_to_fix = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '1':
                var_list_to_fix += weights_list[i, j] + biases_list[i, j]
    var_list_fix = modules.parameters_backup(var_list_to_fix)

    # parameter placeholders and ops
    var_fix_ops = np.zeros(len(var_list_to_fix), dtype=object)
    var_fix_placeholders = np.zeros(len(var_list_to_fix), dtype=object)
    for i in range(len(var_list_to_fix)):
        var_fix_placeholders[i] = tf.placeholder(
            var_list_to_fix[i].dtype, shape=var_list_to_fix[i].get_shape())
        var_fix_ops[i] = var_list_to_fix[i].assign(var_fix_placeholders[i])

    ## TASK 2
    # variables that need to be learned
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '0':
                var_list_to_learn += weights_list[i, j] + biases_list[i, j]

    # probe the first fixed bias; `tmp` is unused afterwards
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if fixed_list[i, j] == '1':
                tmp = biases_list[i, j][0]
                break
        break

    # initialization
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train2', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test2')
    tf.global_variables_initializer().run()

    # restore the fixed values
    modules.parameters_update(sess, var_fix_placeholders, var_fix_ops,
                              var_list_fix)

    # gradient descent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # generate random geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

    # parameter placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)
    for i in range(FLAGS.max_steps):
        # select candidates for the tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # learning & evaluating
        for j in range(len(compet_idx)):
            # shuffle the data
            idx = np.random.permutation(len(tr_data2))
            tr_data2 = tr_data2[idx]
            tr_label2 = tr_label2[idx]
            geopath_insert = np.copy(geopath_set[compet_idx[j]])
            # modules on the fixed path are always active
            for l in range(FLAGS.L):
                for m in range(FLAGS.M):
                    if fixed_list[l, m] == '1':
                        geopath_insert[l, m] = 1.0
            # insert candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops, geopath_insert,
                                   FLAGS.L, FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={
                        x: tr_data2[k * FLAGS.batch_num:
                                    (k + 1) * FLAGS.batch_num, :],
                        y_: tr_label2[k * FLAGS.batch_num:
                                      (k + 1) * FLAGS.batch_num, :]
                    })
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # copy and mutate the losers
        for j in range(len(compet_idx)):
            if j != winner_idx:
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))
        acc_task2 = acc

    if FLAGS.cifar_first:
        print("CIFAR10_SVHN,TASK1:" + str(acc_task1) + ",TASK2:" +
              str(acc_task2) + ",Done")
    else:
        print("SVHN_CIFAR10,TASK1:" + str(acc_task1) + ",TASK2:" +
              str(acc_task2) + ",Done")
    train_writer.close()
    test_writer.close()
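
# --------------------------------------------------------------------------
# Hedged helper (an addition, not in the original script): a vectorized
# one-hot encoder equivalent to the label-building loops in train() above.
# SVHN labels run 1..10, so pass `train['y'] - 1`; CIFAR-10 label bytes are
# already 0..9.
def one_hot(labels, num_classes=10):
    labels = np.asarray(labels).reshape(-1)
    out = np.zeros((len(labels), num_classes), dtype=float)
    out[np.arange(len(labels)), labels] = 1.0
    return out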