Example #1
def build_module(module_type, input_data, filter_num, keep_prob, module_name,
                 input_graph):
    with input_graph.as_default():
        out = None
        w = None
        b = None

        in_scale = generate_scale(input_graph, module_name + "_in")
        input_data = in_scale * input_data

        if module_type == "conv":
            out, w, b = modules.conv_module(input_tensor=input_data,
                                            filt_num=filter_num,
                                            kernel_size=[3, 3],
                                            is_active=1,
                                            stride=1,
                                            layer_name=module_name,
                                            keep_prob=keep_prob)
        elif module_type == "fire":
            out, w, b = modules.fire_layer(
                input_tensor=input_data,
                filter_num=filter_num,
                is_active=1,
                layer_name=module_name,
                keep_prob=keep_prob,
            )
        elif module_type == "DR":
            out, w, b = modules.Dimensionality_reduction_module(
                input_tensor=input_data, is_active=1, layer_name=module_name)
        else:
            raise ValueError("Unknown module type")
        # print('the in_scale is: ' + str(in_scale))
    return out, w, b, [in_scale]
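
A minimal usage sketch for build_module (illustrative only: it assumes the repository's modules and generate_scale helpers are importable, and the input shape and names here are made up):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

# builds a conv module named 'layer1' inside graph; returns its output
# tensor, weights, biases, and the learned input scale
out, w, b, scales = build_module("conv", images, filter_num=64,
                                 keep_prob=keep_prob, module_name="layer1",
                                 input_graph=graph)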
Example #2
def train():
    ## Get ImageNet dataset file queues for task 1 and task 2
    tr_data1, tr_label1 = imagenet_data.create_file_queue(
        FLAGS.imagenet_data_dir1)
    tr_data2, tr_label2 = imagenet_data.create_file_queue(
        FLAGS.imagenet_data_dir2)

    ## TASK 1
    sess = tf.InteractiveSession()

    # Input placeholders
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 224 * 224 * 3], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')

    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 224, 224, 3])
        tf.summary.image('input', image_shaped_input, 2)

    # geopath_examples
    geopath = modules.geopath_initializer(FLAGS.L, FLAGS.M)
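    # geopath is an L x M grid of per-module gating variables; candidate
    # binary paths are written into it through the assign ops created below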

    # fixed weights list
    fixed_list = np.ones((FLAGS.L, FLAGS.M), dtype=str)
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            fixed_list[i, j] = '0'
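    # '0' marks a trainable module; modules on task 1's optimal path are
    # switched to '1' (frozen) before task 2 starts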

    # Hidden Layers
    weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    biases_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)

    # model define
    layer_modules_list = np.zeros(FLAGS.M, dtype=object)
    # conv layer
    i = 0
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[
            i,
            j] = modules.conv_module(image_shaped_input, FLAGS.filt, [11, 11],
                                     geopath[i, j], 1,
                                     'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M
    # dimensionality_reduction layer
    i = 1
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[
            i, j] = modules.Dimensionality_reduction_module(
                net, FLAGS.filt // 2, geopath[i, j],
                'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M
    # res_fire layer
    i = 2
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[
            i, j] = modules.res_fire_layer(
                net, FLAGS.filt // 2, geopath[i, j],
                'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M
    # dimensionality_reduction layer
    i = 3
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[
            i, j] = modules.Dimensionality_reduction_module(
                net, FLAGS.filt // 2, geopath[i, j],
                'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M
    # reshape before full connection layer
    _shape = net.shape[1:]
    _length = 1
    for _i in _shape:
        _length *= int(_i)
    net = tf.reshape(net, [-1, _length])
    # model1 layer
    i = 4
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[
            i, j] = modules.module(net, FLAGS.full_connection_filt, geopath[i,
                                                                            j],
                                   'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum(layer_modules_list) / FLAGS.M

    # output layer
    y, output_weights, output_biases = modules.nn_layer(
        net, 10, 'output_layer')

    # Cross Entropy
    with tf.name_scope('cross_entropy'):
        diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
        with tf.name_scope('total'):
            cross_entropy = tf.reduce_mean(diff)
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Need to learn variables
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (fixed_list[i, j] == '0'):
                var_list_to_learn += weights_list[i, j] + biases_list[i, j]

    # GradientDescent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # Accuracy
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

    # Merge all the summaries and write them out to FLAGS.log_dir
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train1', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test1')

    # init
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()

    # start data reading queue
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Randomly generate candidate geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)
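    # each candidate is a binary L x M path; N presumably bounds the number
    # of active modules per layer (it is only passed through to get_geopath)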

    # parameters placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    # geopath placeholders and ops
    geopath_update_ops = np.zeros((len(geopath), len(geopath[0])),
                                  dtype=object)
    geopath_update_placeholders = np.zeros((len(geopath), len(geopath[0])),
                                           dtype=object)
    for i in range(len(geopath)):
        for j in range(len(geopath[0])):
            geopath_update_placeholders[i, j] = tf.placeholder(
                geopath[i, j].dtype, shape=geopath[i, j].get_shape())
            geopath_update_ops[i, j] = geopath[i, j].assign(
                geopath_update_placeholders[i, j])
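    # these assign ops let the training loop swap candidate paths and saved
    # weights into the live graph without rebuilding it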

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)

    for i in range(FLAGS.max_steps):
        # Select Candidates to Tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # Learning & Evaluating
        for j in range(len(compet_idx)):
            # Insert Candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops,
                                   geopath_set[compet_idx[j]], FLAGS.L,
                                   FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                # debug:
                # print(x.shape)
                # print(tr_data1[k*FLAGS.batch_num:(k+1)*FLAGS.batch_num,:].shape)
                # print(y.shape)
                # print(tr_label1[k*FLAGS.batch_num:(k+1)*FLAGS.batch_num,:].shape)
                tr_data1_val, tr_label1_val = imagenet_data.read_batch(
                    sess, tr_data1, tr_label1, FLAGS.batch_num,
                    FLAGS.imagenet_data_dir1)
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={
                        x: tr_data1_val,
                        y_: tr_label1_val
                    })
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # Tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # Copy and Mutation
        for j in range(len(compet_idx)):
            if (j != winner_idx):
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))

        if acc >= 0.5:
            step_task1 = i
            task1_optimal_path = geopath_set[compet_idx[winner_idx]]
            print('Task1 Optimal Path is as follows.')
            print(task1_optimal_path)
            break
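    # note: if accuracy never reaches 0.5 within max_steps, step_task1 and
    # task1_optimal_path are never assigned and the code below will raise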

    # Fix task1 Optimal Path
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (task1_optimal_path[i, j] == 1.0):
                fixed_list[i, j] = '1'

    # Get variables of fixed list
    var_list_to_fix = []
    #var_list_to_fix=[]+output_weights+output_biases;
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (fixed_list[i, j] == '1'):
                var_list_to_fix += weights_list[i, j] + biases_list[i, j]
    var_list_fix = modules.parameters_backup(var_list_to_fix)

    # parameters placeholders and ops
    var_fix_ops = np.zeros(len(var_list_to_fix), dtype=object)
    var_fix_placeholders = np.zeros(len(var_list_to_fix), dtype=object)
    for i in range(len(var_list_to_fix)):
        var_fix_placeholders[i] = tf.placeholder(
            var_list_to_fix[i].dtype, shape=var_list_to_fix[i].get_shape())
        var_fix_ops[i] = var_list_to_fix[i].assign(var_fix_placeholders[i])

    ## TASK 2
    # Need to learn variables
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (fixed_list[i, j] == '0'):
                var_list_to_learn += weights_list[i, j] + biases_list[i, j]

    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (fixed_list[i, j] == '1'):
                tmp = biases_list[i, j][0]
                break
        break
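    # note: both breaks fire on the first row, so this grabs at most one
    # frozen bias from layer 1; it looks like leftover debug code (the MNIST
    # variant below keeps the same block commented out)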

    # Initialization
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train2', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test2')
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()

    # Update fixed values
    modules.parameters_update(sess, var_fix_placeholders, var_fix_ops,
                              var_list_fix)

    # GradientDescent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # Randomly generate candidate geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

    # parameters placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)
    for i in range(FLAGS.max_steps):
        # Select Candidates to Tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # Learning & Evaluating
        for j in range(len(compet_idx)):
            geopath_insert = np.copy(geopath_set[compet_idx[j]])
            for l in range(FLAGS.L):
                for m in range(FLAGS.M):
                    if (fixed_list[l, m] == '1'):
                        geopath_insert[l, m] = 1.0

            # Insert Candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops, geopath_insert, FLAGS.L,
                                   FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                tr_data2_val, tr_label2_val = imagenet_data.read_batch(
                    sess, tr_data2, tr_label2, FLAGS.batch_num,
                    FLAGS.imagenet_data_dir2)
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={
                        x: tr_data2_val,
                        y_: tr_label2_val
                    })
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # Tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # Copy and Mutation
        for j in range(len(compet_idx)):
            if (j != winner_idx):
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))

        if acc >= 0.5:
            step_task2 = i
            task2_optimal_path = geopath_set[compet_idx[winner_idx]]
            print('Task2 Optimal Path is as follows.')
            print(task2_optimal_path)
            break

    # close data reading queue
    coord.request_stop()
    coord.join(threads)

    overlap = 0
    for i in range(len(task1_optimal_path)):
        for j in range(len(task1_optimal_path[0])):
            if (task1_optimal_path[i, j] == task2_optimal_path[i, j]) & (
                    task1_optimal_path[i, j] == 1.0):
                overlap += 1
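    # equivalently: overlap = int(np.sum((task1_optimal_path == 1.0) &
    #                                    (task2_optimal_path == 1.0)))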
    print("ImageNet,TASK1:" + str(step_task1) + ",TASK2:" + str(step_task2) +
          ", Overlap:" + str(overlap))

    train_writer.close()
    test_writer.close()
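
The loop above repeats one evolutionary step: sample B candidates, train and score each on a few batches, then overwrite every loser with a mutated copy of the winner. A self-contained NumPy sketch of that step (the independent bit-flip mutation here is an assumption; the original delegates to modules.mutation):

import numpy as np

def tournament_step(population, scores, mutate_prob=0.1):
    # copy the winner's path over each loser, then flip a few of its bits
    winner = int(np.argmax(scores))
    for j in range(len(population)):
        if j == winner:
            continue
        path = population[winner].copy()
        flip = np.random.rand(*path.shape) < mutate_prob
        path[flip] = 1.0 - path[flip]
        population[j] = path
    return population

# toy usage: 4 candidate paths over an L=3 x M=5 module grid
pop = [np.random.randint(0, 2, (3, 5)).astype(float) for _ in range(4)]
pop = tournament_step(pop, scores=[0.2, 0.5, 0.1, 0.4])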
Example #3
def train():
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir,
                                    one_hot=True,
                                    fake_data=FLAGS.fake_data)
  total_tr_data, total_tr_label = mnist.train.next_batch(mnist.train._num_examples)
  
  # Gathering a1 Data
  tr_data_a1 = total_tr_data[total_tr_label[:, FLAGS.a1] == 1.0]
  for i in range(len(tr_data_a1)):
    for j in range(len(tr_data_a1[0])):
      rand_num = np.random.rand()
      if rand_num >= 0.5:
        tr_data_a1[i, j] = np.minimum(tr_data_a1[i, j] + rand_num, 1.0)

  # Gathering a2 Data
  tr_data_a2 = total_tr_data[total_tr_label[:, FLAGS.a2] == 1.0]
  for i in range(len(tr_data_a2)):
    for j in range(len(tr_data_a2[0])):
      rand_num = np.random.rand()
      if rand_num >= 0.5:
        tr_data_a2[i, j] = np.minimum(tr_data_a2[i, j] + rand_num, 1.0)

  # Gathering b1 Data
  tr_data_b1 = total_tr_data[total_tr_label[:, FLAGS.b1] == 1.0]
  for i in range(len(tr_data_b1)):
    for j in range(len(tr_data_b1[0])):
      rand_num = np.random.rand()
      if rand_num >= 0.5:
        tr_data_b1[i, j] = np.minimum(tr_data_b1[i, j] + rand_num, 1.0)

  # Gathering b2 Data
  tr_data_b2 = total_tr_data[total_tr_label[:, FLAGS.b2] == 1.0]
  for i in range(len(tr_data_b2)):
    for j in range(len(tr_data_b2[0])):
      rand_num = np.random.rand()
      if rand_num >= 0.5:
        tr_data_b2[i, j] = np.minimum(tr_data_b2[i, j] + rand_num, 1.0)
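
  # A vectorized equivalent of the four per-pixel noise loops above (a sketch;
  # same rule: where a uniform draw is >= 0.5, add the draw itself, clipped at
  # 1.0), e.g. for a1:
  #   r = np.random.rand(*tr_data_a1.shape)
  #   mask = r >= 0.5
  #   tr_data_a1[mask] = np.minimum(tr_data_a1[mask] + r[mask], 1.0)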

  tr_data1 = np.append(tr_data_a1, tr_data_a2, axis=0)
  tr_label1 = np.zeros((len(tr_data1), 2), dtype=float)
  for i in range(len(tr_data1)):
    if i < len(tr_data_a1):
      tr_label1[i, 0] = 1.0
    else:
      tr_label1[i, 1] = 1.0

  tr_data2 = np.append(tr_data_b1, tr_data_b2, axis=0)
  tr_label2 = np.zeros((len(tr_data2), 2), dtype=float)
  for i in range(len(tr_data2)):
    if i < len(tr_data_b1):
      tr_label2[i, 0] = 1.0
    else:
      tr_label2[i, 1] = 1.0
  
  ## TASK 1
  sess = tf.InteractiveSession()

  # Input placeholders
  with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, 2], name='y-input')

  with tf.name_scope('input_reshape'):
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
    tf.summary.image('input', image_shaped_input, 2)

  # geopath_examples
  geopath = modules.geopath_initializer(FLAGS.L, FLAGS.M)

  # fixed weights list
  fixed_list = np.ones((FLAGS.L, FLAGS.M), dtype=str)
  for i in range(FLAGS.L):
    for j in range(FLAGS.M):
      fixed_list[i, j] = '0'

  # Hidden Layers
  weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)  # weights_list also records the conv kernels
  biases_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
  sum_weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
  for i in range(FLAGS.L):
    for j in range(FLAGS.M):
      _initial1 = tf.truncated_normal(shape=[1], mean=1, stddev=0.1)
      _initial2 = tf.truncated_normal(shape=[1], mean=1, stddev=0.1)
      sum_weights_list[i, j] = [tf.Variable(_initial1), tf.Variable(_initial2)]
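  # each module gets two learnable scalars: element [1] scales the module's
  # input and element [0] weights its output in the layer-wise sums below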

  # model define
  layer_modules_list = np.zeros(FLAGS.M, dtype=object)
  # conv layer
  i = 0
  for j in range(FLAGS.M):
    layer_modules_list[j], weights_list[i, j], biases_list[i, j] = modules.conv_module(
        sum_weights_list[i, j][1] * image_shaped_input, FLAGS.filt, [5, 5],
        geopath[i, j], 1, 'layer' + str(i + 1) + "_" + str(j + 1))
  net = np.sum([a * b[0] for a, b in
                zip(layer_modules_list, sum_weights_list[i])]) / FLAGS.M
  # res-fire layer
  i = 1
  for j in range(FLAGS.M):
    layer_modules_list[j], weights_list[i, j], biases_list[i, j] = modules.res_fire_layer(
        sum_weights_list[i, j][1] * net, FLAGS.filt // 2, geopath[i, j],
        'layer' + str(i + 1) + "_" + str(j + 1))
  net = np.sum([a * b[0] for a, b in
                zip(layer_modules_list, sum_weights_list[i])]) / FLAGS.M
  # dimensionality_reduction layer
  i = 2
  for j in range(FLAGS.M):
    layer_modules_list[j], weights_list[i, j], biases_list[i, j] = modules.Dimensionality_reduction_module(
        sum_weights_list[i, j][1] * net, FLAGS.filt // 2, geopath[i, j],
        'layer' + str(i + 1) + "_" + str(j + 1))
  net = np.sum([a * b[0] for a, b in
                zip(layer_modules_list, sum_weights_list[i])]) / FLAGS.M
  # conv layer
  i = 3
  for j in range(FLAGS.M):
    layer_modules_list[j], weights_list[i, j], biases_list[i, j] = modules.conv_module(
        sum_weights_list[i, j][1] * net, FLAGS.filt, [5, 5], geopath[i, j], 1,
        'layer' + str(i + 1) + "_" + str(j + 1))
  net = np.sum([a * b[0] for a, b in
                zip(layer_modules_list, sum_weights_list[i])]) / FLAGS.M
  # output layer
  # reshape before the fully connected layer
  _shape = net.shape[1:]
  _length = 1
  for _i in _shape:
    _length *= int(_i)
  net = tf.reshape(net, [-1, _length])
  # sigmoid layer
  y, output_weights, output_biases = modules.nn_layer(net, 2, 'output_layer')

  # Cross Entropy
  with tf.name_scope('cross_entropy'):
    diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    with tf.name_scope('total'):
      cross_entropy = tf.reduce_mean(diff)
  tf.summary.scalar('cross_entropy', cross_entropy)
  
  # Need to learn variables
  var_list_to_learn = [] + output_weights + output_biases
  for i in range(FLAGS.L):
    for j in range(FLAGS.M):
      if fixed_list[i, j] == '0':
        var_list_to_learn += weights_list[i, j] + biases_list[i, j] + sum_weights_list[i, j]

  # GradientDescent
  with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(
        FLAGS.learning_rate).minimize(cross_entropy, var_list=var_list_to_learn)

  # Accuracy 
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', accuracy)

  # Merge all the summaries and write them out to /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)
  merged = tf.summary.merge_all()
  train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train1', sess.graph)
  test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test1')

  tf.global_variables_initializer().run()

  # Randomly generate candidate geopaths
  geopath_set = np.zeros(FLAGS.candi, dtype=object)
  for i in range(FLAGS.candi):
    geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

  # parameters placeholders and ops
  var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
  var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
  for i in range(len(var_list_to_learn)):
    var_update_placeholders[i] = tf.placeholder(var_list_to_learn[i].dtype,
                                                shape=var_list_to_learn[i].get_shape())
    var_update_ops[i] = var_list_to_learn[i].assign(var_update_placeholders[i])

  # geopath placeholders and ops
  geopath_update_ops = np.zeros((len(geopath), len(geopath[0])), dtype=object)
  geopath_update_placeholders = np.zeros((len(geopath), len(geopath[0])), dtype=object)
  for i in range(len(geopath)):
    for j in range(len(geopath[0])):
      geopath_update_placeholders[i, j] = tf.placeholder(geopath[i, j].dtype,
                                                         shape=geopath[i, j].get_shape())
      geopath_update_ops[i, j] = geopath[i, j].assign(geopath_update_placeholders[i, j])
     
  acc_geo = np.zeros(FLAGS.B, dtype=float)
  summary_geo = np.zeros(FLAGS.B, dtype=object)
  for i in range(FLAGS.max_steps):
    # Select Candidates to Tournament
    compet_idx = list(range(FLAGS.candi))
    np.random.shuffle(compet_idx)
    compet_idx = compet_idx[:FLAGS.B]
    # Learning & Evaluating
    for j in range(len(compet_idx)):
      # Shuffle the data
      idx = list(range(len(tr_data1)))
      np.random.shuffle(idx)
      tr_data1 = tr_data1[idx]
      tr_label1 = tr_label1[idx]
      # Insert Candidate
      modules.geopath_insert(sess, geopath_update_placeholders,
                             geopath_update_ops, geopath_set[compet_idx[j]],
                             FLAGS.L, FLAGS.M)
      acc_geo_tr = 0
      for k in range(FLAGS.T):
        summary_geo_tr, _, acc_geo_tmp = sess.run(
            [merged, train_step, accuracy],
            feed_dict={
                x: tr_data1[k * FLAGS.batch_num:(k + 1) * FLAGS.batch_num, :],
                y_: tr_label1[k * FLAGS.batch_num:(k + 1) * FLAGS.batch_num, :]
            })
        acc_geo_tr += acc_geo_tmp
      acc_geo[j] = acc_geo_tr / FLAGS.T
      summary_geo[j] = summary_geo_tr
    # Tournament
    winner_idx = np.argmax(acc_geo)
    acc = acc_geo[winner_idx]
    summary = summary_geo[winner_idx]
    # Copy and Mutation
    for j in range(len(compet_idx)):
      if j != winner_idx:
        geopath_set[compet_idx[j]] = np.copy(geopath_set[compet_idx[winner_idx]])
        geopath_set[compet_idx[j]] = modules.mutation(geopath_set[compet_idx[j]],
                                                      FLAGS.L, FLAGS.M, FLAGS.N)
    train_writer.add_summary(summary, i)
    print('Training Accuracy at step %s: %s' % (i, acc))
    if acc >= 0.99:
      print('Learning Done!!')
      print('Optimal Path is as follows.')
      print(geopath_set[compet_idx[winner_idx]])
      task1_optimal_path = geopath_set[compet_idx[winner_idx]]
      break
    """
    geopath_sum = np.zeros((len(geopath), len(geopath[0])), dtype=float)
    for j in range(len(geopath_set)):
      for k in range(len(geopath)):
        for l in range(len(geopath[0])):
          geopath_sum[k][l] += geopath_set[j][k][l]
    print(geopath_sum)
    """
  # record steps to find optimal path in task1
  iter_task1 = i
  
  # Fix task1 Optimal Path
  for i in range(FLAGS.L):
    for j in range(FLAGS.M):
      if task1_optimal_path[i, j] == 1.0:
        fixed_list[i, j] = '1'

  # Get variables of fixed list
  var_list_to_fix = []
  #var_list_to_fix=[]+output_weights+output_biases;
  for i in range(FLAGS.L):
    for j in range(FLAGS.M):
      if fixed_list[i, j] == '1':
        var_list_to_fix += weights_list[i, j] + biases_list[i, j]
  var_list_fix = modules.parameters_backup(var_list_to_fix)

  """
  for i in range(FLAGS.L):
    for j in range(FLAGS.M):
      if(task1_optimal_path[i,j]==1.0):
        fixed_list[i,j]='0';
  """

  # parameters placeholders and ops
  var_fix_ops = np.zeros(len(var_list_to_fix), dtype=object)
  var_fix_placeholders = np.zeros(len(var_list_to_fix), dtype=object)
  for i in range(len(var_list_to_fix)):
    var_fix_placeholders[i] = tf.placeholder(var_list_to_fix[i].dtype,
                                             shape=var_list_to_fix[i].get_shape())
    var_fix_ops[i] = var_list_to_fix[i].assign(var_fix_placeholders[i])
 
  ## TASK 2
  # Need to learn variables
  var_list_to_learn = [] + output_weights + output_biases
  for i in range(FLAGS.L):
    for j in range(FLAGS.M):
      if fixed_list[i, j] == '0':
        var_list_to_learn += weights_list[i, j] + biases_list[i, j]
      # sum weights stay trainable for every module, fixed or not
      var_list_to_learn += sum_weights_list[i, j]

  '''
  for i in range(FLAGS.L):
    for j in range(FLAGS.M):
      if(fixed_list[i,j]=='1'):
        tmp=biases_list[i,j][0];
        break;
    break;
  '''

  # Initialization
  merged = tf.summary.merge_all()
  train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train2', sess.graph)
  test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test2')
  tf.global_variables_initializer().run()

  # Update fixed values
  modules.parameters_update(sess, var_fix_placeholders, var_fix_ops, var_list_fix)

  # GradientDescent
  with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(
        FLAGS.learning_rate).minimize(cross_entropy, var_list=var_list_to_learn)

  # Randomly generate candidate geopaths
  geopath_set = np.zeros(FLAGS.candi, dtype=object)
  for i in range(FLAGS.candi):
    geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

  # parameters placeholders and ops
  var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
  var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
  for i in range(len(var_list_to_learn)):
    var_update_placeholders[i] = tf.placeholder(var_list_to_learn[i].dtype,
                                                shape=var_list_to_learn[i].get_shape())
    var_update_ops[i] = var_list_to_learn[i].assign(var_update_placeholders[i])
  
  acc_geo = np.zeros(FLAGS.B, dtype=float)
  summary_geo = np.zeros(FLAGS.B, dtype=object)
  for i in range(FLAGS.max_steps):
    # Select Candidates to Tournament
    compet_idx = list(range(FLAGS.candi))
    np.random.shuffle(compet_idx)
    compet_idx = compet_idx[:FLAGS.B]
    # Learning & Evaluating
    for j in range(len(compet_idx)):
      # Shuffle the data
      idx = list(range(len(tr_data2)))
      np.random.shuffle(idx)
      tr_data2 = tr_data2[idx]
      tr_label2 = tr_label2[idx]
      geopath_insert = np.copy(geopath_set[compet_idx[j]])

      # modules fixed after task 1 are always active
      for l in range(FLAGS.L):
        for m in range(FLAGS.M):
          if fixed_list[l, m] == '1':
            geopath_insert[l, m] = 1.0

      # Insert Candidate
      modules.geopath_insert(sess, geopath_update_placeholders,
                             geopath_update_ops, geopath_insert,
                             FLAGS.L, FLAGS.M)
      acc_geo_tr = 0
      for k in range(FLAGS.T):
        summary_geo_tr, _, acc_geo_tmp = sess.run(
            [merged, train_step, accuracy],
            feed_dict={
                x: tr_data2[k * FLAGS.batch_num:(k + 1) * FLAGS.batch_num, :],
                y_: tr_label2[k * FLAGS.batch_num:(k + 1) * FLAGS.batch_num, :]
            })
        acc_geo_tr += acc_geo_tmp
      acc_geo[j] = acc_geo_tr / FLAGS.T
      summary_geo[j] = summary_geo_tr
    # Tournament
    winner_idx = np.argmax(acc_geo)
    acc = acc_geo[winner_idx]
    summary = summary_geo[winner_idx]
    # Copy and Mutation
    for j in range(len(compet_idx)):
      if j != winner_idx:
        geopath_set[compet_idx[j]] = np.copy(geopath_set[compet_idx[winner_idx]])
        geopath_set[compet_idx[j]] = modules.mutation(geopath_set[compet_idx[j]],
                                                      FLAGS.L, FLAGS.M, FLAGS.N)
    train_writer.add_summary(summary, i)
    print('Training Accuracy at step %s: %s' % (i, acc))
    if acc >= 0.99:
      print('Learning Done!!')
      print('Optimal Path is as follows.')
      print(geopath_set[compet_idx[winner_idx]])
      task2_optimal_path = geopath_set[compet_idx[winner_idx]]
      break
    """
    geopath_sum = np.zeros((len(geopath), len(geopath[0])), dtype=float)
    for j in range(len(geopath_set)):
      for k in range(len(geopath)):
        for l in range(len(geopath[0])):
          geopath_sum[k][l] += geopath_set[j][k][l]
    print(geopath_sum)
    """

  iter_task2 = i
  overlap = 0
  for i in range(len(task1_optimal_path)):
    for j in range(len(task1_optimal_path[0])):
      if (task1_optimal_path[i, j] == task2_optimal_path[i, j]) & (task1_optimal_path[i, j] == 1.0):
        overlap += 1
  print("Entire Iter:" + str(iter_task1 + iter_task2) + ",TASK1:" + str(iter_task1) +
        ",TASK2:" + str(iter_task2) + ",Overlap:" + str(overlap))
 
  train_writer.close()
  test_writer.close()
Example #4
def train():
    # Get SVHN dataset
    svhn_maybe_download_and_extract()
    file_name = os.path.join(FLAGS.svhn_data_dir, "train_32x32.mat")
    train = sio.loadmat(file_name)
    tr_data_svhn = np.zeros((len(train['y']), 32 * 32 * 3), dtype=float)
    tr_label_svhn = np.zeros((len(train['y']), 10), dtype=float)
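    # SVHN .mat labels run 1..10 (10 stands for digit 0), hence the "- 1"
    # shift when filling the one-hot labels below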
    for i in range(len(train['y'])):
        tr_data_svhn[i] = np.reshape(train['X'][:, :, :, i], [1, 32 * 32 * 3])
        tr_label_svhn[i, train['y'][i][0] - 1] = 1.0
    tr_data_svhn = tr_data_svhn / 255.0

    file_name = os.path.join(FLAGS.svhn_data_dir, "test_32x32.mat")
    test = sio.loadmat(file_name)
    ts_data_svhn = np.zeros((len(test['y']), 32 * 32 * 3), dtype=float)
    ts_label_svhn = np.zeros((len(test['y']), 10), dtype=float)
    for i in range(len(test['y'])):
        ts_data_svhn[i] = np.reshape(test['X'][:, :, :, i], [1, 32 * 32 * 3])
        ts_label_svhn[i, test['y'][i][0] - 1] = 1.0
    ts_data_svhn = ts_data_svhn / 255.0
    data_num_len_svhn = len(tr_label_svhn)
    print("svhn done")
    # Get CIFAR 10  dataset
    cifar10.maybe_download_and_extract()
    tr_label_cifar10 = np.zeros((50000, 10), dtype=float)
    ts_label_cifar10 = np.zeros((10000, 10), dtype=float)
    for i in range(1, 6):
        file_name = os.path.join(FLAGS.cifar_data_dir,
                                 "data_batch_" + str(i) + ".bin")
        with open(file_name, "rb") as f:
            data = np.reshape(bytearray(f.read()), [10000, 3073])
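        # each CIFAR-10 record is 3073 bytes: 1 label byte + 3072 pixel bytes (32x32x3)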
        if (i == 1):
            tr_data_cifar10 = data[:, 1:] / 255.0
        else:
            tr_data_cifar10 = np.append(tr_data_cifar10,
                                        data[:, 1:] / 255.0,
                                        axis=0)
        for j in range(len(data)):
            tr_label_cifar10[(i - 1) * 10000 + j, data[j, 0]] = 1.0
    file_name = os.path.join(FLAGS.cifar_data_dir, "test_batch.bin")
    with open(file_name, "rb") as f:
        data = np.reshape(bytearray(f.read()), [10000, 3073])
    for i in range(len(data)):
        ts_label_cifar10[i, data[i, 0]] = 1.0
    ts_data_cifar10 = data[:, 1:] / 255.0
    data_num_len_cifar10 = len(tr_label_cifar10)

    print(ts_label_cifar10.shape)
    print(ts_label_cifar10[0])
    if (FLAGS.cifar_first):
        tr_data1 = tr_data_cifar10
        tr_label1 = tr_label_cifar10
        ts_data1 = ts_data_cifar10
        ts_label1 = ts_label_cifar10
        data_num_len1 = data_num_len_cifar10
        tr_data2 = tr_data_svhn
        tr_label2 = tr_label_svhn
        ts_data2 = ts_data_svhn
        ts_label2 = ts_label_svhn
        data_num_len2 = data_num_len_svhn
    else:
        tr_data1 = tr_data_svhn
        tr_label1 = tr_label_svhn
        ts_data1 = ts_data_svhn
        ts_label1 = ts_label_svhn
        data_num_len1 = data_num_len_svhn
        tr_data2 = tr_data_cifar10
        tr_label2 = tr_label_cifar10
        ts_data2 = ts_data_cifar10
        ts_label2 = ts_label_cifar10
        data_num_len2 = data_num_len_cifar10

    ## TASK 1
    sess = tf.InteractiveSession()

    # Input placeholders
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 32 * 32 * 3], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')

    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 32, 32, 3])
        tf.summary.image('input', image_shaped_input, 2)

    # geopath_examples
    geopath = modules.geopath_initializer(FLAGS.L, FLAGS.M)

    # fixed weights list
    fixed_list = np.ones((FLAGS.L, FLAGS.M), dtype=str)
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            fixed_list[i, j] = '0'

    # Hidden Layers
    weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    biases_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    sum_weights_list = np.zeros((FLAGS.L, FLAGS.M), dtype=object)
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            _initial1 = tf.truncated_normal(shape=[1], mean=1, stddev=0.1)
            _initial2 = tf.truncated_normal(shape=[1], mean=1, stddev=0.1)
            sum_weights_list[i, j] = [
                tf.Variable(_initial1),
                tf.Variable(_initial2)
            ]
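    # as in the MNIST example, each module gets two scalars, but this variant
    # only uses element [0] (an output weight) in the layer sums below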

    # model define
    layer_modules_list = np.zeros(FLAGS.M, dtype=object)
    # conv layer
    i = 0
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[
            i,
            j] = modules.conv_module(image_shaped_input, FLAGS.filt, [5, 5],
                                     geopath[i, j], 1,
                                     'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum([a * b[0] for a, b in
                  zip(layer_modules_list, sum_weights_list[0])]) / FLAGS.M
    # res-fire layer
    i = 1
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[
            i, j] = modules.res_fire_layer(
                net, FLAGS.filt // 2, geopath[i, j],
                'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum([a * b[0] for a, b in
                  zip(layer_modules_list, sum_weights_list[1])]) / FLAGS.M
    # dimensionality_reduction layer
    i = 2
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[
            i, j] = modules.Dimensionality_reduction_module(
                net, 10, geopath[i, j],
                'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum([a * b[0] for a, b in
                  zip(layer_modules_list, sum_weights_list[2])]) / FLAGS.M
    # conv layer
    i = 3
    for j in range(FLAGS.M):
        layer_modules_list[j], weights_list[i, j], biases_list[
            i, j] = modules.conv_module(net, FLAGS.filt, [5, 5],
                                        geopath[i, j], 1,
                                        'layer' + str(i + 1) + "_" + str(j + 1))
    net = np.sum([a * b[0] for a, b in
                  zip(layer_modules_list, sum_weights_list[3])]) / FLAGS.M
    # output layer
    # reshape
    _shape = net.shape[1:]
    _length = 1
    for _i in _shape:
        _length *= int(_i)
    net = tf.reshape(net, [-1, _length])
    # full connection layer
    y, output_weights, output_biases = modules.nn_layer(
        net, 10, 'output_layer')

    # Cross Entropy
    with tf.name_scope('cross_entropy'):
        diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
        with tf.name_scope('total'):
            cross_entropy = tf.reduce_mean(diff)
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Need to learn variables
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (fixed_list[i, j] == '0'):
                var_list_to_learn += weights_list[i, j] + biases_list[i, j]

    # GradientDescent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # Accuracy
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

    # Merge all the summaries and write them out to FLAGS.log_dir
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train1', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test1')
    tf.global_variables_initializer().run()

    # Randomly generate candidate geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

    # parameters placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    # geopath placeholders and ops
    geopath_update_ops = np.zeros((len(geopath), len(geopath[0])),
                                  dtype=object)
    geopath_update_placeholders = np.zeros((len(geopath), len(geopath[0])),
                                           dtype=object)
    for i in range(len(geopath)):
        for j in range(len(geopath[0])):
            geopath_update_placeholders[i, j] = tf.placeholder(
                geopath[i, j].dtype, shape=geopath[i, j].get_shape())
            geopath_update_ops[i, j] = geopath[i, j].assign(
                geopath_update_placeholders[i, j])

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)
    for i in range(FLAGS.max_steps):
        # Select Candidates to Tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # Learning & Evaluating
        for j in range(len(compet_idx)):
            # Shuffle the data
            idx = list(range(len(tr_data1)))
            np.random.shuffle(idx)
            tr_data1 = tr_data1[idx]
            tr_label1 = tr_label1[idx]
            # Insert Candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops,
                                   geopath_set[compet_idx[j]], FLAGS.L,
                                   FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                # debug:
                # print(x.shape)
                # print(tr_data1[k*FLAGS.batch_num:(k+1)*FLAGS.batch_num,:].shape)
                # print(y.shape)
                # print(tr_label1[k*FLAGS.batch_num:(k+1)*FLAGS.batch_num,:].shape)
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={
                        x:
                        tr_data1[k * FLAGS.batch_num:(k + 1) *
                                 FLAGS.batch_num, :],
                        y_:
                        tr_label1[k * FLAGS.batch_num:(k + 1) *
                                  FLAGS.batch_num, :]
                    })
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # Tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # Copy and Mutation
        for j in range(len(compet_idx)):
            if (j != winner_idx):
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))

    acc_task1 = acc
    task1_optimal_path = geopath_set[compet_idx[winner_idx]]

    # Fix task1 Optimal Path
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (task1_optimal_path[i, j] == 1.0):
                fixed_list[i, j] = '1'

    # Get variables of fixed list
    var_list_to_fix = []
    #var_list_to_fix=[]+output_weights+output_biases;
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (fixed_list[i, j] == '1'):
                var_list_to_fix += weights_list[i, j] + biases_list[i, j]
    var_list_fix = modules.parameters_backup(var_list_to_fix)

    # parameters placeholders and ops
    var_fix_ops = np.zeros(len(var_list_to_fix), dtype=object)
    var_fix_placeholders = np.zeros(len(var_list_to_fix), dtype=object)
    for i in range(len(var_list_to_fix)):
        var_fix_placeholders[i] = tf.placeholder(
            var_list_to_fix[i].dtype, shape=var_list_to_fix[i].get_shape())
        var_fix_ops[i] = var_list_to_fix[i].assign(var_fix_placeholders[i])

    ## TASK 2
    # Need to learn variables
    var_list_to_learn = [] + output_weights + output_biases
    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (fixed_list[i, j] == '0'):
                var_list_to_learn += weights_list[i, j] + biases_list[i, j]

    for i in range(FLAGS.L):
        for j in range(FLAGS.M):
            if (fixed_list[i, j] == '1'):
                tmp = biases_list[i, j][0]
                break
        break

    # Initialization
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train2', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test2')
    tf.global_variables_initializer().run()

    # Update fixed values
    modules.parameters_update(sess, var_fix_placeholders, var_fix_ops,
                              var_list_fix)

    # GradientDescent
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy,
                                          var_list=var_list_to_learn)

    # Randomly generate candidate geopaths
    geopath_set = np.zeros(FLAGS.candi, dtype=object)
    for i in range(FLAGS.candi):
        geopath_set[i] = modules.get_geopath(FLAGS.L, FLAGS.M, FLAGS.N)

    # parameters placeholders and ops
    var_update_ops = np.zeros(len(var_list_to_learn), dtype=object)
    var_update_placeholders = np.zeros(len(var_list_to_learn), dtype=object)
    for i in range(len(var_list_to_learn)):
        var_update_placeholders[i] = tf.placeholder(
            var_list_to_learn[i].dtype, shape=var_list_to_learn[i].get_shape())
        var_update_ops[i] = var_list_to_learn[i].assign(
            var_update_placeholders[i])

    acc_geo = np.zeros(FLAGS.B, dtype=float)
    summary_geo = np.zeros(FLAGS.B, dtype=object)
    for i in range(FLAGS.max_steps):
        # Select Candidates to Tournament
        compet_idx = list(range(FLAGS.candi))
        np.random.shuffle(compet_idx)
        compet_idx = compet_idx[:FLAGS.B]
        # Learning & Evaluating
        for j in range(len(compet_idx)):
            # Shuffle the data
            idx = list(range(len(tr_data2)))
            np.random.shuffle(idx)
            tr_data2 = tr_data2[idx]
            tr_label2 = tr_label2[idx]
            geopath_insert = np.copy(geopath_set[compet_idx[j]])

            for l in range(FLAGS.L):
                for m in range(FLAGS.M):
                    if (fixed_list[l, m] == '1'):
                        geopath_insert[l, m] = 1.0

            # Insert Candidate
            modules.geopath_insert(sess, geopath_update_placeholders,
                                   geopath_update_ops, geopath_insert, FLAGS.L,
                                   FLAGS.M)
            acc_geo_tr = 0
            for k in range(FLAGS.T):
                summary_geo_tr, _, acc_geo_tmp = sess.run(
                    [merged, train_step, accuracy],
                    feed_dict={
                        x:
                        tr_data2[k * FLAGS.batch_num:(k + 1) *
                                 FLAGS.batch_num, :],
                        y_:
                        tr_label2[k * FLAGS.batch_num:(k + 1) *
                                  FLAGS.batch_num, :]
                    })
                acc_geo_tr += acc_geo_tmp
            acc_geo[j] = acc_geo_tr / FLAGS.T
            summary_geo[j] = summary_geo_tr
        # Tournament
        winner_idx = np.argmax(acc_geo)
        acc = acc_geo[winner_idx]
        summary = summary_geo[winner_idx]
        # Copy and Mutation
        for j in range(len(compet_idx)):
            if (j != winner_idx):
                geopath_set[compet_idx[j]] = np.copy(
                    geopath_set[compet_idx[winner_idx]])
                geopath_set[compet_idx[j]] = modules.mutation(
                    geopath_set[compet_idx[j]], FLAGS.L, FLAGS.M, FLAGS.N)
        train_writer.add_summary(summary, i)
        print('Training Accuracy at step %s: %s' % (i, acc))

    acc_task2 = acc

    if (FLAGS.cifar_first):
        print("CIFAR10_SVHN,TASK1:" + str(acc_task1) + ",TASK2:" +
              str(acc_task2) + ",Done")
    else:
        print("SVHN_CIFAR10,TASK1:" + str(acc_task1) + ",TASK2:" +
              str(acc_task2) + ",Done")

    train_writer.close()
    test_writer.close()