def evaluate():
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """

    with tf.Graph().as_default():
        # Make learnable boundary params (and ops to initialize/set them)
        params_op, params_op_init, params_op_set, squeeze_loss = flow_net.inputs_boundary_learn(
            batch_size, network_type="heat", noise_std=0.01)

        # Make boundary
        boundary = flow_net.inference_boundary(batch_size, 2 * [128],
                                               params_op)

        # predict steady temperature field on the boundary
        predicted_heat = flow_net.inference_network(boundary,
                                                    network_type="heat",
                                                    keep_prob=1.0)

        # loss
        #loss = tf.reduce_sum(predicted_heat)
        loss = tf.reduce_max(predicted_heat)
        loss += squeeze_loss

        # train_op
        variables_to_train = tf.global_variables()
        variables_to_train = [
            variable for i, variable in enumerate(variables_to_train)
            if "params" in variable.name[:variable.name.index(':')]
        ]
        train_step = flow_net.train(loss,
                                    FLAGS.boundary_learn_lr,
                                    train_type="boundary_params",
                                    variables=variables_to_train)

        # init graph
        init = tf.global_variables_initializer()

        # Restore the trained boundary and heat networks for eval.
        variables_to_restore = tf.global_variables()
        variables_to_restore_boundary = [
            variable for i, variable in enumerate(variables_to_restore)
            if "boundary_network" in variable.name[:variable.name.index(':')]
        ]
        variables_to_restore_heat = [
            variable for i, variable in enumerate(variables_to_restore)
            if "heat_network" in variable.name[:variable.name.index(':')]
        ]
        saver_boundary = tf.train.Saver(variables_to_restore_boundary)
        saver_heat = tf.train.Saver(variables_to_restore_heat)

        # start session and init
        sess = tf.Session()
        sess.run(init)
        ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
        ckpt_heat = tf.train.get_checkpoint_state(FLOW_DIR)
        saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
        saver_heat.restore(sess, ckpt_heat.model_checkpoint_path)

        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        params_np = (np.random.rand(1, FLAGS.nr_boundary_params) - .5) / 2.0

        sess.run(params_op_init, feed_dict={params_op_set: params_np})
        run_time = FLAGS.boundary_learn_steps

        # storage for values to plot
        plot_error = []

        # make store dir
        os.system("mkdir ./figs/boundary_learn_image_store")
        for i in tqdm(xrange(run_time)):
            l, _ = sess.run([loss, train_step], feed_dict={})
            print(l)
            plot_error.append(np.sum(l))
            if ((i + 1) % 1 == 0) or i == run_time - 1:
                # save a frame for the video
                p_heat, p_boundary = sess.run([predicted_heat, boundary])

                # save plot image to make video
                fig = plt.figure()
                fig.set_size_inches(15, 5)
                a = fig.add_subplot(1, 3, 2)
                plt.imshow(p_heat[0, :, :, 0])
                #plt.title("Heat Dissipation", fontsize="x-large")
                plt.title("Heat Dissipation", fontsize=16)
                a = fig.add_subplot(1, 3, 3)
                plt.imshow(p_boundary[0, :, :, 0])
                plt.title("Heat Sink Geometry", fontsize=16)
                a = fig.add_subplot(1, 3, 1)
                plt.plot(np.array(plot_error), label="Temp at Source")
                plt.xlim(-10, 510)
                plt.xlabel("Step")
                plt.ylabel("Temp")
                plt.legend()
                plt.suptitle("Heat Sink Optimization Using Gradient Descent",
                             fontsize=20)
                plt.savefig("./figs/boundary_learn_image_store/plot_" +
                            str(i).zfill(5) + ".png")
                if run_time - i <= 100:
                    plt.savefig("./figs/" + FLAGS.boundary_learn_loss +
                                "_plot.png")
                if i == run_time - 1:
                    plt.savefig("./figs/heat_learn_gradient_decent.pdf")
                    plt.show()
                plt.close(fig)

        # generate video of plots
        os.system("rm ./figs/heat_learn_video.mp4")
        os.system(
            "cat ./figs/boundary_learn_image_store/*.png | ffmpeg -f image2pipe -r 30 -vcodec png -i - -vcodec libx264 ./figs/heat_learn_video.mp4"
        )
        os.system("rm -r ./figs/boundary_learn_image_store")
def train():
  """Train ring_net for a number of steps."""
  with tf.Graph().as_default():
    # make inputs
    boundary, sflow = flow_net.inputs(FLAGS.batch_size) 
    # create network and predict steady flow
    sflow_p = flow_net.inference(boundary, FLAGS.keep_prob) 
    # calc error
    error = flow_net.loss_image(sflow_p, sflow) 
    # training op
    train_op = flow_net.train(error, FLAGS.learning_rate)
    # List of all Variables
    variables = tf.global_variables()

    # Build a saver
    saver = tf.train.Saver(tf.global_variables())   
    #for i, variable in enumerate(variables):
    #  print '----------------------------------------------'
    #  print variable.name[:variable.name.index(':')]

    # Summary op
    summary_op = tf.summary.merge_all()
 
    # Build an initialization operation to run below.
    init = tf.global_variables_initializer()

    # Start running operations on the Graph.
    sess = tf.Session()

    # init if this is the very first time training
    sess.run(init)
 
    # init from checkpoint
    saver_restore = tf.train.Saver(variables)
    ckpt = tf.train.get_checkpoint_state(TRAIN_DIR)
    if ckpt is not None:
      print("init from " + TRAIN_DIR)
      try:
         saver_restore.restore(sess, ckpt.model_checkpoint_path)
      except:
         tf.gfile.DeleteRecursively(TRAIN_DIR)
         tf.gfile.MakeDirs(TRAIN_DIR)
         print("there was a problem using variables in checkpoint, random init will be used instead")

    # Start queue runners
    tf.train.start_queue_runners(sess=sess)

    # Summary writer
    graph_def = sess.graph.as_graph_def(add_shapes=True)
    summary_writer = tf.summary.FileWriter(TRAIN_DIR, graph_def=graph_def)

    for step in range(FLAGS.max_steps):
      t = time.time()
      _ , loss_value = sess.run([train_op, error],feed_dict={})
      elapsed = time.time() - t

      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

      if step%100 == 0:
        summary_str = sess.run(summary_op, feed_dict={})
        summary_writer.add_summary(summary_str, step) 
        print("loss value at " + str(loss_value))
        print("time per batch is " + str(elapsed))

      if step%1000 == 0:
        checkpoint_path = os.path.join(TRAIN_DIR, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)  
        print("saved to " + TRAIN_DIR)
def train():
    """Train ring_net for a number of steps."""
    with tf.Graph().as_default():
        # global step counter
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)

        # make inputs
        input_dims = FLAGS.nr_boundary_params
        inputs_vector, true_boundary = flow_net.inputs_boundary(
            input_dims, FLAGS.batch_size, shape)

        # create network and predict boundary
        predicted_boundary = flow_net.inference_boundary(
            FLAGS.batch_size, shape, inputs_vector)

        # calc error
        error = flow_net.loss_boundary(true_boundary, predicted_boundary)

        # training op
        train_op = flow_net.train(error,
                                  FLAGS.lr,
                                  train_type="boundary_network",
                                  global_step=global_step)

        # List of all Variables
        variables = tf.global_variables()

        # Build a saver
        saver = tf.train.Saver(tf.global_variables())

        # Summary op
        summary_op = tf.summary.merge_all()

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        sess = tf.Session()

        # init if this is the very first time training
        sess.run(init)

        # init from checkpoint
        variables_to_restore = tf.global_variables()
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if ("boundary_network" in variable.name[:variable.name.index(':')])
            or ("global_step" in variable.name[:variable.name.index(':')])
        ]
        saver_restore = tf.train.Saver(variables_to_restore_flow)
        ckpt = tf.train.get_checkpoint_state(TRAIN_DIR)
        if ckpt is not None:
            print("init from " + TRAIN_DIR)
            try:
                saver_restore.restore(sess, ckpt.model_checkpoint_path)
            except:
                tf.gfile.DeleteRecursively(TRAIN_DIR)
                tf.gfile.MakeDirs(TRAIN_DIR)
                print(
                    "there was a problem using variables in checkpoint, random init will be used instead"
                )

        # Start queue runners
        tf.train.start_queue_runners(sess=sess)

        # Summary writer
        graph_def = sess.graph.as_graph_def(add_shapes=True)
        summary_writer = tf.summary.FileWriter(TRAIN_DIR, graph_def=graph_def)

        # make boundary dataset
        dataset = Boundary_data("../../data/",
                                size=FLAGS.obj_size,
                                dim=FLAGS.dims,
                                num_params=input_dims)
        dataset.parse_data()

        # calc number of steps left to run
        run_steps = FLAGS.max_steps - int(sess.run(global_step))
        print(sess.run(global_step))
        for step in xrange(run_steps):
            current_step = sess.run(global_step)
            t = time.time()
            batch_params, batch_boundary = dataset.minibatch(
                batch_size=FLAGS.batch_size,
                signed_distance_function=FLAGS.sdf)
            _, loss_value, gen_boundary = sess.run(
                [train_op, error, predicted_boundary],
                feed_dict={
                    inputs_vector: batch_params,
                    true_boundary: batch_boundary
                })
            elapsed = time.time() - t

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if current_step % 100 == 0:
                print("loss value at " + str(loss_value))
                print("time per batch is " + str(elapsed))

            if current_step % 1000 == 0:
                summary_str = sess.run(summary_op,
                                       feed_dict={
                                           inputs_vector: batch_params,
                                           true_boundary: batch_boundary
                                       })
                summary_writer.add_summary(summary_str, current_step)
                checkpoint_path = os.path.join(TRAIN_DIR, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=global_step)
                print("saved to " + TRAIN_DIR)
def evaluate():
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """

    with tf.Graph().as_default():
        # Make learnable boundary params (and ops to initialize/set them)
        params_op, params_op_init, params_op_set, squeeze_loss = flow_net.inputs_boundary_learn(
            batch_size, network_type="heat", noise_std=0.01)

        # Make boundary
        boundary = flow_net.inference_boundary(batch_size, 2 * [128],
                                               params_op)

        # predict steady temperature field on the boundary
        predicted_heat = flow_net.inference_network(boundary,
                                                    network_type="heat")

        # loss
        loss = tf.reduce_max(predicted_heat)
        loss += squeeze_loss

        # train_op
        variables_to_train = tf.global_variables()
        variables_to_train = [
            variable for i, variable in enumerate(variables_to_train)
            if "params" in variable.name[:variable.name.index(':')]
        ]
        train_step = flow_net.train(loss,
                                    FLAGS.boundary_learn_lr,
                                    train_type="boundary_params",
                                    variables=variables_to_train)

        # init graph
        init = tf.global_variables_initializer()

        # Restore the trained boundary and heat networks for eval.
        variables_to_restore = tf.global_variables()
        variables_to_restore_boundary = [
            variable for i, variable in enumerate(variables_to_restore)
            if "boundary_network" in variable.name[:variable.name.index(':')]
        ]
        variables_to_restore_heat = [
            variable for i, variable in enumerate(variables_to_restore)
            if "heat_network" in variable.name[:variable.name.index(':')]
        ]
        saver_boundary = tf.train.Saver(variables_to_restore_boundary)
        saver_heat = tf.train.Saver(variables_to_restore_heat)

        # start session and init
        sess = tf.Session()
        sess.run(init)
        ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
        ckpt_heat = tf.train.get_checkpoint_state(FLOW_DIR)
        saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
        saver_heat.restore(sess, ckpt_heat.model_checkpoint_path)

        # make graph
        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        # total run time
        run_time = FLAGS.boundary_learn_steps

        # use same start for comparison
        start_params_np = (
            np.random.rand(batch_size, FLAGS.nr_boundary_params) - .5) / 2.0

        # gradient descent
        plot_error_gradient_decent = np.zeros((num_runs, run_time))
        for sim in tqdm(xrange(num_runs)):
            sess.run(params_op_init,
                     feed_dict={params_op_set: start_params_np})
            for i in tqdm(xrange(run_time)):
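                # shift the learned params by -0.5 and clamp to [-0.5, 0.5]
                # before handing them to the reference heat sink solver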
                plot_error_gradient_decent[sim, i] = run_heat_sink_simulation(
                    np.minimum(np.maximum(sess.run(params_op) - 0.5, -.5),
                               0.5))
                sess.run(train_step, feed_dict={})
        gradient_descent_boundary = heat_sink_boundary_2d(
            sess.run(params_op)[0], [128, 128])

        # simulated annealing
        plot_error_simulated_annealing = np.zeros(
            (len(temps), num_runs, run_time))
        for t in tqdm(xrange(len(temps))):
            for sim in tqdm(xrange(num_runs)):
                temp = temps[t]
                param_old = start_params_np
                fittness_old = run_heat_sink_simulation(param_old)
                param_new = distort_param(start_params_np, std)
                fittness_new = 0.0
                for i in tqdm(xrange(run_time)):
                    plot_error_simulated_annealing[t, sim, i] = fittness_old
                    fittness_new = run_heat_sink_simulation(param_new)
                    param_old, fittness_old, temp = simulated_annealing_step(
                        param_old,
                        fittness_old,
                        param_new,
                        fittness_new,
                        temp=temp)
                    param_new = distort_param(param_old, std)

        simulated_annealing_boundary = heat_sink_boundary_2d(
            param_old[0] + .5, [128, 128])

        x = np.arange(run_time)

        plot_error_gradient_decent_mean, plot_error_gradient_decent_std = calc_mean_and_std(
            plot_error_gradient_decent)

        fig = plt.figure()
        fig.set_size_inches(10, 5)
        a = fig.add_subplot(1, 2, 1)
        plt.imshow((simulated_annealing_boundary -
                    .5 * gradient_descent_boundary)[:, :, 0])
        plt.title("Difference in Heat Sink Design", fontsize=16)
        a = fig.add_subplot(1, 2, 2)
        plt.errorbar(x,
                     plot_error_gradient_decent_mean,
                     yerr=plot_error_gradient_decent_std,
                     lw=1.0,
                     label="Gradient Descent")
        for t in tqdm(xrange(len(temps))):
            plot_error_simulated_annealing_mean, plot_error_simulated_annealing_std = calc_mean_and_std(
                plot_error_simulated_annealing[t])
            plt.errorbar(x,
                         plot_error_simulated_annealing_mean,
                         yerr=plot_error_simulated_annealing_std,
                         lw=1.0,
                         label="Simulated Annealing temp = " + str(temps[t]))
        plt.xlabel('Step')
        plt.ylabel('Temp at Source')
        plt.suptitle("Gradient Descent vs Simulated Annealing", fontsize=20)
        plt.legend(loc="upper_left")
        plt.savefig("./figs/heat_learn_comparison.pdf")
        plt.show()
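
# --- Hedged sketch (not part of the original example) ---------------------
# simulated_annealing_step() is defined elsewhere in this project; the
# comparison above only needs a Metropolis-style accept/reject with a cooling
# temperature. A plausible stand-in (assumed cooling rate, not the author's
# implementation):
def _sketch_simulated_annealing_step(param_old, fitness_old, param_new,
                                     fitness_new, temp, cooling=0.999):
    import numpy as np
    delta = fitness_new - fitness_old
    # always accept improvements; accept worse moves with prob exp(-delta/temp)
    if delta < 0.0 or np.random.rand() < np.exp(-delta / max(temp, 1e-8)):
        param_old, fitness_old = param_new, fitness_new
    return param_old, fitness_old, temp * cooling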
def evaluate():
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """

    num_angles = 9
    max_angle = 0.30
    min_angle = -0.10
    set_params = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params[:, :] = 0.0
    set_params_pos = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params_pos[:, :] = 1.0
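    # set_params holds the fixed values; set_params_pos appears to act as a
    # mask over which params are learned (1.0 = learn, 0.0 = hold fixed)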

    for i in xrange(num_angles):
        set_params[i, 0] = -i
    set_params[:, 0] = ((max_angle - min_angle) *
                        (set_params[:, 0] / (num_angles - 1))) - min_angle

    set_params[:, 1] = 0.5
    set_params[:, 2] = 1.0
    set_params[:, -1] = 0.0

    set_params_pos[:, 0] = 0.0  # hold angle fixed at the swept values above
    set_params_pos[:, 1] = 0.0  # hold n_1 fixed at 0.5
    set_params_pos[:, 2] = 0.0  # hold n_2 fixed at 1.0
    set_params_pos[:, -1] = 0.0  # hold tail height fixed at 0.0

    with tf.Graph().as_default():
        # Make learnable boundary params (and ops to initialize/set them)
        params_op, params_op_init, params_op_set, squeeze_loss = flow_net.inputs_boundary_learn(
            batch_size,
            set_params=set_params,
            set_params_pos=set_params_pos,
            noise_std=0.01)

        # Make boundary
        boundary = flow_net.inference_boundary(batch_size *
                                               set_params.shape[0],
                                               FLAGS.dims * [FLAGS.obj_size],
                                               params_op,
                                               full_shape=shape)
        #sharp_boundary =

        # predict steady flow on boundary
        predicted_flow = flow_net.inference_network(boundary)

        # quantities to optimize
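        # force on the boundary computed from the predicted pressure field
        # (channel 2 of the network output)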
        force = calc_force(boundary, predicted_flow[:, :, :, 2:3])
        drag_x = tf.reduce_sum(force[:, :, :, 0], axis=[1, 2]) / batch_size
        drag_y = tf.reduce_sum(force[:, :, :, 1], axis=[1, 2]) / batch_size

        drag_lift_ratio = -(drag_y / drag_x)

        # loss
        loss = -tf.reduce_sum(drag_lift_ratio)
        loss += squeeze_loss

        # train_op
        variables_to_train = tf.global_variables()
        variables_to_train = [
            variable for i, variable in enumerate(variables_to_train)
            if "params" in variable.name[:variable.name.index(':')]
        ]
        train_step = flow_net.train(loss,
                                    FLAGS.boundary_learn_lr,
                                    train_type="boundary_params",
                                    variables=variables_to_train)

        # init graph
        init = tf.global_variables_initializer()

        # Restore the trained boundary and flow networks for eval.
        variables_to_restore = tf.global_variables()
        variables_to_restore_boundary = [
            variable for i, variable in enumerate(variables_to_restore)
            if "boundary_network" in variable.name[:variable.name.index(':')]
        ]
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if "flow_network" in variable.name[:variable.name.index(':')]
        ]
        saver_boundary = tf.train.Saver(variables_to_restore_boundary)
        saver_flow = tf.train.Saver(variables_to_restore_flow)

        # start session and init
        sess = tf.Session()
        sess.run(init)
        ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
        ckpt_flow = tf.train.get_checkpoint_state(FLOW_DIR)
        saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
        saver_flow.restore(sess, ckpt_flow.model_checkpoint_path)

        # make graph
        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        # total run time
        run_time = FLAGS.boundary_learn_steps

        # use same start for comparison
        start_params_np = (
            np.random.rand(batch_size, FLAGS.nr_boundary_params) - .5)

        # gradient descent
        plot_error_gradient_decent = np.zeros((num_runs, run_time))
        for sim in tqdm(xrange(num_runs)):
            sess.run(params_op_init,
                     feed_dict={params_op_set: start_params_np})
            for i in tqdm(xrange(run_time)):
                l, _ = sess.run([loss, train_step], feed_dict={})
                if i == run_time - 1:
                    d_l_ratio = sess.run(drag_lift_ratio)
                plot_error_gradient_decent[sim, i] = np.sum(l)

        # simulated annealing
        plot_error_simulated_annealing = np.zeros(
            (len(temps), num_runs, run_time))
        for t in tqdm(xrange(len(temps))):
            for sim in tqdm(xrange(num_runs)):
                sess.run(params_op_init,
                         feed_dict={params_op_set: start_params_np})
                temp = temps[t]
                param_old = start_params_np
                param_new = distort_param(start_params_np, std)
                fittness_old = sess.run(loss)
                fittness_new = 0.0
                for i in tqdm(xrange(run_time)):
                    sess.run(params_op_init,
                             feed_dict={params_op_set: param_new})
                    fittness_new = sess.run(loss)
                    param_old, fittness_old, temp = simulated_annealing_step(
                        param_old,
                        fittness_old,
                        param_new,
                        fittness_new,
                        temp=temp)
                    param_new = distort_param(param_old, std)
                    plot_error_simulated_annealing[t, sim, i] = fittness_old

        x = np.arange(run_time)

        fig = plt.figure()
        fig.set_size_inches(5, 5)

        plot_error_gradient_decent_mean, plot_error_gradient_decent_std = calc_mean_and_std(
            plot_error_gradient_decent)
        plt.errorbar(x,
                     plot_error_gradient_decent_mean,
                     yerr=plot_error_gradient_decent_std,
                     lw=1.0,
                     label="Gradient Descent")

        for t in tqdm(xrange(len(temps))):
            plot_error_simulated_annealing_mean, plot_error_simulated_annealing_std = calc_mean_and_std(
                plot_error_simulated_annealing[t])
            #plt.errorbar(x, plot_error_simulated_annealing_mean, yerr=plot_error_simulated_annealing_std, c='g', lw=1.0, label="Simulated Annealing temp = " + str(temps[t]))
            plt.errorbar(x,
                         plot_error_simulated_annealing_mean,
                         yerr=plot_error_simulated_annealing_std,
                         lw=1.0,
                         label="Simulated Annealing temp = " + str(temps[t]))

        plt.xlabel('Step')
        plt.ylabel('Loss')
        plt.title("Optimization", fontsize=20)
        plt.legend(loc="upper_left")
        plt.savefig("./figs/learn_comparison.pdf")
        plt.show()
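
# --- Hedged sketch (not part of the original example) ---------------------
# calc_mean_and_std() is also defined elsewhere; judging from how its result
# feeds plt.errorbar over (num_runs, run_time) arrays, it presumably reduces
# over the runs axis. A guessed stand-in:
def _sketch_calc_mean_and_std(values):
    import numpy as np
    # values: array of shape (num_runs, run_time)
    return np.mean(values, axis=0), np.std(values, axis=0)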
def evaluate():
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """

    num_angles = 9
    max_angle = 0.30
    min_angle = -0.10
    set_params = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params[:, :] = 0.0
    set_params_pos = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params_pos[:, :] = 1.0

    for i in xrange(num_angles):
        set_params[i, 0] = -i
    set_params[:, 0] = ((max_angle - min_angle) *
                        (set_params[:, 0] / (num_angles - 1))) - min_angle

    set_params[:, 1] = 0.5
    set_params[:, 2] = 1.0
    set_params[:, -1] = 0.0

    set_params_pos[:, 0] = 0.0  # hold angle fixed at the swept values above
    set_params_pos[:, 1] = 0.0  # hold n_1 fixed at 0.5
    set_params_pos[:, 2] = 0.0  # hold n_2 fixed at 1.0
    set_params_pos[:, -1] = 0.0  # hold tail height fixed at 0.0

    with tf.Graph().as_default():
        # Make learnable boundary params (and ops to initialize/set them)
        params_op, params_op_init, params_op_set, squeeze_loss = flow_net.inputs_boundary_learn(
            batch_size,
            set_params=set_params,
            set_params_pos=set_params_pos,
            noise_std=0.01)

        # Make placeholder for flow computed by lattice boltzmann solver
        solver_boundary, solver_flow = flow_net.inputs_flow(
            1, shape, FLAGS.dims)
        sharp_boundary, _ = flow_net.inputs_flow(
            batch_size * set_params.shape[0], shape, FLAGS.dims)

        # Make boundary
        boundary = flow_net.inference_boundary(batch_size *
                                               set_params.shape[0],
                                               FLAGS.dims * [FLAGS.obj_size],
                                               params_op,
                                               full_shape=shape)

        # predict steady flow on boundary
        predicted_flow = flow_net.inference_network(boundary,
                                                    network_type="flow",
                                                    keep_prob=FLAGS.keep_prob)
        sharp_predicted_flow = flow_net.inference_network(
            sharp_boundary, network_type="flow", keep_prob=FLAGS.keep_prob)

        # quantities to optimize
        force = calc_force(boundary, predicted_flow[..., -1:])
        sharp_force = calc_force(sharp_boundary,
                                 sharp_predicted_flow[..., -1:])
        solver_force = calc_force(solver_boundary, solver_flow[..., -1:])
        drag_x = tf.reduce_sum(force[..., 0], axis=[1, 2]) / batch_size
        drag_y = tf.reduce_sum(force[..., 1], axis=[1, 2]) / batch_size
        sharp_drag_x = tf.reduce_sum(sharp_force[..., 0],
                                     axis=[1, 2]) / batch_size
        sharp_drag_y = tf.reduce_sum(sharp_force[..., 1],
                                     axis=[1, 2]) / batch_size
        solver_drag_x = tf.reduce_sum(solver_force[..., 0],
                                      axis=[1, 2]) / batch_size
        solver_drag_y = tf.reduce_sum(solver_force[..., 1],
                                      axis=[1, 2]) / batch_size

        drag_lift_ratio = -(drag_y / drag_x)
        sharp_drag_lift_ratio = -(sharp_drag_y / sharp_drag_x)
        solver_drag_lift_ratio = -(solver_drag_y / solver_drag_x)

        # loss
        loss = -tf.reduce_sum(drag_lift_ratio)
        #loss = -drag_y + drag_x
        #loss = -tf.reduce_sum(drag_x)
        loss += squeeze_loss

        # train_op
        variables_to_train = tf.global_variables()
        variables_to_train = [
            variable for i, variable in enumerate(variables_to_train)
            if "params" in variable.name[:variable.name.index(':')]
        ]
        train_step = flow_net.train(loss,
                                    FLAGS.boundary_learn_lr,
                                    train_type="boundary_params",
                                    variables=variables_to_train)

        # init graph
        init = tf.global_variables_initializer()

        # Restore the trained boundary and flow networks for eval.
        variables_to_restore = tf.global_variables()
        variables_to_restore_boundary = [
            variable for i, variable in enumerate(variables_to_restore)
            if "boundary_network" in variable.name[:variable.name.index(':')]
        ]
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if "flow_network" in variable.name[:variable.name.index(':')]
        ]
        saver_boundary = tf.train.Saver(variables_to_restore_boundary)
        saver_flow = tf.train.Saver(variables_to_restore_flow)

        # start session and init
        sess = tf.Session()
        sess.run(init)
        ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
        ckpt_flow = tf.train.get_checkpoint_state(FLOW_DIR)
        saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
        saver_flow.restore(sess, ckpt_flow.model_checkpoint_path)

        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        params_np = (np.random.rand(1, FLAGS.nr_boundary_params) - .5)
        #params_np = np.zeros((1,FLAGS.nr_boundary_params-1))

        sess.run(params_op_init, feed_dict={params_op_set: params_np})
        run_time = FLAGS.boundary_learn_steps

        # storage for values to plot
        plot_error = np.zeros((run_time))
        plot_drag_y = np.zeros((run_time))
        plot_drag_x = np.zeros((run_time))

        # make store dir
        os.system("mkdir ./figs/boundary_learn_image_store")
        for i in tqdm(xrange(run_time)):
            l, _, d_y, d_x = sess.run([loss, train_step, drag_y, drag_x],
                                      feed_dict={})
            plot_error[i] = np.sum(l)
            plot_drag_x[i] = np.sum(d_x[fig_pos])
            plot_drag_y[i] = np.sum(d_y[fig_pos])
            if ((i + 1) % 1 == 0) or i == run_time - 1:
                # save a frame for the video
                s_params = sess.run(params_op)
                wing_boundary = []
                for p in xrange(s_params.shape[0]):
                    wing_boundary.append(
                        wing_boundary_2d(
                            s_params[p, 0], s_params[p, 1], s_params[p, 2],
                            s_params[p, 3:int((FLAGS.nr_boundary_params - 4) /
                                              2)],
                            s_params[p,
                                     int((FLAGS.nr_boundary_params - 4) /
                                         2):-1], s_params[p, -1],
                            FLAGS.dims * [FLAGS.obj_size]))
                wing_boundary = np.stack(wing_boundary)
                wing_boundary = np.pad(
                    wing_boundary, [[0, 0], [128, 128], [128, 128], [0, 0]],
                    'constant',
                    constant_values=0.0)
                #print(sharp_boundary.get_shape())
                #print(wing_boundary.shape)
                p_flow, p_boundary, d_l_ratio, sharp_d_l_ratio = sess.run(
                    [
                        sharp_predicted_flow, boundary, drag_lift_ratio,
                        sharp_drag_lift_ratio
                    ],
                    feed_dict={sharp_boundary: wing_boundary})

                # save plot image to make video
                p_pressure = p_flow[fig_pos, :, :, 2]
                p_boundary = p_boundary[fig_pos, :, :, 0]
                fig = plt.figure()
                fig.set_size_inches(15, 10)
                a = fig.add_subplot(2, 3, 1)
                plt.imshow(p_pressure)
                plt.title("Pressure", fontsize=16)
                a = fig.add_subplot(2, 3, 2)
                plt.imshow(p_boundary)
                plt.title("Boundary", fontsize=16)
                a = fig.add_subplot(2, 3, 3)
                plt.plot(plot_error, label="Sum(Lift/Drag)")
                plt.xlabel("Step")
                plt.legend()
                a = fig.add_subplot(2, 3, 4)
                plt.plot(-plot_drag_x, label="Drag Angle 0")
                plt.plot(plot_drag_y, label="Lift Angle 0")
                plt.ylim(-1.0, np.max(plot_drag_y) + 2.0)
                plt.xlabel("Step")
                plt.legend()
                a = fig.add_subplot(2, 3, 5)
                plt.plot(-np.degrees(set_params[:, 0]),
                         d_l_ratio,
                         'bo',
                         label="Lift/Drag Network")
                #plt.plot(-np.degrees(set_params[:,0]), sharp_d_l_ratio, 'ro', label="Lift/Drag Sharp")
                #if i == run_time-1:
                #  solver_d_l_ratio = run_flow_solver(sess.run(params_op), solver_boundary, solver_flow, sess, solver_drag_lift_ratio)
                #  plt.plot(-np.degrees(set_params[:,0]), solver_d_l_ratio, 'go', label="Lift/Drag Solver")
                plt.xlabel("Angle of Attack (Degrees)")
                plt.xlim(
                    min(-np.degrees(set_params[:, 0])) - 3,
                    max(-np.degrees(set_params[:, 0])) + 3)
                plt.ylim(np.min(d_l_ratio) - 1, np.max(d_l_ratio) + 2)
                plt.legend()
                plt.suptitle("2D Wing Optimization Using Gradient Descent",
                             fontsize=20)
                plt.savefig("./figs/boundary_learn_image_store/plot_" +
                            str(i).zfill(5) + ".png")
                if run_time - i <= 100:
                    plt.savefig("./figs/" + FLAGS.boundary_learn_loss +
                                "_plot.pdf")
                if i == run_time - 1:
                    plt.savefig("./figs/learn_gradient_descent.pdf")
                    plt.show()
                #plt.show()
                plt.close(fig)

        # generate video of plots
        os.system("rm ./figs/airfoil_2d_video.mp4")
        os.system(
            "cat ./figs/boundary_learn_image_store/*.png | ffmpeg -f image2pipe -r 30 -vcodec png -i - -vcodec libx264 ./figs/airfoil_2d_video.mp4"
        )
        os.system("rm -r ./figs/boundary_learn_image_store")