def evaluate():
    """Run Eval once.
  """
    with tf.Session() as sess:
        # Make image placeholder
        boundary, true_flow = flow_net.inputs_flow(batch_size=batch_size,
                                                   shape=shape,
                                                   dims=FLAGS.dims)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        predicted_flow = flow_net.inference_network(boundary,
                                                    network_type="flow",
                                                    keep_prob=FLAGS.keep_prob)

        # predict force
        if FLAGS.flow_model == "xiao_network":
            sharp_boundary = tf.minimum(
                tf.maximum(tf.round(-boundary + .5), 0.0), 1.0)
        else:
            sharp_boundary = boundary
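        # force is computed from the (possibly sharpened) boundary and the
        # pressure channel of the predicted flow (index 2 of the 2-D
        # (vx, vy, p) field)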
        predicted_force = calc_force(sharp_boundary, predicted_flow[..., 2:3])
        predicted_drag_x = tf.reduce_sum(predicted_force[..., 0], axis=[1, 2])
        predicted_drag_y = tf.reduce_sum(predicted_force[..., 1], axis=[1, 2])
        true_force = calc_force(sharp_boundary, true_flow[..., 2:3])
        true_drag_x = tf.reduce_sum(true_force[..., 0], axis=[1, 2])
        true_drag_y = tf.reduce_sum(true_force[..., 1], axis=[1, 2])

        # predicted max vel
        predicted_flow_norm = calc_velocity_norm(predicted_flow)
        true_flow_norm = calc_velocity_norm(true_flow)
        predicted_max_vel = tf.reduce_max(predicted_flow_norm, axis=[1, 2])
        true_max_vel = tf.reduce_max(true_flow_norm, axis=[1, 2])

        # Restore for eval
        init = tf.global_variables_initializer()
        sess.run(init)
        variables_to_restore = tf.all_variables()
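        # variable names have the form "scope/var_name:0"; matching on the part
        # before ':' selects only the flow_network weights for restoring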
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if "flow_network" in variable.name[:variable.name.index(':')]
        ]
        saver = tf.train.Saver(variables_to_restore_flow)
        ckpt = tf.train.get_checkpoint_state(FLOW_DIR)
        saver.restore(sess, ckpt.model_checkpoint_path)
        global_step = 1

        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        # make vtm dataset
        dataset = Sailfish_data("../../data/",
                                size=FLAGS.obj_size,
                                dim=FLAGS.dims)
        dataset.parse_data()
        #dataset.load_data(FLAGS.dims, FLAGS.obj_size)

        # store drag data
        p_drag_x_data = []
        t_drag_x_data = []
        p_drag_y_data = []
        t_drag_y_data = []
        p_max_vel_data = []
        t_max_vel_data = []

        #for run in filenames:
        #for i in tqdm(xrange(60)):
        for i in tqdm(xrange(99)):
            # read in boundary
            batch_boundary, batch_flow = dataset.minibatch(
                train=False,
                batch_size=batch_size,
                signed_distance_function=FLAGS.sdf)

            # calc flow
            p_drag_x, t_drag_x, p_drag_y, t_drag_y, p_max_vel, t_max_vel = sess.run(
                [
                    predicted_drag_x, true_drag_x, predicted_drag_y,
                    true_drag_y, predicted_max_vel, true_max_vel
                ],
                feed_dict={
                    boundary: batch_boundary,
                    true_flow: batch_flow
                })
            #plt.imshow(sess.run(sharp_boundary, feed_dict={boundary: batch_boundary})[0,:,:,0] - batch_boundary[0,:,:,0])
            #plt.imshow(batch_boundary[0,:,:,0])
            #plt.show()
            p_drag_x_data.append(p_drag_x)
            t_drag_x_data.append(t_drag_x)
            p_drag_y_data.append(p_drag_y)
            t_drag_y_data.append(t_drag_y)
            p_max_vel_data.append(p_max_vel)
            t_max_vel_data.append(t_max_vel)

        # display it
        p_drag_x_data = np.concatenate(p_drag_x_data, axis=0)
        t_drag_x_data = np.concatenate(t_drag_x_data, axis=0)
        p_drag_y_data = np.concatenate(p_drag_y_data, axis=0)
        t_drag_y_data = np.concatenate(t_drag_y_data, axis=0)
        p_max_vel_data = np.concatenate(p_max_vel_data, axis=0)
        t_max_vel_data = np.concatenate(t_max_vel_data, axis=0)

        # save it
        np.savez("./figs/store_flow_accuracy_values/" + FLAGS.flow_model,
                 p_drag_x_data=p_drag_x_data,
                 t_drag_x_data=t_drag_x_data,
                 p_drag_y_data=p_drag_y_data,
                 t_drag_y_data=t_drag_y_data,
                 p_max_vel_data=p_max_vel_data,
                 t_max_vel_data=t_max_vel_data)
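        # note: np.savez does not create directories, so
        # ./figs/store_flow_accuracy_values/ must exist before this call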

        fig = plt.figure(figsize=(15, 15))
        a = fig.add_subplot(2, 2, 1)
        font_size_axis = 6
        plt.scatter(t_drag_x_data, p_drag_x_data)
        plt.plot(t_drag_x_data, t_drag_x_data, color="red")
        plt.title("X Force", fontsize=38)
        a = fig.add_subplot(2, 2, 2)
        plt.scatter(t_drag_y_data, p_drag_y_data)
        plt.plot(t_drag_y_data, t_drag_y_data, color="red")
        plt.title("Y Force", fontsize=38)
        a = fig.add_subplot(2, 2, 3)
        plt.scatter(t_max_vel_data, p_max_vel_data)
        plt.plot(t_max_vel_data, t_max_vel_data, color="red")
        plt.title("Max Velocity", fontsize=38)
        plt.ylabel("Predicted", fontsize=26)
        plt.xlabel("True", fontsize=26)
        plt.savefig("./figs/flow_accuracy_2d.pdf")
        plt.show()
def evaluate():
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """

    num_angles = 9
    max_angle = 0.10
    min_angle = -0.30
    set_params = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params[:, :] = 0.0
    set_params_pos = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params_pos[:, :] = 1.0

    for i in xrange(num_angles):
        set_params[i, 0] = -i
    set_params[:, 0] = ((max_angle - min_angle) *
                        (set_params[:, 0] / (num_angles - 1))) - min_angle
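    # map indices 0..num_angles-1 onto an evenly spaced angle sweep; with this
    # sign convention the resulting values run from -min_angle down to -max_angle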
    print(set_params[:, 0])

    set_params[:, 2] = 0.5
    set_params[:, 3] = 1.0
    set_params[:, -1] = 0.0

    set_params_pos[:, 0] = 0.0  # set angle to 0.0
    set_params_pos[:, 1] = 0.0  # set angle to 0.0
    set_params_pos[:, 2] = 0.0  # set n_1 to .5
    set_params_pos[:, 3] = 0.0  # set n_2 to 1.0
    set_params_pos[:, -1] = 0.0  # set tail height to 0.0

    with tf.Graph().as_default():
        # Make image placeholder
        params_op, params_op_init, params_op_set, squeeze_loss = flow_net.inputs_boundary_learn(
            batch_size,
            set_params=set_params,
            set_params_pos=set_params_pos,
            noise_std=0.01)

        # Make placeholder for flow computed by lattice boltzmann solver
        solver_boundary, solver_flow = flow_net.inputs_flow(
            1, shape, FLAGS.dims)
        sharp_boundary, blaa = flow_net.inputs_flow(
            batch_size * set_params.shape[0], shape, FLAGS.dims)

        # Make boundary
        params_op_0, params_op_1, params_op_2 = tf.split(params_op, 3, axis=0)
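        # the parameter batch is split into three equal groups so that each GPU
        # builds the boundary and flow graph for a third of the angle sweep
        # (num_angles must therefore be divisible by 3)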
        with tf.device('/gpu:0'):
            boundary_0 = flow_net.inference_boundary(
                batch_size * int(set_params.shape[0] / 3),
                FLAGS.dims * [FLAGS.obj_size],
                params_op_0,
                full_shape=shape)
        with tf.device('/gpu:1'):
            boundary_1 = flow_net.inference_boundary(
                batch_size * int(set_params.shape[0] / 3),
                FLAGS.dims * [FLAGS.obj_size],
                params_op_1,
                full_shape=shape)
        with tf.device('/gpu:2'):
            boundary_2 = flow_net.inference_boundary(
                batch_size * int(set_params.shape[0] / 3),
                FLAGS.dims * [FLAGS.obj_size],
                params_op_2,
                full_shape=shape)

        # predict steady flow on boundary
        grads = []
        loss_gen = []
        with tf.device('/gpu:0'):
            predicted_flow_0 = flow_net.inference_network(
                boundary_0, network_type="flow", keep_prob=FLAGS.keep_prob)
            force_0 = calc_force(boundary_0, predicted_flow_0[..., -1:])
            drag_x_0 = tf.reduce_sum(force_0[..., 0], axis=[1, 2, 3]) / batch_size
            drag_y_0 = tf.reduce_sum(force_0[..., 1], axis=[1, 2, 3]) / batch_size
            drag_z_0 = tf.reduce_sum(force_0[..., 2], axis=[1, 2, 3]) / batch_size
            drag_lift_ratio_0 = -(drag_x_0 / drag_z_0)
            loss_0 = -tf.reduce_sum(drag_lift_ratio_0)
            loss_0 += squeeze_loss
            variables_to_train = tf.all_variables()
            variables_to_train = [
                variable for i, variable in enumerate(variables_to_train)
                if "params" in variable.name[:variable.name.index(':')]
            ]
            loss_gen.append(loss_0)
            # store grads
            grads.append(tf.gradients(loss_gen[0], variables_to_train))
        with tf.device('/gpu:1'):
            predicted_flow_1 = flow_net.inference_network(
                boundary_1, network_type="flow", keep_prob=FLAGS.keep_prob)
            force_1 = calc_force(boundary_1, predicted_flow_1[..., -1:])
            drag_x_1 = tf.reduce_sum(force_1[..., 0], axis=[1, 2, 3]) / batch_size
            drag_y_1 = tf.reduce_sum(force_1[..., 1], axis=[1, 2, 3]) / batch_size
            drag_z_1 = tf.reduce_sum(force_1[..., 2], axis=[1, 2, 3]) / batch_size
            drag_lift_ratio_1 = -(drag_x_1 / drag_z_1)
            loss_1 = -tf.reduce_sum(drag_lift_ratio_1)
            loss_1 += squeeze_loss
            variables_to_train = tf.all_variables()
            variables_to_train = [
                variable for i, variable in enumerate(variables_to_train)
                if "params" in variable.name[:variable.name.index(':')]
            ]
            loss_gen.append(loss_1)
            # store grads
            grads.append(tf.gradients(loss_gen[1], variables_to_train))
        with tf.device('/gpu:2'):
            predicted_flow_2 = flow_net.inference_network(
                boundary_2, network_type="flow", keep_prob=FLAGS.keep_prob)
            force_2 = calc_force(boundary_2, predicted_flow_2[..., -1:])
            drag_x_2 = tf.reduce_sum(force_2[..., 0], axis=[1, 2, 3]) / batch_size
            drag_y_2 = tf.reduce_sum(force_2[..., 1], axis=[1, 2, 3]) / batch_size
            drag_z_2 = tf.reduce_sum(force_2[..., 2], axis=[1, 2, 3]) / batch_size
            drag_lift_ratio_2 = -(drag_x_2 / drag_z_2)
            loss_2 = -tf.reduce_sum(drag_lift_ratio_2)
            loss_2 += squeeze_loss
            variables_to_train = tf.all_variables()
            variables_to_train = [
                variable for i, variable in enumerate(variables_to_train)
                if "params" in variable.name[:variable.name.index(':')]
            ]
            loss_gen.append(loss_2)
            # store grads
            grads.append(tf.gradients(loss_gen[2], variables_to_train))

        # store up the loss and gradients on gpu:0
        with tf.device('/gpu:0'):
            for i in range(1, 3):
                loss_gen[0] += loss_gen[i]
                for j in range(len(grads[0])):
                    grads[0][j] += grads[i][j]
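            # the per-GPU losses and per-variable gradients are summed
            # elementwise above; adam_updates (imported helper, assumed to
            # return Adam-style update ops) then applies them to the boundary
            # parameters and tf.group bundles the ops into one train op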

            train_step = tf.group(
                adam_updates(variables_to_train,
                             grads[0],
                             lr=FLAGS.boundary_learn_lr,
                             mom1=0.95,
                             mom2=0.9995))

        #with tf.device('/cpu:0'):
        #  sharp_predicted_flow = flow_net.inference_network(sharp_boundary, network_type="flow", keep_prob=FLAGS.keep_prob)

        # quantities to optimize
        #force = calc_force(boundary, predicted_flow[...,-1:])
        #sharp_force = calc_force(sharp_boundary, sharp_predicted_flow[...,-1:])
        #solver_force = calc_force(solver_boundary, solver_flow[...,-1:])
        #drag_x = tf.reduce_sum(force[...,0], axis=[1,2,3])/batch_size
        #drag_y = tf.reduce_sum(force[...,1], axis=[1,2,3])/batch_size
        #drag_z = tf.reduce_sum(force[...,2], axis=[1,2,3])/batch_size
        """
    sharp_drag_x = tf.reduce_sum(sharp_force[...,0], axis=[1,2,3])/batch_size
    sharp_drag_y = tf.reduce_sum(sharp_force[...,1], axis=[1,2,3])/batch_size
    sharp_drag_z = tf.reduce_sum(sharp_force[...,2], axis=[1,2,3])/batch_size
    solver_drag_x = tf.reduce_sum(solver_force[...,0], axis=[1,2,3])/batch_size
    solver_drag_y = tf.reduce_sum(solver_force[...,1], axis=[1,2,3])/batch_size
    solver_drag_z = tf.reduce_sum(solver_force[...,2], axis=[1,2,3])/batch_size
    """

        #drag_x = tf.concat([drag_x_0, drag_x_1, drag_x_2], axis=0)
        #drag_y = tf.concat([drag_y_0, drag_y_1, drag_y_2], axis=0)
        #drag_z = tf.concat([drag_z_0, drag_z_1, drag_z_2], axis=0)
        #drag_lift_ratio        = -(drag_x/drag_z)
        #sharp_drag_lift_ratio  = -(sharp_drag_x/sharp_drag_z)
        #solver_drag_lift_ratio = -(solver_drag_x/solver_drag_z)

        # loss
        #loss = - tf.abs(tf.constant([-25.0, 100.0, 200.0]) - drag_x) - drag_z
        #loss = drag_x - drag_z/2.0
        #loss = -tf.reduce_sum(drag_lift_ratio)
        #loss += squeeze_loss

        # train_op
        #variables_to_train = tf.all_variables()
        #variables_to_train = [variable for i, variable in enumerate(variables_to_train) if "params" in variable.name[:variable.name.index(':')]]
        #train_step = flow_net.train(loss, FLAGS.boundary_learn_lr, train_type="boundary_params", variables=variables_to_train)

        # init graph
        init = tf.global_variables_initializer()

        # Restore the moving average version of the learned variables for eval.
        variables_to_restore = tf.all_variables()
        variables_to_restore_boundary = [
            variable for i, variable in enumerate(variables_to_restore)
            if "boundary_network" in variable.name[:variable.name.index(':')]
        ]
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if "flow_network" in variable.name[:variable.name.index(':')]
        ]
        saver_boundary = tf.train.Saver(variables_to_restore_boundary)
        saver_flow = tf.train.Saver(variables_to_restore_flow)

        # start session and init
        sess = tf.Session()
        sess.run(init)
        ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
        ckpt_flow = tf.train.get_checkpoint_state(FLOW_DIR)
        saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
        saver_flow.restore(sess, ckpt_flow.model_checkpoint_path)

        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        params_np = (np.random.rand(1, FLAGS.nr_boundary_params) - .5) / 2.0
        #params_np = (np.random.rand(1,FLAGS.nr_boundary_params) - .5)/2.0
        #params_np = np.zeros((1,FLAGS.nr_boundary_params-1))

        sess.run(params_op_init, feed_dict={params_op_set: params_np})
        run_time = FLAGS.boundary_learn_steps

        # make store vectors for values
        plot_error = np.zeros((run_time))
        plot_drag_x = np.zeros((run_time))
        plot_drag_y = np.zeros((run_time))
        plot_drag_z = np.zeros((run_time))

        # make store dir
        os.system("mkdir ./figs/boundary_learn_image_store")
        for i in tqdm(xrange(run_time)):
            #l_0, _ = sess.run([loss_0, train_step_0], feed_dict={})
            l, _, d_x, d_y, d_z = sess.run(
                [loss_gen[0], train_step, drag_x_2, drag_y_2, drag_z_2],
                feed_dict={})
            #l_2, _ = sess.run([loss_2, train_step_2], feed_dict={})
            plot_error[i] = np.sum(l)
            plot_drag_x[i] = np.sum(d_x[fig_pos])
            plot_drag_y[i] = np.sum(d_y[fig_pos])
            plot_drag_z[i] = np.sum(d_z[fig_pos])
            if ((i + 1) % 20 == 0) or i == run_time - 1:
                # make video with opencv
                """
        s_params = sess.run(params_op)
        wing_boundary = []
        for p in xrange(s_params.shape[0]):
          wing_boundary.append(wing_boundary_3d(s_params[p,0], s_params[p,1], s_params[p,2], 
                                                s_params[p,3], s_params[p,4], s_params[p,5],
                                                s_params[p,6:int((FLAGS.nr_boundary_params-7)/3+6)],
                                                s_params[p,int((FLAGS.nr_boundary_params-7)/3+6):int(2*(FLAGS.nr_boundary_params-7)/3+6)],
                                                s_params[p,int(2*(FLAGS.nr_boundary_params-7)/3+6):-1],
                                                s_params[p,-1], FLAGS.dims*[FLAGS.obj_size]))
        wing_boundary = np.stack(wing_boundary)
        wing_boundary = np.pad(wing_boundary, [[0,0],[24,24],[24,24],[24,24],[0,0]], 'constant', constant_values=0.0)
        #print(sharp_boundary.get_shape())
        #print(wing_boundary.shape)
        p_flow, p_boundary, d_l_ratio, sharp_d_l_ratio = sess.run([sharp_predicted_flow, boundary, drag_lift_ratio, sharp_drag_lift_ratio],feed_dict={sharp_boundary: wing_boundary})
        """
                p_flow, p_boundary, d_l_ratio_0, d_l_ratio_1, d_l_ratio_2 = sess.run(
                    [
                        predicted_flow_2, boundary_2, drag_lift_ratio_0,
                        drag_lift_ratio_1, drag_lift_ratio_2
                    ])
                d_l_ratio = np.concatenate(
                    [d_l_ratio_0, d_l_ratio_1, d_l_ratio_2], axis=0)

                # save plot image to make video
                #p_pressure = p_flow[fig_pos,:,:,72,2]
                p_boundary = np.concatenate(
                    [p_boundary[fig_pos, :, 71, :, 0],
                     p_boundary[fig_pos, :, :, 71, 0]], axis=0)
                p_pressure = np.concatenate(
                    [p_flow[fig_pos, :, 71, :, 3],
                     p_flow[fig_pos, :, :, 71, 3]], axis=0)
                #p_pressure = p_flow[:,:,76,:,3].reshape((p_boundary.shape[0]*p_boundary.shape[1], p_boundary.shape[2]))
                #p_boundary = p_boundary[:,:,76,:,0].reshape((p_boundary.shape[0]*p_boundary.shape[1], p_boundary.shape[2]))

                fig = plt.figure()
                fig.set_size_inches(20, 5)
                #a = fig.add_subplot(1,5,1)
                #plt.imshow(p_pressure)
                a = fig.add_subplot(1, 4, 1)
                plt.imshow(p_boundary)
                a = fig.add_subplot(1, 4, 2)
                plt.plot(plot_error, label="Sum(Lift/Drag)")
                plt.xlabel("Step")
                plt.legend()
                a = fig.add_subplot(1, 4, 3)
                plt.plot(plot_drag_x, label="Lift Angle 0")
                plt.plot(-plot_drag_z, label="Drag Angle 0")
                plt.ylim(0.0, np.max(plot_drag_x) + 30.0)
                plt.xlabel("Step")
                plt.legend()
                a = fig.add_subplot(1, 4, 4)
                plt.plot(np.degrees(set_params[:, 0]),
                         d_l_ratio,
                         'bo',
                         label="Lift/Drag Network")
                #plt.plot(-np.degrees(set_params[3:6,0]), sharp_d_l_ratio, 'ro', label="Lift/Drag Sharp")
                #if i == run_time-1:
                #  solver_d_l_ratio = run_flow_solver(sess.run(params_op), solver_boundary, solver_flow, sess, solver_drag_lift_ratio)
                #  plt.plot(-np.degrees(set_params[:,0]), solver_d_l_ratio, 'go', label="Lift/Drag Solver")
                plt.xlabel("Angle of Attack (Degrees)")
                plt.xlim(
                    min(np.degrees(set_params[:, 0])) - 3,
                    max(np.degrees(set_params[:, 0])) + 3)
                plt.ylim(np.min(d_l_ratio) - 1.0, np.max(d_l_ratio) + 1.0)
                plt.legend()
                plt.suptitle("3D Wing Optimization Using Gradient Descent",
                             fontsize=20)
                plt.savefig("./figs/boundary_learn_image_store/plot_" +
                            str(i).zfill(5) + ".png")
                if run_time - i <= 100:
                    plt.savefig("./figs/" + FLAGS.boundary_learn_loss +
                                "_plot.png")
                if i == run_time - 1:
                    plt.savefig("./figs/learn_gradient_descent.pdf")
                    plt.show()
                plt.show()
                plt.close(fig)

        np.save("figs/3d_wing_params_op", sess.run(params_op[6]))
        print(sess.run(params_op[6]))
def evaluate():
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """
    with tf.Graph().as_default():
        # Make image placeholder
        inputs_vector, true_boundary = flow_net.inputs_boundary(
            FLAGS.nr_boundary_params, batch_size, shape)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        #inputs_vector_noise = inputs_vector + tf.random_normal(shape=tf.shape(inputs_vector), mean=0.0, stddev=0.0001, dtype=tf.float32)
        boundary = flow_net.inference_boundary(1,
                                               FLAGS.dims * [FLAGS.obj_size],
                                               inputs=inputs_vector,
                                               full_shape=shape)
        #boundary = tf.round(boundary)
        predicted_flow = flow_net.inference_network(boundary, keep_prob=1.0)

        # quantities to optimize
        force = calc_force(boundary, predicted_flow[:, :, :, 2:3])
        drag_x = tf.reduce_sum(force[:, :, :, 0])
        drag_y = tf.reduce_sum(force[:, :, :, 1])
        drag_ratio = (drag_y / drag_x)

        # init graph
        init = tf.global_variables_initializer()

        # Restore the moving average version of the learned variables for eval.
        variables_to_restore = tf.all_variables()
        variables_to_restore_boundary = [
            variable for i, variable in enumerate(variables_to_restore)
            if "boundary_network" in variable.name[:variable.name.index(':')]
        ]
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if "flow_network" in variable.name[:variable.name.index(':')]
        ]
        saver_boundary = tf.train.Saver(variables_to_restore_boundary)
        saver_flow = tf.train.Saver(variables_to_restore_flow)

        # start ses and init
        sess = tf.Session()
        sess.run(init)
        ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
        ckpt_flow = tf.train.get_checkpoint_state(FLOW_DIR)
        saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
        saver_flow.restore(sess, ckpt_flow.model_checkpoint_path)

        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        params_np = get_random_params(FLAGS.nr_boundary_params, 2)
        params_np = np.expand_dims(params_np, axis=0)
        params_np[0, 0] = 0.0
        params_np[0, 1] = 0.5
        params_np[0, 2] = 1.0
        params_np[0, 4] = 0.0

        # make store vectors for values
        resolution = 320
        loss_val = np.zeros((resolution))
        max_d_ratio = np.zeros((resolution))
        d_ratio_store = None
        boundary_frame_store = []
        store_freq = int(resolution / nr_frame_saves)
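        # a boundary frame is stored every store_freq steps of the parameter
        # sweep; nr_frame_saves is assumed to be defined at module level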

        # make store dir
        for i in tqdm(xrange(resolution)):
            params_np[0, 4] += (0.3) / resolution
            velocity_norm_g = sess.run(drag_ratio,
                                       feed_dict={
                                           inputs_vector:
                                           np.concatenate(batch_size *
                                                          [params_np],
                                                          axis=0)
                                       })
            if i % store_freq == 0:
                boundary_frame_store.append(
                    sess.run(
                        boundary,
                        feed_dict={
                            inputs_vector:
                            np.concatenate(batch_size * [params_np], axis=0)
                        })[0,
                           int(FLAGS.obj_size / 2):int(3 * FLAGS.obj_size / 2),
                           int(FLAGS.obj_size / 2):int(3 * FLAGS.obj_size / 2),
                           0])
            loss_val[i] = velocity_norm_g

        fig = plt.figure(figsize=(10, 5))
        a = fig.add_subplot(1, 2, 1)
        plt.title("Boundary from Parameter Change", fontsize=16)
        boundary_frame_store = tile_frames(boundary_frame_store)
        plt.imshow(boundary_frame_store)
        #plt.tick_params(axis='both', top="off", bottom="off")
        plt.axis('off')
        a = fig.add_subplot(1, 2, 2)
        #plt.imshow(np.concatenate(boundary_frame_store, axis = 0))
        plt.plot(np.arange(resolution) / float(resolution) - .5, loss_val)
        plt.ylabel("Lift/Drag")
        plt.xlabel("Parameter Value")
        plt.title("Loss vs Parameter Value", fontsize=16)
        plt.savefig("./figs/boundary_space_explore.pdf")
        plt.show()
def evaluate():
    """Run Eval once.
  """
    with tf.Session() as sess:
        # Make image placeholder
        boundary, true_flow = flow_net.inputs_flow(batch_size=batch_size,
                                                   shape=shape,
                                                   dims=FLAGS.dims)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        predicted_flow = flow_net.inference_network(boundary)

        # predict force
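        # the 3-D flow field carries four channels (vx, vy, vz, p); the force
        # is computed from the pressure channel (index 3)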
        predicted_force = calc_force(boundary, predicted_flow[..., 3:4])
        predicted_drag_x = tf.reduce_sum(predicted_force[..., 0],
                                         axis=[1, 2, 3])
        predicted_drag_y = tf.reduce_sum(predicted_force[..., 1],
                                         axis=[1, 2, 3])
        predicted_drag_z = tf.reduce_sum(predicted_force[..., 2],
                                         axis=[1, 2, 3])
        true_force = calc_force(boundary, true_flow[..., 3:4])
        true_drag_x = tf.reduce_sum(true_force[..., 0], axis=[1, 2, 3])
        true_drag_y = tf.reduce_sum(true_force[..., 1], axis=[1, 2, 3])
        true_drag_z = tf.reduce_sum(true_force[..., 2], axis=[1, 2, 3])

        # predicted max vel
        predicted_max_vel_x = tf.reduce_max(predicted_flow[..., 0],
                                            axis=[1, 2, 3])
        predicted_max_vel_y = tf.reduce_max(predicted_flow[..., 1],
                                            axis=[1, 2, 3])
        predicted_max_vel_z = tf.reduce_max(predicted_flow[..., 2],
                                            axis=[1, 2, 3])
        true_max_vel_x = tf.reduce_max(true_flow[..., 0], axis=[1, 2, 3])
        true_max_vel_y = tf.reduce_max(true_flow[..., 1], axis=[1, 2, 3])
        true_max_vel_z = tf.reduce_max(true_flow[..., 2], axis=[1, 2, 3])

        # Restore for eval
        init = tf.global_variables_initializer()
        sess.run(init)
        variables_to_restore = tf.all_variables()
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if "flow_network" in variable.name[:variable.name.index(':')]
        ]
        saver = tf.train.Saver(variables_to_restore_flow)
        ckpt = tf.train.get_checkpoint_state(FLOW_DIR)
        saver.restore(sess, ckpt.model_checkpoint_path)
        global_step = 1

        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        # make vtm dataset
        dataset = Sailfish_data("../../data/",
                                size=FLAGS.obj_size,
                                dim=FLAGS.dims)
        dataset.parse_data()

        # store drag data
        p_drag_x_data = []
        t_drag_x_data = []
        p_drag_y_data = []
        t_drag_y_data = []
        p_drag_z_data = []
        t_drag_z_data = []
        p_max_vel_x_data = []
        t_max_vel_x_data = []
        p_max_vel_y_data = []
        t_max_vel_y_data = []
        p_max_vel_z_data = []
        t_max_vel_z_data = []

        #for run in filenames:
        for i in tqdm(xrange(80)):
            # read in boundary
            batch_boundary, batch_flow = dataset.minibatch(
                train=False,
                batch_size=batch_size,
                signed_distance_function=FLAGS.sdf)

            # calc flow
            p_drag_x, t_drag_x, p_drag_y, t_drag_y, p_drag_z, t_drag_z, p_max_vel_x, t_max_vel_x, p_max_vel_y, t_max_vel_y, p_max_vel_z, t_max_vel_z = sess.run(
                [
                    predicted_drag_x, true_drag_x, predicted_drag_y,
                    true_drag_y, predicted_drag_z, true_drag_z,
                    predicted_max_vel_x, true_max_vel_x, predicted_max_vel_y,
                    true_max_vel_y, predicted_max_vel_z, true_max_vel_z
                ],
                feed_dict={
                    boundary: batch_boundary,
                    true_flow: batch_flow
                })
            p_drag_x_data.append(p_drag_x)
            t_drag_x_data.append(t_drag_x)
            p_drag_y_data.append(p_drag_y)
            t_drag_y_data.append(t_drag_y)
            p_drag_z_data.append(p_drag_z)
            t_drag_z_data.append(t_drag_z)
            p_max_vel_x_data.append(p_max_vel_x)
            t_max_vel_x_data.append(t_max_vel_x)
            p_max_vel_y_data.append(p_max_vel_y)
            t_max_vel_y_data.append(t_max_vel_y)
            p_max_vel_z_data.append(p_max_vel_z)
            t_max_vel_z_data.append(t_max_vel_z)

        # display it
        p_drag_x_data = np.concatenate(p_drag_x_data, axis=0)
        t_drag_x_data = np.concatenate(t_drag_x_data, axis=0)
        p_drag_y_data = np.concatenate(p_drag_y_data, axis=0)
        t_drag_y_data = np.concatenate(t_drag_y_data, axis=0)
        p_drag_z_data = np.concatenate(p_drag_z_data, axis=0)
        t_drag_z_data = np.concatenate(t_drag_z_data, axis=0)
        p_max_vel_x_data = np.concatenate(p_max_vel_x_data, axis=0)
        t_max_vel_x_data = np.concatenate(t_max_vel_x_data, axis=0)
        p_max_vel_y_data = np.concatenate(p_max_vel_y_data, axis=0)
        t_max_vel_y_data = np.concatenate(t_max_vel_y_data, axis=0)
        p_max_vel_z_data = np.concatenate(p_max_vel_z_data, axis=0)
        t_max_vel_z_data = np.concatenate(t_max_vel_z_data, axis=0)
        fig = plt.figure(figsize=(18, 3))
        a = fig.add_subplot(1, 6, 1)
        plt.scatter(p_drag_x_data, t_drag_x_data)
        plt.plot(t_drag_x_data, t_drag_x_data, color="red")
        plt.title("X Force")
        a = fig.add_subplot(1, 6, 2)
        plt.scatter(p_drag_y_data, t_drag_y_data)
        plt.plot(t_drag_y_data, t_drag_y_data, color="red")
        plt.title("Y Force")
        a = fig.add_subplot(1, 6, 3)
        plt.scatter(p_drag_z_data, t_drag_z_data)
        plt.plot(t_drag_z_data, t_drag_z_data, color="red")
        plt.title("Z Force")
        a = fig.add_subplot(1, 6, 4)
        plt.scatter(p_max_vel_x_data, t_max_vel_x_data)
        plt.plot(t_max_vel_x_data, t_max_vel_x_data, color="red")
        plt.title("Max X Velocity")
        a = fig.add_subplot(1, 6, 5)
        plt.scatter(p_max_vel_y_data, t_max_vel_y_data)
        plt.plot(t_max_vel_y_data, t_max_vel_y_data, color="red")
        plt.title("Max Y Velocity")
        a = fig.add_subplot(1, 6, 6)
        plt.scatter(p_max_vel_z_data, t_max_vel_z_data)
        plt.plot(t_max_vel_z_data, t_max_vel_z_data, color="red")
        plt.title("Max Z Velocity")
        plt.savefig("./figs/flow_accuracy_3d.jpeg")
        plt.show()
def evaluate():
  """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """
  num_angles = 4
  max_angle =  0.2
  min_angle = -0.1
  set_params          = np.array(num_angles*[FLAGS.nr_boundary_params*[0.0]])
  set_params[:,:]     = 0.0
  set_params_pos      = np.array(num_angles*[FLAGS.nr_boundary_params*[0.0]])
  set_params_pos[:,:] = 1.0

  for i in xrange(num_angles):
    set_params[i,0]      = -i 
  set_params[:,0] = ((max_angle - min_angle) * (set_params[:,0]/num_angles)) - min_angle

  set_params[:,1]      = 0.5
  set_params[:,2]      = 1.0
  set_params[:,-1]     = 0.0

  set_params_pos[:,0]  = 0.0 # set angle to 0.0
  set_params_pos[:,1]  = 0.0 # set n_1 to .5
  set_params_pos[:,2]  = 0.0 # set n_2 to 1.0
  set_params_pos[:,-1] = 0.0 # set tail height to 0.0

  with tf.Graph().as_default():
    # Make image placeholder
    params_op, params_op_init, params_op_set, squeeze_loss = flow_net.inputs_boundary_learn(batch_size, set_params=set_params, set_params_pos=set_params_pos, noise_std=0.001)

    # Make boundary
    boundary = flow_net.inference_boundary(batch_size*set_params.shape[0], FLAGS.dims*[FLAGS.obj_size], params_op, full_shape=shape)
    sharp_boundary = tf.round(boundary)
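    # the rounded (hard 0/1) boundary drives the "sharp" force estimates logged
    # below, while the soft boundary feeds the loss used as the annealing fitness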

    # predict steady flow on boundary
    predicted_flow = flow_net.inference_network(boundary)
    predicted_sharp_flow = flow_net.inference_network(sharp_boundary)

    # quantities to optimize
    force = calc_force(boundary, predicted_flow[:,:,:,2:3])
    sharp_force = calc_force(sharp_boundary, predicted_sharp_flow[:,:,:,2:3])
    drag_x = tf.reduce_sum(force[:,:,:,0], axis=[1,2])/batch_size
    drag_y = tf.reduce_sum(force[:,:,:,1], axis=[1,2])/batch_size
    sharp_drag_x = tf.reduce_sum(sharp_force[:,:,:,0], axis=[1,2])/batch_size
    sharp_drag_y = tf.reduce_sum(sharp_force[:,:,:,1], axis=[1,2])/batch_size
    
    drag_lift_ratio = (drag_x/drag_y)
    sharp_drag_lift_ratio = (sharp_drag_x/sharp_drag_y)

    # loss
    loss = -tf.reduce_sum(drag_lift_ratio)

    # init graph
    init = tf.global_variables_initializer()

    # Restore the moving average version of the learned variables for eval.
    variables_to_restore = tf.all_variables()
    variables_to_restore_boundary = [variable for i, variable in enumerate(variables_to_restore) if "boundary_network" in variable.name[:variable.name.index(':')]]
    variables_to_restore_flow = [variable for i, variable in enumerate(variables_to_restore) if "flow_network" in variable.name[:variable.name.index(':')]]
    saver_boundary = tf.train.Saver(variables_to_restore_boundary)
    saver_flow = tf.train.Saver(variables_to_restore_flow)

    # start session and init
    sess = tf.Session()
    sess.run(init)
    ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
    ckpt_flow = tf.train.get_checkpoint_state(FLOW_DIR)
    saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
    saver_flow.restore(sess, ckpt_flow.model_checkpoint_path)
    
    graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

    params_np = (np.random.rand(1,FLAGS.nr_boundary_params) - .5)
 
    sess.run(params_op_init, feed_dict={params_op_set: params_np})
    run_time = FLAGS.boundary_learn_steps

    # make store vectors for values
    plot_error = np.zeros((run_time))
    plot_drag_y = np.zeros((run_time))
    plot_drag_x = np.zeros((run_time))

    # make store dir
    os.system("mkdir ./figs/boundary_learn_image_store")

    # simulated annealing params
    temp = 0.1
    param_old = params_np 
    param_new = distort_param(params_np, std)
    fittness_old = sess.run(loss)
    fittness_new = 0.0
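    # simulated annealing loop: each step evaluates the distorted candidate,
    # accepts or rejects it via simulated_annealing_step (temperature driven),
    # then proposes a new candidate with distort_param; std is assumed to be a
    # module-level noise scale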

    for i in tqdm(xrange(run_time)):
      sess.run(params_op_init, feed_dict={params_op_set: param_new})
      fittness_new = sess.run(loss)
      print(fittness_new)
      print(fittness_old)
      param_old, fittness_old, temp = simulated_annealing_step(param_old, fittness_old, param_new, fittness_new, temp=temp)
      print(temp)
      param_new = distort_param(param_old, std)

      l, d_y, d_x, p_o = sess.run([loss, sharp_drag_y, sharp_drag_x, params_op], feed_dict={})
      plot_error[i] = np.sum(l)
      plot_drag_x[i] = np.sum(d_x[2])
      plot_drag_y[i] = np.sum(d_y[2])
      if (i+1) % 400 == 0:
        # make video with opencv
        velocity_norm_g, boundary_g = sess.run([predicted_sharp_flow, sharp_boundary],feed_dict={})
        d_y, d_x, l_c, p_o = sess.run([sharp_drag_y, sharp_drag_x, sharp_drag_lift_ratio, params_op], feed_dict={})
        #velocity_norm_g, boundary_g = sess.run([force, boundary],feed_dict={})
        #sflow_plot = np.concatenate([ 5.0*velocity_norm_g[0], boundary_g[0]], axis=1)
        #sflow_plot = np.uint8(grey_to_short_rainbow(sflow_plot))
        #sflow_plot = cv2.applyColorMap(sflow_plot
        #video.write(sflow_plot)
    
        # save plot image to make video
        velocity_norm_g = velocity_norm_g[2,:,:,2]
        boundary_g = boundary_g[2,:,:,0]
        fig = plt.figure()
        fig.set_size_inches(15.5, 7.5)
        a = fig.add_subplot(1,5,1)
        plt.imshow(velocity_norm_g)
        a = fig.add_subplot(1,5,2)
        plt.imshow(boundary_g)
        a = fig.add_subplot(1,5,3)
        plt.plot(plot_error, label="lift/drag")
        plt.xlabel("step")
        plt.legend()
        a = fig.add_subplot(1,5,4)
        plt.plot(plot_drag_x, label="drag_x")
        plt.plot(plot_drag_y, label="drag_y")
        plt.xlabel("step")
        plt.legend()
        a = fig.add_subplot(1,5,5)
        plt.plot(set_params[:,0], l_c, 'bo', label="lift/drag")
        plt.xlabel("angle of attack")
        plt.xlim(min(set_params[:,0])-0.03, max(set_params[:,0])+0.03)
        #plt.legend()
        plt.suptitle("Using Gradient Decent")
        plt.savefig("./figs/boundary_learn_image_store/plot_" + str(i).zfill(5) + ".png")
        if run_time - i <= 100:
          plt.savefig("./figs/" + FLAGS.boundary_learn_loss + "_plot.png")
          #plt.show()
        plt.close(fig)

    # close cv video; no cv2.VideoWriter is ever created above, so these calls
    # would raise a NameError and are left disabled
    #video.release()
    #cv2.destroyAllWindows()

    # generate video of plots
    os.system("rm ./figs/" + FLAGS.boundary_learn_loss + "_plot_video.mp4")
    os.system("cat ./figs/boundary_learn_image_store/*.png | ffmpeg -f image2pipe -r 30 -vcodec png -i - -vcodec libx264 ./figs/" + FLAGS.boundary_learn_loss + "_plot_video.mp4")
    os.system("rm -r ./figs/boundary_learn_image_store")
def evaluate():
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """
    num_angles = 1
    max_angle = 0.0
    min_angle = 0.0
    set_params = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params[:, :] = 0.0
    set_params_pos = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params_pos[:, :] = 1.0

    for i in xrange(num_angles):
        set_params[i, 0] = -i
    set_params[:, 0] = ((max_angle - min_angle) *
                        (set_params[:, 0] / num_angles)) - min_angle

    set_params[:, 1] = 0.5
    set_params[:, 2] = 1.0
    set_params[:, -1] = 0.0

    set_params_pos[:, 0] = 0.0  # set angle to 0.0
    set_params_pos[:, 1] = 0.0  # set n_1 to .5
    set_params_pos[:, 2] = 0.0  # set n_2 to 1.0
    set_params_pos[:, -1] = 0.0  # set tail height to 0.0

    with tf.Graph().as_default():
        # Make vector placeholder
        params_op, params_op_init, params_op_set, squeeze_loss = flow_net.inputs_boundary_learn(
            batch_size, set_params=set_params, set_params_pos=set_params_pos)

        # Compute boundary
        boundary = flow_net.inference_boundary(batch_size * num_angles,
                                               FLAGS.dims * [FLAGS.obj_size],
                                               params_op,
                                               full_shape=shape)
        boundary = tf.round(boundary)
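        # round to a hard 0/1 boundary before predicting flow; random search
        # needs no gradients through the boundary generator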

        # Build a Graph that computes the logits predictions from the
        # inference model.
        predicted_flow = flow_net.inference_network(boundary)

        # quantities to optimize
        force = calc_force(boundary, predicted_flow[:, :, :, 2:3])
        drag_x = tf.reduce_sum(force[:, :, :, 0], axis=[0, 1, 2])
        drag_y = tf.reduce_sum(force[:, :, :, 1], axis=[0, 1, 2])
        #drag_ratio = -(drag_x/drag_y)
        #drag_ratio = (drag_y/drag_x)
        drag_ratio = drag_y

        # init graph
        init = tf.global_variables_initializer()

        # Restore the moving average version of the learned variables for eval.
        variables_to_restore = tf.all_variables()
        variables_to_restore_boundary = [
            variable for i, variable in enumerate(variables_to_restore)
            if "boundary_network" in variable.name[:variable.name.index(':')]
        ]
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if "flow_network" in variable.name[:variable.name.index(':')]
        ]
        saver_boundary = tf.train.Saver(variables_to_restore_boundary)
        saver_flow = tf.train.Saver(variables_to_restore_flow)

        # start session and init
        sess = tf.Session()
        sess.run(init)
        ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
        ckpt_flow = tf.train.get_checkpoint_state(FLOW_DIR)
        saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
        saver_flow.restore(sess, ckpt_flow.model_checkpoint_path)

        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        run_time = FLAGS.boundary_learn_steps

        # make store vectors for values
        best_boundary = []
        max_d_ratio = np.zeros((run_time))
        iteration = np.arange(run_time) * batch_size
        d_ratio_store = []

        # make store dir
        os.system("mkdir ./figs/boundary_random_image_store")
        for i in tqdm(xrange(run_time)):
            input_batch = (
                np.random.rand(batch_size, FLAGS.nr_boundary_params) - .5)
            sess.run(params_op_init, feed_dict={params_op_set: input_batch})
            d_ratio, boundary_batch = sess.run([drag_ratio, boundary])
            #plt.imshow(1.0*sess.run(force)[0,:,:,1] - boundary_batch[0,3:-3,3:-3,0])
            #plt.imshow(1.0*sess.run(force)[0,:,:,1])
            #plt.show()
            d_ratio_store.append(d_ratio)
            if np.max(np.array(d_ratio_store)) <= d_ratio:
                best_boundary = boundary_batch
                best_input = input_batch
            max_d_ratio[i] = np.max(np.array(d_ratio_store))
            if i % 100 == 0:
                # make video with opencv
                sess.run(params_op_init, feed_dict={params_op_set: best_input})
                velocity_norm_g = sess.run(predicted_flow)
                #velocity_norm_g, boundary_g = sess.run([force, boundary],feed_dict={})
                #sflow_plot = np.concatenate([ 5.0*velocity_norm_g[0], boundary_g[0]], axis=1)
                #sflow_plot = np.uint8(grey_to_short_rainbow(sflow_plot))
                #sflow_plot = cv2.applyColorMap(sflow_plot
                #video.write(sflow_plot)

                # save plot image to make video
                velocity_norm_g = velocity_norm_g[fig_pos, :, :, 2]
                boundary_g = best_boundary[fig_pos, :, :, 0]
                fig = plt.figure()
                fig.set_size_inches(25.5, 7.5)
                a = fig.add_subplot(1, 4, 1)
                plt.imshow(velocity_norm_g)
                a = fig.add_subplot(1, 4, 2)
                plt.imshow(boundary_g)
                a = fig.add_subplot(1, 4, 3)
                plt.plot(iteration, max_d_ratio, label="best lift/drag")
                plt.legend(loc=4)
                a = fig.add_subplot(1, 4, 4)
                # the histogram of the data
                n, bins, patches = plt.hist(np.array(d_ratio_store),
                                            50,
                                            normed=1,
                                            facecolor='green')
                #plt.hist(d_ratio_store, 10, normed=1, facecolor='green')
                plt.xlabel("lift/drag")
                plt.ylabel("frequency")
                plt.legend()
                plt.suptitle("Using Random Search")
                plt.savefig("./figs/boundary_random_image_store/plot_" +
                            str(i).zfill(5) + ".png")
                if run_time - i <= 100:
                    plt.savefig("./figs/" + FLAGS.boundary_learn_loss +
                                "_plot.png")
                    #plt.show()
                plt.close(fig)

        # close cv video; no cv2.VideoWriter is ever created above, so these
        # calls would raise a NameError and are left disabled
        #video.release()
        #cv2.destroyAllWindows()

        # generate video of plots
        os.system("rm ./figs/random_plot_video.mp4")
        os.system(
            "cat ./figs/boundary_random_image_store/*.png | ffmpeg -f image2pipe -r 30 -vcodec png -i - -vcodec libx264 ./figs/random_plot_video.mp4"
        )
        os.system("rm -r ./figs/boundary_random_image_store")
def evaluate():
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """

    num_angles = 9
    max_angle = 0.30
    min_angle = -0.10
    set_params = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params[:, :] = 0.0
    set_params_pos = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params_pos[:, :] = 1.0

    for i in xrange(num_angles):
        set_params[i, 0] = -i
    set_params[:, 0] = ((max_angle - min_angle) *
                        (set_params[:, 0] / (num_angles - 1))) - min_angle

    set_params[:, 1] = 0.5
    set_params[:, 2] = 1.0
    set_params[:, -1] = 0.0

    set_params_pos[:, 0] = 0.0  # set angle to 0.0
    set_params_pos[:, 1] = 0.0  # set n_1 to .5
    set_params_pos[:, 2] = 0.0  # set n_2 to 1.0
    set_params_pos[:, -1] = 0.0  # set tail height to 0.0

    with tf.Graph().as_default():
        # Make image placeholder
        params_op, params_op_init, params_op_set, squeeze_loss = flow_net.inputs_boundary_learn(
            batch_size,
            set_params=set_params,
            set_params_pos=set_params_pos,
            noise_std=0.01)

        # Make boundary
        boundary = flow_net.inference_boundary(batch_size *
                                               set_params.shape[0],
                                               FLAGS.dims * [FLAGS.obj_size],
                                               params_op,
                                               full_shape=shape)
        #sharp_boundary =

        # predict steady flow on boundary
        predicted_flow = flow_net.inference_network(boundary)

        # quantities to optimize
        force = calc_force(boundary, predicted_flow[:, :, :, 2:3])
        drag_x = tf.reduce_sum(force[:, :, :, 0], axis=[1, 2]) / batch_size
        drag_y = tf.reduce_sum(force[:, :, :, 1], axis=[1, 2]) / batch_size

        drag_lift_ratio = -(drag_y / drag_x)

        # loss
        loss = -tf.reduce_sum(drag_lift_ratio)
        loss += squeeze_loss

        # train_op
        variables_to_train = tf.all_variables()
        variables_to_train = [
            variable for i, variable in enumerate(variables_to_train)
            if "params" in variable.name[:variable.name.index(':')]
        ]
        train_step = flow_net.train(loss,
                                    FLAGS.boundary_learn_lr,
                                    train_type="boundary_params",
                                    variables=variables_to_train)
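        # only the "params" variables are optimized; the boundary and flow
        # network weights stay frozen and are restored from checkpoints below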

        # init graph
        init = tf.global_variables_initializer()

        # Restore the moving average version of the learned variables for eval.
        variables_to_restore = tf.all_variables()
        variables_to_restore_boundary = [
            variable for i, variable in enumerate(variables_to_restore)
            if "boundary_network" in variable.name[:variable.name.index(':')]
        ]
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if "flow_network" in variable.name[:variable.name.index(':')]
        ]
        saver_boundary = tf.train.Saver(variables_to_restore_boundary)
        saver_flow = tf.train.Saver(variables_to_restore_flow)

        # start session and init
        sess = tf.Session()
        sess.run(init)
        ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
        ckpt_flow = tf.train.get_checkpoint_state(FLOW_DIR)
        saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
        saver_flow.restore(sess, ckpt_flow.model_checkpoint_path)

        # make graph
        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        # total run time
        run_time = FLAGS.boundary_learn_steps

        # use same start for comparison
        start_params_np = (
            np.random.rand(batch_size, FLAGS.nr_boundary_params) - .5)

        # gradient descent
        plot_error_gradient_decent = np.zeros((num_runs, run_time))
        for sim in tqdm(xrange(num_runs)):
            sess.run(params_op_init,
                     feed_dict={params_op_set: start_params_np})
            for i in tqdm(xrange(run_time)):
                l, _ = sess.run([loss, train_step], feed_dict={})
                if i == run_time - 1:
                    d_l_ratio = sess.run(drag_lift_ratio)
                plot_error_gradient_decent[sim, i] = np.sum(l)

        # simulated annealing
        plot_error_simulated_annealing = np.zeros(
            (len(temps), num_runs, run_time))
        for t in tqdm(xrange(len(temps))):
            for sim in tqdm(xrange(num_runs)):
                sess.run(params_op_init,
                         feed_dict={params_op_set: start_params_np})
                temp = temps[t]
                param_old = start_params_np
                param_new = distort_param(start_params_np, std)
                fittness_old = sess.run(loss)
                fittness_new = 0.0
                for i in tqdm(xrange(run_time)):
                    sess.run(params_op_init,
                             feed_dict={params_op_set: param_new})
                    fittness_new = sess.run(loss)
                    param_old, fittness_old, temp = simulated_annealing_step(
                        param_old,
                        fittness_old,
                        param_new,
                        fittness_new,
                        temp=temp)
                    param_new = distort_param(param_old, std)
                    plot_error_simulated_annealing[t, sim, i] = fittness_old

        x = np.arange(run_time)

        fig = plt.figure()
        fig.set_size_inches(5, 5)

        plot_error_gradient_decent_mean, plot_error_gradient_decent_std = calc_mean_and_std(
            plot_error_gradient_decent)
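        # calc_mean_and_std is assumed to return the per-step mean and standard
        # deviation over the num_runs runs, which are drawn as error bars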
        plt.errorbar(x,
                     plot_error_gradient_decent_mean,
                     yerr=plot_error_gradient_decent_std,
                     lw=1.0,
                     label="Gradient Descent")

        for t in tqdm(xrange(len(temps))):
            plot_error_simulated_annealing_mean, plot_error_simulated_annealing_std = calc_mean_and_std(
                plot_error_simulated_annealing[t])
            #plt.errorbar(x, plot_error_simulated_annealing_mean, yerr=plot_error_simulated_annealing_std, c='g', lw=1.0, label="Simulated Annealing temp = " + str(temps[t]))
            plt.errorbar(x,
                         plot_error_simulated_annealing_mean,
                         yerr=plot_error_simulated_annealing_std,
                         lw=1.0,
                         label="Simulated Annealing temp = " + str(temps[t]))

        plt.xlabel('Step')
        plt.ylabel('Loss')
        plt.title("Optimization", fontsize=20)
        plt.legend(loc="upper_left")
        plt.savefig("./figs/learn_comparison.pdf")
        plt.show()
def evaluate():
    """Run Eval once.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """

    num_angles = 9
    max_angle = 0.30
    min_angle = -0.10
    set_params = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params[:, :] = 0.0
    set_params_pos = np.array(num_angles * [FLAGS.nr_boundary_params * [0.0]])
    set_params_pos[:, :] = 1.0

    for i in xrange(num_angles):
        set_params[i, 0] = -i
    set_params[:, 0] = ((max_angle - min_angle) *
                        (set_params[:, 0] / (num_angles - 1))) - min_angle

    set_params[:, 1] = 0.5
    set_params[:, 2] = 1.0
    set_params[:, -1] = 0.0

    set_params_pos[:, 0] = 0.0  # set angle to 0.0
    set_params_pos[:, 1] = 0.0  # set n_1 to .5
    set_params_pos[:, 2] = 0.0  # set n_2 to 1.0
    set_params_pos[:, -1] = 0.0  # set tail height to 0.0

    with tf.Graph().as_default():
        # Make image placeholder
        params_op, params_op_init, params_op_set, squeeze_loss = flow_net.inputs_boundary_learn(
            batch_size,
            set_params=set_params,
            set_params_pos=set_params_pos,
            noise_std=0.01)

        # Make placeholder for flow computed by lattice boltzmann solver
        solver_boundary, solver_flow = flow_net.inputs_flow(
            1, shape, FLAGS.dims)
        sharp_boundary, blaa = flow_net.inputs_flow(
            batch_size * set_params.shape[0], shape, FLAGS.dims)

        # Make boundary
        boundary = flow_net.inference_boundary(batch_size *
                                               set_params.shape[0],
                                               FLAGS.dims * [FLAGS.obj_size],
                                               params_op,
                                               full_shape=shape)

        # predict steady flow on boundary
        predicted_flow = flow_net.inference_network(boundary,
                                                    network_type="flow",
                                                    keep_prob=FLAGS.keep_prob)
        sharp_predicted_flow = flow_net.inference_network(
            sharp_boundary, network_type="flow", keep_prob=FLAGS.keep_prob)

        # quantities to optimize
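        # three force estimates are built: from the soft network boundary (used
        # in the loss), from an externally supplied sharp boundary, and from the
        # lattice-Boltzmann solver placeholders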
        force = calc_force(boundary, predicted_flow[..., -1:])
        sharp_force = calc_force(sharp_boundary, sharp_predicted_flow[..., -1:])
        solver_force = calc_force(solver_boundary, solver_flow[..., -1:])
        drag_x = tf.reduce_sum(force[..., 0], axis=[1, 2]) / batch_size
        drag_y = tf.reduce_sum(force[..., 1], axis=[1, 2]) / batch_size
        sharp_drag_x = tf.reduce_sum(sharp_force[..., 0], axis=[1, 2]) / batch_size
        sharp_drag_y = tf.reduce_sum(sharp_force[..., 1], axis=[1, 2]) / batch_size
        solver_drag_x = tf.reduce_sum(solver_force[..., 0], axis=[1, 2]) / batch_size
        solver_drag_y = tf.reduce_sum(solver_force[..., 1], axis=[1, 2]) / batch_size

        drag_lift_ratio = -(drag_y / drag_x)
        sharp_drag_lift_ratio = -(sharp_drag_y / sharp_drag_x)
        solver_drag_lift_ratio = -(solver_drag_y / solver_drag_x)

        # loss
        loss = -tf.reduce_sum(drag_lift_ratio)
        #loss = -drag_y + drag_x
        #loss = -tf.reduce_sum(drag_x)
        loss += squeeze_loss

        # train_op
        variables_to_train = tf.all_variables()
        variables_to_train = [
            variable for i, variable in enumerate(variables_to_train)
            if "params" in variable.name[:variable.name.index(':')]
        ]
        train_step = flow_net.train(loss,
                                    FLAGS.boundary_learn_lr,
                                    train_type="boundary_params",
                                    variables=variables_to_train)

        # init graph
        init = tf.global_variables_initializer()

        # Restore the moving average version of the learned variables for eval.
        variables_to_restore = tf.all_variables()
        variables_to_restore_boundary = [
            variable for i, variable in enumerate(variables_to_restore)
            if "boundary_network" in variable.name[:variable.name.index(':')]
        ]
        variables_to_restore_flow = [
            variable for i, variable in enumerate(variables_to_restore)
            if "flow_network" in variable.name[:variable.name.index(':')]
        ]
        saver_boundary = tf.train.Saver(variables_to_restore_boundary)
        saver_flow = tf.train.Saver(variables_to_restore_flow)

        # start session and init
        sess = tf.Session()
        sess.run(init)
        ckpt_boundary = tf.train.get_checkpoint_state(BOUNDARY_DIR)
        ckpt_flow = tf.train.get_checkpoint_state(FLOW_DIR)
        saver_boundary.restore(sess, ckpt_boundary.model_checkpoint_path)
        saver_flow.restore(sess, ckpt_flow.model_checkpoint_path)

        graph_def = tf.get_default_graph().as_graph_def(add_shapes=True)

        params_np = (np.random.rand(1, FLAGS.nr_boundary_params) - .5)
        #params_np = np.zeros((1,FLAGS.nr_boundary_params-1))

        sess.run(params_op_init, feed_dict={params_op_set: params_np})
        run_time = FLAGS.boundary_learn_steps

        # make store vectors for values
        plot_error = np.zeros((run_time))
        plot_drag_y = np.zeros((run_time))
        plot_drag_x = np.zeros((run_time))

        # make store dir
        os.system("mkdir ./figs/boundary_learn_image_store")
        for i in tqdm(xrange(run_time)):
            l, _, d_y, d_x = sess.run([loss, train_step, drag_y, drag_x],
                                      feed_dict={})
            plot_error[i] = np.sum(l)
            plot_drag_x[i] = np.sum(d_x[fig_pos])
            plot_drag_y[i] = np.sum(d_y[fig_pos])
            if ((i + 1) % 1 == 0) or i == run_time - 1:
                # make video with opencv
                s_params = sess.run(params_op)
                wing_boundary = []
                for p in xrange(s_params.shape[0]):
                    wing_boundary.append(
                        wing_boundary_2d(
                            s_params[p, 0], s_params[p, 1], s_params[p, 2],
                            s_params[p, 3:int((FLAGS.nr_boundary_params - 4) /
                                              2)],
                            s_params[p,
                                     int((FLAGS.nr_boundary_params - 4) /
                                         2):-1], s_params[p, -1],
                            FLAGS.dims * [FLAGS.obj_size]))
                wing_boundary = np.stack(wing_boundary)
                wing_boundary = np.pad(
                    wing_boundary, [[0, 0], [128, 128], [128, 128], [0, 0]],
                    'constant',
                    constant_values=0.0)
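                # pad the sharp wing boundaries with 128 zero cells on each
                # spatial side so they match the full-domain sharp_boundary
                # placeholder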
                #print(sharp_boundary.get_shape())
                #print(wing_boundary.shape)
                p_flow, p_boundary, d_l_ratio, sharp_d_l_ratio = sess.run(
                    [
                        sharp_predicted_flow, boundary, drag_lift_ratio,
                        sharp_drag_lift_ratio
                    ],
                    feed_dict={sharp_boundary: wing_boundary})

                # save plot image to make video
                p_pressure = p_flow[fig_pos, :, :, 2]
                p_boundary = p_boundary[fig_pos, :, :, 0]
                fig = plt.figure()
                fig.set_size_inches(15, 10)
                a = fig.add_subplot(2, 3, 1)
                plt.imshow(p_pressure)
                plt.title("Pressure", fontsize=16)
                a = fig.add_subplot(2, 3, 2)
                plt.imshow(p_boundary)
                plt.title("Boundary", fontsize=16)
                a = fig.add_subplot(2, 3, 3)
                plt.plot(plot_error, label="Sum(Lift/Drag)")
                plt.xlabel("Step")
                plt.legend()
                a = fig.add_subplot(2, 3, 4)
                plt.plot(-plot_drag_x, label="Drag Angle 0")
                plt.plot(plot_drag_y, label="Lift Angle 0")
                plt.ylim(-1.0, np.max(plot_drag_y) + 2.0)
                plt.xlabel("Step")
                plt.legend()
                a = fig.add_subplot(2, 3, 5)
                plt.plot(-np.degrees(set_params[:, 0]),
                         d_l_ratio,
                         'bo',
                         label="Lift/Drag Network")
                #plt.plot(-np.degrees(set_params[:,0]), sharp_d_l_ratio, 'ro', label="Lift/Drag Sharp")
                #if i == run_time-1:
                #  solver_d_l_ratio = run_flow_solver(sess.run(params_op), solver_boundary, solver_flow, sess, solver_drag_lift_ratio)
                #  plt.plot(-np.degrees(set_params[:,0]), solver_d_l_ratio, 'go', label="Lift/Drag Solver")
                plt.xlabel("Angle of Attack (Degrees)")
                plt.xlim(
                    min(-np.degrees(set_params[:, 0])) - 3,
                    max(-np.degrees(set_params[:, 0])) + 3)
                plt.ylim(np.min(d_l_ratio) - 1, np.max(d_l_ratio) + 2)
                plt.legend()
                plt.suptitle("2D Wing Optimization Using Gradient Descent",
                             fontsize=20)
                plt.savefig("./figs/boundary_learn_image_store/plot_" +
                            str(i).zfill(5) + ".png")
                if run_time - i <= 100:
                    plt.savefig("./figs/" + FLAGS.boundary_learn_loss +
                                "_plot.pdf")
                if i == run_time - 1:
                    plt.savefig("./figs/learn_gradient_descent.pdf")
                    plt.show()
                #plt.show()
                plt.close(fig)

        # generate video of plots
        os.system("rm ./figs/airfoil_2d_video.mp4")
        os.system(
            "cat ./figs/boundary_learn_image_store/*.png | ffmpeg -f image2pipe -r 30 -vcodec png -i - -vcodec libx264 ./figs/airfoil_2d_video.mp4"
        )
        os.system("rm -r ./figs/boundary_learn_image_store")