def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps

    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    # Problem.
    problem, net_config, net_assignments = util.get_config(
        FLAGS.problem, FLAGS.path)

    optimizer = meta.MetaOptimizer(**net_config)
    meta_loss = optimizer.meta_loss(problem,
                                    1,
                                    net_assignments=net_assignments)
    _, update, reset, cost_op, _ = meta_loss

    with ms.MonitoredSession() as sess:
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()

        total_time = 0
        total_cost = 0
        for _ in xrange(FLAGS.num_epochs):
            # Training.
            time, cost = util.run_epoch(sess, cost_op, [update], reset,
                                        num_unrolls)
            total_time += time
            total_cost += cost

        # Results.
        util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                         total_time, FLAGS.num_epochs)
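
This evaluation loop leans on two helpers from util. Below is a minimal sketch of what they might look like; the signatures are inferred from the calls above rather than quoted from the actual module:

# Hedged sketch of the assumed util helpers (not the real implementation).
from timeit import default_timer as timer
import numpy as np

def run_epoch(sess, cost_op, ops, reset, num_unrolls):
    """Resets the problem, then runs num_unrolls optimization steps."""
    start = timer()
    sess.run(reset)
    for _ in range(num_unrolls):
        cost = sess.run([cost_op] + ops)[0]
    return timer() - start, cost

def print_stats(header, total_error, total_time, n):
    """Prints the mean log final error and mean epoch time."""
    print(header)
    print("Log Mean Final Error: {:.2f}".format(np.log10(total_error / n)))
    print("Mean epoch time: {:.2f} s".format(total_time / n))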
Example #2
    def testConvolutional(self):
        """Tests L2L applied to problem with convolutions."""
        kernel_shape = 4

        def convolutional_problem():
            conv = nn.Conv2D(output_channels=1,
                             kernel_shape=kernel_shape,
                             stride=1,
                             name="conv")
            output = conv(tf.random_normal((100, 100, 3, 10)))
            return tf.reduce_sum(output)

        net_config = {
            "conv": {
                "net": "KernelDeepLSTM",
                "net_options": {
                    "kernel_shape": [kernel_shape] * 2,
                    "layers": (5, )
                },
            },
        }
        optimizer = meta.MetaOptimizer(**net_config)
        minimize_ops = optimizer.meta_minimize(convolutional_problem,
                                               3,
                                               net_assignments=[("conv",
                                                                 ["conv/w"])])
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            train(sess, minimize_ops, 1, 2)
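
The unit tests in these examples share a small train helper. A plausible sketch follows; the (step, update, reset, cost_op, x_final) tuple layout is an assumption inferred from how the meta_minimize results are unpacked elsewhere on this page:

# Hedged sketch of the shared test helper; the tuple layout of minimize_ops is assumed.
def train(sess, minimize_ops, num_epochs, num_unrolls):
    """Runs the meta-optimizer and returns the final cost and parameter values."""
    step, update, reset, cost_op, x_final = minimize_ops
    for _ in range(num_epochs):
        sess.run(reset)
        for _ in range(num_unrolls):
            cost, final_x = sess.run([cost_op, x_final, update, step])[:2]
    return cost, final_x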
Example #3
    def testSaveAndLoad(self):
        """Tests saving and loading a meta-optimizer."""
        layers = (2, 3)
        net_options = {"layers": layers, "initializer": "zeros"}
        num_unrolls = 2
        num_epochs = 1

        problem = problems.simple()

        # Original optimizer.
        with tf.Graph().as_default() as g1:
            optimizer = meta.MetaOptimizer(net=dict(
                net="CoordinateWiseDeepLSTM", net_options=net_options))
            minimize_ops = optimizer.meta_minimize(problem, 3)

        with self.test_session(graph=g1) as sess:
            sess.run(tf.global_variables_initializer())
            train(sess, minimize_ops, 1, 2)

            # Save optimizer.
            tmp_dir = tempfile.mkdtemp()
            save_result = optimizer.save(sess, path=tmp_dir)
            net_path = next(iter(save_result))

            # Retrain original optimizer.
            cost, x = train(sess, minimize_ops, num_unrolls, num_epochs)

        # Load optimizer and retrain in a new session.
        with tf.Graph().as_default() as g2:
            optimizer = meta.MetaOptimizer(
                net=dict(net="CoordinateWiseDeepLSTM",
                         net_options=net_options,
                         net_path=net_path))
            minimize_ops = optimizer.meta_minimize(problem, 3)

        with self.test_session(graph=g2) as sess:
            sess.run(tf.global_variables_initializer())
            cost_loaded, x_loaded = train(sess, minimize_ops, num_unrolls,
                                          num_epochs)

        # The last cost should be the same.
        self.assertAlmostEqual(cost, cost_loaded, places=3)
        self.assertAlmostEqual(x[0], x_loaded[0], places=3)

        # Cleanup.
        os.remove(net_path)
        os.rmdir(tmp_dir)
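
The problems.simple() used by this test is just a one-dimensional quadratic. Roughly, it looks like the sketch below (an assumption, not the verbatim definition):

# Hedged sketch of problems.simple(): a single scalar variable with loss f(x) = x**2.
import tensorflow as tf

def simple():
    def build():
        x = tf.get_variable("x", shape=[], dtype=tf.float32,
                            initializer=tf.ones_initializer())
        return tf.square(x, name="x_squared")
    return build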
Example #4
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps
    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    # Problem.
    problem, net_config, net_assignments = util.get_config(FLAGS.problem, FLAGS.path)

    # Optimizer setup.
    if FLAGS.optimizer == "Adam":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
        update = optimizer.minimize(cost_op)
        reset = [problem_reset, optimizer_reset]
    elif FLAGS.optimizer == "L2L":
        if FLAGS.path is None:
            logging.warning("Evaluating untrained L2L optimizer")
        optimizer = meta.MetaOptimizer(**net_config)
        meta_loss = optimizer.meta_loss(problem, 1, net_assignments=net_assignments)
        _, update, reset, cost_op, _ = meta_loss
    else:
        raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with ms.MonitoredSession(
            session_creator=ms.ChiefSessionCreator(config=config)) as sess:
        sess.run(reset)
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()

        total_time = 0
        total_cost = 0
        loss_record = []
        for e in xrange(FLAGS.num_epochs):
            # Training.
            time, cost = util.run_eval_epoch(sess, cost_op, [update], num_unrolls)
            total_time += time
            total_cost += sum(cost) / num_unrolls
            loss_record += cost

        # Results.
        util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                         total_time, FLAGS.num_epochs)

    if FLAGS.output_path is not None:
        if not os.path.exists(FLAGS.output_path):
            os.mkdir(FLAGS.output_path)
        output_file = '{}/{}_eval_loss_record.pickle-{}'.format(
            FLAGS.output_path, FLAGS.optimizer, FLAGS.problem)
        with open(output_file, 'wb') as l_record:
            pickle.dump(loss_record, l_record)
        print("Saving evaluation loss record to {}".format(output_file))
Example #5
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps

    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    # Problem.
    problem, net_config, net_assignments = util.get_config(
        FLAGS.problem,
        main_parade_path,
        first_batch_parade_path,
        path=FLAGS.path)

    # Optimizer setup.
    if FLAGS.optimizer == "Adam":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        # optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
        optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
        grads_and_vars = optimizer.compute_gradients(cost_op)
        grads, v = zip(*grads_and_vars)
        grads, _ = tf.clip_by_global_norm(grads, 1.)
        update = optimizer.apply_gradients(zip(grads, v))
        # update = optimizer.minimize(cost_op)
        reset = [problem_reset, optimizer_reset]
    elif FLAGS.optimizer == "L2L":
        if FLAGS.path is None:
            logging.warning("Evaluating untrained L2L optimizer")
        optimizer = meta.MetaOptimizer(**net_config)
        meta_loss = optimizer.meta_loss(problem,
                                        1,
                                        net_assignments=net_assignments)
        _, update, reset, cost_op, _ = meta_loss
    else:
        raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))

    with ms.MonitoredSession() as sess:
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()

        total_time = 0
        total_cost = 0
        for i in xrange(FLAGS.num_epochs):
            # Training.
            time, cost = util.run_epoch(sess, cost_op, [update], reset,
                                        num_unrolls)
            total_time += time
            total_cost += cost

        # Results.
        util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                         total_time, FLAGS.num_epochs)
Example #6
 def testMultiOptimizer(self, net_assignments, net_config):
     """Tests different variable->net mappings in multi-optimizer problem."""
     problem = problems.simple_multi_optimizer(num_dims=2)
     optimizer = meta.MetaOptimizer(**net_config)
     minimize_ops = optimizer.meta_minimize(problem,
                                            3,
                                            net_assignments=net_assignments)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         train(sess, minimize_ops, 1, 2)
Example #7
 def testSecondDerivatives(self):
     """Tests second derivatives for simple problem."""
     problem = problems.simple()
     optimizer = meta.MetaOptimizer(
         net=dict(net="CoordinateWiseDeepLSTM", net_options={"layers": ()}))
     minimize_ops = optimizer.meta_minimize(problem,
                                            3,
                                            second_derivatives=True)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         train(sess, minimize_ops, 1, 2)
Example #8
 def testSimple(self):
     """Tests L2L applied to simple problem."""
     problem = problems.simple()
     optimizer = meta.MetaOptimizer(net=dict(
         net="CoordinateWiseDeepLSTM",
         net_options={
             "layers": (),
             # Initializing the network to zeros makes learning more stable.
             "initializer": "zeros"
         }))
     minimize_ops = optimizer.meta_minimize(problem, 20, learning_rate=1e-2)
      # L2L should solve the simple problem in less than 500 epochs.
     with self.test_session() as sess:
         sess.run(tf.initialize_all_variables())
         cost, _ = train(sess, minimize_ops, 500, 5)
     self.assertLess(cost, 1e-5)
Example #9
def main(_):
  # Configuration.
  num_unrolls = FLAGS.num_steps // FLAGS.unroll_length

  if FLAGS.save_path is not None:
    if os.path.exists(FLAGS.save_path):
      raise ValueError("Folder {} already exists".format(FLAGS.save_path))
    else:
      os.mkdir(FLAGS.save_path)

  # Problem.
  problem, net_config, net_assignments = util.get_config(FLAGS.problem)

  # Optimizer setup.
  optimizer = meta.MetaOptimizer(**net_config)
  minimize = optimizer.meta_minimize(
      problem, FLAGS.unroll_length,
      learning_rate=FLAGS.learning_rate,
      net_assignments=net_assignments,
      second_derivatives=FLAGS.second_derivatives)
  step, update, reset, cost_op, _ = minimize
  no_op = tf.no_op()
  with ms.MonitoredSession() as sess:
    # Prevent accidental changes to the graph.
    tf.get_default_graph().finalize()

    trainable_vars = tf.trainable_variables()
    print('trainable variables')
    for v in trainable_vars:
        print("parameter:", v.name, "device:", v.device, "shape:", v.get_shape())

    best_evaluation = float("inf")
    total_time = 0
    total_cost = 0
    curr_step = 0
    for e in xrange(FLAGS.num_epochs):
        #time, curr_loss = util.run_epoch(sess, cost_op, [update, step], no_op, 1)
        curr_loss = sess.run([cost_op, update, step])[0]
        total_cost += curr_loss
        curr_step += 1
        if curr_step % 100 == 0:
            print('step: %d, loss: %f' % (curr_step, total_cost / 100))
            total_cost = 0

        if curr_step % FLAGS.reset_interval == 0:
            print('reset states')
            sess.run(reset)
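
All of these training scripts assume a set of command-line flags defined at module level. A hedged sketch of typical definitions follows; the flag names come from the FLAGS.* usages above, but the defaults shown are illustrative only:

# Hedged sketch of the assumed flag definitions (defaults are illustrative).
import tensorflow as tf

flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("save_path", None, "Path for saved meta-optimizer.")
flags.DEFINE_string("problem", "simple", "Type of problem.")
flags.DEFINE_integer("num_epochs", 10000, "Number of training epochs.")
flags.DEFINE_integer("num_steps", 100, "Number of optimization steps per epoch.")
flags.DEFINE_integer("unroll_length", 20, "Meta-optimizer unroll length.")
flags.DEFINE_integer("reset_interval", 100, "Epochs between optimizer state resets.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")
flags.DEFINE_boolean("second_derivatives", False, "Use second derivatives for meta-training.")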
Example #10
    def testWhileLoopProblem(self):
        """Tests L2L applied to problem with while loop."""
        def while_loop_problem():
            x = tf.get_variable("x", shape=[], initializer=tf.ones_initializer)

            # Strange way of squaring the variable.
            _, x_squared = tf.while_loop(cond=lambda t, _: t < 1,
                                         body=lambda t, x: (t + 1, x * x),
                                         loop_vars=(0, x),
                                         name="loop")
            return x_squared

        optimizer = meta.MetaOptimizer(
            net=dict(net="CoordinateWiseDeepLSTM", net_options={"layers": ()}))
        minimize_ops = optimizer.meta_minimize(while_loop_problem, 3)
        with self.test_session() as sess:
            sess.run(tf.initialize_all_variables())
            train(sess, minimize_ops, 1, 2)
Example #11
    def testResults(self):
        """Tests reproducibility of Torch results."""
        problem = problems.simple()
        optimizer = meta.MetaOptimizer(net=dict(net="CoordinateWiseDeepLSTM",
                                                net_options={
                                                    "layers": (),
                                                    "initializer": "zeros"
                                                }))
        minimize_ops = optimizer.meta_minimize(problem, 5)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            cost, final_x = train(sess, minimize_ops, 1, 2)

        # Torch results
        torch_cost = 0.7325327
        torch_final_x = 0.8559

        self.assertAlmostEqual(cost, torch_cost, places=4)
        self.assertAlmostEqual(final_x[0], torch_final_x, places=4)
Example #12
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps // FLAGS.unroll_length
    problem, net_config, net_assignments = util.get_config(FLAGS.problem)
    optimizer = meta.MetaOptimizer(**net_config)
    path = None
    if FLAGS.save_path is not None:
        if not os.path.exists(FLAGS.save_path):
            os.mkdir(FLAGS.save_path)
            # raise ValueError("Folder {} already exists".format(FLAGS.save_path))
        else:
            if os.path.exists('{}/loss-record.pickle'.format(FLAGS.save_path)):
                path = FLAGS.save_path
    # Problem.

    # Optimizer setup.

    minimize = optimizer.meta_minimize(
        problem,
        FLAGS.unroll_length,
        learning_rate=FLAGS.learning_rate,
        net_assignments=net_assignments,
        model_path=path,
        second_derivatives=FLAGS.second_derivatives)

    step, update, reset, cost_op, x_final, test, fc_weights, fc_bias, fc_va = minimize
    #  saver=tf.train.Saver()
    with ms.MonitoredSession() as sess:
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()
        #    Step=[step for i in range(len(cost_op))]
        best_evaluation = float("inf")
        total_time = 0
        total_cost = 0
        loss_record = []
        constants = []
        for e in xrange(FLAGS.num_epochs):
            # Training.
            time, cost, constant, Weights = util.run_epoch(
                sess, cost_op, [update, step], reset, num_unrolls, test,
                [fc_weights, fc_bias, fc_va])
            cost = sum(cost) / len(cost_op)
            total_time += time
            total_cost += cost
            loss_record.append(cost)
            constants.append(constant)
            # Logging.
            if (e + 1) % FLAGS.log_period == 0:
                util.print_stats("Epoch {}".format(e + 1), total_cost,
                                 total_time, FLAGS.log_period)
                total_time = 0
                total_cost = 0

            # Evaluation.
            if (e + 1) % FLAGS.evaluation_period == 0:
                eval_cost = 0
                eval_time = 0
                for _ in xrange(FLAGS.evaluation_epochs):
                    time, cost, constant, weights = util.run_epoch(
                        sess, cost_op, [update, step], reset, num_unrolls,
                        test, [fc_weights, fc_bias, fc_va])
                    #          cost/=len(cost_op)
                    eval_time += time
                    eval_cost += sum(cost) / len(cost_op)

                util.print_stats("EVALUATION", eval_cost, eval_time,
                                 FLAGS.evaluation_epochs)

                if FLAGS.save_path is not None and eval_cost < best_evaluation:
                    print("Removing previously saved meta-optimizer")
                    for f in os.listdir(FLAGS.save_path):
                        os.remove(os.path.join(FLAGS.save_path, f))
                    print("Saving meta-optimizer to {}".format(
                        FLAGS.save_path))
                    #          saver.save(sess,'./quadratic/quadratic.ckpt',global_step = e)
                    optimizer.save(sess, FLAGS.save_path)
                    with open(FLAGS.save_path + '/loss_record.pickle',
                              'wb') as l_record:
                        record = {'loss_record': loss_record, 'fc_weights': sess.run(weights[0]),
                                  'fc_bias': sess.run(weights[1]), 'fc_va': sess.run(weights[2]),
                                  'constant': sess.run(constant)}
                        pickle.dump(record, l_record)
                    best_evaluation = eval_cost
Example #13
def main(_):
  # Configuration.
  num_unrolls = FLAGS.num_steps // FLAGS.unroll_length

  # if FLAGS.save_path is not None:
  #   if os.path.exists(FLAGS.save_path):
  #     raise ValueError("Folder {} already exists".format(FLAGS.save_path))
  #   else:
  #     os.mkdir(FLAGS.save_path)

  # Problem.
  problem, net_config, net_assignments = util.get_config(
      FLAGS.problem, main_parade_path, first_batch_parade_path)

  # Optimizer setup.
  optimizer = meta.MetaOptimizer(**net_config)
  minimize = optimizer.meta_minimize(
      problem, FLAGS.unroll_length,
      learning_rate=FLAGS.learning_rate,
      net_assignments=net_assignments,
      second_derivatives=FLAGS.second_derivatives)
  step, update, reset, cost_op, _ = minimize

  with ms.MonitoredSession() as sess:
    # Prevent accidental changes to the graph.
    tf.get_default_graph().finalize()
    writer = tf.summary.FileWriter('summary')
    writer.add_graph(tf.get_default_graph())
    best_evaluation = float("inf")
    total_time = 0
    total_cost = 0
    for e in xrange(FLAGS.num_epochs):
      # Training.
      time, cost = util.run_epoch(sess, cost_op, [update, step], reset,
                                  num_unrolls)
      total_time += time
      total_cost += cost

      # Logging.
      if (e + 1) % FLAGS.log_period == 0:
        util.print_stats("Epoch {}".format(e + 1), total_cost, total_time,
                         FLAGS.log_period)
        total_time = 0
        total_cost = 0

      # Evaluation.
      if (e + 1) % FLAGS.evaluation_period == 0:
        eval_cost = 0
        eval_time = 0
        for _ in xrange(FLAGS.evaluation_epochs):
          time, cost = util.run_epoch(sess, cost_op, [update], reset,
                                      num_unrolls)
          eval_time += time
          eval_cost += cost

        util.print_stats("EVALUATION", eval_cost, eval_time,
                         FLAGS.evaluation_epochs)

        if FLAGS.save_path is not None and eval_cost < best_evaluation:
          print("Removing previously saved meta-optimizer")
          for f in os.listdir(FLAGS.save_path):
            os.remove(os.path.join(FLAGS.save_path, f))
          print("Saving meta-optimizer to {}".format(FLAGS.save_path))
          optimizer.save(sess, FLAGS.save_path)
          best_evaluation = eval_cost
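
The checkpoint written by optimizer.save above can later be fed back through the net_path option when rebuilding the meta-optimizer, in the same way testSaveAndLoad does. A hedged sketch, reusing the script's imports and FLAGS; the "cw" key and the "cw.l2l" filename are assumptions, not taken from this script:

# Hedged sketch of reloading a saved meta-optimizer in a fresh graph.
net_config = {
    "cw": {
        "net": "CoordinateWiseDeepLSTM",
        "net_options": {"layers": (20, 20)},
        "net_path": os.path.join(FLAGS.save_path, "cw.l2l"),
    },
}
optimizer = meta.MetaOptimizer(**net_config)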
Example #14
def main(_):
  # Configuration.
  num_unrolls = FLAGS.num_steps // FLAGS.unroll_length

  # Problem.
  problem, net_config, net_assignments = util.get_config(FLAGS.problem)

  # Optimizer setup.
  optimizer = meta.MetaOptimizer(**net_config)
  minimize = optimizer.meta_minimize(
      problem, FLAGS.unroll_length,
      learning_rate=FLAGS.learning_rate,
      net_assignments=net_assignments,
      second_derivatives=FLAGS.second_derivatives)

  step, loss, update, reset, cost_op, farray, lropt, _ = minimize

  with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    # Prevent accidental changes to the graph.
    graph_writer = tf.summary.FileWriter(logs_path, sess.graph)
    sess.run(tf.global_variables_initializer())
    best_evaluation = float("inf")
    start = timer()
    losstrain = []
    lrtrain = []
    losseval = []
    plotlosstrain = []
    plotlrtrain = []
    plotlosseval = []
    for e in range(FLAGS.num_epochs):
      cost, trainloss, lropttrain = util.run_epoch(sess, cost_op, farray, lropt, [step, update], reset, num_unrolls)
      print(cost)
      losstrain.append(cost)
      lrtrain.append(lropttrain)
      util.print_stats("Training Epoch {}".format(e), trainloss, timer() - start)
      saver = tf.train.Saver()
      if (e + 1) % FLAGS.logging_period == 0:
        plotlosstrain.append(cost)
        plotlrtrain.append(lropttrain)

      if (e + 1) % FLAGS.evaluation_period == 0:
        for _ in range(FLAGS.evaluation_epochs):
          evalcost, evaloss, _ = util.run_epoch(sess, cost_op, farray, lropt, [update], reset, num_unrolls)
          losseval.append(evalcost)
        if save_path is not None and evaloss < best_evaluation:
          print("Saving meta-optimizer to {}".format(save_path))
          saver.save(sess, save_path + '/model.ckpt', global_step=e + 1)
          best_evaluation = evaloss
          plotlosseval.append(evalcost)
    slengths = np.arange(FLAGS.num_steps)
    slengthlr = np.arange(FLAGS.num_steps - num_unrolls)
    np.savetxt(save_path + '/plotlosstrain.out', plotlosstrain, delimiter=',')
    np.savetxt(save_path + '/plotlrtrain.out', plotlrtrain, delimiter=',')
    np.savetxt(save_path + '/plotlosseval.out', plotlosseval, delimiter=',')
    np.savetxt(save_path + '/losstrain.out', losstrain, delimiter=',')
    np.savetxt(save_path + '/lrtrain.out', lrtrain, delimiter=',')
    np.savetxt(save_path + '/losseval.out', losseval, delimiter=',')
    plt.figure(figsize=(8, 5))
    plt.plot(slengths, np.mean(plotlosstrain, 0), 'r-', label='Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Training Loss')
    plt.legend()
    savefig(save_path + '/Training.png')
    plt.close()
    plt.figure(figsize=(8, 5))
    plt.plot(slengths, np.mean(plotlosseval, 0), 'b-', label='Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Validation Loss')
    plt.legend()
    savefig(save_path + '/Validation.png')
    plt.close()
    plt.figure(figsize=(8, 5))
    plt.plot(slengthlr, np.mean(plotlrtrain, 0), 'r-', label='Learning Rate')
    plt.xlabel('Epoch')
    plt.ylabel('Average Learning Rate')
    plt.legend()
    savefig(save_path + '/LearningRate.png')
    plt.close()
    graph_writer.close()
Example #15
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps

    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    # Problem.
    problem, net_config, net_assignments = util.get_config(
        FLAGS.problem, FLAGS.path, FLAGS.problem_path)

    # Optimizer setup.
    if FLAGS.optimizer == "Adam":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
        update = optimizer.minimize(cost_op)
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "SGD":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
        optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
        update = optimizer.minimize(cost_op)
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "RMSProp":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.RMSPropOptimizer(FLAGS.learning_rate)
        optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
        update = optimizer.minimize(cost_op)
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "Momentum":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate,
                                               momentum=0.9)
        optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
        update = optimizer.minimize(cost_op)
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "NAG":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate,
                                               momentum=0.1,
                                               use_nesterov=True)
        optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
        update = optimizer.minimize(cost_op)
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "L2L":
        if FLAGS.path is None:
            logging.warning("Evaluating untrained L2L optimizer")
        optimizer = meta.MetaOptimizer(**net_config)
        meta_loss = optimizer.meta_loss(problem,
                                        1,
                                        net_assignments=net_assignments)
        _, update, reset, cost_op, x_op = meta_loss
    else:
        raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))

    with ms.MonitoredSession() as sess:
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()

        total_time = 0
        total_cost = 0
        for _ in xrange(FLAGS.num_epochs):
            # Training.
            time, cost, x_values = util.run_epoch(sess, cost_op, x_op,
                                                  [update], reset, num_unrolls)
            total_time += time
            total_cost += cost

        x_values = np.swapaxes(np.squeeze(x_values), 0, 1)
        if FLAGS.problem.find('wav') != -1:
            np.save(os.path.join('results', '{}_wav'.format(FLAGS.optimizer)),
                    x_values)
        else:
            np.save(os.path.join('results', '{}'.format(FLAGS.optimizer)),
                    x_values)

        # print("x_values shape: {}".format(x_values.shape))
        # print("x_values: {}".format(x_values))
        # np.savetxt(os.path.join('results', '{}.txt'.format(FLAGS.optimizer)), x_values, fmt='%f')
        # Results.
        util.print_stats(
            "Epoch {}, Optimizer {}".format(FLAGS.num_epochs, FLAGS.optimizer),
            total_cost, total_time, FLAGS.num_epochs)
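
The saved trajectories can be inspected afterwards with a plain np.load. A short sketch; the filename mirrors the np.save call above, with "L2L" standing in for FLAGS.optimizer, and np.save appends the .npy suffix:

# Hedged sketch of loading the parameter trajectories saved above.
import os
import numpy as np

x_values = np.load(os.path.join("results", "L2L.npy"))
print("trajectory array shape:", x_values.shape)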
Example #16
def main(_):
  # Configuration.
  if FLAGS.seed:
    tf.set_random_seed(FLAGS.seed)

  # Problem.
  problem, net_config, net_assignments = util.get_config(
      FLAGS.problem, FLAGS.path)

  state = None
  # Optimizer setup.
  if FLAGS.optimizer == "Adam":
    cost_op = problem()
    problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    problem_reset = tf.variables_initializer(problem_vars)

    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
    update = optimizer.minimize(cost_op)
    reset = [problem_reset, optimizer_reset]
  elif FLAGS.optimizer == "SGD_MOM":
    cost_op = problem()
    problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    problem_reset = tf.variables_initializer(problem_vars)

    optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, 0.9)
    optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
    update = optimizer.minimize(cost_op)
    reset = [problem_reset, optimizer_reset]
  elif FLAGS.optimizer == "L2L":
    if FLAGS.path is None:
      logging.warning("Evaluating untrained L2L optimizer")
    cost_op = problem()
    optimizer = meta.MetaOptimizer(**net_config)
    if FLAGS.load_trained_model:
      # optimizer.load_states([pickle.load(open(os.path.join(FLAGS.model_path, "states.p"), "rb"))])
      # optimizer.load_states([pickle.load(open("./init_state.p", "rb"))])
      meta_loss = optimizer.meta_loss(problem, 1, net_assignments=net_assignments, load_states=False)
      # _, update, reset, cost_op, _ = meta_loss
      _, update, reset, _, _, state = meta_loss
    else:
      meta_loss = optimizer.meta_loss(problem, 1, net_assignments=net_assignments, load_states=False)
      # _, update, reset, cost_op, _ = meta_loss
      _, update, reset, _, _, state = meta_loss 
  else:
    raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))

  process_id = os.getpid()
  exp_folder = os.path.join("exp", str(process_id))

  if not os.path.isdir(exp_folder):
    os.mkdir(exp_folder)

  writer = tf.summary.FileWriter(exp_folder)
  summaries = tf.summary.merge_all()

  if FLAGS.problem == "mnist":
    var_name_mlp = [
        "mlp/linear_0/w:0", "mlp/linear_0/b:0", "mlp/linear_1/w:0",
        "mlp/linear_1/b:0"
    ]
  else:
    var_name_mlp = []

  problem_vars = tf.get_collection(tf.GraphKeys.VARIABLES)

  if var_name_mlp:
    saver_vars = [vv for vv in problem_vars if vv.name in var_name_mlp]
  else:
    saver_vars = problem_vars

  saver = tf.train.Saver(saver_vars)

  with ms.MonitoredSession() as sess:
    # a quick hack!
    regular_sess = sess._sess._sess._sess._sess

    # Prevent accidental changes to the graph.
    tf.get_default_graph().finalize()

    # print("Initial loss = {}".format(sess.run(cost_op)))
    # raw_input("wait")

    if FLAGS.load_trained_model == True:
      print("We are loading trained model here!")
      saver.restore(regular_sess, os.path.join(FLAGS.model_path, "model"))

    # init_state = regular_sess.run(optimizer.init_state)
    # cost_val = regular_sess.run(cost_op)
    # import pdb; pdb.set_trace()

    total_time = 0
    total_cost = 0
    for step in xrange(FLAGS.num_epochs):      
      time, cost = util.run_epoch_eval(
          sess,
          cost_op, 
          [update],
          reset,
          FLAGS.num_steps,
          summary_op=summaries,
          summary_writer=writer,
          run_reset=False)
      writer.flush()

      total_time += time
      total_cost += cost

    saver.save(regular_sess, os.path.join(exp_folder, "model"))
    if state is not None:  # `final_states` was undefined here; fetch the L2L optimizer state.
        final_states = sess.run(state)
        pickle.dump(final_states, open(os.path.join(exp_folder, "states.p"), "wb"))

    # Results.
    util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                     total_time, FLAGS.num_epochs)

    # we have to run in the end to skip the error
    regular_sess.run(reset)
Example #17
# Optimizer setup.
if optimizer == "Adam":
    cost_op = problem()
    problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    problem_reset = tf.variables_initializer(problem_vars)

    optimizer = tf.train.AdamOptimizer(learning_rate)
    optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
    update = optimizer.minimize(cost_op) # argmin(*)
    reset = [problem_reset, optimizer_reset]

elif optimizer == "L2L":
    if path is None:
        logging.warning("Evaluating untrained L2L optimizer")
    optimizer = meta.MetaOptimizer(**net_config)
    meta_loss = optimizer.meta_loss(problem, 1, net_assignments=net_assignments)
    _, update, reset, cost_op, problem_vars = meta_loss

else:
    raise ValueError("{} is not a valid optimizer".format(optimizer))

sess = ms.MonitoredSession()  # as sess:
# Prevent accidental changes to the graph.
tf.get_default_graph().finalize()

total_time = 0
total_cost = 0

for i in xrange(num_epochs):
    # Training.
Example #18
def main(_):
    # Configuration.
    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    num_unrolls = FLAGS.num_steps // FLAGS.unroll_length

    if FLAGS.save_path is not None:
        if os.path.exists(FLAGS.save_path):
            # raise ValueError("Folder {} already exists".format(FLAGS.save_path))
            pass
        else:
            os.mkdir(FLAGS.save_path)

    # Problem.
    problem, net_config, net_assignments = util.get_config(FLAGS.problem)
    loss_op = problem()

    # Optimizer setup.
    optimizer = meta.MetaOptimizer(**net_config)
    minimize = optimizer.meta_minimize(
        problem,
        FLAGS.unroll_length,
        learning_rate=FLAGS.learning_rate,
        net_assignments=net_assignments,
        second_derivatives=FLAGS.second_derivatives)
    step, update, reset, cost_op, _ = minimize

    if FLAGS.problem == "mnist":
        var_name_mlp = [
            "mlp/linear_0/w:0", "mlp/linear_0/b:0", "mlp/linear_1/w:0",
            "mlp/linear_1/b:0"
        ]
    else:
        var_name_mlp = []

    problem_vars = tf.get_collection(tf.GraphKeys.VARIABLES)

    if var_name_mlp:
        saver_vars = [vv for vv in problem_vars if vv.name in var_name_mlp]
    else:
        saver_vars = problem_vars

    saver = tf.train.Saver(saver_vars)

    process_id = os.getpid()
    exp_folder = os.path.join(FLAGS.save_path, str(process_id))
    writer = tf.summary.FileWriter(exp_folder)

    if not os.path.isdir(exp_folder):
        os.mkdir(exp_folder)

    with ms.MonitoredSession() as sess:
        # a quick hack!
        regular_sess = sess._sess._sess._sess._sess

        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()

        # print("Initial loss = {}".format(sess.run(loss_op)))
        # raw_input("wait")

        if FLAGS.load_trained_model == True:
            print("We are loading trained model here!")
            saver.restore(regular_sess, FLAGS.model_path)

        # init_state = regular_sess.run(optimizer.init_state)
        # pickle.dump(init_state, open("init_state.p", "wb"))

        best_evaluation = float("inf")
        total_time = 0
        total_cost = 0
        for e in xrange(FLAGS.num_epochs):
            # Training.
            time, cost = util.run_epoch(sess, cost_op, [update, step], reset,
                                        num_unrolls, e, writer)
            total_time += time
            total_cost += cost
            writer.flush()

            # Logging.
            if (e + 1) % FLAGS.log_period == 0:
                util.print_stats("Epoch {}".format(e + 1), total_cost,
                                 total_time, FLAGS.log_period)
                total_time = 0
                total_cost = 0

            # Evaluation.
            if (e + 1) % FLAGS.evaluation_period == 0 or e == 0:
                eval_cost = 0
                eval_time = 0
                for _ in xrange(FLAGS.evaluation_epochs):
                    time, cost = util.run_epoch_val(sess, cost_op, [update],
                                                    reset, num_unrolls, e,
                                                    writer)

                    eval_time += time
                    eval_cost += cost

                util.print_stats("EVALUATION", eval_cost, eval_time,
                                 FLAGS.evaluation_epochs)

                if FLAGS.save_path is not None and eval_cost < best_evaluation:
                    # print("Removing previously saved meta-optimizer")
                    # for f in os.listdir(FLAGS.save_path):
                    #   os.remove(os.path.join(FLAGS.save_path, f))
                    # print("Saving meta-optimizer to {}".format(FLAGS.save_path))
                    # optimizer.save(sess, FLAGS.save_path)
                    optimizer.save(sess, exp_folder, e + 1)
                    best_evaluation = eval_cost
Example #19
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps

    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    # Problem.
    problem, net_config, net_assignments = util.get_config(
        FLAGS.problem, FLAGS.path)

    # Optimizer setup.
    if FLAGS.optimizer == "Adam":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        optimizer_reset = tf.variables_initializer(optimizer.get_slot_names())
        update = optimizer.minimize(cost_op)
        reset = [problem_reset, optimizer_reset]
    elif FLAGS.optimizer == "L2L":
        if FLAGS.path is None:
            logging.warning("Evaluating untrained L2L optimizer")
        optimizer = meta.MetaOptimizer(**net_config)
        meta_loss = optimizer.meta_loss(problem,
                                        1,
                                        net_assignments=net_assignments,
                                        model_path=FLAGS.path)
        loss, update, reset, cost_op, x_final, constant = meta_loss
    else:
        raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))
    with ms.MonitoredSession() as sess:
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()
        min_loss_record = []
        all_time_loss_record = []
        total_time = 0
        total_cost = 0
        #    pdb.set_trace()
        #    print(constant)
        x_record = [[sess.run(item) for item in x_final]]
        for _ in xrange(FLAGS.num_epochs):
            # Training.
            time, cost, constants = util.eval_run_epoch(
                sess, cost_op, [update], reset, num_unrolls, x_final, constant)
            total_time += time
            total_cost += min(cost)
            all_time_loss_record.append(cost)
            min_loss_record.append(min(cost))


        # pdb.set_trace
        # print(x_finals)
        # x_record = x_record + x_finals
        with open('./{}/evaluate_record.pickle'.format(FLAGS.path),
                  'wb') as l_record:
            record = {'all_time_loss_record': all_time_loss_record,
                      'min_loss_record': min_loss_record,
                      'constants': [sess.run(item) for item in constants]}
            pickle.dump(record, l_record)
        # Results.
        util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                         total_time, FLAGS.num_epochs)