Example #1
def main():
    # rnn, RealReaction, StepOptimizer and a module-level logger are assumed
    # to be imported/defined elsewhere in this file; json and namedtuple come
    # from the Python standard library.
    with open('./config.json') as config_file:
        config = json.load(
            config_file,
            object_hook=lambda d: namedtuple('x', d.keys())(*d.values()))

    param_names = ['voltage', 'flow_rate', 'pressure']
    param_range = [(0.0, 5.0), (1.0, 12.0), (10, 100)]
    func = RealReaction(num_dim=3,
                        param_range=param_range,
                        param_names=param_names,
                        direction='max',
                        logger=None)

    cell = rnn.StochasticRNNCell(cell=rnn.LSTM,
                                 kwargs={'hidden_size': config.hidden_size},
                                 nlayers=config.num_layers,
                                 reuse=config.reuse)
    optimizer = StepOptimizer(cell=cell,
                              func=func,
                              ndim=config.num_params,
                              nsteps=config.num_steps,
                              ckpt_path=config.save_path,
                              logger=logger,
                              constraints=config.constraints)
    x_array, y_array = optimizer.run()
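
The object_hook trick above converts every JSON object into a namedtuple, so configuration fields can be read as attributes (config.hidden_size) instead of dictionary lookups. A minimal standalone sketch of the same pattern, using only the standard library and an illustrative config string that is not part of the original project:

import json
from collections import namedtuple

raw = '{"hidden_size": 32, "num_layers": 2, "reuse": false}'
config = json.loads(raw,
                    object_hook=lambda d: namedtuple('x', d.keys())(*d.values()))
print(config.hidden_size)  # -> 32, read as an attribute rather than a dict key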
Example #2
def main():
    with open('./config.json') as config_file:
        config = json.load(
            config_file,
            object_hook=lambda d: namedtuple('x', d.keys())(*d.values()))

    if config.opt_direction == 'max':
        problem_type = 'concave'
    else:
        problem_type = 'convex'

    if config.constraints:
        func = ConstraintQuadraticEval(num_dim=config.num_params,
                                       random=config.instrument_error,
                                       ptype=problem_type)
    else:
        func = QuadraticEval(num_dim=config.num_params,
                             random=config.instrument_error,
                             ptype=problem_type)

    if config.policy == 'srnn':
        cell = rnn.StochasticRNNCell(cell=rnn.LSTM,
                                     kwargs={
                                         'hidden_size': config.hidden_size,
                                         'use_batch_norm_h': config.batch_norm,
                                         'use_batch_norm_x': config.batch_norm,
                                         'use_batch_norm_c': config.batch_norm,
                                     },
                                     nlayers=config.num_layers,
                                     reuse=config.reuse)
    elif config.policy == 'rnn':
        cell = rnn.MultiInputRNNCell(cell=rnn.LSTM,
                                     kwargs={
                                         'hidden_size': config.hidden_size,
                                         'use_batch_norm_h': config.batch_norm,
                                         'use_batch_norm_x': config.batch_norm,
                                         'use_batch_norm_c': config.batch_norm,
                                     },
                                     nlayers=config.num_layers,
                                     reuse=config.reuse)

    optimizer = StepOptimizer(cell=cell,
                              func=func,
                              ndim=config.num_params,
                              nsteps=config.num_steps,
                              ckpt_path=config.save_path,
                              logger=logger,
                              constraints=config.constraints)
    x_array, y_array = optimizer.run()

    # np.savetxt('./scratch/nn_y.csv', y_array, delimiter=',')
    # np.save('./scratch/nn_x.npy', x_array)
    plt.figure(1)
    plt.plot(y_array)
    plt.show()
    fig2 = plt.figure(2)
    ax2 = fig2.add_subplot(111, projection='3d')
    ax2.plot(x_array[:, 0], x_array[:, 1], x_array[:, 2])
    fig2.show()
    plt.show()
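
This example reads all of its settings from config.json, but the file itself is not shown. A hypothetical config covering just the fields referenced above (values are illustrative placeholders, not settings from the original project):

import json

example_config = {
    "opt_direction": "max",       # 'max' -> concave test problem, otherwise convex
    "constraints": False,         # picks ConstraintQuadraticEval vs. QuadraticEval
    "instrument_error": 0.0,      # noise level passed to the synthetic evaluator
    "num_params": 3,              # dimensionality (ndim / num_dim)
    "num_steps": 20,              # number of StepOptimizer iterations
    "policy": "srnn",             # 'srnn' -> StochasticRNNCell, 'rnn' -> MultiInputRNNCell
    "hidden_size": 32,
    "num_layers": 2,
    "batch_norm": False,
    "reuse": False,
    "save_path": "./ckpt/model"   # checkpoint directory read by StepOptimizer
}

with open('./config.json', 'w') as f:
    json.dump(example_config, f, indent=2)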
Example #3
def main():
    with open('./config.json') as config_file:
        config = json.load(
            config_file,
            object_hook=lambda d: namedtuple('x', d.keys())(*d.values()))

    # Update the parameter list to cover all parameters considered in the data
    # set. state_space is assumed to be defined elsewhere in the module (e.g. a
    # DataFrame of candidate parameter settings keyed by parameter name).
    param_names = []
    param_range = []
    for col in state_space:
        param_names.append(col)
        param_range.append((state_space[col].min(), state_space[col].max()))

    func = RealReaction(num_dim=len(param_names),
                        param_range=param_range,
                        param_names=param_names,
                        direction='max',
                        logger=None)

    cell = rnn.StochasticRNNCell(cell=rnn.LSTM,
                                 kwargs={'hidden_size': config.hidden_size},
                                 nlayers=config.num_layers,
                                 reuse=config.reuse)
    # Assumes that previous step exists
    # e.g. current_step = 2 means that the first step is in place
    next_states = []
    for baseline_num in range(1, 10):
        # print (config.sav)
        df = pd.read_csv(
            './ckpt/baseline/baseline_{}/trace.csv'.format(baseline_num))
        steps = list(df['step'])
        current_step = len(steps) + 1
        save_path_of_previous_step = "./ckpt/baseline/baseline_{}/step{}".format(
            baseline_num, current_step - 1)
        save_path = './ckpt/baseline/baseline_{}/step{}'.format(
            baseline_num, current_step)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        optimizer = StepOptimizer(cell=cell,
                                  func=func,
                                  ndim=config.num_params,
                                  nsteps=1,
                                  save_path=save_path,
                                  ckpt_path=save_path_of_previous_step,
                                  logger=logger,
                                  constraints=config.constraints)

        print(save_path_of_previous_step)
        x_array, y_array = optimizer.run(steps[-1])

        steps.append(optimizer.next_idx)
        pd.DataFrame({'step': steps}).to_csv(
            './ckpt/baseline/baseline_{}/trace.csv'.format(baseline_num))
        next_states.append(optimizer.next_idx)

    print(next_states)
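
The loop above assumes each ./ckpt/baseline/baseline_{n}/ directory already holds a trace.csv with a 'step' column and a checkpoint from the previous step. A hypothetical bootstrap for a single baseline directory (the seed index and paths are assumptions, not taken from the original project):

import os
import pandas as pd

baseline_dir = './ckpt/baseline/baseline_1'
# The step1 directory must additionally contain a checkpoint written by an
# earlier StepOptimizer run; only the bookkeeping files are created here.
os.makedirs(os.path.join(baseline_dir, 'step1'), exist_ok=True)

# Seed the trace with the index of the first evaluated point so that
# steps[-1] is defined on the first pass of the loop above.
pd.DataFrame({'step': [0]}).to_csv(os.path.join(baseline_dir, 'trace.csv'))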
Example #4
def load_model(sess, config, logger):
    assert os.path.exists(config.save_path)

    if config.opt_direction == 'max':
        problem_type = 'concave'
    else:
        problem_type = 'convex'

    if config.reaction_type == 'quad' and not config.constraints:
        rxn_yield = reactions.Quadratic(batch_size=config.batch_size,
                                        num_dims=config.num_params,
                                        ptype=problem_type,
                                        random=config.instrument_error)
    elif config.reaction_type == 'quad' and config.constraints:
        rxn_yield = reactions.ConstraintQuadratic(
            batch_size=config.batch_size,
            num_dims=config.num_params,
            ptype=problem_type,
            random=config.instrument_error)
    elif config.reaction_type == 'gmm':
        rxn_yield = reactions.GMM(batch_size=config.batch_size,
                                  num_dims=config.num_params,
                                  random=config.instrument_error,
                                  cov=config.norm_cov)

    if config.policy == 'srnn':
        cell = rnn.StochasticRNNCell(cell=rnn.LSTM,
                                     kwargs={
                                         'hidden_size': config.hidden_size,
                                         'use_batch_norm_h': config.batch_norm,
                                         'use_batch_norm_x': config.batch_norm,
                                         'use_batch_norm_c': config.batch_norm,
                                     },
                                     nlayers=config.num_layers,
                                     reuse=config.reuse)
    elif config.policy == 'rnn':
        cell = rnn.MultiInputRNNCell(cell=rnn.LSTM,
                                     kwargs={
                                         'hidden_size': config.hidden_size,
                                         'use_batch_norm_h': config.batch_norm,
                                         'use_batch_norm_x': config.batch_norm,
                                         'use_batch_norm_c': config.batch_norm,
                                     },
                                     nlayers=config.num_layers,
                                     reuse=config.reuse)
    model = Optimizer(cell=cell,
                      logger=logger,
                      func=rxn_yield,
                      ndim=config.num_params,
                      batch_size=config.batch_size,
                      unroll_len=config.unroll_length,
                      lr=config.learning_rate,
                      loss_type=config.loss_type,
                      optimizer=config.optimizer,
                      trainable_init=config.trainable_init,
                      direction=config.opt_direction,
                      constraints=config.constraints,
                      discount_factor=config.discount_factor)

    ckpt = tf.train.get_checkpoint_state(config.save_path)
    if ckpt and ckpt.model_checkpoint_path:
        logger.info('Reading model parameters from {}.'.format(
            ckpt.model_checkpoint_path))
        model.saver.restore(sess, ckpt.model_checkpoint_path)

    return model
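
load_model only builds the graph and restores weights when a checkpoint is present; the caller supplies the session, config, and logger. A hypothetical caller, assuming the TensorFlow 1.x-style session API implied by the checkpoint-restore calls above:

import tensorflow as tf

# config and logger are assumed to be created the same way as in the other
# examples (namedtuple loaded from config.json, module-level logger).
with tf.Session() as sess:
    model = load_model(sess, config, logger)
    # If a checkpoint was found in config.save_path, model.saver has restored
    # it, so the learned optimizer can now be evaluated or trained further.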