Example #1
def model_fn(features, labels, mode):
    # Regression model: mean squared error loss, plain SGD at 0.001.
    model = plx.models.Regressor(mode,
                                 graph_fn=graph_fn,
                                 loss=MeanSquaredErrorConfig(),
                                 optimizer=SGDConfig(learning_rate=0.001),
                                 summaries='all')
    return model(features, labels)
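Each of these model_fn examples assumes a user-defined graph_fn that builds the network from the input features. A minimal sketch of that contract in raw TensorFlow 1.x rather than Polyaxon's own layer wrappers; the (mode, features) signature and the 'x' feature key are assumptions here, not confirmed Polyaxon API:

import tensorflow as tf

def graph_fn(mode, features):
    # Two dense layers; a single output unit suits a scalar regressor.
    x = tf.layers.dense(features['x'], units=32, activation=tf.nn.relu)
    return tf.layers.dense(x, units=1)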
Example #2
def model_fn(features, labels, mode):
    # Absolute-difference (L1) loss; SGD starts at 0.5 and decays
    # exponentially every 10 steps.
    model = plx.models.Regressor(
        mode,
        graph_fn=graph_fn,
        loss=AbsoluteDifferenceConfig(),
        optimizer=SGDConfig(learning_rate=0.5, decay_type='exponential_decay', decay_steps=10),
        summaries='all',
        name='xor')
    return model(features, labels)
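Here decay_type='exponential_decay' with decay_steps=10 shrinks the learning rate on a fixed schedule. A framework-free sketch of that schedule, assuming it follows the usual tf.train.exponential_decay formula lr * decay_rate ** (step / decay_steps); the decay_rate of 0.96 below is illustrative, not a Polyaxon default:

def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
                      staircase=False):
    exponent = global_step / decay_steps
    if staircase:
        exponent = global_step // decay_steps  # decay in discrete jumps
    return learning_rate * decay_rate ** exponent

# Starting at 0.5, the rate after 20 steps with decay every 10 steps:
print(exponential_decay(0.5, global_step=20, decay_steps=10, decay_rate=0.96))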
Example #3
def model_fn(features, labels, mode):
    # Double DQN agent: Huber loss, exploration driven by the decay
    # config, and the target network synced every 10 training steps.
    model = plx.models.DDQNModel(
        mode,
        graph_fn=graph_fn,
        loss=HuberLossConfig(),
        num_states=env.num_states,
        num_actions=env.num_actions,
        optimizer=SGDConfig(learning_rate=0.01),
        exploration_config=DecayExplorationConfig(),
        target_update_frequency=10,
        summaries='all')
    return model(features, labels)
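DecayExplorationConfig presumably configures a decaying epsilon-greedy policy, the standard exploration scheme for DQN-family agents. A minimal framework-free sketch of that behaviour; the schedule parameters are illustrative, not Polyaxon defaults:

import random

def select_action(q_values, step, eps_start=1.0, eps_end=0.01, decay_steps=1000):
    # Linearly anneal epsilon from eps_start to eps_end over decay_steps.
    epsilon = max(eps_end, eps_start - (eps_start - eps_end) * step / decay_steps)
    if random.random() < epsilon:
        return random.randrange(len(q_values))  # explore
    return max(range(len(q_values)), key=q_values.__getitem__)  # exploit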
Example #4
def test_sgd_config(self):
    # Round-trip test: build an SGDConfig from a dict and check that
    # serializing it back yields the same values.
    config_dict = {
        'learning_rate': 0.001,
        'decay_type': "",
        'decay_rate': 0.,
        'decay_steps': 100,
        'start_decay_at': 0,
        'stop_decay_at': 1e10,
        'min_learning_rate': 1e-12,
        'staircase': False,
        'global_step': None,
        'use_locking': False,
        'name': 'optimizer'
    }
    config = SGDConfig.from_dict(config_dict)
    assert_equal_dict(config.to_dict(), config_dict)
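assert_equal_dict is a helper from the surrounding test suite and is not shown here. For a self-contained run, a hypothetical stand-in that compares the two dicts key by key would suffice:

def assert_equal_dict(actual, expected):
    # Same keys, same values; report the offending key on failure.
    assert set(actual) == set(expected)
    for key, value in expected.items():
        assert actual[key] == value, (key, actual[key], value)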