Example #1
0
    def test_build_gradients_summaries(self):
        """Requesting only 'gradients' summaries must track nothing else."""
        inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
        targets = tf.constant([[1], [1]])

        model = BaseModel(plx.Modes.TRAIN,
                          graph_fn=self.get_dummy_graph_fn(),
                          loss_config=LossConfig(module='log_loss'),
                          optimizer_config=OptimizerConfig(module='adadelta'),
                          model_type=BaseModel.Types.CLASSIFIER,
                          eval_metrics_config=[],
                          summaries=['gradients'],
                          name='test')

        model(inputs, targets, None, None)

        # Every tracked summary name must identify a gradient summary.
        tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
        assert all('Gradient' in name for name in tracked)
Example #2
0
    def test_build_variables_summaries(self):
        """Requesting only 'variables' summaries must track exactly one
        summary per trainable variable, keyed by the variable op name."""
        inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
        targets = tf.constant([[1], [1]])

        model = BaseModel(plx.Modes.TRAIN,
                          graph_fn=self.get_dummy_graph_fn(),
                          loss_config=LossConfig(module='log_loss'),
                          optimizer_config=OptimizerConfig(module='adadelta'),
                          model_type=BaseModel.Types.CLASSIFIER,
                          eval_metrics_config=[],
                          summaries=['variables'],
                          name='test')

        model(inputs, targets, None, None)

        # The tracked summary names must mirror the trainable variable names.
        expected_names = {var.op.name for var in tf.trainable_variables()}
        tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
        assert expected_names == set(tracked.keys())
Example #3
0
    def test_build_no_summaries(self):
        """An empty `summaries` list must leave the tracked collection empty."""
        inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
        targets = tf.constant([[1], [1]])

        model = BaseModel(plx.ModeKeys.TRAIN,
                          graph_fn=self.get_dummy_graph_fn(),
                          loss_config=LossConfig(name='log_loss'),
                          optimizer_config=OptimizerConfig(name='Adadelta'),
                          model_type=BaseModel.Types.CLASSIFIER,
                          eval_metrics_config=[],
                          summaries=[],
                          name='test',
                          params=None)

        model(inputs, targets, None, None)

        # No summaries were requested, so nothing should have been tracked.
        tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
        assert tracked == {}
Example #4
0
    def test_does_not_build_learning_rate_summaries_if_no_decay(self):
        """Without a decay schedule, requesting 'learning_rate' summaries
        must produce no summaries at all (the rate is constant)."""
        inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
        targets = tf.constant([[1], [1]])

        model = BaseModel(plx.ModeKeys.TRAIN,
                          graph_fn=self.get_dummy_graph_fn(),
                          loss_config=LossConfig(name='log_loss'),
                          optimizer_config=OptimizerConfig(name='Adadelta'),
                          model_type=BaseModel.Types.CLASSIFIER,
                          eval_metrics_config=[],
                          summaries=['learning_rate'],
                          name='test',
                          params=None)

        model(inputs, targets, None, None)

        # No decay type was configured, so the collection must stay empty.
        tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
        assert len(tracked) == 0
Example #5
0
    def test_build_all_summaries(self):
        """`summaries='all'` must yield at least one summary of every kind:
        learning rate, activations, gradients and loss."""
        training.create_global_step()
        config = ModelConfig(loss_config=LossConfig(name='log_loss'),
                             optimizer_config=OptimizerConfig(
                                 name='Adadelta',
                                 decay_type='exponential_decay'))

        inputs = {'source_ids': tf.placeholder(tf.float32, [2, 89])}
        targets = tf.constant([[1], [1]])

        model = BaseModel(plx.ModeKeys.TRAIN,
                          graph_fn=self.get_dummy_graph_fn(),
                          config=config,
                          model_type=BaseModel.Types.CLASSIFIER,
                          summaries='all',
                          name='test',
                          params=None)

        model(inputs, targets, None, None)

        # Classify each tracked summary name by the first matching token.
        # Token order mirrors the original if/elif precedence.
        tokens = ('learning_rate', 'Activation', 'Loss', 'Gradient')
        counts = dict.fromkeys(tokens, 0)
        tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
        for summary_name in tracked.keys():
            for token in tokens:
                if token in summary_name:
                    counts[token] += 1
                    break

        assert counts['learning_rate'] > 0
        assert counts['Activation'] > 0
        assert counts['Gradient'] > 0
        assert counts['Loss'] > 0
Example #6
0
    def test_build_loss_summaries(self):
        """Requesting only 'loss' summaries must track nothing else."""
        config = ModelConfig(loss_config=LossConfig(name='log_loss'),
                             optimizer_config=OptimizerConfig(name='Adadelta'))

        inputs = {'source_ids': tf.placeholder(tf.float32, [2, 89])}
        targets = tf.constant([[1], [1]])

        model = BaseModel(plx.ModeKeys.TRAIN,
                          graph_fn=self.get_dummy_graph_fn(),
                          config=config,
                          model_type=BaseModel.Types.CLASSIFIER,
                          summaries=['loss'],
                          name='test',
                          params=None)

        model(inputs, targets, None, None)

        # Every tracked summary name must identify a loss summary.
        tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
        assert all('Loss' in name for name in tracked)
Example #7
0
 def __init__(self,
              mode,
              graph_fn,
              loss_config=None,
              optimizer_config=None,
              eval_metrics_config=None,
              summaries='all',
              clip_gradients=0.5,
              clip_embed_gradients=0.1,
              name="Regressor"):
     """Builds a regression model.

     Defaults the loss to mean squared error when none is supplied, then
     delegates everything else to the base model constructor with the
     REGRESSOR model type.
     """
     # Fall back to MSE when no loss configuration was provided.
     if not loss_config:
         loss_config = LossConfig(module='mean_squared_error')
     super(Regressor, self).__init__(
         mode=mode,
         name=name,
         model_type=self.Types.REGRESSOR,
         graph_fn=graph_fn,
         loss_config=loss_config,
         optimizer_config=optimizer_config,
         eval_metrics_config=eval_metrics_config,
         summaries=summaries,
         clip_gradients=clip_gradients,
         clip_embed_gradients=clip_embed_gradients)
Example #8
0
    def test_build_learning_rate_summaries(self):
        """A decaying learning rate plus summaries=['learning_rate'] must
        track exactly one summary, named 'learning_rate'."""
        training.create_global_step()
        inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
        targets = tf.constant([[1], [1]])

        model = BaseModel(plx.Modes.TRAIN,
                          graph_fn=self.get_dummy_graph_fn(),
                          loss_config=LossConfig(module='log_loss'),
                          optimizer_config=OptimizerConfig(
                              module='adadelta',
                              decay_type='exponential_decay'),
                          model_type=BaseModel.Types.CLASSIFIER,
                          eval_metrics_config=[],
                          summaries=['learning_rate'],
                          name='test')

        model(inputs, targets, None, None)

        # Exactly one summary, and it is the learning-rate one.
        tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
        assert list(tracked.keys()) == ['learning_rate']
Example #9
0
    def __init__(self,
                 mode,
                 encoder_fn,
                 decoder_fn,
                 bridge_fn,
                 loss_config=None,
                 optimizer_config=None,
                 summaries='all',
                 eval_metrics_config=None,
                 clip_gradients=0.5,
                 clip_embed_gradients=0.1,
                 name="Generator"):
        """Builds a generator model from encoder, decoder and bridge callables.

        Defaults the optimizer to Adadelta (learning rate 0.4) and the loss
        to sigmoid cross-entropy, validates the provided sub-graph callables,
        stores them, and delegates to the base model constructor with a
        composed graph function.
        """
        # Fill in defaults for any configuration that was not supplied.
        if not optimizer_config:
            optimizer_config = OptimizerConfig('adadelta', learning_rate=0.4)
        if not loss_config:
            loss_config = LossConfig(module='sigmoid_cross_entropy')

        # Validate the user-provided callables before storing them.
        self._check_subgraph_fn(function=encoder_fn,
                                function_name='encoder_fn')
        self._check_subgraph_fn(function=decoder_fn,
                                function_name='decoder_fn')
        self._check_bridge_fn(function=bridge_fn)

        # NOTE(review): attribute is `_encode_fn` (not `_encoder_fn`);
        # kept as-is because other methods may read it under this name.
        self._encode_fn = encoder_fn
        self._decoder_fn = decoder_fn
        self._bridge_fn = bridge_fn

        composed_graph_fn = self._build_graph_fn()

        super(Generator, self).__init__(
            mode=mode,
            name=name,
            model_type=self.Types.GENERATOR,
            graph_fn=composed_graph_fn,
            loss_config=loss_config,
            optimizer_config=optimizer_config,
            eval_metrics_config=eval_metrics_config,
            summaries=summaries,
            clip_gradients=clip_gradients,
            clip_embed_gradients=clip_embed_gradients)
Example #10
0
File: base.py  Project: chandu088/p
    def __init__(self, mode, graph_fn, num_states, num_actions, loss_config=None,
                 optimizer_config=None, eval_metrics_config=None, discount=0.97,
                 exploration_config=None, use_target_graph=True, target_update_frequency=5,
                 is_continuous=False, dueling='mean', use_expert_demo=False, summaries='all',
                 clip_gradients=0.5, clip_embed_gradients=0.1, name="Model"):
        """Builds a Q-learning model on top of the base model.

        Stores the RL-specific configuration on the instance, defaults the
        loss to Huber loss when none is given, and delegates the remaining
        setup to `BaseModel.__init__` with the RL model type.
        """
        # RL-specific settings retained for use elsewhere in the class.
        self.num_states = num_states
        self.num_actions = num_actions
        self.exploration_config = exploration_config
        self.discount = discount  # presumably the reward discount factor — verify
        self.use_target_graph = use_target_graph
        self.target_update_frequency = target_update_frequency
        self.is_continuous = is_continuous
        self.dueling = dueling  # NOTE(review): semantics of 'mean' not visible here — confirm
        self.use_expert_demo = use_expert_demo
        # Default loss is Huber loss when none is provided.
        loss_config = loss_config or LossConfig(module='huber_loss')

        super(BaseQModel, self).__init__(
            mode=mode, name=name, model_type=self.Types.RL, graph_fn=graph_fn,
            loss_config=loss_config, optimizer_config=optimizer_config,
            eval_metrics_config=eval_metrics_config, summaries=summaries,
            clip_gradients=clip_gradients, clip_embed_gradients=clip_embed_gradients)

        # Graph handles start empty; presumably populated later when the
        # train/target graphs are built (not visible in this chunk).
        self._train_graph = None
        self._target_graph = None