def test_build_learning_rate_summaries(self):
    """With decay configured, only a `learning_rate` summary is tracked."""
    training.create_global_step()
    optimizer = OptimizerConfig(name='Adadelta', decay_type='exponential_decay')
    config = ModelConfig(loss_config=LossConfig(name='log_loss'),
                         optimizer_config=optimizer)
    inputs = {'source_ids': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.ModeKeys.TRAIN,
                      graph_fn=self.get_dummy_graph_fn(),
                      config=config,
                      model_type=BaseModel.Types.CLASSIFIER,
                      summaries=['learning_rate'],
                      name='test',
                      params=None)
    model(inputs, labels, None, None)

    # The learning-rate summary should be the only tracked entry.
    tracked_names = list(
        get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES).keys())
    assert len(tracked_names) == 1
    assert tracked_names[0] == 'learning_rate'
def __init__(self, mode, model_type, graph_fn, loss_config, optimizer_config=None,
             eval_metrics_config=None, summaries='all', clip_gradients=0.5,
             clip_embed_gradients=0.1, name="Model"):
    """Build the base model module.

    Args:
        mode: current run mode; generator-only modes (GENERATE/ENCODE) are
            rejected for non-generator model types.
        model_type: one of `self.Types.VALUES`.
        graph_fn: callable that builds the model graph (validated by
            `_check_subgraph_fn`).
        loss_config: loss configuration object.
        optimizer_config: optional optimizer configuration; falls back to
            adam with learning_rate=0.001.
        eval_metrics_config: optional list of evaluation metric configs.
        summaries: summary options, validated via `summarizer.SummaryOptions`.
        clip_gradients: gradient clipping value.
        clip_embed_gradients: embedding-gradient clipping value.
        name: module name.

    Raises:
        TypeError: if `mode` is a generator-only mode but `model_type`
            is not `Types.GENERATOR`.
    """
    # Reject generator-only modes up front for non-generator models.
    if mode in [Modes.GENERATE, Modes.ENCODE] and model_type != self.Types.GENERATOR:
        raise TypeError(
            "Current model type `{}` does not support passed mode `{}`.".format(
                model_type, mode))

    super(BaseModel, self).__init__(mode, name, self.ModuleType.MODEL)

    self.loss_config = loss_config
    # Any falsy optimizer config (not just None) is replaced by the default.
    self.optimizer_config = optimizer_config or OptimizerConfig(
        'adam', learning_rate=0.001)
    self.eval_metrics_config = eval_metrics_config or []
    self.model_type = model_type
    self.summaries = summarizer.SummaryOptions.validate(summaries)
    # NOTE(review): `assert` is stripped under `python -O`; kept unchanged to
    # preserve the exception type (AssertionError) callers may rely on.
    assert model_type in self.Types.VALUES, "`model_type` provided is unsupported."

    self._clip_gradients = clip_gradients
    self._clip_embed_gradients = clip_embed_gradients
    self._grads_and_vars = None
    self._total_loss = None
    self._loss = None

    self._check_subgraph_fn(function=graph_fn, function_name='graph_fn')
    self._graph_fn = graph_fn
def __init__(self, mode, graph_fn, num_states, num_actions, loss_config=None,
             optimizer_config=None, eval_metrics_config=None, is_deterministic=False,
             is_continuous=False, summaries='all', clip_gradients=0.5,
             clip_embed_gradients=0.1, name="Model"):
    """Vanilla policy-gradient model.

    Thin constructor: supplies a default adam optimizer (learning_rate=0.004)
    and delegates everything else to the parent class unchanged.
    """
    # `or` keeps the original fallback semantics: any falsy config is replaced.
    optimizer_config = optimizer_config or OptimizerConfig(
        'adam', learning_rate=0.004)
    super(VPGModel, self).__init__(
        mode=mode,
        name=name,
        graph_fn=graph_fn,
        num_states=num_states,
        num_actions=num_actions,
        loss_config=loss_config,
        optimizer_config=optimizer_config,
        eval_metrics_config=eval_metrics_config,
        is_deterministic=is_deterministic,
        is_continuous=is_continuous,
        summaries=summaries,
        clip_gradients=clip_gradients,
        clip_embed_gradients=clip_embed_gradients)
def test_return_estimator_spec(self):
    """Calling the model in TRAIN mode returns an `EstimatorSpec`."""
    inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.Modes.TRAIN,
                      graph_fn=self.get_dummy_graph_fn(),
                      loss_config=LossConfig(module='log_loss'),
                      optimizer_config=OptimizerConfig(module='adadelta'),
                      model_type=BaseModel.Types.CLASSIFIER,
                      eval_metrics_config=[],
                      summaries=['learning_rate'],
                      name='test')
    spec = model(inputs, labels, None, None)
    assert isinstance(spec, EstimatorSpec)
def test_return_estimator_spec(self):
    """Calling the config-based model in TRAIN mode returns an `EstimatorSpec`."""
    config = ModelConfig(loss_config=LossConfig(name='log_loss'),
                         optimizer_config=OptimizerConfig(name='Adadelta'))
    inputs = {'source_ids': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.ModeKeys.TRAIN,
                      graph_fn=self.get_dummy_graph_fn(),
                      config=config,
                      model_type=BaseModel.Types.CLASSIFIER,
                      summaries=['learning_rate'],
                      name='test',
                      params=None)
    spec = model(inputs, labels, None, None)
    assert isinstance(spec, EstimatorSpec)
def test_handle_predict_mode(self):
    """PREDICT mode: no loss, no train op, no tracked losses in predictions."""
    inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.Modes.PREDICT,
                      graph_fn=self.get_dummy_graph_fn(),
                      loss_config=LossConfig(module='log_loss'),
                      optimizer_config=OptimizerConfig(module='adadelta'),
                      model_type=BaseModel.Types.CLASSIFIER,
                      eval_metrics_config=[],
                      summaries=['learning_rate'],
                      name='test')
    spec = model(inputs, labels, None, None)

    assert spec.loss is None
    assert spec.predictions is not None
    assert 'losses' not in spec.predictions
    assert spec.train_op is None
def test_handle_eval_mode(self):
    """EVAL mode: loss and losses are produced, but no train op."""
    inputs = {'source_ids': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.ModeKeys.EVAL,
                      graph_fn=self.get_dummy_graph_fn(),
                      loss_config=LossConfig(name='log_loss'),
                      optimizer_config=OptimizerConfig(name='Adadelta'),
                      eval_metrics_config=[],
                      model_type=BaseModel.Types.CLASSIFIER,
                      summaries=['learning_rate'],
                      name='test',
                      params=None)
    spec = model(inputs, labels, None, None)

    assert spec.loss is not None
    assert spec.predictions is not None
    assert 'losses' in spec.predictions
    assert spec.train_op is None
def test_build_no_summaries(self):
    """With `summaries=[]`, nothing is added to the summary collection."""
    inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.Modes.TRAIN,
                      graph_fn=self.get_dummy_graph_fn(),
                      loss_config=LossConfig(module='log_loss'),
                      optimizer_config=OptimizerConfig(module='adadelta'),
                      model_type=BaseModel.Types.CLASSIFIER,
                      eval_metrics_config=[],
                      summaries=[],
                      name='test')
    model(inputs, labels, None, None)

    # No summaries at all should have been tracked.
    tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
    assert tracked == {}
def test_does_not_build_learning_rate_summaries_if_no_decay(self):
    """Without a decay type, no learning-rate summary is created."""
    inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.Modes.TRAIN,
                      graph_fn=self.get_dummy_graph_fn(),
                      loss_config=LossConfig(module='log_loss'),
                      optimizer_config=OptimizerConfig(module='adadelta'),
                      model_type=BaseModel.Types.CLASSIFIER,
                      eval_metrics_config=[],
                      summaries=['learning_rate'],
                      name='test')
    model(inputs, labels, None, None)

    # The summary collection should contain nothing for a constant rate.
    tracked_names = list(
        get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES).keys())
    assert len(tracked_names) == 0
def __init__(self, mode, encoder_fn, decoder_fn, bridge_fn, loss_config=None,
             optimizer_config=None, summaries='all', eval_metrics_config=None,
             clip_gradients=0.5, clip_embed_gradients=0.1, name="Generator"):
    """Generator model assembled from encoder/bridge/decoder sub-graph callables.

    Defaults to an adadelta optimizer (learning_rate=0.4) and a sigmoid
    cross-entropy loss when no configs are supplied.
    """
    optimizer_config = optimizer_config or OptimizerConfig(
        'adadelta', learning_rate=0.4)
    loss_config = loss_config or LossConfig(module='sigmoid_cross_entropy')

    # Validate the sub-graph callables before wiring them together.
    self._check_subgraph_fn(function=encoder_fn, function_name='encoder_fn')
    self._check_subgraph_fn(function=decoder_fn, function_name='decoder_fn')
    self._check_bridge_fn(function=bridge_fn)
    # NOTE(review): attribute is `_encode_fn` (not `_encoder_fn`) — kept as-is
    # since code outside this block may read it under this exact name.
    self._encode_fn = encoder_fn
    self._decoder_fn = decoder_fn
    self._bridge_fn = bridge_fn

    graph_fn = self._build_graph_fn()
    super(Generator, self).__init__(
        mode=mode,
        name=name,
        model_type=self.Types.GENERATOR,
        graph_fn=graph_fn,
        loss_config=loss_config,
        optimizer_config=optimizer_config,
        eval_metrics_config=eval_metrics_config,
        summaries=summaries,
        clip_gradients=clip_gradients,
        clip_embed_gradients=clip_embed_gradients)
def test_build_variables_summaries(self):
    """With `summaries=['variables']`, one summary per trainable variable."""
    inputs = {'x': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.Modes.TRAIN,
                      graph_fn=self.get_dummy_graph_fn(),
                      loss_config=LossConfig(module='log_loss'),
                      optimizer_config=OptimizerConfig(module='adadelta'),
                      model_type=BaseModel.Types.CLASSIFIER,
                      eval_metrics_config=[],
                      summaries=['variables'],
                      name='test')
    model(inputs, labels, None, None)

    # Tracked summary names must match the trainable variable names exactly.
    expected_names = {var.op.name for var in tf.trainable_variables()}
    tracked_names = set(
        get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES).keys())
    assert expected_names == tracked_names
def test_build_all_summaries(self):
    """`summaries='all'` tracks learning rate, activations, gradients, losses."""
    training.create_global_step()
    optimizer = OptimizerConfig(name='Adadelta', decay_type='exponential_decay')
    config = ModelConfig(loss_config=LossConfig(name='log_loss'),
                         optimizer_config=optimizer)
    inputs = {'source_ids': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.ModeKeys.TRAIN,
                      graph_fn=self.get_dummy_graph_fn(),
                      config=config,
                      model_type=BaseModel.Types.CLASSIFIER,
                      summaries='all',
                      name='test',
                      params=None)
    model(inputs, labels, None, None)

    # Tally names per category. The elif chain preserves the original
    # precedence so each name lands in at most one bucket.
    counts = {'learning_rate': 0, 'activations': 0, 'gradients': 0, 'losses': 0}
    for summary_name in get_tracked(
            collection=tf.GraphKeys.SUMMARIES_BY_NAMES).keys():
        if 'learning_rate' in summary_name:
            counts['learning_rate'] += 1
        elif 'Activation' in summary_name:
            counts['activations'] += 1
        elif 'Loss' in summary_name:
            counts['losses'] += 1
        elif 'Gradient' in summary_name:
            counts['gradients'] += 1

    assert counts['learning_rate'] > 0
    assert counts['activations'] > 0
    assert counts['gradients'] > 0
    assert counts['losses'] > 0
def test_build_gradients_summaries(self):
    """With `summaries=['gradients']`, every tracked summary is a gradient."""
    inputs = {'source_ids': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.ModeKeys.TRAIN,
                      graph_fn=self.get_dummy_graph_fn(),
                      loss_config=LossConfig(name='log_loss'),
                      optimizer_config=OptimizerConfig(name='Adadelta'),
                      model_type=BaseModel.Types.CLASSIFIER,
                      eval_metrics_config=[],
                      summaries=['gradients'],
                      name='test',
                      params=None)
    model(inputs, labels, None, None)

    # Every tracked name must be a gradient summary (vacuously true if empty,
    # matching the original per-name loop).
    tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
    assert all('Gradient' in summary_name for summary_name in tracked.keys())
def test_build_no_summaries(self):
    """With `summaries=[]`, nothing is added to the summary collection."""
    config = ModelConfig(loss_config=LossConfig(name='log_loss'),
                         optimizer_config=OptimizerConfig(name='Adadelta'))
    inputs = {'source_ids': tf.placeholder(tf.float32, [2, 89])}
    labels = tf.constant([[1], [1]])
    model = BaseModel(plx.ModeKeys.TRAIN,
                      graph_fn=self.get_dummy_graph_fn(),
                      config=config,
                      model_type=BaseModel.Types.CLASSIFIER,
                      summaries=[],
                      name='test',
                      params=None)
    model(inputs, labels, None, None)

    # No summaries at all should have been tracked.
    tracked = get_tracked(collection=tf.GraphKeys.SUMMARIES_BY_NAMES)
    assert tracked == {}