def _dynamic_rnn_model_fn(features, labels, mode):
  """The model to be passed to an `Estimator`.

  Builds a dynamic-RNN graph: feature columns -> RNN -> predictions,
  plus (for TRAIN/EVAL) a loss, eval metrics, and (for TRAIN) a train op.

  NOTE(review): this is a closure — `name`, `sequence_length_key`,
  `sequence_feature_columns`, `context_feature_columns`,
  `dropout_keep_probabilities`, `num_units`, `cell_type`, `target_column`,
  `dtype`, `parallel_iterations`, `swap_memory`, `prediction_type`,
  `problem_type`, `predict_probabilities`, `learning_rate`, `optimizer`,
  and `gradient_clipping_norm` are all free variables captured from an
  enclosing factory function not visible in this chunk.

  Args:
    features: `dict` of input tensors; `sequence_length_key` is read from it
      (may be absent — `dict.get` returns None) and the rest feed the
      sequence/context feature columns and the initial-state lookup.
    labels: label tensor; only consumed when mode is not INFER.
    mode: one of `model_fn.ModeKeys` (TRAIN / EVAL / INFER).

  Returns:
    A `model_fn.ModelFnOps` with predictions (including the final RNN state
    flattened into the prediction dict), loss, train_op, eval_metric_ops and
    output_alternatives. `loss`, `train_op` and `eval_metric_ops` are None
    in modes that do not use them.
  """
  with ops.name_scope(name):
    # `sequence_length` may be None if the feature is absent; downstream
    # loss/metric helpers are passed it as-is.
    sequence_length = features.get(sequence_length_key)
    sequence_input = build_sequence_input(features,
                                          sequence_feature_columns,
                                          context_feature_columns)
    # Dropout is applied only during training; eval/inference run the
    # cell without dropout.
    dropout = (dropout_keep_probabilities
               if mode == model_fn.ModeKeys.TRAIN
               else None)
    # This class promises to use the cell type selected by that function.
    cell = rnn_common.construct_rnn_cell(num_units, cell_type, dropout)
    # Recover an initial state (if the caller provided one) from the
    # feature dict, keyed per state component.
    initial_state = dict_to_state_tuple(features, cell)
    rnn_activations, final_state = construct_rnn(
        initial_state,
        sequence_input,
        cell,
        target_column.num_label_columns,
        dtype=dtype,
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory)

    loss = None  # Created below for modes TRAIN and EVAL.
    if prediction_type == rnn_common.PredictionType.MULTIPLE_VALUE:
      # One prediction per time step.
      prediction_dict = rnn_common.multi_value_predictions(
          rnn_activations, target_column, problem_type, predict_probabilities)
      if mode != model_fn.ModeKeys.INFER:
        loss = _multi_value_loss(
            rnn_activations, labels, sequence_length, target_column, features)
    elif prediction_type == rnn_common.PredictionType.SINGLE_VALUE:
      # One prediction per sequence (activation at the last valid step).
      prediction_dict = _single_value_predictions(
          rnn_activations, sequence_length, target_column,
          problem_type, predict_probabilities)
      if mode != model_fn.ModeKeys.INFER:
        loss = _single_value_loss(
            rnn_activations, labels, sequence_length, target_column, features)
    # NOTE(review): if `prediction_type` is neither MULTIPLE_VALUE nor
    # SINGLE_VALUE, `prediction_dict` is never bound and the next line
    # raises NameError — presumably the factory validates the value
    # upstream; confirm before relying on it.
    # Expose the final RNN state in the predictions so callers can feed it
    # back as the initial state of a subsequent call.
    state_dict = state_tuple_to_dict(final_state)
    prediction_dict.update(state_dict)

    eval_metric_ops = None
    if mode != model_fn.ModeKeys.INFER:
      eval_metric_ops = rnn_common.get_eval_metric_ops(
          problem_type, prediction_type, sequence_length, prediction_dict,
          labels)

    train_op = None
    if mode == model_fn.ModeKeys.TRAIN:
      train_op = optimizers.optimize_loss(
          loss=loss,
          global_step=None,  # Get it internally.
          learning_rate=learning_rate,
          optimizer=optimizer,
          clip_gradients=gradient_clipping_norm,
          summaries=optimizers.OPTIMIZER_SUMMARIES)

    output_alternatives = _get_output_alternatives(prediction_type,
                                                   problem_type,
                                                   prediction_dict)

    return model_fn.ModelFnOps(mode=mode,
                               predictions=prediction_dict,
                               loss=loss,
                               train_op=train_op,
                               eval_metric_ops=eval_metric_ops,
                               output_alternatives=output_alternatives)
def _rnn_model_fn(features, labels, mode):
  """The model to be passed to an `Estimator`.

  State-saving LSTM variant: reads (possibly chunked) sequences through
  `_read_batch`, runs a state-saving RNN over `num_unroll` steps, and emits
  one prediction per time step.

  NOTE(review): this is a closure — `name`, `dropout_keep_probabilities`,
  `num_units`, `num_rnn_layers`, `num_unroll`, `batch_size`,
  `sequence_feature_columns`, `context_feature_columns`, `num_threads`,
  `queue_capacity`, `seed`, `target_column`, `problem_type`,
  `predict_probabilities`, `learning_rate`, `optimizer`, and
  `gradient_clipping_norm` are free variables captured from an enclosing
  factory function not visible in this chunk.

  Args:
    features: `dict` of input tensors fed to the state-saving batch reader.
    labels: labels passed to `_read_batch`; for non-INFER modes the labels
      actually used for the loss are popped back out of the batched
      sequence features under `rnn_common.RNNKeys.LABELS_KEY`.
    mode: one of `model_fn.ModeKeys` (TRAIN / EVAL / INFER).

  Returns:
    A `model_fn.ModelFnOps` with per-step predictions (plus the final RNN
    state flattened into the prediction dict), loss, train_op and
    eval_metric_ops; the latter three are None in modes that do not use
    them.
  """
  with ops.name_scope(name):
    # Dropout only during training.
    dropout = (dropout_keep_probabilities
               if mode == model_fn.ModeKeys.TRAIN
               else None)
    cell = lstm_cell(num_units, num_rnn_layers, dropout)
    # `batch` is a state-saving batch object: it carries `.sequences`,
    # `.context`, `.length`, and acts as the state saver for the RNN.
    batch = _read_batch(
        cell=cell,
        features=features,
        labels=labels,
        mode=mode,
        num_unroll=num_unroll,
        num_rnn_layers=num_rnn_layers,
        batch_size=batch_size,
        sequence_feature_columns=sequence_feature_columns,
        context_feature_columns=context_feature_columns,
        num_threads=num_threads,
        queue_capacity=queue_capacity,
        seed=seed)
    sequence_features = batch.sequences
    context_features = batch.context
    if mode != model_fn.ModeKeys.INFER:
      # Labels were batched alongside the sequence features; remove them so
      # they are not treated as model inputs.
      labels = sequence_features.pop(rnn_common.RNNKeys.LABELS_KEY)
    inputs = _prepare_inputs_for_rnn(sequence_features, context_features,
                                     sequence_feature_columns, num_unroll)
    state_name = _get_lstm_state_names(num_rnn_layers)
    rnn_activations, final_state = construct_state_saving_rnn(
        cell=cell,
        inputs=inputs,
        num_label_columns=target_column.num_label_columns,
        state_saver=batch,
        state_name=state_name)

    loss = None  # Created below for modes TRAIN and EVAL.
    prediction_dict = rnn_common.multi_value_predictions(
        rnn_activations, target_column, problem_type, predict_probabilities)
    if mode != model_fn.ModeKeys.INFER:
      loss = _multi_value_loss(rnn_activations, labels, batch.length,
                               target_column, features)

    eval_metric_ops = None
    if mode != model_fn.ModeKeys.INFER:
      default_metrics = _get_default_metrics(problem_type, batch.length)
      eval_metric_ops = estimator._make_metrics_ops(  # pylint: disable=protected-access
          default_metrics, features, labels, prediction_dict)
    # Metrics above intentionally see `prediction_dict` WITHOUT the state
    # entries; the state is merged in only afterwards, for callers that
    # want to inspect or feed back the final RNN state.
    state_dict = state_tuple_to_dict(final_state)
    prediction_dict.update(state_dict)

    train_op = None
    if mode == model_fn.ModeKeys.TRAIN:
      train_op = optimizers.optimize_loss(
          loss=loss,
          global_step=None,  # Get it internally.
          learning_rate=learning_rate,
          optimizer=optimizer,
          clip_gradients=gradient_clipping_norm,
          summaries=optimizers.OPTIMIZER_SUMMARIES)

    return model_fn.ModelFnOps(mode=mode,
                               predictions=prediction_dict,
                               loss=loss,
                               train_op=train_op,
                               eval_metric_ops=eval_metric_ops)
def _rnn_model_fn(features, labels, mode):
  """The model to be passed to an `Estimator`.

  State-saving RNN variant with a configurable cell type
  (`rnn_common.construct_rnn_cell`): reads sequences through `_read_batch`,
  runs a state-saving RNN over `num_unroll` steps, and emits one prediction
  per time step.

  NOTE(review): this is a closure — `name`, `dropout_keep_probabilities`,
  `num_units`, `cell_type`, `num_unroll`, `batch_size`,
  `sequence_feature_columns`, `context_feature_columns`, `num_threads`,
  `queue_capacity`, `seed`, `target_column`, `problem_type`,
  `predict_probabilities`, `learning_rate`, `optimizer`, and
  `gradient_clipping_norm` are free variables captured from an enclosing
  factory function not visible in this chunk.

  Args:
    features: `dict` of input tensors fed to the state-saving batch reader.
    labels: labels passed to `_read_batch`; for non-INFER modes the labels
      actually used for the loss are popped back out of the batched
      sequence features under `rnn_common.RNNKeys.LABELS_KEY`.
    mode: one of `model_fn.ModeKeys` (TRAIN / EVAL / INFER).

  Returns:
    A `model_fn.ModelFnOps` with per-step predictions (plus the final RNN
    state flattened into the prediction dict), loss, train_op and
    eval_metric_ops; the latter three are None in modes that do not use
    them.
  """
  with ops.name_scope(name):
    # Dropout only during training.
    dropout = (dropout_keep_probabilities
               if mode == model_fn.ModeKeys.TRAIN
               else None)
    cell = rnn_common.construct_rnn_cell(num_units, cell_type, dropout)
    # `batch` is a state-saving batch object: it carries `.sequences`,
    # `.context`, `.length`, and acts as the state saver for the RNN.
    batch = _read_batch(
        cell=cell,
        features=features,
        labels=labels,
        mode=mode,
        num_unroll=num_unroll,
        batch_size=batch_size,
        sequence_feature_columns=sequence_feature_columns,
        context_feature_columns=context_feature_columns,
        num_threads=num_threads,
        queue_capacity=queue_capacity,
        seed=seed)
    sequence_features = batch.sequences
    context_features = batch.context
    if mode != model_fn.ModeKeys.INFER:
      # Labels were batched alongside the sequence features; remove them so
      # they are not treated as model inputs.
      labels = sequence_features.pop(rnn_common.RNNKeys.LABELS_KEY)
    inputs = _prepare_inputs_for_rnn(sequence_features, context_features,
                                     sequence_feature_columns, num_unroll)
    # State names are derived from the cell itself (works for any cell
    # type, unlike the LSTM-specific name helper).
    state_name = _get_state_names(cell)
    rnn_activations, final_state = construct_state_saving_rnn(
        cell=cell,
        inputs=inputs,
        num_label_columns=target_column.num_label_columns,
        state_saver=batch,
        state_name=state_name)

    loss = None  # Created below for modes TRAIN and EVAL.
    prediction_dict = rnn_common.multi_value_predictions(
        rnn_activations, target_column, problem_type, predict_probabilities)
    if mode != model_fn.ModeKeys.INFER:
      loss = _multi_value_loss(rnn_activations, labels, batch.length,
                               target_column, features)

    eval_metric_ops = None
    if mode != model_fn.ModeKeys.INFER:
      # This estimator always makes one prediction per step, hence
      # MULTIPLE_VALUE is hard-coded here.
      eval_metric_ops = rnn_common.get_eval_metric_ops(
          problem_type, rnn_common.PredictionType.MULTIPLE_VALUE,
          batch.length, prediction_dict, labels)
    # Metrics above intentionally see `prediction_dict` WITHOUT the state
    # entries; the state is merged in only afterwards, for callers that
    # want to inspect or feed back the final RNN state.
    state_dict = state_tuple_to_dict(final_state)
    prediction_dict.update(state_dict)

    train_op = None
    if mode == model_fn.ModeKeys.TRAIN:
      train_op = optimizers.optimize_loss(
          loss=loss,
          global_step=None,  # Get it internally.
          learning_rate=learning_rate,
          optimizer=optimizer,
          clip_gradients=gradient_clipping_norm,
          summaries=optimizers.OPTIMIZER_SUMMARIES)

    return model_fn.ModelFnOps(mode=mode,
                               predictions=prediction_dict,
                               loss=loss,
                               train_op=train_op,
                               eval_metric_ops=eval_metric_ops)