Example #1
0
    def my_model_fn(mode):
      """Build a three-stage pipeline spec for evaluation mode."""
      self.assertEqual(mode, model_fn_lib.ModeKeys.EVAL)

      def stage1(features):
        # First stage: scale the features by a trainable weight.
        weight = variable_scope.get_variable("w1", initializer=1.0)
        return weight * features

      def stage2(partial):
        # Second stage: apply a second trainable weight.
        weight = variable_scope.get_variable("w2", initializer=1.0)
        return weight * partial

      def stage3(partial):
        # Final stage: emit both the value and its square.
        return partial, partial * partial

      def eval_metrics_fn(partial, squared):
        # Map the last stage's outputs onto named evaluation metrics.
        metrics = {"mean": metrics_impl.mean(partial)}
        metrics["loss"] = squared
        return metrics

      return IPUPipelineEstimatorSpec(
          mode,
          computational_stages=[stage1, stage2, stage3],
          device_mapping=[0, 1, 0],
          eval_metrics_fn=eval_metrics_fn,
          gradient_accumulation_count=gradient_accumulation_count)
Example #2
0
    def my_model_fn(mode):
      """Build a two-stage pipeline that threads the global step through."""

      def stage1(global_step, features, labels):
        # Scale features by a trainable weight; forward step and labels.
        w = variable_scope.get_variable(name="w", initializer=initial_w)
        return global_step, w * features, labels

      def stage2(global_step, partial, labels):
        # Combine the partial result with the labels into the loss.
        return global_step, partial + labels

      def optimizer_function(global_step, loss):
        # Learning rate decays linearly with the observed global step.
        rate = 0.1 - 0.001 * global_step
        opt = gradient_descent.GradientDescentOptimizer(rate)
        return pipelining_ops.OptimizerFunctionOutput(opt, loss)

      def eval_metrics_fn(global_step, loss):
        metrics = {"global_step_observed": metrics_impl.mean(global_step)}
        metrics["loss"] = loss
        return metrics

      # Feed the global step in as an extra pipeline input, cast to float.
      global_step_input = math_ops.cast(training_util.get_global_step(),
                                        dtype=np.float32)

      return IPUPipelineEstimatorSpec(
          mode,
          computational_stages=[stage1, stage2],
          optimizer_function=optimizer_function,
          eval_metrics_fn=eval_metrics_fn,
          inputs=[global_step_input],
          gradient_accumulation_count=gradient_accumulation_count)
Example #3
0
    def my_model_fn(mode):
      """Pipeline whose last stage returns either a list or a dict."""

      def stage1(features):
        w1 = variable_scope.get_variable("w1", initializer=1.0)
        return w1 * features

      def stage2(partial):
        squared = partial * partial
        if arg_type is list:
          return [partial, squared]
        assert arg_type is dict
        # Keys listed in reverse order to verify that outputs are matched
        # to the eval_metrics_fn arguments by name, not by position.
        return {"squared": squared, "partial": partial}

      def eval_metrics_fn(partial, squared):
        metrics = {"mean": metrics_impl.mean(partial)}
        metrics["loss"] = squared
        return metrics

      return IPUPipelineEstimatorSpec(
          mode,
          computational_stages=[stage1, stage2],
          eval_metrics_fn=eval_metrics_fn,
          gradient_accumulation_count=gradient_accumulation_count)
Example #4
0
    def model_fn_with_zero_stages(mode):
      """Deliberately invalid spec: an empty list of pipeline stages."""

      def optimizer_function():
        # Never reached -- stage validation should fail first.
        pass

      return IPUPipelineEstimatorSpec(
          mode,
          computational_stages=[],
          gradient_accumulation_count=1,
          optimizer_function=optimizer_function)
Example #5
0
    def model_fn_without_optimizer_function(mode):
      """Spec missing the optimizer_function that training requires."""

      def stage1(features, labels):
        # Identity stage: forward both inputs unchanged.
        return features, labels

      def stage2(partial, labels):
        # Produce a scalar-per-sample loss value.
        return partial + labels

      return IPUPipelineEstimatorSpec(
          mode,
          computational_stages=[stage1, stage2],
          gradient_accumulation_count=1)
Example #6
0
    def my_model_fn(mode):
      """Two-stage pipeline whose final stage produces two outputs."""

      def stage1(features):
        weight = variable_scope.get_variable("w", initializer=1)
        return weight * features

      def stage2(partial):
        # Return the prediction together with the intermediate value.
        return partial * partial, partial

      return IPUPipelineEstimatorSpec(
          mode,
          computational_stages=[stage1, stage2],
          gradient_accumulation_count=4)
Example #7
0
    def model_fn_without_loss(mode):
      """Spec whose eval metrics dict omits the "loss" entry."""

      def stage1(features):
        squared = features * features
        return squared

      def stage2(partial):
        doubled = partial + partial
        return doubled

      def eval_metrics_fn(prediction):
        # No "loss" key here -- that is the condition being exercised.
        metrics = {"prediction": metrics_impl.mean(prediction)}
        return metrics

      return IPUPipelineEstimatorSpec(
          mode,
          computational_stages=[stage1, stage2],
          eval_metrics_fn=eval_metrics_fn,
          gradient_accumulation_count=4)
Example #8
0
    def my_model_fn(mode):
      """Two-stage pipeline spec for prediction mode."""
      self.assertEqual(model_fn_lib.ModeKeys.PREDICT, mode)

      def stage1(features):
        weight = variable_scope.get_variable("w", initializer=1)
        return weight * features

      def stage2(partial):
        # The final stage's single output is the model prediction.
        return partial * partial

      return IPUPipelineEstimatorSpec(
          mode,
          computational_stages=[stage1, stage2],
          gradient_accumulation_count=gradient_accumulation_count)
Example #9
0
    def my_model_fn(mode):
      """Two-stage training pipeline with a gradient-descent optimizer."""
      self.assertEqual(model_fn_lib.ModeKeys.TRAIN, mode)

      def stage1(features, labels):
        weight = variable_scope.get_variable(name="w", initializer=initial_w)
        return weight * features, labels

      def stage2(partial, labels):
        # The loss is simply the partial result plus the labels.
        return partial + labels

      def optimizer_function(loss):
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate)
        return pipelining_ops.OptimizerFunctionOutput(optimizer, loss)

      return IPUPipelineEstimatorSpec(
          mode,
          computational_stages=[stage1, stage2],
          optimizer_function=optimizer_function,
          gradient_accumulation_count=gradient_accumulation_count)