Code example #1
class FlatPredictionModel(training.Model):  # training.Model: Keras Model base class (assumed import)

  def __init__(self,
               num_features,
               input_window_size,
               output_window_size,
               hidden_layer_sizes=None):
    """Construct the flat prediction model.

    Args:
      num_features: Number of input features per time step.
      input_window_size: Number of past time steps of data to look at when doing
        the regression.
      output_window_size: Number of future time steps to predict. Note that
        setting it to > 1 empirically seems to give a better fit.
      hidden_layer_sizes: List of sizes of hidden layers; if None or empty,
        no hidden layers are used.
    """
    super(FlatPredictionModel, self).__init__()
    self._input_flatten = core.Flatten()
    self._output_flatten = core.Flatten()
    if hidden_layer_sizes:
      self._hidden_layers = sequential.Sequential([
          core.Dense(layer_size, activation=nn_ops.relu)
          for layer_size in hidden_layer_sizes])
    else:
      self._hidden_layers = None
    self._mean_transform = core.Dense(num_features * output_window_size,
                                      name="predicted_mean")
    self._covariance_transform = core.Dense(num_features * output_window_size,
                                            name="log_sigma_square")
    self._prediction_shape = [-1, output_window_size, num_features]
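For context, a sketch of how a call() method could consume these layers (the
original class body is not shown here, so this is an assumed reconstruction;
math_ops.exp and array_ops.reshape are the TF-internal spellings of tf.exp and
tf.reshape):

  def call(self, input_window_features):
    """Hypothetical forward pass: flatten, hidden stack, then the two heads."""
    activation = self._input_flatten(input_window_features)
    if self._hidden_layers is not None:
      activation = self._hidden_layers(activation)
    mean = self._mean_transform(activation)
    # The covariance head predicts log(sigma^2); exponentiate to recover it.
    covariance = math_ops.exp(self._covariance_transform(activation))
    return {"mean": array_ops.reshape(mean, self._prediction_shape),
            "covariance": array_ops.reshape(covariance, self._prediction_shape)}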
Code example #2
    def benchmark_layers_core_flatten_overhead(self):
        """Benchmarks the overhead of repeatedly calling a Flatten layer."""
        layer = core.Flatten()
        x = ops.convert_to_tensor([[[1.]]])

        def fn():
            layer(x)

        self._run(fn, 10000)
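Outside the TF benchmark harness, roughly the same measurement can be made
with timeit (a sketch assuming TF2 eager mode and the public tf.keras API):

import timeit

import tensorflow as tf

layer = tf.keras.layers.Flatten()
x = tf.constant([[[1.]]])
layer(x)  # Call once up front so layer build cost is not measured.

# Average seconds per call over 10000 invocations.
print(timeit.timeit(lambda: layer(x), number=10000) / 10000)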
Code example #3
    def build_layer_fn(x, w_initializer, b_initializer):
      x = keras_core.Flatten()(x)
      layer = keras_core.Dense(
          units=3,
          kernel_initializer=w_initializer,
          bias_initializer=b_initializer)
      net = layer.apply(x)
      expected_normalized_vars = {'keras.layers.Dense.kernel': layer.kernel}
      expected_not_normalized_vars = {'keras.layers.Dense.bias': layer.bias}

      return net, expected_normalized_vars, expected_not_normalized_vars
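A hypothetical way to exercise this helper (input shape and initializers are
assumed for illustration; TF1 graph mode, matching the layer.apply() style):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # Placeholders require TF1 graph mode.
x = tf.placeholder(tf.float32, shape=(None, 2, 2))
net, normalized_vars, not_normalized_vars = build_layer_fn(
    x,
    w_initializer=tf.ones_initializer(),
    b_initializer=tf.zeros_initializer())
# Flatten maps (None, 2, 2) -> (None, 4); Dense(3) yields net of shape (None, 3).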
Code example #4
def mnist_model(input_shape):
  """Creates a MNIST model."""
  model = sequential_model_lib.Sequential()

  # Adding custom pass-through layer to visualize input images.
  model.add(LayerForImageSummary())

  model.add(
      conv_layer_lib.Conv2D(
          32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
  model.add(conv_layer_lib.Conv2D(64, (3, 3), activation='relu'))
  model.add(pool_layer_lib.MaxPooling2D(pool_size=(2, 2)))
  model.add(layer_lib.Dropout(0.25))
  model.add(layer_lib.Flatten())
  model.add(layer_lib.Dense(128, activation='relu'))
  model.add(layer_lib.Dropout(0.5))
  model.add(layer_lib.Dense(NUM_CLASSES, activation='softmax'))

  # Adding custom pass-through layer for summary recording.
  model.add(LayerForHistogramSummary())
  return model
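A minimal way to drive this builder (a sketch; assumes 28x28 grayscale MNIST
inputs, and that NUM_CLASSES plus the two summary layers come from the
enclosing test file):

model = mnist_model(input_shape=(28, 28, 1))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()  # Flatten bridges the conv stack into the dense classifier.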
Code example #5
  def testIgnoreSaveCounter(self):
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    with self.cached_session() as session:
      # Create and save a model using Saver() before using a Checkpoint. This
      # generates a snapshot without the Checkpoint's `save_counter`.
      model = sequential.Sequential()
      model.add(core.Flatten(input_shape=(1,)))
      model.add(core.Dense(1))
      name_saver = saver_lib.Saver(model.trainable_variables)
      save_path = name_saver.save(
          sess=session, save_path=checkpoint_prefix, global_step=1)
      # Checkpoint.restore must successfully load that checkpoint.
      ckpt = trackable_utils.Checkpoint(model=model)
      status = ckpt.restore(save_path)
      status.assert_existing_objects_matched()
      # It should, however, refuse to load a checkpoint where an unrelated
      # `save_counter` variable is missing.
      model.layers[1].var = variables_lib.Variable(0., name="save_counter")
      status = ckpt.restore(save_path)
      with self.assertRaises(AssertionError):
        status.assert_existing_objects_matched()
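With the public TF2 API, the happy-path half of this check looks roughly as
follows (a sketch; the save path is assumed):

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(1,)),
    tf.keras.layers.Dense(1),
])
model(tf.zeros([1, 1]))  # Build the variables before saving.

path = tf.train.Checkpoint(model=model).save("/tmp/ckpt_demo")
status = tf.train.Checkpoint(model=model).restore(path)
# Raises AssertionError if some existing object has no checkpointed value.
status.assert_existing_objects_matched()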
Code example #6
    def testMobileNetV2Fit(self, tensor_debug_mode):
        """Test that training Keras MobileNetV2 works with dumping."""
        # Use a large circular buffer to make sure we capture all the executed ops.
        writer = dumping_callback.enable_dump_debug_info(
            self.dump_root,
            tensor_debug_mode=tensor_debug_mode,
            circular_buffer_size=100000)
        model = mobilenet_v2.MobileNetV2(input_shape=(32, 32, 3),
                                         alpha=0.1,
                                         weights=None)
        y = model.layers[22].output
        y = core.Flatten()(y)
        y = core.Dense(1)(y)
        model = models.Model(inputs=model.inputs, outputs=y)

        batch_size = 2
        xs = np.zeros([batch_size] + list(model.input_shape[1:]))
        ys = np.zeros([batch_size] + list(model.output_shape[1:]))
        model.compile(optimizer="sgd", loss="mse")
        epochs = 1
        history = model.fit(xs, ys, epochs=epochs, verbose=0)
        self.assertLen(history.history["loss"], epochs)

        writer.FlushNonExecutionFiles()
        writer.FlushExecutionFiles()

        stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
        (context_ids, op_types, op_name_to_op_type,
         _) = self._readAndCheckGraphsFile(stack_frame_by_id)
        # Simply assert that graphs are recorded and refrain from asserting on
        # the internal details of the Keras model.
        self.assertTrue(context_ids)
        self.assertTrue(op_types)
        self.assertTrue(op_name_to_op_type)

        if context.executing_eagerly():
            # NOTE(b/142486213): Execution of the TF function happens with
            # Session.run() in v1 graph mode, hence it doesn't get logged to the
            # .execution file.
            executed_op_types, _, _, _, _ = self._readAndCheckExecutionFile()
            self.assertTrue(executed_op_types)

        (op_names, _, _, tensor_values
         ) = self._readAndCheckGraphExecutionTracesFile(context_ids)
        executed_op_types = [
            op_name_to_op_type[op_name] for op_name in op_names
        ]
        # These are the ops that we can safely assume to have been executed during
        # the model's fit() call.
        self.assertIn("Conv2D", executed_op_types)
        self.assertIn("Relu6", executed_op_types)
        self.assertIn("Conv2DBackpropFilter", executed_op_types)
        self.assertIn("Relu6Grad", executed_op_types)
        if tensor_debug_mode == "NO_TENSOR":
            # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
            # be an empty float32 tensor.
            for tensor_value in tensor_values:
                self.assertEqual(tensor_value.dtype, np.float32)
                self.assertEqual(tensor_value.shape, (0, ))
        elif tensor_debug_mode == "FULL_TENSOR":
            conv2d_values = [
                tensor_values[i] for i, op_type in enumerate(executed_op_types)
                if op_type == "Conv2D"
            ]
            self.assertTrue(conv2d_values)
            for conv2d_value in conv2d_values:
                self.assertGreater(len(conv2d_value.shape), 1)
                self.assertEqual(conv2d_value.shape[0], batch_size)
            relu6_values = [
                tensor_values[i] for i, op_type in enumerate(executed_op_types)
                if op_type == "Relu6"
            ]
            self.assertTrue(relu6_values)
            for relu6_value in relu6_values:
                self.assertGreater(len(relu6_value.shape), 1)
                self.assertEqual(relu6_value.shape[0], batch_size)
            conv2d_bp_filter_values = [
                tensor_values[i] for i, op_type in enumerate(executed_op_types)
                if op_type == "Conv2DBackpropFilter"
            ]
            self.assertTrue(conv2d_bp_filter_values)
            for conv2d_bp_filter_value in conv2d_bp_filter_values:
                self.assertGreater(len(conv2d_bp_filter_value.shape), 1)
            relu6_grad_values = [
                tensor_values[i] for i, op_type in enumerate(executed_op_types)
                if op_type == "Relu6Grad"
            ]
            self.assertTrue(relu6_grad_values)
            for relu6_grad_value in relu6_grad_values:
                self.assertGreater(len(relu6_grad_value.shape), 1)
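The dumping mechanism exercised above is exposed publicly as
tf.debugging.experimental.enable_dump_debug_info; a minimal sketch (dump path
and mode chosen here for illustration):

import tensorflow as tf

writer = tf.debugging.experimental.enable_dump_debug_info(
    "/tmp/tfdbg2_dump",                # assumed dump directory
    tensor_debug_mode="FULL_TENSOR",   # or "NO_TENSOR" to skip tensor values
    circular_buffer_size=-1)           # <= 0 disables the ring buffer

# ... build and fit a model here; executed ops are recorded as debug events ...

writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()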