Example #1
  def test_get_losses(self):

    class MyLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a',
                                   (1, 1),
                                   'float32',
                                   trainable=False)
        self.b = self.add_variable('b',
                                   (1, 1),
                                   'float32',
                                   trainable=False)
        self.add_loss(math_ops.reduce_sum(self.a))
        self.built = True

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs),
                      inputs=True)
        return inputs + 1

    x1 = input_layer_lib.Input(shape=(1,))
    layer = MyLayer()
    _ = layer.apply(x1)

    self.assertEqual(len(layer.losses), 2)
    self.assertEqual(len(layer.get_losses_for(x1)), 1)
    self.assertEqual(len(layer.get_losses_for(None)), 1)

    x2 = input_layer_lib.Input(shape=(1,))
    y2 = layer.apply(x2)

    self.assertEqual(len(layer.losses), 3)
    self.assertEqual(len(layer.get_losses_for(x1)), 1)
    self.assertEqual(len(layer.get_losses_for(x2)), 1)
    self.assertEqual(len(layer.get_losses_for(None)), 1)

    network = network_lib.Network(x2, y2)
    self.assertEqual(len(network.losses), 2)
    self.assertEqual(len(network.get_losses_for(x1)), 0)
    self.assertEqual(len(network.get_losses_for(x2)), 1)
    self.assertEqual(len(network.get_losses_for(None)), 1)

    x3 = input_layer_lib.Input(shape=(1,))
    _ = layer.apply(x3)
    self.assertEqual(len(network.losses), 2)

    x4 = input_layer_lib.Input(shape=(1,))
    _ = network(x4)
    self.assertEqual(len(network.losses), 3)
    self.assertEqual(len(network.get_losses_for(x2)), 1)
    self.assertEqual(len(network.get_losses_for(x4)), 1)
    self.assertEqual(len(network.get_losses_for(None)), 1)

    network.add_loss(math_ops.reduce_sum(layer.a))
    self.assertEqual(len(network.losses), 4)
    self.assertEqual(len(network.get_losses_for(None)), 2)

    network.add_loss(math_ops.reduce_sum(x4), inputs=True)
    self.assertEqual(len(network.losses), 5)
    self.assertEqual(len(network.get_losses_for(x4)), 2)
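The test above exercises the split between unconditional losses (added in `build`, independent of any input) and conditional losses (added in `call` and attributed to the inputs of that call). Below is a minimal sketch of the same pattern against the public `tf.keras` API; it assumes TensorFlow 2.x eager semantics, where an input-independent loss must be passed as a zero-argument callable.

import tensorflow as tf

class RegularizedLayer(tf.keras.layers.Layer):

  def build(self, input_shape):
    self.w = self.add_weight('w', shape=(1, 1))
    # Unconditional loss: independent of the inputs, so passed as a callable.
    self.add_loss(lambda: tf.reduce_sum(self.w))

  def call(self, inputs):
    # Conditional loss: computed from `inputs`, re-created on every call.
    self.add_loss(tf.reduce_mean(inputs))
    return inputs + 1

layer = RegularizedLayer()
_ = layer(tf.ones((2, 1)))
assert len(layer.losses) == 2  # one unconditional + one conditional loss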
Example #2
  def test_get_updates(self):

    class MyLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a',
                                   (1, 1),
                                   'float32',
                                   trainable=False)
        self.b = self.add_variable('b',
                                   (1, 1),
                                   'float32',
                                   trainable=False)
        self.add_update(state_ops.assign_add(self.a, [[1.]],
                                             name='unconditional_update'))
        self.built = True

      def call(self, inputs):
        self.add_update(state_ops.assign_add(self.b, inputs,
                                             name='conditional_update'),
                        inputs=True)
        return inputs + 1

    x1 = input_layer_lib.Input(shape=(1,))
    layer = MyLayer()
    _ = layer.apply(x1)

    self.assertEqual(len(layer.updates), 2)
    self.assertEqual(len(layer.get_updates_for(x1)), 1)
    self.assertEqual(len(layer.get_updates_for(None)), 1)

    x2 = input_layer_lib.Input(shape=(1,))
    y2 = layer.apply(x2)

    self.assertEqual(len(layer.updates), 3)
    self.assertEqual(len(layer.get_updates_for(x1)), 1)
    self.assertEqual(len(layer.get_updates_for(x2)), 1)
    self.assertEqual(len(layer.get_updates_for(None)), 1)

    network = network_lib.Network(x2, y2)
    self.assertEqual(len(network.updates), 2)
    self.assertEqual(len(network.get_updates_for(x1)), 0)
    self.assertEqual(len(network.get_updates_for(x2)), 1)
    self.assertEqual(len(network.get_updates_for(None)), 1)

    x3 = input_layer_lib.Input(shape=(1,))
    _ = layer.apply(x3)
    self.assertEqual(len(network.updates), 2)

    x4 = input_layer_lib.Input(shape=(1,))
    _ = network(x4)
    self.assertEqual(len(network.updates), 3)
    self.assertEqual(len(network.get_updates_for(x2)), 1)
    self.assertEqual(len(network.get_updates_for(x4)), 1)
    self.assertEqual(len(network.get_updates_for(None)), 1)

    network.add_update(state_ops.assign_add(layer.a, [[1]]))
    self.assertEqual(len(network.updates), 4)
    self.assertEqual(len(network.get_updates_for(None)), 2)

    network.add_update(state_ops.assign_add(layer.b, x4), inputs=True)
    self.assertEqual(len(network.updates), 5)
    self.assertEqual(len(network.get_updates_for(x4)), 2)
Example #3
    def add(self, layer):
        """Adds a layer instance on top of the layer stack.

        Arguments:
            layer: layer instance.

        Raises:
            TypeError: If `layer` is not a layer instance.
            ValueError: In case the `layer` argument does not
                know its input shape.
            ValueError: In case the `layer` argument has
                multiple output tensors, or is already connected
                somewhere else (forbidden in `Sequential` models).
        """
        # If we are passed a Keras tensor created by keras.Input(), we can extract
        # the input layer from its keras history and use that without any loss of
        # generality.
        if hasattr(layer, '_keras_history'):
            origin_layer = layer._keras_history[0]
            if isinstance(origin_layer, input_layer.InputLayer):
                layer = origin_layer

        if not isinstance(layer, base_layer.Layer):
            raise TypeError('The added layer must be '
                            'an instance of class Layer. '
                            'Found: ' + str(layer))

        tf_utils.assert_no_legacy_layers([layer])
        if not self._is_layer_name_unique(layer):
            raise ValueError(
                'All layers added to a Sequential model '
                'should have unique names. Name "%s" is already the name'
                ' of a layer in this model. Update the `name` argument '
                'to pass a unique name.' % (layer.name, ))

        self.built = False
        set_inputs = False
        if not self._layers:
            if isinstance(layer, input_layer.InputLayer):
                # Case where the user passes an Input or InputLayer layer via `add`.
                set_inputs = True
            else:
                batch_shape, dtype = training_utils.get_input_shape_and_dtype(
                    layer)
                if batch_shape:
                    # Instantiate an input layer.
                    x = input_layer.Input(batch_shape=batch_shape,
                                          dtype=dtype,
                                          name=layer.name + '_input')
                    # This will build the current layer
                    # and create the node connecting the current layer
                    # to the input layer we just created.
                    layer(x)
                    set_inputs = True

            if set_inputs:
                outputs = nest.flatten(layer._inbound_nodes[-1].outputs)
                if len(outputs) != 1:
                    raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
                self.outputs = outputs
                self.inputs = layer_utils.get_source_inputs(self.outputs[0])
                self.built = True
                self._has_explicit_input_shape = True

        elif self.outputs:
            # If the model is being built continuously on top of an input layer:
            # refresh its output.
            output_tensor = layer(self.outputs[0])
            if len(nest.flatten(output_tensor)) != 1:
                raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
            self.outputs = [output_tensor]
            self.built = True

        if set_inputs or self._graph_initialized:
            self._init_graph_network(self.inputs, self.outputs)
            self._graph_initialized = True
        else:
            self._layers.append(layer)
            self._handle_deferred_layer_dependencies([layer])

        self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(
            layer.call)
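As a usage-level illustration of the branching above (assuming the standard `tf.keras` public API in TF 2.x): when the first layer carries no input shape, the model stays deferred and unbuilt; an explicit `Input` lets `add` build the graph network immediately.

import tensorflow as tf

deferred = tf.keras.Sequential()
deferred.add(tf.keras.layers.Dense(4))      # no input shape known: layer is deferred
assert not deferred.built

explicit = tf.keras.Sequential()
explicit.add(tf.keras.Input(shape=(8,)))    # Keras tensor: its InputLayer is extracted
explicit.add(tf.keras.layers.Dense(4))      # graph network is built right away
assert explicit.built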
Example #4
 def _build_graph_network_for_inferred_shape(self,
                                             input_shape,
                                             input_dtype=None):
     if input_shape is None or not self.layers:
         return
     if not tf2.enabled() or not ops.executing_eagerly_outside_functions():
         # This behavior is disabled in V1 or when eager execution is disabled.
         return
     if (not self._has_explicit_input_shape
             and not self._use_legacy_deferred_behavior):
         # Determine whether the input shape is novel, i.e. whether the model
         # should be rebuilt.
         input_shape = tuple(input_shape)
         if self._inferred_input_shape is None:
             new_shape = input_shape
         else:
             new_shape = relax_input_shape(self._inferred_input_shape,
                                           input_shape)
         if (new_shape is not None
                 and new_shape != self._inferred_input_shape):
             # A novel shape has been received: we need to rebuild the model.
             # In case we are inside a graph function, we step out of it.
             with ops.init_scope():
                 inputs = input_layer.Input(batch_shape=new_shape,
                                            dtype=input_dtype,
                                            name=self.layers[0].name +
                                            '_input')
                 layer_input = inputs
                 created_nodes = set()
                 for layer in self.layers:
                     # Clear nodes previously created via this method. This prevents
                     # node accumulation and ensures that e.g. `layer.output` is
                     # always connected to `model.inputs`
                     # (this is important e.g. for the feature extraction use case).
                     # We don't just do `layer._inbound_nodes = []` in order
                     # not to break shared layers added to Sequential models (which is
                     # technically illegal as per the `add()` docstring,
                     # but wasn't previously disabled).
                     clear_previously_created_nodes(layer,
                                                    self._created_nodes)
                     try:
                         # Create Functional API connection by calling the current layer
                         layer_output = layer(layer_input)
                     except:  # pylint:disable=bare-except
                         # Functional API calls may fail for a number of reasons:
                         # 1) The layer may be buggy. In this case it will be easier for
                         # the user to debug if we fail on the first call on concrete data,
                         # instead of our own call on a symbolic input.
                         # 2) The layer is dynamic (graph-incompatible) and hasn't
                         # overridden `compute_output_shape`. In this case, it is
                         # impossible to build a graph network.
                         # 3) The layer is otherwise incompatible with the Functional API
                         # (e.g. this is the case for some probabilistic layers that rely
                         # on hacks and that do not return tensors).
                         # In all these cases, we should avoid creating a graph network
                         # (or we simply can't).
                         self._use_legacy_deferred_behavior = True
                         return
                     if len(nest.flatten(layer_output)) != 1:
                         raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
                     # Keep track of nodes just created above
                     track_nodes_created_by_last_call(layer, created_nodes)
                     layer_input = layer_output
                     outputs = layer_output
                 self._created_nodes = created_nodes
                 try:
                     # Initialize a graph Network. This call will never fail for
                     # a stack of valid Keras layers.
                     # However some users have layers that are fundamentally incompatible
                     # with the Functional API, which do not return tensors. In this
                     # case, we fall back to the legacy deferred behavior.
                     # TODO(fchollet): consider raising here, as we should not be
                     # supporting such layers.
                     self._init_graph_network(inputs, outputs)
                     self._graph_initialized = True
                 except:  # pylint:disable=bare-except
                     self._use_legacy_deferred_behavior = True
             self._inferred_input_shape = new_shape
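The method above is what lets a deferred `Sequential` model become a graph network the first time it sees concrete data. A minimal sketch of the observable behavior (public `tf.keras` API, TF 2.x eager execution assumed):

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu'),
    tf.keras.layers.Dense(2),
])
assert not model.built           # no input shape known yet

_ = model(tf.ones((4, 16)))      # first call: batch shape (None, 16) is inferred
assert model.built               # a graph network was built from the inferred shape
assert model.inputs is not None  # feature-extraction style access is now possible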
Example #5
    def test_model_with_crossentropy_losses_channels_first(self):
        """Tests use of all crossentropy losses with `channels_first`.

        Tests `sparse_categorical_crossentropy`, `categorical_crossentropy`,
        and `binary_crossentropy`.
        Verifies that evaluate gives the same result with either `channels_first`
        or `channels_last` image_data_format.
        """
        def prepare_simple_model(input_tensor, loss_name, target):
            axis = 1 if K.image_data_format() == 'channels_first' else -1
            loss = None
            num_channels = None
            activation = None
            if loss_name == 'sparse_categorical_crossentropy':
                loss = lambda y_true, y_pred: K.sparse_categorical_crossentropy(  # pylint: disable=g-long-lambda
                    y_true,
                    y_pred,
                    axis=axis)
                num_channels = int(np.amax(target) + 1)
                activation = 'softmax'
            elif loss_name == 'categorical_crossentropy':
                loss = lambda y_true, y_pred: K.categorical_crossentropy(  # pylint: disable=g-long-lambda
                    y_true,
                    y_pred,
                    axis=axis)
                num_channels = target.shape[axis]
                activation = 'softmax'
            elif loss_name == 'binary_crossentropy':
                loss = lambda y_true, y_pred: K.binary_crossentropy(
                    y_true, y_pred)  # pylint: disable=unnecessary-lambda
                num_channels = target.shape[axis]
                activation = 'sigmoid'

            predictions = Conv2D(num_channels,
                                 1,
                                 activation=activation,
                                 kernel_initializer='ones',
                                 bias_initializer='ones')(input_tensor)
            simple_model = training.Model(inputs=input_tensor,
                                          outputs=predictions)
            simple_model.compile(optimizer='rmsprop', loss=loss)
            return simple_model

        if test.is_gpu_available(cuda_only=True):
            with testing_utils.use_gpu():
                losses_to_test = [
                    'sparse_categorical_crossentropy',
                    'categorical_crossentropy', 'binary_crossentropy'
                ]

                data_channels_first = np.array(
                    [[[[8., 7.1, 0.], [4.5, 2.6, 0.55], [0.9, 4.2, 11.2]]]],
                    dtype=np.float32)
                # Labels for testing 4-class sparse_categorical_crossentropy, 4-class
                # categorical_crossentropy, and 2-class binary_crossentropy:
                labels_channels_first = [np.array([[[[0, 1, 3], [2, 1, 0], [2, 2, 1]]]], dtype=np.float32),  # pylint: disable=line-too-long
                                         np.array([[[[0, 1, 0], [0, 1, 0], [0, 0, 0]],
                                                    [[1, 0, 0], [0, 0, 1], [0, 1, 0]],
                                                    [[0, 0, 0], [1, 0, 0], [0, 0, 1]],
                                                    [[0, 0, 1], [0, 0, 0], [1, 0, 0]]]], dtype=np.float32),  # pylint: disable=line-too-long
                                         np.array([[[[0, 1, 0], [0, 1, 0], [0, 0, 1]],
                                                    [[1, 0, 1], [1, 0, 1], [1, 1, 0]]]], dtype=np.float32)]  # pylint: disable=line-too-long
                # Compute one loss for each loss function in the list `losses_to_test`:
                loss_channels_last = [0., 0., 0.]
                loss_channels_first = [0., 0., 0.]

                old_data_format = K.image_data_format()

                # Evaluate a simple network with channels last, with all three loss
                # functions:
                K.set_image_data_format('channels_last')
                data = np.moveaxis(data_channels_first, 1, -1)
                for index, loss_function in enumerate(losses_to_test):
                    labels = np.moveaxis(labels_channels_first[index], 1, -1)
                    inputs = input_layer.Input(shape=(3, 3, 1))
                    model = prepare_simple_model(inputs, loss_function, labels)
                    loss_channels_last[index] = model.evaluate(x=data,
                                                               y=labels,
                                                               batch_size=1,
                                                               verbose=0)

                # Evaluate the same network with channels first, with all three loss
                # functions:
                K.set_image_data_format('channels_first')
                data = data_channels_first
                for index, loss_function in enumerate(losses_to_test):
                    labels = labels_channels_first[index]
                    inputs = input_layer.Input(shape=(1, 3, 3))
                    model = prepare_simple_model(inputs, loss_function, labels)
                    loss_channels_first[index] = model.evaluate(x=data,
                                                                y=labels,
                                                                batch_size=1,
                                                                verbose=0)

                K.set_image_data_format(old_data_format)

                np.testing.assert_allclose(
                    loss_channels_first,
                    loss_channels_last,
                    rtol=1e-06,
                    err_msg='{}{}'.format('Computed different losses for ',
                                          'channels_first and channels_last'))
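The heart of this test is the axis bookkeeping: the crossentropy backend functions take the class axis (1 for `channels_first`, -1 for `channels_last`), and the data and labels are transposed between the two layouts. A small NumPy-only sketch of that transposition:

import numpy as np

x_first = np.zeros((1, 4, 3, 3), dtype=np.float32)  # channels_first: (N, C, H, W)
x_last = np.moveaxis(x_first, 1, -1)                # channels_last:  (N, H, W, C)
assert x_last.shape == (1, 3, 3, 4)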
Example #6
    def add(self, layer):
        """Adds a layer instance on top of the layer stack.

        Arguments:
            layer: layer instance.

        Raises:
            TypeError: If `layer` is not a layer instance.
            ValueError: In case the `layer` argument does not
                know its input shape.
            ValueError: In case the `layer` argument has
                multiple output tensors, or is already connected
                somewhere else (forbidden in `Sequential` models).
        """
        # If we are passed a Keras tensor created by keras.Input(), we can extract
        # the input layer from its keras history and use that without any loss of
        # generality.
        if hasattr(layer, '_keras_history'):
            origin_layer = layer._keras_history[0]
            if isinstance(origin_layer, input_layer.InputLayer):
                layer = origin_layer

        if not isinstance(
                layer, (base_layer.Layer, legacy_base_layer.LegacyBaseLayer)):
            raise TypeError('The added layer must be '
                            'an instance of class Layer. '
                            'Found: ' + str(layer))

        tf_utils.assert_no_legacy_layers([layer])

        # This allows the added layer to broadcast mutations to the current
        # layer, which is necessary to ensure cache correctness.
        layer._attribute_sentinel.add_parent(self._attribute_sentinel)

        self.built = False
        set_inputs = False
        if not self._layers:
            if isinstance(layer, input_layer.InputLayer):
                # Corner case where the user passes an InputLayer layer via `add`.
                assert len(
                    nest.flatten(layer._inbound_nodes[-1].output_tensors)) == 1
                set_inputs = True
            else:
                batch_shape, dtype = training_utils.get_input_shape_and_dtype(
                    layer)
                if batch_shape:
                    # Instantiate an input layer.
                    x = input_layer.Input(batch_shape=batch_shape,
                                          dtype=dtype,
                                          name=layer.name + '_input')
                    # This will build the current layer
                    # and create the node connecting the current layer
                    # to the input layer we just created.
                    layer(x)
                    set_inputs = True

            if set_inputs:
                # If an input layer (placeholder) is available.
                if len(nest.flatten(
                        layer._inbound_nodes[-1].output_tensors)) != 1:
                    raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
                self.outputs = [
                    nest.flatten(layer._inbound_nodes[-1].output_tensors)[0]
                ]
                self.inputs = layer_utils.get_source_inputs(self.outputs[0])

        elif self.outputs:
            # If the model is being built continuously on top of an input layer:
            # refresh its output.
            output_tensor = layer(self.outputs[0])
            if len(nest.flatten(output_tensor)) != 1:
                raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
            self.outputs = [output_tensor]

        if self.outputs:
            # True if set_inputs or self._is_graph_network or if adding a layer
            # to an already built deferred seq model.
            self.built = True

        if set_inputs or self._is_graph_network:
            self._init_graph_network(self.inputs, self.outputs, name=self.name)
        else:
            self._layers.append(layer)
            self._handle_deferred_layer_dependencies([layer])

        self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(
            layer.call)
        # Different Model types add to `._layers` in different ways, so for safety
        # we do a cache invalidation to make sure the changes are reflected.
        self._attribute_sentinel.invalidate_all()
Example #7
    def add(self, layer):
        """Adds a layer instance on top of the layer stack.

        Arguments:
            layer: layer instance.

        Raises:
            TypeError: If `layer` is not a layer instance.
            ValueError: In case the `layer` argument does not
                know its input shape.
            ValueError: In case the `layer` argument has
                multiple output tensors, or is already connected
                somewhere else (forbidden in `Sequential` models).
        """
        # If we are passed a Keras tensor created by keras.Input(), we can extract
        # the input layer from its keras history and use that without any loss of
        # generality.
        if hasattr(layer, '_keras_history'):
            origin_layer = layer._keras_history[0]
            if isinstance(origin_layer, input_layer.InputLayer):
                layer = origin_layer

        if not isinstance(layer, base_layer.Layer):
            raise TypeError('The added layer must be '
                            'an instance of class Layer. '
                            'Found: ' + str(layer))

        tf_utils.assert_no_legacy_layers([layer])

        self.built = False
        set_inputs = False
        if not self._layers:
            if isinstance(layer, input_layer.InputLayer):
                # Corner case where the user passes an InputLayer layer via `add`.
                assert len(
                    nest.flatten(layer._inbound_nodes[-1].output_tensors)) == 1
                set_inputs = True
            else:
                batch_shape, dtype = training_utils.get_input_shape_and_dtype(
                    layer)
                if batch_shape:
                    # Instantiate an input layer.
                    x = input_layer.Input(batch_shape=batch_shape,
                                          dtype=dtype,
                                          name=layer.name + '_input')
                    # This will build the current layer
                    # and create the node connecting the current layer
                    # to the input layer we just created.
                    layer(x)
                    set_inputs = True

            if set_inputs:
                # If an input layer (placeholder) is available.
                if len(nest.flatten(
                        layer._inbound_nodes[-1].output_tensors)) != 1:
                    raise ValueError('All layers in a Sequential model '
                                     'should have a single output tensor. '
                                     'For multi-output layers, '
                                     'use the functional API.')
                self.outputs = [
                    nest.flatten(layer._inbound_nodes[-1].output_tensors)[0]
                ]
                self.inputs = layer_utils.get_source_inputs(self.outputs[0])

        elif self.outputs:
            # If the model is being built continuously on top of an input layer:
            # refresh its output.
            output_tensor = layer(self.outputs[0])
            if len(nest.flatten(output_tensor)) != 1:
                raise TypeError('All layers in a Sequential model '
                                'should have a single output tensor. '
                                'For multi-output layers, '
                                'use the functional API.')
            self.outputs = [output_tensor]

        if self.outputs:
            # True if set_inputs or self._is_graph_network or if adding a layer
            # to an already built deferred seq model.
            self.built = True

        if set_inputs or self._is_graph_network:
            self._init_graph_network(self.inputs, self.outputs, name=self.name)
        else:
            self._layers.append(layer)
        if self._layers:
            self._track_layers(self._layers)

        self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(
            layer.call)
Example #8
    def test_TensorBoard_multi_input_output(self):
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

        with ops.Graph().as_default(), self.cached_session():
            filepath = os.path.join(tmpdir, 'logs')

            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)

            def data_generator(train):
                if train:
                    max_batch_index = len(x_train) // BATCH_SIZE
                else:
                    max_batch_index = len(x_test) // BATCH_SIZE
                i = 0
                while 1:
                    if train:
                        # simulate multi-input/output models
                        yield ([x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2,
                               [y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2)
                    else:
                        yield ([x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2,
                               [y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2)
                    i += 1
                    i %= max_batch_index

            inp1 = input_layer.Input((INPUT_DIM, ))
            inp2 = input_layer.Input((INPUT_DIM, ))
            inp = layers.add([inp1, inp2])
            hidden = layers.Dense(2, activation='relu')(inp)
            hidden = layers.Dropout(0.1)(hidden)
            output1 = layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
            output2 = layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
            model = training.Model([inp1, inp2], [output1, output2])
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])

            # we must generate new callbacks for each test, as they aren't stateless
            def callbacks_factory(histogram_freq):
                return [
                    callbacks_v1.TensorBoard(log_dir=filepath,
                                             histogram_freq=histogram_freq,
                                             write_images=True,
                                             write_grads=True,
                                             batch_size=5)
                ]

            # fit without validation data
            model.fit([x_train] * 2, [y_train] * 2,
                      batch_size=BATCH_SIZE,
                      callbacks=callbacks_factory(histogram_freq=0),
                      epochs=3)

            # fit with validation data and accuracy
            model.fit([x_train] * 2, [y_train] * 2,
                      batch_size=BATCH_SIZE,
                      validation_data=([x_test] * 2, [y_test] * 2),
                      callbacks=callbacks_factory(histogram_freq=1),
                      epochs=2)

            # fit generator without validation data
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                callbacks=callbacks_factory(histogram_freq=0))

            # fit generator with validation data and accuracy
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                validation_data=([x_test] * 2, [y_test] * 2),
                                callbacks=callbacks_factory(histogram_freq=1))
            assert os.path.isdir(filepath)
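The `callbacks_factory` helper above exists because TensorBoard callbacks carry internal state (file writers, step counters) and should not be reused across `fit` calls. A minimal sketch of the same pattern with the current public callback (assuming `tf.keras.callbacks.TensorBoard`; the legacy-only `write_grads` and `batch_size` options used above are omitted, since the v2 callback does not support them):

import tensorflow as tf

def make_callbacks(log_dir, histogram_freq):
    # Return a fresh callback per training run instead of reusing one instance.
    return [tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                           histogram_freq=histogram_freq,
                                           write_images=True)]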
Example #9
 def test_linear_model_with_int_input(self):
     inp = input_layer.Input(shape=(1, ), dtype=dtypes.int32)
     with self.assertRaisesRegexp(TypeError, 'Unable to build'):
         linear.LinearModel()(inp)
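For contrast, a float-typed input is expected to build without error. A minimal sketch (assuming `tf.keras.experimental.LinearModel` is available in the TF 2.x release these tests target):

import tensorflow as tf

inp = tf.keras.Input(shape=(1,), dtype=tf.float32)
out = tf.keras.experimental.LinearModel()(inp)  # floating-point dtypes build fine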