def testBasicNetwork(self):
    """Builds a minimal Input->Dense Network and checks its attributes."""
    inp = input_layer_lib.Input(shape=(32,))
    dense_layer = keras.layers.Dense(2)
    out = dense_layer(inp)
    net = network_lib.Network(inp, out, name='dense_network')

    # Name, layer list, and weight bookkeeping all delegate to the
    # single Dense layer.
    self.assertEqual(net.name, 'dense_network')
    self.assertEqual(len(net.layers), 2)  # InputLayer + Dense
    self.assertEqual(net.layers[1], dense_layer)
    self.assertEqual(net.weights, dense_layer.weights)
    self.assertEqual(net.trainable_weights, dense_layer.trainable_weights)
    self.assertEqual(net.non_trainable_weights,
                     dense_layer.non_trainable_weights)

    # The network itself is callable on a fresh symbolic Input...
    sym_in = input_layer_lib.Input(shape=(32,))
    sym_out = net(sym_in)
    self.assertEqual(sym_out.get_shape().as_list(), [None, 2])

    # ...and on a plain placeholder tensor.
    ph_in = array_ops.placeholder(dtype='float32', shape=(None, 32))
    ph_out = net(ph_in)
    self.assertEqual(ph_out.get_shape().as_list(), [None, 2])

    # Freezing the network reclassifies every weight as non-trainable.
    net.trainable = False
    self.assertEqual(net.weights, dense_layer.weights)
    self.assertEqual(net.trainable_weights, [])
    self.assertEqual(net.non_trainable_weights,
                     dense_layer.trainable_weights +
                     dense_layer.non_trainable_weights)
    def testMaskingSingleInput(self):
        """Checks mask consumption and propagation for a masking layer."""

        class MaskedLayer(keras.layers.Layer):
            """Applies an incoming mask and always emits an all-ones mask."""

            def call(self, inputs, mask=None):
                return inputs if mask is None else inputs * mask

            def compute_mask(self, inputs, mask=None):
                return array_ops.ones_like(inputs)

        if context.executing_eagerly():
            # Eager path: attach a mask directly to the tensor, then verify
            # the layer both applies it and forwards a fresh ones mask.
            values = constant_op.constant([2] * 32)
            mask = constant_op.constant([0, 1] * 16)
            values._keras_mask = mask
            result = MaskedLayer().apply(values)
            self.assertTrue(hasattr(result, '_keras_mask'))
            self.assertAllEqual(self.evaluate(array_ops.ones_like(mask)),
                                self.evaluate(getattr(result, '_keras_mask')))
            self.assertAllEqual(self.evaluate(values * mask),
                                self.evaluate(result))
        else:
            # Graph path: wrap the layer in a Network and confirm it is
            # callable on symbolic Inputs as well as plain placeholders.
            inp = input_layer_lib.Input(shape=(32, ))
            out = MaskedLayer()(inp)  # pylint: disable=not-callable
            net = network_lib.Network(inp, out)

            sym_in = input_layer_lib.Input(shape=(32, ))
            sym_out = net(sym_in)
            self.assertEqual(sym_out.get_shape().as_list(), [None, 32])

            ph_in = array_ops.placeholder(dtype='float32', shape=(None, 32))
            ph_out = net(ph_in)
            self.assertEqual(ph_out.get_shape().as_list(), [None, 32])
    def test_get_updates(self):
        """Verifies update bookkeeping on a layer and a wrapping Network.

        Exercises both unconditional updates (created in `build`, scoped to
        `None`) and conditional updates (created per call with `inputs=True`,
        scoped to that call's inputs), and checks how a Network filters
        updates down to those reachable from its own inputs.
        """
        class MyLayer(keras.layers.Layer):
            # Registers one unconditional update in `build` and one
            # conditional update on every call.
            def build(self, input_shape):
                self.a = self.add_variable('a', (1, 1),
                                           'float32',
                                           trainable=False)
                self.b = self.add_variable('b', (1, 1),
                                           'float32',
                                           trainable=False)
                # No `inputs=` argument: this update is unconditional and
                # is tracked under `None`.
                self.add_update(
                    state_ops.assign_add(self.a, [[1.]],
                                         name='unconditional_update'))
                self.built = True

            def call(self, inputs):
                # `inputs=True` scopes this update to the current call's
                # inputs, so each apply() adds a new conditional update.
                self.add_update(state_ops.assign_add(
                    self.b, inputs, name='conditional_update'),
                                inputs=True)
                return inputs + 1

        x1 = input_layer_lib.Input(shape=(1, ))
        layer = MyLayer()
        _ = layer.apply(x1)

        # One unconditional (build) + one conditional (the x1 call).
        self.assertEqual(len(layer.updates), 2)
        self.assertEqual(len(layer.get_updates_for(x1)), 1)
        self.assertEqual(len(layer.get_updates_for(None)), 1)

        x2 = input_layer_lib.Input(shape=(1, ))
        y2 = layer.apply(x2)

        # A second call adds one more conditional update, scoped to x2.
        self.assertEqual(len(layer.updates), 3)
        self.assertEqual(len(layer.get_updates_for(x1)), 1)
        self.assertEqual(len(layer.get_updates_for(x2)), 1)
        self.assertEqual(len(layer.get_updates_for(None)), 1)

        # A Network built on x2 only sees updates reachable from x2,
        # plus the unconditional one; the x1 update is filtered out.
        network = network_lib.Network(x2, y2)
        self.assertEqual(len(network.updates), 2)
        self.assertEqual(len(network.get_updates_for(x1)), 0)
        self.assertEqual(len(network.get_updates_for(x2)), 1)
        self.assertEqual(len(network.get_updates_for(None)), 1)

        # Applying the layer outside the network does not affect the
        # network's update count.
        x3 = input_layer_lib.Input(shape=(1, ))
        _ = layer.apply(x3)
        self.assertEqual(len(network.updates), 2)

        # Calling the network itself on a new input adds a conditional
        # update scoped to that input.
        x4 = input_layer_lib.Input(shape=(1, ))
        _ = network(x4)
        self.assertEqual(len(network.updates), 3)
        self.assertEqual(len(network.get_updates_for(x2)), 1)
        self.assertEqual(len(network.get_updates_for(x4)), 1)
        self.assertEqual(len(network.get_updates_for(None)), 1)

        # Updates can also be attached to the network directly:
        # unconditional first...
        network.add_update(state_ops.assign_add(layer.a, [[1]]))
        self.assertEqual(len(network.updates), 4)
        self.assertEqual(len(network.get_updates_for(None)), 2)

        # ...then conditional on x4.
        network.add_update(state_ops.assign_add(layer.b, x4), inputs=True)
        self.assertEqual(len(network.updates), 5)
        self.assertEqual(len(network.get_updates_for(x4)), 2)
    def build_discriminator(self, isnetwork=False):
        """Builds the discriminator over a pair of images and a mask.

        Args:
            isnetwork: When True, wrap the graph in a `network.Network`;
                otherwise return a `tf.keras.Model`.

        Returns:
            A model/network mapping [img_A, img_B, mask] to a validity map.
        """
        def d_layer(layer_input, filters, f_size=4, bn=True):
            """Discriminator layer"""
            features = Conv2D(filters, kernel_size=f_size, strides=2,
                              padding='same')(layer_input)
            features = LeakyReLU(alpha=0.2)(features)
            return BatchNormalization(momentum=0.8)(features) if bn else features

        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)
        mask = Input(shape=self.mask_shape)

        # Concatenate images and conditioning images by channels to produce input
        combined_imgs = Concatenate(axis=-1)([img_A, img_B, mask])

        # Four strided blocks with doubling filter counts; no batch norm
        # on the first block.
        features = d_layer(combined_imgs, self.df, bn=False)
        for mult in (2, 4, 8):
            features = d_layer(features, self.df * mult)

        validity = Conv2D(1,
                          kernel_size=4,
                          strides=1,
                          padding='same',
                          activation="sigmoid")(features)

        if isnetwork:
            return network.Network([img_A, img_B, mask], validity)
        return tf.keras.Model([img_A, img_B, mask], validity)
# Exemple #5
# 0
  def testMultiIONetworkbuilding(self):
    """Builds a two-input, two-output Network and calls it eagerly."""
    input_a = input_layer_lib.Input(shape=(32,))
    input_b = input_layer_lib.Input(shape=(16,))
    dense_out = keras.layers.Dense(16)(input_a)

    class AddLayer(keras.layers.Layer):
      """Elementwise sum of a pair of equally-shaped inputs."""

      def call(self, inputs):
        return inputs[0] + inputs[1]

      def compute_output_shape(self, input_shape):
        return input_shape[0]

    merged = AddLayer()([dense_out, input_b])  # pylint: disable=not-callable
    merged = keras.layers.Dense(2)(merged)

    network = network_lib.Network([input_a, input_b], [dense_out, merged])
    if context.executing_eagerly():
      # Calling on concrete tensors should yield one output per head,
      # with the expected batch-of-10 shapes.
      a_val = constant_op.constant(
          np.random.random((10, 32)).astype('float32'))
      b_val = constant_op.constant(
          np.random.random((10, 16)).astype('float32'))
      outputs = network([a_val, b_val])
      self.assertEqual(len(outputs), 2)
      self.assertEqual(outputs[0].shape.as_list(), [10, 16])
      self.assertEqual(outputs[1].shape.as_list(), [10, 2])
    def test_get_losses(self):
        """Verifies loss bookkeeping on a layer and a wrapping Network.

        Mirrors the update-tracking test: unconditional losses (created in
        `build`, scoped to `None`) versus conditional losses (created per
        call with `inputs=True`), and how a Network filters losses down to
        those reachable from its own inputs.
        """
        class MyLayer(keras.layers.Layer):
            # Registers one unconditional loss in `build` and one
            # conditional loss on every call.
            def build(self, input_shape):
                self.a = self.add_variable('a', (1, 1),
                                           'float32',
                                           trainable=False)
                self.b = self.add_variable('b', (1, 1),
                                           'float32',
                                           trainable=False)
                # No `inputs=` argument: unconditional, tracked under `None`.
                self.add_loss(math_ops.reduce_sum(self.a))
                self.built = True

            def call(self, inputs):
                # `inputs=True` scopes the loss to the current call's inputs.
                self.add_loss(math_ops.reduce_sum(inputs), inputs=True)
                return inputs + 1

        x1 = input_layer_lib.Input(shape=(1, ))
        layer = MyLayer()
        _ = layer.apply(x1)

        # One unconditional (build) + one conditional (the x1 call).
        self.assertEqual(len(layer.losses), 2)
        self.assertEqual(len(layer.get_losses_for(x1)), 1)
        self.assertEqual(len(layer.get_losses_for(None)), 1)

        x2 = input_layer_lib.Input(shape=(1, ))
        y2 = layer.apply(x2)

        # A second call adds one more conditional loss, scoped to x2.
        self.assertEqual(len(layer.losses), 3)
        self.assertEqual(len(layer.get_losses_for(x1)), 1)
        self.assertEqual(len(layer.get_losses_for(x2)), 1)
        self.assertEqual(len(layer.get_losses_for(None)), 1)

        # A Network built on x2 only sees losses reachable from x2, plus
        # the unconditional one; the x1 loss is filtered out.
        network = network_lib.Network(x2, y2)
        self.assertEqual(len(network.losses), 2)
        self.assertEqual(len(network.get_losses_for(x1)), 0)
        self.assertEqual(len(network.get_losses_for(x2)), 1)
        self.assertEqual(len(network.get_losses_for(None)), 1)

        # Applying the layer outside the network leaves the network's
        # loss count unchanged.
        x3 = input_layer_lib.Input(shape=(1, ))
        _ = layer.apply(x3)
        self.assertEqual(len(network.losses), 2)

        # Calling the network itself on a new input adds a conditional
        # loss scoped to that input.
        x4 = input_layer_lib.Input(shape=(1, ))
        _ = network(x4)
        self.assertEqual(len(network.losses), 3)
        self.assertEqual(len(network.get_losses_for(x2)), 1)
        self.assertEqual(len(network.get_losses_for(x4)), 1)
        self.assertEqual(len(network.get_losses_for(None)), 1)

        # Losses can also be attached to the network directly:
        # unconditional first...
        network.add_loss(math_ops.reduce_sum(layer.a))
        self.assertEqual(len(network.losses), 4)
        self.assertEqual(len(network.get_losses_for(None)), 2)

        # ...then conditional on x4.
        network.add_loss(math_ops.reduce_sum(x4), inputs=True)
        self.assertEqual(len(network.losses), 5)
        self.assertEqual(len(network.get_losses_for(x4)), 2)
  def testSimpleNetworkBuilding(self):
    """Chains two Dense layers into a Network and runs it on eager tensors."""
    inputs = input_layer_lib.Input(shape=(32,))
    if context.executing_eagerly():
      # Symbolic inputs expose dtype and a batch-agnostic static shape.
      self.assertEqual(inputs.dtype.name, 'float32')
      self.assertEqual(inputs.shape.as_list(), [None, 32])

    hidden = keras.layers.Dense(2)(inputs)
    if context.executing_eagerly():
      self.assertEqual(hidden.dtype.name, 'float32')
      self.assertEqual(hidden.shape.as_list(), [None, 2])

    outputs = keras.layers.Dense(4)(hidden)
    network = network_lib.Network(inputs, outputs)
    self.assertIsInstance(network, network_lib.Network)

    if context.executing_eagerly():
      # It should be possible to call such a network on EagerTensors.
      eager_in = constant_op.constant(
          np.random.random((10, 32)).astype('float32'))
      eager_out = network(eager_in)
      self.assertEqual(eager_out.shape.as_list(), [10, 4])