Example #1
0
    def build(self, inputs):
        """Derive the level range and depths from the input specs, then build
        the per-level resample and route-process layers for the decoder path.
        """
        levels = [int(k) for k in inputs.keys()]
        self._min_level = min(levels)
        self._max_level = max(levels)
        # Channel depth of the smallest level drives the raw depth schedule.
        self._min_depth = inputs[str(self._min_level)][-1]
        self._depths = self.get_raw_depths(self._min_depth)

        self.resamples = {}
        self.preprocessors = {}
        self.outputs = {}

        # Walk levels bottom-up; depths are aligned with this ordering.
        for level, depth in zip(range(self._min_level, self._max_level + 1),
                                self._depths):
            key = str(level)
            if level == self._min_level:
                # Lowest level: optionally longer route with an SPP block,
                # and no resample layer is required.
                reps = self._max_level_process_len + (2 if self._embed_spp else 0)
                self.preprocessors[key] = nn_blocks.DarkRouteProcess(
                    filters=2 * depth,
                    repetitions=reps,
                    insert_spp=self._embed_spp,
                    **self._base_config)
            else:
                self.resamples[key] = nn_blocks.RouteMerge(
                    filters=depth, downsample=True, **self._base_config)
                self.preprocessors[key] = nn_blocks.DarkRouteProcess(
                    filters=2 * depth,
                    repetitions=self._path_process_len,
                    insert_spp=False,
                    **self._base_config)
 def test_pass_through(self, width, height, filters, repetitions, spp):
   """A DarkRouteProcess call returns two tensors, each shaped like the input."""
   inputs = ks.Input(shape=(width, height, filters))
   layer = nn_blocks.DarkRouteProcess(
       filters=filters, repetitions=repetitions, insert_spp=spp)
   outputs = layer(inputs)

   expected_shape = [None, width, height, filters]
   self.assertEqual(len(outputs), 2, msg="len(outx) != 2")
   self.assertAllEqual(outputs[1].shape.as_list(), expected_shape)
   # The layer is expected to preserve an even channel count.
   self.assertAllEqual(
       filters % 2,
       0,
       msg="Output of a DarkRouteProcess layer has an odd number of filters")
   self.assertAllEqual(outputs[0].shape.as_list(), expected_shape)
Example #3
0
    def build(self, inputs):
        """Use the config dictionary to generate all important attributes for
        head construction: level range, depth schedule, and the per-level
        resample, route-process, and tail layers of the FPN.
        """
        levels = [int(k) for k in inputs.keys()]
        self._min_level = min(levels)
        self._max_level = max(levels)
        # Channel depth of the smallest level seeds the raw depth schedule.
        self._min_depth = inputs[str(self._min_level)][-1]
        self._depths = self.get_raw_depths(self._min_depth)

        self.resamples = {}
        self.preprocessors = {}
        self.tails = {}

        # Walk the pyramid top-down (max level first); depths follow that order.
        for level, depth in zip(
                reversed(range(self._min_level, self._max_level + 1)),
                self._depths):
            key = str(level)
            if level == self._max_level:
                # Top level: longer route with an SPP block, no resample needed.
                self.preprocessors[key] = nn_blocks.DarkRouteProcess(
                    filters=depth,
                    repetitions=self._fpn_path_len + 2,
                    insert_spp=True,
                    **self._base_config)
            else:
                self.resamples[key] = nn_blocks.RouteMerge(
                    filters=depth // 2, **self._base_config)
                self.preprocessors[key] = nn_blocks.DarkRouteProcess(
                    filters=depth,
                    repetitions=self._fpn_path_len,
                    insert_spp=False,
                    **self._base_config)
            # Only the lowest level's tail skips upsampling.
            self.tails[key] = FPNTail(
                filters=depth,
                upsample=level != self._min_level,
                **self._base_config)
        return
  def test_gradient_pass_though(self, width, height, filters, repetitions, spp):
    """Gradients from both outputs reach every trainable variable."""
    loss_fn = ks.losses.MeanSquaredError()
    optimizer = ks.optimizers.SGD()
    layer = nn_blocks.DarkRouteProcess(
        filters=filters, repetitions=repetitions, insert_spp=spp)

    init = tf.random_normal_initializer()
    shape = (1, width, height, filters)
    x = tf.Variable(initial_value=init(shape=shape, dtype=tf.float32))
    y_0 = tf.Variable(initial_value=init(shape=shape, dtype=tf.float32))
    y_1 = tf.Variable(initial_value=init(shape=shape, dtype=tf.float32))

    with tf.GradientTape() as tape:
      x_hat_0, x_hat_1 = layer(x)
      losses = [loss_fn(x_hat_0, y_0), loss_fn(x_hat_1, y_1)]
    grad = tape.gradient(losses, layer.trainable_variables)
    optimizer.apply_gradients(zip(grad, layer.trainable_variables))

    # A None gradient would mean a variable is disconnected from the losses.
    self.assertNotIn(None, grad)
    return