def test_gradient_pass_though(self, width, height, filters, upsampling_size):
  """Verify gradients flow to every trainable variable of RouteMerge."""
  loss_fn = ks.losses.MeanSquaredError()
  sgd = ks.optimizers.SGD()
  merge = nn_blocks.RouteMerge(
      filters=filters, upsample=True, upsample_size=upsampling_size)

  initializer = tf.random_normal_initializer()
  up_w = width * upsampling_size[0]
  up_h = height * upsampling_size[1]
  x_conv = tf.Variable(
      initial_value=initializer(
          shape=(1, width, height, filters), dtype=tf.float32))
  x_route = tf.Variable(
      initial_value=initializer(
          shape=(1, up_w, up_h, filters), dtype=tf.float32))
  y = tf.Variable(
      initial_value=initializer(
          shape=(1, up_w, up_h, filters * 2), dtype=tf.float32))

  with tf.GradientTape() as tape:
    y_hat = merge([x_conv, x_route])
    total_loss = loss_fn(y_hat, y)
  gradients = tape.gradient(total_loss, merge.trainable_variables)
  sgd.apply_gradients(zip(gradients, merge.trainable_variables))

  # A None gradient would indicate a disconnected trainable variable.
  self.assertNotIn(None, gradients)
  return
def build(self, inputs):
  """Build per-level resample and route-process blocks from input specs.

  Args:
    inputs: dict mapping string level keys to input shape lists; the last
      shape entry of the minimum level determines the base depth.
  """
  levels = [int(k) for k in inputs.keys()]
  self._min_level = min(levels)
  self._max_level = max(levels)
  # Base channel depth is taken from the smallest level's channel dim.
  self._min_depth = inputs[str(self._min_level)][-1]
  self._depths = self.get_raw_depths(self._min_depth)

  self.resamples = {}
  self.preprocessors = {}
  self.outputs = {}

  for level, depth in zip(
      range(self._min_level, self._max_level + 1), self._depths):
    key = str(level)
    if level == self._min_level:
      # Lowest level: no incoming route to merge; optionally embeds SPP,
      # which adds two repetitions to the processing stack.
      self.preprocessors[key] = nn_blocks.DarkRouteProcess(
          filters=depth * 2,
          repetitions=self._max_level_process_len +
          (2 if self._embed_spp else 0),
          insert_spp=self._embed_spp,
          **self._base_config)
    else:
      # Higher levels downsample and merge the previous level's route.
      self.resamples[key] = nn_blocks.RouteMerge(
          filters=depth, downsample=True, **self._base_config)
      self.preprocessors[key] = nn_blocks.DarkRouteProcess(
          filters=depth * 2,
          repetitions=self._path_process_len,
          insert_spp=False,
          **self._base_config)
def test_pass_through(self, width, height, filters, upsampling_size):
  """Check RouteMerge upsampling yields the expected output shape."""
  up_w = width * upsampling_size[0]
  up_h = height * upsampling_size[1]
  x_conv = ks.Input(shape=(width, height, filters))
  x_route = ks.Input(shape=(up_w, up_h, filters))
  merge = nn_blocks.RouteMerge(
      filters=filters, upsample=True, upsample_size=upsampling_size)
  out = merge([x_conv, x_route])
  # Concatenation of the upsampled conv path and the route doubles channels.
  self.assertAllEqual(out.shape.as_list(), [None, up_w, up_h, filters * 2])
def build(self, inputs):
  """Construct the FPN's per-level resample, process, and tail blocks.

  Args:
    inputs: dict mapping string level keys to input shape lists; the last
      shape entry of the minimum level determines the base depth.
  """
  levels = [int(k) for k in inputs.keys()]
  self._min_level = min(levels)
  self._max_level = max(levels)
  self._min_depth = inputs[str(self._min_level)][-1]
  self._depths = self.get_raw_depths(self._min_depth)

  self.resamples = {}
  self.preprocessors = {}
  self.tails = {}

  # Walk the pyramid top-down; self._depths is assumed to be ordered to
  # match this reversed traversal.
  for level, depth in zip(
      reversed(range(self._min_level, self._max_level + 1)), self._depths):
    key = str(level)
    if level == self._max_level:
      # Top level: no upsampled route to merge; SPP is embedded here and
      # contributes two extra repetitions.
      self.preprocessors[key] = nn_blocks.DarkRouteProcess(
          filters=depth,
          repetitions=self._fpn_path_len + 2,
          insert_spp=True,
          **self._base_config)
    else:
      # Lower levels merge the route coming down from the level above.
      self.resamples[key] = nn_blocks.RouteMerge(
          filters=depth // 2, **self._base_config)
      self.preprocessors[key] = nn_blocks.DarkRouteProcess(
          filters=depth,
          repetitions=self._fpn_path_len,
          insert_spp=False,
          **self._base_config)
    # Every level except the lowest upsamples in its tail.
    self.tails[key] = FPNTail(
        filters=depth,
        upsample=level != self._min_level,
        **self._base_config)
  return