Example #1
    def build_label_encoder(self, input_label):
        with tf.variable_scope('LabelEncoder'):
            unet = UNet(input_label,
                        n_out=self.n_anatomical_masks,
                        is_training=self.is_training,
                        n_filters=16)
            unet_encoder = unet.build_encoder()
            unet_bottleneck = unet.build_bottleneck(unet_encoder)
            unet_decoder = unet.build_decoder(unet_bottleneck)
            coarse_output = unet.build_output(unet_decoder)

            soft_label_anatomy = tf.nn.softmax(coarse_output)

        with tf.variable_scope('RoundingLabelLayer'):
            hard_label_anatomy = rounding_layer(soft_label_anatomy)

        return soft_label_anatomy, hard_label_anatomy, unet
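The rounding_layer used above is not shown in these examples. A minimal sketch of what it could be, assuming a standard straight-through rounding op in TF 1.x (the function body here is an assumption, not the original implementation):

def rounding_layer(incoming):
    # hypothetical straight-through rounding: the forward pass rounds the soft
    # probabilities to {0, 1}, while the backward pass passes the gradient through unchanged
    return tf.stop_gradient(tf.round(incoming) - incoming) + incoming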
Example #2
    def build_anatomy_encoder(self, input_image):
        with tf.variable_scope('AnatomyEncoder'):
            unet = UNet(input_image,
                        n_out=self.n_anatomical_masks,
                        is_training=self.is_training,
                        n_filters=64)
            unet_encoder = unet.build_encoder()
            unet_bottleneck = unet.build_bottleneck(unet_encoder)
            unet_decoder = unet.build_decoder(unet_bottleneck)
            coarse_output = unet.build_output(unet_decoder)

            # apply softmax to scale the coarse_output channels into the range [0, 1]. This prevents the same anatomy
            # from being encoded twice by the model (one anatomy per channel).
            soft_anatomy = tf.nn.softmax(coarse_output)

        with tf.variable_scope('RoundingLayer'):
            hard_anatomy = rounding_layer(soft_anatomy)

        return soft_anatomy, hard_anatomy, unet
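A hedged usage sketch for the two encoders above (the model variable, placeholder names and the 128x128 single-channel shape are assumptions for illustration, not taken from the original code base):

# hypothetical placeholders; real shapes depend on the dataset
input_image = tf.placeholder(tf.float32, [None, 128, 128, 1], name='input_image')
input_label = tf.placeholder(tf.float32, [None, 128, 128, 1], name='input_label')

# model is assumed to be an instance of the class defining these methods
soft_anatomy, hard_anatomy, anatomy_unet = model.build_anatomy_encoder(input_image)
soft_label, hard_label, label_unet = model.build_label_encoder(input_label)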
Example #3
    def build(self,
              input_frames,
              input_times,
              delta_times,
              reuse=tf.AUTO_REUSE):
        """
        Build the model.
        :param input_frames: input frames
        :param input_times: time associated to the input frames
        :param delta_times: delta time to move in future
        :param reuse: variable scope reuse mode (tf.AUTO_REUSE by default)
        :return: prediction of the future frames at time: future_times = input_times + delta_times
        """
        with tf.variable_scope(self.name, reuse=reuse):

            # - - - - - - -
            # build only UNet encoder for now:
            unet = SmallUnet(incoming=input_frames,
                             n_out=self.n_channels,
                             n_filters=self.nf,
                             is_training=self.is_training)
            encoder = unet.build_encoder()

            # get latent code:
            latent_code = encoder[-2]  # this is the output layer of the encoder

            # - - - - - - -
            # condition with encoded time information (output of MLP #1) :
            latent_shape = latent_code.get_shape().as_list()
            n_fraction = 4
            time_shape = [
                -1, latent_shape[1], latent_shape[2],
                (latent_shape[3] // n_fraction)
            ]
            # `reduce` is `functools.reduce` (it must be imported explicitly in Python 3)
            n_out = reduce(lambda x, y: x * y, latent_shape[1:]) // n_fraction

            times = tf.concat(
                (tf.expand_dims(input_times, 1), tf.expand_dims(delta_times, 1)),
                axis=1)
            mlp_in = MLP(times,
                         128,
                         128,
                         n_out,
                         self.is_training,
                         k_prob=0.8,
                         name='MLP_in').build()
            time_code = tf.reshape(mlp_in, shape=time_shape)

            # define activations for the time and latent codes; they will be accessible via:
            # [str(op.name) for op in tf.get_default_graph().get_operations() if '_code' in op.name]
            time_code = tf.nn.sigmoid(time_code, name='time_code')
            latent_code = tf.nn.sigmoid(latent_code, name='latent_code')

            latent_code_with_time = tf.concat((latent_code, time_code),
                                              axis=-1)

            # - - - - - - -
            # build the rest of the UNet
            _encoder = [el for el in encoder]
            _encoder[-2] = latent_code_with_time
            code = unet.build_bottleneck(_encoder)
            decoder = unet.build_decoder(code)
            decoded_input = tf.nn.tanh(
                unet.build_output(decoder))  # output in range [-1, +1]
            # decoded_input = tf.nn.sigmoid(unet.build_output(decoder))   # output in range [0, +1]

            # - - - - - - -
            # add residual connection
            self.soft_output_frames = decoded_input + input_frames

            # output_frames = tf.nn.softmax(self.soft_output_frames)
            output_frames = self.soft_output_frames

            with tf.variable_scope('RoundingLayer'):
                self.hard_output_frames = rounding_layer(output_frames)

        return self
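A hedged sketch of how build() might be called (the placeholder shapes, single-channel frames and the predictor variable are assumptions for illustration):

# hypothetical inputs: a batch of frames plus their acquisition times and the
# time offsets to predict into the future
input_frames = tf.placeholder(tf.float32, [None, 64, 64, 1], name='input_frames')
input_times = tf.placeholder(tf.float32, [None], name='input_times')
delta_times = tf.placeholder(tf.float32, [None], name='delta_times')

# predictor is assumed to be an instance of the class defining build()
predictor = predictor.build(input_frames, input_times, delta_times)
soft_pred = predictor.soft_output_frames   # residual prediction before rounding
hard_pred = predictor.hard_output_frames   # output of the RoundingLayer

# the named activations can be listed as suggested in the comments above:
code_ops = [op.name for op in tf.get_default_graph().get_operations()
            if '_code' in op.name]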