Example #1
    def full(self, i0, x, l0_h2, l1_h2, l2_h2, fb_h2):
        """hGRU body.
        Take the recurrent h2 from a low level and imbue it with
        information from a high layer. This means treating the lower
        layer h2 as the X and the higher layer h2 as the recurrent state.
        This will serve as I/E from the high layer along with feedback
        kernels.
        """

        # Intermediate FF - h0
        idx = 0
        if self.adapation:
            eta2 = getattr(self, 'eta2_%s' % idx)
            e2 = tf.gather(eta2, i0, axis=-1)
            fb_h2_processed = fb_h2 * e2
        else:
            # Without adaptation, pass the feedback state through unchanged.
            fb_h2_processed = fb_h2
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
            processed_x = tf.nn.conv3d(input=tf.concat(
                [x, fb_h2_processed * x, fb_h2_processed], axis=4),
                                       filter=weights,
                                       strides=self.ff_conv_strides[idx],
                                       padding=self.padding)
            processed_x = tf.nn.bias_add(processed_x, bias)
            processed_x = self.ff_nl(processed_x)
        # if self.include_pooling:
        #     processed_x = max_pool3d(
        #         bottom=processed_x,
        #         k=self.ff_pool_dhw[idx],
        #         s=self.ff_pool_strides[idx],
        #         name='ff_pool_%s' % 0)
        if self.batch_norm:
            processed_x = tf.contrib.layers.batch_norm(
                inputs=processed_x,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                reuse=self.bn_reuse,
                is_training=self.train)
        for i in range(self.h_repeat):
            _, l0_h2 = self.hgru_ops(i0=i0,
                                     x=processed_x,
                                     h2=l0_h2,
                                     var_scope='hgru_%s' % idx,
                                     layer_idx=idx)

        # Intermediate FF - h1
        idx = 1
        if self.adapation:
            eta2 = getattr(self, 'eta2_%s' % idx)
            e2 = tf.gather(eta2, i0, axis=-1)
            l0_h2_processed = l0_h2 * e2
        else:
            # Without adaptation, pass the recurrent state through unchanged.
            l0_h2_processed = l0_h2
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
            processed_l0 = tf.nn.conv3d(input=tf.concat(
                [processed_x, l0_h2_processed * processed_x, l0_h2_processed],
                axis=4),
                                        filter=weights,
                                        strides=self.ff_conv_strides[idx],
                                        padding=self.padding)
            processed_l0 = tf.nn.bias_add(processed_l0, bias)
            processed_l0 = self.ff_nl(processed_l0)
        # Pool the preceding layer's drive
        if self.include_pooling:
            processed_l0 = max_pool3d(bottom=processed_l0,
                                      k=self.ff_pool_dhw[idx],
                                      s=self.ff_pool_strides[idx],
                                      name='ff_pool_%s' % idx)
        if self.batch_norm:
            processed_l0 = tf.contrib.layers.batch_norm(
                inputs=processed_l0,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                reuse=self.bn_reuse,
                is_training=self.train)
        for i in range(self.h_repeat):
            _, l1_h2 = self.hgru_ops(i0=i0,
                                     x=processed_l0,
                                     h2=l1_h2,
                                     var_scope='hgru_%s' % idx,
                                     layer_idx=idx)

        # Intermediate FF - h2
        idx = 2
        if self.adapation:
            eta2 = getattr(self, 'eta2_%s' % idx)
            e2 = tf.gather(eta2, i0, axis=-1)
            l1_h2_processed = l1_h2 * e2
        else:
            # Without adaptation, pass the recurrent state through unchanged.
            l1_h2_processed = l1_h2
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
            processed_l1 = tf.nn.conv3d(input=tf.concat([
                processed_l0, l1_h2_processed * processed_l0, l1_h2_processed
            ],
                                                        axis=4),
                                        filter=weights,
                                        strides=self.ff_conv_strides[idx],
                                        padding=self.padding)
            processed_l1 = tf.nn.bias_add(processed_l1, bias)
            processed_l1 = self.ff_nl(processed_l1)
        # Pool the preceding layer's drive
        if self.include_pooling:
            processed_l1 = max_pool3d(bottom=processed_l1,
                                      k=self.ff_pool_dhw[idx],
                                      s=self.ff_pool_strides[idx],
                                      name='ff_pool_%s' % idx)
        if self.batch_norm:
            processed_l1 = tf.contrib.layers.batch_norm(
                inputs=processed_l1,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                reuse=self.bn_reuse,
                is_training=self.train)
        for i in range(self.h_repeat):
            _, l2_h2 = self.hgru_ops(i0=i0,
                                     x=processed_l1,
                                     h2=l2_h2,
                                     var_scope='hgru_%s' % idx,
                                     layer_idx=idx)

        # Intermediate FF
        idx = 3
        if self.adapation:
            eta2 = getattr(self, 'eta2_%s' % idx)
            e2 = tf.gather(eta2, i0, axis=-1)
            l2_h2_processed = l2_h2 * e2
        else:
            # Without adaptation, pass the recurrent state through unchanged.
            l2_h2_processed = l2_h2
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
            top = tf.nn.conv3d(input=tf.concat([
                processed_l1, l2_h2_processed * processed_l1, l2_h2_processed
            ],
                                               axis=4),
                               filter=weights,
                               strides=self.ff_conv_strides[idx],
                               padding=self.padding)
            top = tf.nn.bias_add(top, bias)
            top = self.ff_nl(top)
        # Pool the preceding layer's drive
        if self.include_pooling:
            top = max_pool3d(bottom=top,
                             k=self.ff_pool_dhw[idx],
                             s=self.ff_pool_strides[idx],
                             name='ff_pool_%s' % idx)
        if self.batch_norm:
            top = tf.contrib.layers.batch_norm(
                inputs=top,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                reuse=self.bn_reuse,
                is_training=self.train)

        # FB
        idx = 2
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
        top = self.resize_x_to_y(x=top,
                                 y=l2_h2,
                                 kernel=weights,
                                 bias=bias,
                                 mode=self.fb_mode,
                                 strides=self.ff_pool_strides[3])
        if self.batch_norm:
            top = tf.contrib.layers.batch_norm(
                inputs=top,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                reuse=self.bn_reuse,
                is_training=self.train)
        idx = 1
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
        top = self.resize_x_to_y(x=top,
                                 y=l1_h2,
                                 kernel=weights,
                                 bias=bias,
                                 mode=self.fb_mode,
                                 strides=self.ff_pool_strides[2])
        if self.batch_norm:
            top = tf.contrib.layers.batch_norm(
                inputs=top,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                reuse=self.bn_reuse,
                is_training=self.train)
        idx = 0
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
        top = self.resize_x_to_y(x=top,
                                 y=fb_h2,
                                 kernel=weights,
                                 bias=bias,
                                 mode=self.fb_mode,
                                 strides=self.ff_pool_strides[1])
        if self.batch_norm:
            top = tf.contrib.layers.batch_norm(
                inputs=top,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                reuse=self.bn_reuse,
                is_training=self.train)

        _, fb_h2 = self.hgru_ops(i0=i0,
                                 x=top,
                                 h2=fb_h2,
                                 var_scope='hgru_%s' % 3,
                                 layer_idx=3)

        # Iterate loop
        i0 += 1
        return i0, x, l0_h2, l1_h2, l2_h2, fb_h2
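
This body is written to be iterated as the body of a tf.while_loop: it takes the loop counter and the hidden states, and returns them in the same order. A minimal driver sketch, assuming the surrounding class exposes this `full` as a bound method and that the hidden-state tensors are initialized elsewhere (e.g. to zeros); `run_recurrence`, `model`, and `timesteps` are hypothetical names, not part of the snippet above:

    import tensorflow as tf

    def run_recurrence(model, x, l0_h2, l1_h2, l2_h2, fb_h2, timesteps=8):
        # Hypothetical driver: step the recurrent body `timesteps` times.
        i0 = tf.constant(0)
        loop_vars = [i0, x, l0_h2, l1_h2, l2_h2, fb_h2]
        return tf.while_loop(
            cond=lambda i0, *args: i0 < timesteps,  # fixed number of steps
            body=model.full,
            loop_vars=loop_vars,
            back_prop=True,
            swap_memory=False)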
Example #2
    def full(self, i0, x, l0_h2, l0_fb, l1_h2, l1_fb, l2_h2, l2_fb):
        """hGRU body.
        Take the recurrent h2 from a low level and imbue it with
        information from a high layer. This means treating the lower
        layer h2 as the X and the higher layer h2 as the recurrent state.
        This will serve as I/E from the high layer along with feedback
        kernels.
        """

        # HGRU 0
        idx = 0
        if self.adapation:
            with tf.variable_scope('hgru_%s' % idx, reuse=True):
                eta1 = tf.get_variable("eta1")
                eta2 = tf.get_variable("eta2")
            e2 = tf.gather(eta2, i0, axis=-1)
            e1 = tf.gather(eta1, i0, axis=-1)
            l0_fb *= e2
        else:
            e1 = 1
        for i in range(self.h_repeat):
            l0_h2 *= e1
            _, l0_h2 = self.hgru_ops(i0=i0,
                                     x=x,
                                     h2=l0_h2,
                                     fb=l0_fb,
                                     var_scope='hgru_%s' % idx)
        if self.batch_norm:
            ff0 = tf.contrib.layers.batch_norm(
                inputs=l0_h2,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                reuse=self.scope_reuse,
                is_training=self.train)
        else:
            ff0 = l0_h2

        # FEEDFORWARD 0
        idx = 0
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            spot_weights_x = tf.get_variable("spot_x")
            spot_weights_y = tf.get_variable("spot_y")
            spot_weights_xy = tf.get_variable("spot_xy")
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
        ff0 = self.generic_combine(x, ff0, spot_weights_x, spot_weights_y,
                                   spot_weights_xy)
        if self.batch_norm:
            ff0 = tf.contrib.layers.batch_norm(
                inputs=ff0,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        ff0 = self.ff_nl(ff0) + 1
        ff0 = tf.nn.conv3d(input=ff0,
                           filter=weights,
                           strides=self.ff_conv_strides[idx],
                           padding=self.padding)
        if self.batch_norm:
            ff0 = tf.contrib.layers.batch_norm(
                inputs=ff0,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        if self.ff_kpool_multiplier > 1:
            low_k = 0
            running_max = ff0[:, :, :, :, low_k:low_k + self.ff_conv_k[idx]]
            for i in range(self.ff_kpool_multiplier - 1):
                low_k += self.ff_conv_k[idx]
                running_max = tf.maximum(
                    running_max, ff0[:, :, :, :,
                                     low_k:low_k + self.ff_conv_k[idx]])
            ff0 = running_max
        ff0 = tf.nn.bias_add(ff0, bias)
        ff0 = self.ff_nl(ff0) + 1
        # POOL
        if self.include_pooling:
            ff0 = max_pool3d(bottom=ff0,
                             k=self.ff_pool_dhw[idx],
                             s=self.ff_pool_strides[idx],
                             name='ff_pool_%s' % idx)

        # HGRU 1
        idx = 1
        if self.adapation:
            with tf.variable_scope('hgru_%s' % idx, reuse=True):
                eta1 = tf.get_variable("eta1")
                eta2 = tf.get_variable("eta2")
            e2 = tf.gather(eta2, i0, axis=-1)
            e1 = tf.gather(eta1, i0, axis=-1)
            l1_fb *= e2
        else:
            e1 = 1
        for i in range(self.h_repeat):
            l1_h2 *= e1
            _, l1_h2 = self.hgru_ops(i0=i0,
                                     x=ff0,
                                     h2=l1_h2,
                                     fb=l1_fb,
                                     var_scope='hgru_%s' % idx)
        if self.batch_norm:
            ff1 = tf.contrib.layers.batch_norm(
                inputs=l1_h2,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        else:
            ff1 = l1_h2

        # FEEDFORWARD 1
        idx = 1
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            spot_weights_x = tf.get_variable("spot_x")
            spot_weights_y = tf.get_variable("spot_y")
            spot_weights_xy = tf.get_variable("spot_xy")
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
        ff1 = self.generic_combine(ff0, ff1, spot_weights_x, spot_weights_y,
                                   spot_weights_xy)
        if self.batch_norm:
            ff1 = tf.contrib.layers.batch_norm(
                inputs=ff1,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        ff1 = self.ff_nl(ff1) + 1
        ff1 = tf.nn.conv3d(input=ff1,
                           filter=weights,
                           strides=self.ff_conv_strides[idx],
                           padding=self.padding)
        if self.batch_norm:
            ff1 = tf.contrib.layers.batch_norm(
                inputs=ff1,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        if self.ff_kpool_multiplier > 1:
            low_k = 0
            running_max = ff1[:, :, :, :, low_k:low_k + self.ff_conv_k[idx]]
            for i in range(self.ff_kpool_multiplier - 1):
                low_k += self.ff_conv_k[idx]
                running_max = tf.maximum(
                    running_max, ff1[:, :, :, :,
                                     low_k:low_k + self.ff_conv_k[idx]])
            ff1 = running_max
        ff1 = tf.nn.bias_add(ff1, bias)
        ff1 = self.ff_nl(ff1) + 1
        # POOL
        if self.include_pooling:
            ff1 = max_pool3d(bottom=ff1,
                             k=self.ff_pool_dhw[idx],
                             s=self.ff_pool_strides[idx],
                             name='ff_pool_%s' % idx)

        # HGRU 2
        idx = 2
        if self.adapation:
            with tf.variable_scope('hgru_%s' % idx, reuse=True):
                eta1 = tf.get_variable("eta1")
                eta2 = tf.get_variable("eta2")
            e2 = tf.gather(eta2, i0, axis=-1)
            e1 = tf.gather(eta1, i0, axis=-1)
            l2_fb *= e2
        else:
            e1 = 1
        for i in range(self.h_repeat):
            l2_h2 *= e1
            _, l2_h2 = self.hgru_ops(i0=i0,
                                     x=ff1,
                                     h2=l2_h2,
                                     fb=l2_fb,
                                     var_scope='hgru_%s' % idx)
        if self.batch_norm:
            ff2 = tf.contrib.layers.batch_norm(
                inputs=l2_h2,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        else:
            ff2 = l2_h2

        # FEEDFORWARD 2
        idx = 2
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            spot_weights_x = tf.get_variable("spot_x")
            spot_weights_y = tf.get_variable("spot_y")
            spot_weights_xy = tf.get_variable("spot_xy")
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
        ff2 = self.generic_combine(ff1, ff2, spot_weights_x, spot_weights_y,
                                   spot_weights_xy)
        if self.batch_norm:
            ff2 = tf.contrib.layers.batch_norm(
                inputs=ff2,
                scale=True,
                center=True,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        ff2 = self.ff_nl(ff2) + 1
        ff2 = tf.nn.conv3d(input=ff2,
                           filter=weights,
                           strides=self.ff_conv_strides[idx],
                           padding=self.padding)
        if self.batch_norm:
            ff2 = tf.contrib.layers.batch_norm(
                inputs=ff2,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        if self.ff_kpool_multiplier > 1:
            low_k = 0
            running_max = ff2[:, :, :, :, low_k:low_k + self.ff_conv_k[idx]]
            for i in range(self.ff_kpool_multiplier - 1):
                low_k += self.ff_conv_k[idx]
                running_max = tf.maximum(
                    running_max, ff2[:, :, :, :,
                                     low_k:low_k + self.ff_conv_k[idx]])
            ff2 = running_max
        ff2 = tf.nn.bias_add(ff2, bias)
        ff2 = self.ff_nl(ff2) + 1
        # POOL
        if self.include_pooling:
            ff2 = max_pool3d(bottom=ff2,
                             k=self.ff_pool_dhw[idx],
                             s=self.ff_pool_strides[idx],
                             name='ff_pool_%s' % idx)

        # FEEDBACK 2
        idx = 2
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
        fb2 = self.resize_x_to_y(x=ff2,
                                 y=ff1,
                                 kernel=weights,
                                 mode=self.fb_mode,
                                 strides=self.ff_pool_strides[2])
        if self.batch_norm:
            fb2 = tf.contrib.layers.batch_norm(
                inputs=fb2,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        fb2 = tf.nn.bias_add(fb2, bias)
        fb2 = self.ff_nl(fb2) + 1
        l2_fb = fb2

        # FEEDBACK 1
        idx = 1
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
        fb1 = self.resize_x_to_y(x=fb2,
                                 y=ff0,
                                 kernel=weights,
                                 mode=self.fb_mode,
                                 strides=self.ff_pool_strides[1])
        if self.batch_norm:
            fb1 = tf.contrib.layers.batch_norm(
                inputs=fb1,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        fb1 = tf.nn.bias_add(fb1, bias)
        fb1 = self.ff_nl(fb1) + 1
        l1_fb = fb1

        # FEEDBACK 0
        idx = 0
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
        fb0 = self.resize_x_to_y(x=fb1,
                                 y=x,
                                 kernel=weights,
                                 mode=self.fb_mode,
                                 strides=self.ff_pool_strides[0])
        if self.batch_norm:
            fb0 = tf.contrib.layers.batch_norm(
                inputs=fb0,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                is_training=self.train)
        fb0 = tf.nn.bias_add(fb0, bias)
        fb0 = self.ff_nl(fb0) + 1
        l0_fb = fb0

        # Iterate loop
        i0 += 1
        return i0, x, l0_h2, l0_fb, l1_h2, l1_fb, l2_h2, l2_fb
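
This example mixes the feedforward drive with the recurrent output through `generic_combine`, whose definition is not shown here. A minimal sketch of one plausible reading, assuming the three "spot" weights are broadcastable per-channel gains and the combine is a linear-plus-interaction mix (an assumption, not the repository's definition; Example #5 below calls a variant without `spot_y`):

    import tensorflow as tf

    def generic_combine(x, y, spot_x, spot_y, spot_xy):
        # Hypothetical: per-channel gain on each stream plus a
        # multiplicative x*y interaction term, broadcast over space.
        return x * spot_x + y * spot_y + (x * y) * spot_xy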
Example #3
    def full(self, i0, l1_x, l1_h2, l2_h2, l3_h2):
        """hGRU body.
        Take the recurrent h2 from a low level and imbue it with
        information from a high layer. This means treating the lower
        layer h2 as the X and the higher layer h2 as the recurrent state.
        This will serve as I/E from the high layer along with feedback
        kernels.

        h1 -> conv -> h2 -> conv -> h3 -> fb -> h2 h2 -> fb -> h1 h1 h1
        """

        # l2-l1 feedback (FEEDBACK KERNEL is 2x channels)
        idx = 0
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
            top_chs = l2_h2.get_shape().as_list()[-1]
            bottom_chs = l1_x.get_shape().as_list()[-1]
            fb_f = self.resize_x_to_y(x=l2_h2[:, :, :, :, :top_chs // 2],
                                      y=l1_x[:, :, :, :, :bottom_chs // 2],
                                      kernel=weights,
                                      bias=bias,
                                      mode=self.fb_mode,
                                      strides=self.ff_pool_strides[idx])
            fb_g = self.resize_x_to_y(x=l2_h2[:, :, :, :, top_chs // 2:],
                                      y=l1_x[:, :, :, :, bottom_chs // 2:],
                                      kernel=weights,
                                      bias=bias,
                                      mode=self.fb_mode,
                                      strides=self.ff_pool_strides[idx])
            fb = tf.concat([fb_f, fb_g], axis=4)
        l1_x = self.fb_ops(l1_x, fb, var_scope='fb_%s' % idx)

        # LAYER 1
        _, l1_h2 = self.hgru_ops(i0=i0,
                                 x=l1_x,
                                 h2=l1_h2,
                                 layer='h1',
                                 var_scope='hgru_%s' % 0,
                                 layer_idx=0)

        # Intermediate FF
        idx = 0
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
            bottom_chs = l1_h2.get_shape().as_list()[-1]
            l1_h2_f = l1_h2[:, :, :, :, :bottom_chs // 2]
            l1_h2_g = l1_h2[:, :, :, :, bottom_chs // 2:]
            processed_l1_f = tf.nn.conv3d(input=l1_h2_f,
                                          filter=weights,
                                          strides=self.ff_conv_strides[idx],
                                          padding=self.padding)
            processed_l1_f = tf.nn.bias_add(processed_l1_f, bias)
            processed_l1_g = tf.nn.conv3d(input=l1_h2_g,
                                          filter=weights,
                                          strides=self.ff_conv_strides[idx],
                                          padding=self.padding)
            processed_l1_g = tf.nn.bias_add(processed_l1_g, bias)
            processed_l1 = self.ff_nl(
                tf.concat([processed_l1_f, processed_l1_g], axis=4))

        # Pool the preceding layer's drive
        if self.include_pooling:
            processed_l1 = max_pool3d(bottom=processed_l1,
                                      k=self.ff_pool_dhw[0],
                                      s=self.ff_pool_strides[0],
                                      name='ff_pool_%s' % 0)
        if self.batch_norm:
            with tf.variable_scope('l1_bn_%s' % idx,
                                   reuse=self.scope_reuse) as scope:
                processed_l1 = tf.contrib.layers.batch_norm(
                    inputs=processed_l1,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)

        # l3-l2 feedback (FEEDBACK KERNEL is 2x channels)
        idx = 1
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
            top_chs = l3_h2.get_shape().as_list()[-1]
            bottom_chs = processed_l1.get_shape().as_list()[-1]
            fb_f = self.resize_x_to_y(x=l3_h2[:, :, :, :, :top_chs // 2],
                                      y=processed_l1[:, :, :, :,
                                                     :bottom_chs // 2],
                                      kernel=weights,
                                      bias=bias,
                                      mode=self.fb_mode,
                                      strides=self.ff_pool_strides[idx])
            fb_g = self.resize_x_to_y(x=l3_h2[:, :, :, :, top_chs // 2:],
                                      y=processed_l1[:, :, :, :,
                                                     bottom_chs // 2:],
                                      kernel=weights,
                                      bias=bias,
                                      mode=self.fb_mode,
                                      strides=self.ff_pool_strides[idx])
            fb = tf.concat([fb_f, fb_g], axis=4)
        processed_l1 = self.fb_ops(processed_l1, fb, var_scope='fb_%s' % idx)

        # LAYER 2
        _, l2_h2 = self.hgru_ops(i0=i0,
                                 x=processed_l1,
                                 h2=l2_h2,
                                 layer='h2',
                                 var_scope='hgru_%s' % 1,
                                 layer_idx=1)

        # Intermediate FF
        idx = 1
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
            bias = tf.get_variable("bias")
            bottom_chs = l2_h2.get_shape().as_list()[-1]
            l2_h2_f = l2_h2[:, :, :, :, :bottom_chs // 2]
            l2_h2_g = l2_h2[:, :, :, :, bottom_chs // 2:]
            processed_l2_f = tf.nn.conv3d(input=l2_h2_f,
                                          filter=weights,
                                          strides=self.ff_conv_strides[idx],
                                          padding=self.padding)
            processed_l2_f = tf.nn.bias_add(processed_l2_f, bias)
            processed_l2_g = tf.nn.conv3d(input=l2_h2_g,
                                          filter=weights,
                                          strides=self.ff_conv_strides[idx],
                                          padding=self.padding)
            processed_l2_g = tf.nn.bias_add(processed_l2_g, bias)
            processed_l2 = self.ff_nl(
                tf.concat([processed_l2_f, processed_l2_g], axis=4))

        # Pool the preceding layer's drive
        if self.include_pooling:
            processed_l2 = max_pool3d(bottom=processed_l2,
                                      k=self.ff_pool_dhw[idx],
                                      s=self.ff_pool_strides[idx],
                                      name='ff_pool_%s' % idx)
        if self.batch_norm:
            with tf.variable_scope('l2_bn_%s' % idx,
                                   reuse=self.scope_reuse) as scope:
                processed_l2 = tf.contrib.layers.batch_norm(
                    inputs=processed_l2,
                    scale=True,
                    center=False,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)

        # LAYER 3
        _, l3_h2 = self.hgru_ops(i0=i0,
                                 x=processed_l2,
                                 h2=l3_h2,
                                 layer='h3',
                                 var_scope='hgru_%s' % 2,
                                 layer_idx=2)
        if self.batch_norm:
            with tf.variable_scope('l3_bn', reuse=self.scope_reuse) as scope:
                l3_h2 = tf.contrib.layers.batch_norm(
                    inputs=l3_h2,
                    scale=True,
                    center=False,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)

        # Iterate loop
        i0 += 1
        return i0, l1_x, l1_h2, l2_h2, l3_h2
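
Every example routes top-down feedback through `resize_x_to_y`, which none of the snippets define. A minimal sketch of its 'transpose' mode, assuming static shapes, 3-element [depth, height, width] strides, and the [d, h, w, out_channels, in_channels] kernel layout that tf.nn.conv3d_transpose expects (a hypothetical helper, not the repository's implementation):

    import tensorflow as tf

    def resize_x_to_y(x, y, kernel, bias=None, mode='transpose',
                      strides=(2, 2, 2)):
        # Upsample x to y's spatial size with a learned 3D transpose
        # convolution, then optionally add a bias.
        assert mode == 'transpose', 'only the transpose mode is sketched'
        x_shape = x.get_shape().as_list()
        y_shape = y.get_shape().as_list()
        out_channels = kernel.get_shape().as_list()[-2]
        output_shape = [x_shape[0]] + y_shape[1:4] + [out_channels]
        resized = tf.nn.conv3d_transpose(value=x,
                                         filter=kernel,
                                         output_shape=output_shape,
                                         strides=[1] + list(strides) + [1],
                                         padding='SAME')
        if bias is not None:
            resized = tf.nn.bias_add(resized, bias)
        return resized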
Example #4
    def full(self, i0, x, l1_h2, l2_h2, l3_h2):
        """hGRU body.
        Take the recurrent h2 from a low level and imbue it with
        information from a high layer. This means treating the lower
        layer h2 as the X and the higher layer h2 as the recurrent state.
        This will serve as I/E from the high layer along with feedback
        kernels.

        h1 -> conv -> h2 -> conv -> h3 -> fb -> h2 h2 -> fb -> h1 h1 h1
        """

        # LAYER 1
        _, l1_h2 = self.hgru_ops(i0=i0, x=x, h2=l1_h2, layer='h1', layer_idx=0)

        # Intermediate FF
        if self.batch_norm:
            with tf.variable_scope('l1_h2_bn',
                                   reuse=self.scope_reuse) as scope:
                l1_h2 = tf.contrib.layers.batch_norm(
                    inputs=l1_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)

        # Pool the preceding layer's drive
        if self.include_pooling:
            processed_l1_h2 = max_pool3d(bottom=l1_h2,
                                         k=self.ff_pool_dhw[0],
                                         s=self.ff_pool_strides[0],
                                         name='ff_pool_%s' % 0)
        else:
            processed_l1_h2 = l1_h2

        # LAYER 2
        idx = 0
        processed_l1_h2 = tf.nn.conv3d(input=processed_l1_h2,
                                       filter=getattr(self,
                                                      'ff_kernel_%s' % idx),
                                       strides=self.ff_conv_strides[idx],
                                       padding=self.padding)
        processed_l1_h2 = tf.nn.bias_add(processed_l1_h2,
                                         getattr(self, 'ff_bias_%s' % idx))
        processed_l1_h2 = self.ff_nl(processed_l1_h2)
        if self.batch_norm:
            with tf.variable_scope('l1_h2_bn_ff_%s' % idx,
                                   reuse=self.scope_reuse) as scope:
                processed_l1_h2 = tf.contrib.layers.batch_norm(
                    inputs=processed_l1_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)
        _, l2_h2 = self.hgru_ops(i0=i0,
                                 x=processed_l1_h2,
                                 h2=l2_h2,
                                 layer='h2',
                                 layer_idx=1)
        if self.batch_norm:
            with tf.variable_scope('l2_h2_bn',
                                   reuse=self.scope_reuse) as scope:
                l2_h2 = tf.contrib.layers.batch_norm(
                    inputs=l2_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)

        # Pool the preceding layer's drive
        if self.include_pooling:
            processed_l2_h2 = max_pool3d(bottom=l2_h2,
                                         k=self.ff_pool_dhw[1],
                                         s=self.ff_pool_strides[1],
                                         name='ff_pool_%s' % 1)
        else:
            processed_l2_h2 = l2_h2

        # LAYER 3
        idx = 1
        processed_l2_h2 = tf.nn.conv3d(input=processed_l2_h2,
                                       filter=getattr(self,
                                                      'ff_kernel_%s' % idx),
                                       strides=self.ff_conv_strides[idx],
                                       padding=self.padding)
        processed_l2_h2 = tf.nn.bias_add(processed_l2_h2,
                                         getattr(self, 'ff_bias_%s' % idx))
        processed_l2_h2 = self.ff_nl(processed_l2_h2)
        if self.batch_norm:
            with tf.variable_scope('l3_h2_bn_ff_%s' % idx,
                                   reuse=self.scope_reuse) as scope:
                processed_l2_h2 = tf.contrib.layers.batch_norm(
                    inputs=processed_l2_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)
        _, l3_h2 = self.hgru_ops(i0=i0,
                                 x=processed_l2_h2,
                                 h2=l3_h2,
                                 layer='h3',
                                 layer_idx=2)
        if self.batch_norm:
            with tf.variable_scope('l3_h2_bn',
                                   reuse=self.scope_reuse) as scope:
                l3_h2 = tf.contrib.layers.batch_norm(
                    inputs=l3_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)

        # l3-l2 feedback (FEEDBACK KERNEL is 2x channels)
        _, temp_l2_h2 = self.hgru_ops(i0=i0,
                                      x=l2_h2,
                                      h2=self.resize_x_to_y(
                                          x=l3_h2,
                                          y=l2_h2,
                                          kernel=self.fb_kernel_1,
                                          bias=self.fb_bias_1,
                                          mode=self.fb_mode,
                                          strides=self.ff_pool_strides[1]),
                                      layer='fb2',
                                      layer_idx=3)

        # Peephole
        if self.peephole:
            l2_h2 = temp_l2_h2 + l2_h2
        else:
            l2_h2 = temp_l2_h2

        # l2 horizontal postprocessing
        _, l2_h2 = self.hgru_ops(i0=i0,
                                 x=l2_h2,
                                 h2=l2_h2,
                                 layer='h2',
                                 layer_idx=1)
        _, l2_h2 = self.hgru_ops(i0=i0,
                                 x=l2_h2,
                                 h2=l2_h2,
                                 layer='h2',
                                 layer_idx=1)

        # l2-l1 feedback (FEEDBACK KERNEL is 2x channels)
        _, temp_l1_h2 = self.hgru_ops(i0=i0,
                                      x=l1_h2,
                                      h2=self.resize_x_to_y(
                                          x=l2_h2,
                                          y=l1_h2,
                                          kernel=self.fb_kernel_0,
                                          bias=self.fb_bias_0,
                                          mode=self.fb_mode,
                                          strides=self.ff_pool_strides[0]),
                                      layer='fb1',
                                      layer_idx=4)

        # Peephole
        if self.peephole:
            l1_h2 = temp_l1_h2 + l1_h2
        else:
            l1_h2 = temp_l1_h2

        # l1 horizontal postprocessing
        _, l1_h2 = self.hgru_ops(i0=i0, x=x, h2=l1_h2, layer='h1', layer_idx=0)
        _, l1_h2 = self.hgru_ops(i0=i0, x=x, h2=l1_h2, layer='h1', layer_idx=0)

        # Iterate loop
        i0 += 1
        return i0, x, l1_h2, l2_h2, l3_h2
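
The `max_pool3d` helper used throughout is also external. A minimal stand-in, assuming `k` and `s` are 3-element [depth, height, width] windows and strides (an assumption; the repository may instead pass full 5-element lists):

    import tensorflow as tf

    def max_pool3d(bottom, k, s, name):
        # Hypothetical wrapper: pool over depth/height/width only,
        # leaving the batch and channel dimensions untouched.
        return tf.nn.max_pool3d(bottom,
                                ksize=[1] + list(k) + [1],
                                strides=[1] + list(s) + [1],
                                padding='SAME',
                                name=name)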
Example #5
    def full(self, i0, x, l0_h1, l0_h2, l1_h1, l1_h2, td0_h1, td1_h1):
        # HGRU 0
        l0_h1, l0_h2 = self.hgru0.run(x, l0_h1, l0_h2)
        ff0 = tf.contrib.layers.batch_norm(
            inputs=l0_h2,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            reuse=None,
            is_training=self.train)

        # FEEDFORWARD 0
        idx = 0
        with tf.variable_scope('ff_%s' % idx, reuse=tf.AUTO_REUSE):
            spot_weights_x = tf.get_variable("spot_x")
            spot_weights_xy = tf.get_variable("spot_xy")
            weights = tf.get_variable("weights")
        ff0 = self.generic_combine(x, ff0, spot_weights_x, spot_weights_xy)
        ff0 = tf.nn.elu(ff0) + 1
        ff0 = tf.nn.conv3d(input=ff0,
                           filter=weights,
                           strides=self.ff_conv_strides[idx],
                           padding='SAME')
        if self.ff_kpool_multiplier > 1:
            low_k = 0
            running_max = ff0[:, :, :, :, low_k:low_k + self.ff_conv_k[idx]]
            for i in range(self.ff_kpool_multiplier - 1):
                low_k += self.ff_conv_k[idx]
                running_max = tf.maximum(
                    running_max, ff0[:, :, :, :,
                                     low_k:low_k + self.ff_conv_k[idx]])
            ff0 = running_max
        ff0 = tf.contrib.layers.batch_norm(
            inputs=ff0,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            is_training=self.train)
        ff0 = tf.nn.elu(ff0) + 1

        # POOL
        ff0 = max_pool3d(bottom=ff0,
                         k=self.ff_pool_fsiz[idx],
                         s=self.ff_pool_strides[idx],
                         name='ff_pool_%s' % idx)

        # HGRU 1
        l1_h1, l1_h2 = self.hgru1.run(ff0, l1_h1, l1_h2)
        ff1 = tf.contrib.layers.batch_norm(
            inputs=l1_h2,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            reuse=None,
            is_training=self.train)

        # FEEDFORWARD 1
        idx = 1
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            spot_weights_x = tf.get_variable("spot_x")
            spot_weights_xy = tf.get_variable("spot_xy")
            weights = tf.get_variable("weights")
        ff1 = self.generic_combine(ff0, ff1, spot_weights_x, spot_weights_xy)
        ff1 = tf.nn.elu(ff1) + 1
        ff1 = tf.nn.conv3d(input=ff1,
                           filter=weights,
                           strides=self.ff_conv_strides[idx],
                           padding='SAME')
        if self.ff_kpool_multiplier > 1:
            low_k = 0
            running_max = ff1[:, :, :, :, low_k:low_k + self.ff_conv_k[idx]]
            for i in range(self.ff_kpool_multiplier - 1):
                low_k += self.ff_conv_k[idx]
                running_max = tf.maximum(
                    running_max, ff1[:, :, :, :,
                                     low_k:low_k + self.ff_conv_k[idx]])
            ff1 = running_max
        ff1 = tf.contrib.layers.batch_norm(
            inputs=ff1,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            is_training=self.train)
        ff1 = tf.nn.elu(ff1) + 1

        # POOL
        ff1 = max_pool3d(bottom=ff1,
                         k=self.ff_pool_fsiz[idx],
                         s=self.ff_pool_strides[idx],
                         name='ff_pool_%s' % idx)

        # HGRU 2
        # l2_h1, l2_h2 = self.hgru2.run(ff1, l2_h1, l2_h2)
        # ff2 = tf.contrib.layers.batch_norm(
        #     inputs=l2_h2,
        #     scale=True,
        #     center=True,
        #     fused=True,
        #     renorm=False,
        #     param_initializers=self.bn_param_initializer,
        #     updates_collections=None,
        #     reuse=None,
        #     is_training=self.train)

        # FEEDFORWARD 2
        idx = 2
        with tf.variable_scope('ff_%s' % idx, reuse=True):
            # spot_weights_x = tf.get_variable("spot_x")
            # spot_weights_xy = tf.get_variable("spot_xy")
            weights = tf.get_variable("weights")
        # ff2 = self.generic_combine(
        #     ff1,
        #     ff2,
        #     spot_weights_x, spot_weights_xy)
        # ff2 = tf.nn.elu(ff2) + 1
        ff2 = ff1
        ff2 = tf.nn.conv3d(input=ff2,
                           filter=weights,
                           strides=self.ff_conv_strides[idx],
                           padding='SAME')

        if self.ff_kpool_multiplier > 1:
            low_k = 0
            running_max = ff2[:, :, :, :, low_k:low_k + self.ff_conv_k[idx]]
            for i in range(self.ff_kpool_multiplier - 1):
                low_k += self.ff_conv_k[idx]
                running_max = tf.maximum(
                    running_max, ff2[:, :, :, :,
                                     low_k:low_k + self.ff_conv_k[idx]])
            ff2 = running_max
        ff2 = tf.contrib.layers.batch_norm(
            inputs=ff2,
            scale=True,
            center=False,
            fused=True,
            renorm=False,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            is_training=self.train)
        ff2 = tf.nn.elu(ff2) + 1

        # POOL
        ff2 = max_pool3d(bottom=ff2,
                         k=self.ff_pool_fsiz[idx],
                         s=self.ff_pool_strides[idx],
                         name='ff_pool_%s' % idx)

        # FEEDBACK 2
        idx = 2
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
        fb2 = self.resize_x_to_y(x=ff2,
                                 y=ff1,
                                 kernel=weights,
                                 mode='transpose',
                                 strides=self.ff_pool_strides[2])
        fb2 = tf.contrib.layers.batch_norm(
            inputs=fb2,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            is_training=self.train)
        fb2 = tf.nn.elu(fb2) + 1

        # HGRU_TD 2
        # td2_h1, l2_h2 = self.hgru_td2.run(fb2, td2_h1, l2_h2)

        # FEEDBACK 1
        idx = 1
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
        fb1 = self.resize_x_to_y(x=fb2,
                                 y=ff0,
                                 kernel=weights,
                                 mode='transpose',
                                 strides=self.ff_pool_strides[1])
        fb1 = tf.contrib.layers.batch_norm(
            inputs=fb1,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            is_training=self.train)
        fb1 = tf.nn.elu(fb1) + 1

        # HGRU_TD 1
        td1_h1, l1_h2 = self.hgru_td1.run(fb1, td1_h1, l1_h2)

        # FEEDBACK 0
        idx = 0
        with tf.variable_scope('fb_%s' % idx, reuse=True):
            weights = tf.get_variable("weights")
        fb0 = self.resize_x_to_y(x=fb1,
                                 y=x,
                                 kernel=weights,
                                 mode='transpose',
                                 strides=self.ff_pool_strides[0])
        fb0 = tf.contrib.layers.batch_norm(
            inputs=fb0,
            scale=True,
            center=True,
            fused=True,
            renorm=False,
            param_initializers=self.bn_param_initializer,
            updates_collections=None,
            is_training=self.train)
        fb0 = tf.nn.elu(fb0) + 1

        # HGRU_TD 0
        td0_h1, l0_h2 = self.hgru_td0.run(fb0, td0_h1, l0_h2)

        # Iterate loop
        i0 += 1
        return i0, x, l0_h1, l0_h2, l1_h1, l1_h2, td0_h1, td1_h1
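
The running-max loops in Examples #2 and #5 implement maxout pooling over channel groups: the convolution emits ff_kpool_multiplier groups of ff_conv_k[idx] feature maps each, and only the elementwise maximum across the groups is kept. A compact equivalent of that inline pattern (a sketch of the same logic, not repository code):

    import tensorflow as tf

    def channel_maxout(tensor, k, multiplier):
        # Reduce `multiplier` groups of `k` channels to one group of `k`
        # by taking the elementwise maximum across groups (maxout).
        out = tensor[..., :k]
        for i in range(1, multiplier):
            out = tf.maximum(out, tensor[..., i * k:(i + 1) * k])
        return out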