Example #1
    def layer_op(self, input_tensor):
        output_tensor = input_tensor
        for (i, k) in enumerate(self.kernels):
            # choose the number of output channels for this convolution:
            # encoder blocks keep the full width; decoder blocks run at
            # half width, except that double_n keeps the first
            # convolution at full width before halving
            if self.encoding:
                nb_channels = self.n_output_chns
            elif self.double_n and i == 0:
                nb_channels = self.n_output_chns
            else:
                nb_channels = self.n_output_chns // 2

            # create the parameterised layers for this iteration
            in_op = InstanceNormLayer(name='in_{}'.format(i))
            acti_op = ActiLayer(func=self.acti_func,
                                regularizer=self.regularizers['w'],
                                name='acti_{}'.format(i))
            conv_op = ConvLayer(n_output_chns=nb_channels,
                                kernel_size=k,
                                stride=self.stride,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                name='conv_{}'.format(i))
            # connect the layers: instance norm -> activation -> convolution
            output_tensor = in_op(output_tensor)
            output_tensor = acti_op(output_tensor)
            output_tensor = conv_op(output_tensor)
        return output_tensor
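
To show how a block like this is driven, here is a minimal usage sketch; the class name UNetBlock, its constructor arguments, and the input shape are illustrative assumptions, not the verified API:

# Hypothetical usage of the block above (names and arguments assumed).
import tensorflow as tf

block = UNetBlock(n_output_chns=64,
                  kernels=((3, 3), (3, 3)),  # one kernel size per conv
                  acti_func='leakyrelu',
                  encoding=True)
images = tf.placeholder(tf.float32, shape=[1, 96, 96, 1])
# each loop iteration applies instance norm -> activation -> convolution
features = block(images)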
Example #2
    def layer_op(self, input_tensor, is_training=None, keep_prob=None):
        conv_layer = ConvLayer(n_output_chns=self.n_output_chns,
                               kernel_size=self.kernel_size,
                               stride=self.stride,
                               dilation=self.dilation,
                               padding=self.padding,
                               with_bias=self.with_bias,
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               padding_constant=self.padding_constant,
                               name='conv_')

        if self.feature_normalization == 'batch':
            if is_training is None:
                raise ValueError(
                    'is_training argument should be True or False '
                    "when feature_normalization is 'batch'")
            bn_layer = BNLayer(regularizer=self.regularizers['w'],
                               moving_decay=self.moving_decay,
                               eps=self.eps,
                               name='bn_')
        elif self.feature_normalization == 'instance':
            in_layer = InstanceNormLayer(eps=self.eps, name='in_')
        elif self.feature_normalization == 'group':
            gn_layer = GNLayer(regularizer=self.regularizers['w'],
                               group_size=self.group_size,
                               eps=self.eps,
                               name='gn_')
        if self.acti_func is not None:
            acti_layer = ActiLayer(func=self.acti_func,
                                   regularizer=self.regularizers['w'],
                                   name='acti_')

        if keep_prob is not None:
            dropout_layer = ActiLayer(func='dropout', name='dropout_')

        def activation(output_tensor):
            # apply feature normalisation, activation and dropout in turn
            if self.feature_normalization == 'batch':
                output_tensor = bn_layer(output_tensor, is_training)
            elif self.feature_normalization == 'instance':
                output_tensor = in_layer(output_tensor)
            elif self.feature_normalization == 'group':
                output_tensor = gn_layer(output_tensor)
            if self.acti_func is not None:
                output_tensor = acti_layer(output_tensor)
            if keep_prob is not None:
                output_tensor = dropout_layer(output_tensor,
                                              keep_prob=keep_prob)
            return output_tensor

        if self.preactivation:
            # pre-activation style: norm/acti/dropout, then convolution
            output_tensor = conv_layer(activation(input_tensor))
        else:
            # conventional order: convolution first, then norm/acti/dropout
            output_tensor = activation(conv_layer(input_tensor))

        return output_tensor
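
The preactivation flag only changes the composition order, not the layers themselves. A rough usage sketch, assuming this method belongs to a ConvolutionalLayer-style wrapper whose constructor mirrors the attributes read above:

# Hypothetical usage (class name and constructor arguments assumed).
conv_block = ConvolutionalLayer(n_output_chns=32,
                                kernel_size=3,
                                stride=1,
                                feature_normalization='batch',
                                acti_func='relu',
                                preactivation=False)
# batch normalisation requires the is_training flag;
# keep_prob only matters when dropout is wanted
output = conv_block(input_tensor, is_training=True, keep_prob=0.8)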
Example #3
    def test_2d_instnorm_shape(self):
        # instance normalisation is shape-preserving:
        # the output shape must match the input shape
        x = self.get_2d_input()
        instnorm_layer = InstanceNormLayer()
        out_inst = instnorm_layer(x)
        print(instnorm_layer)

        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())

            out = sess.run(out_inst)
            x_shape = tuple(x.shape.as_list())
            self.assertAllClose(x_shape, out.shape)
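
For reference, instance normalisation standardises each channel of each sample over its spatial axes, which is why the output shape must match the input shape. A minimal NumPy sketch of the computation (the eps default is an assumption):

import numpy as np

def instance_norm(x, eps=1e-6):
    # x has shape (batch, height, width, channels);
    # normalise each (sample, channel) pair over the spatial axes
    mean = x.mean(axis=(1, 2), keepdims=True)
    var = x.var(axis=(1, 2), keepdims=True)
    return (x - mean) / np.sqrt(var + eps)
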
    def layer_op(self, input_tensor, is_training=None, keep_prob=None):
        fc_layer = FCLayer(n_output_chns=self.n_output_chns,
                           with_bias=self.with_bias,
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           b_initializer=self.initializers['b'],
                           b_regularizer=self.regularizers['b'],
                           name='fc_')
        output_tensor = fc_layer(input_tensor)

        if self.feature_normalization == 'batch':
            if is_training is None:
                raise ValueError(
                    'is_training argument should be True or False '
                    "when feature_normalization is 'batch'")
            bn_layer = BNLayer(regularizer=self.regularizers['w'],
                               moving_decay=self.moving_decay,
                               eps=self.eps,
                               name='bn_')
            output_tensor = bn_layer(output_tensor, is_training)
        elif self.feature_normalization == 'instance':
            in_layer = InstanceNormLayer(eps=self.eps, name='in_')
            output_tensor = in_layer(output_tensor)
        elif self.feature_normalization == 'group':
            gn_layer = GNLayer(regularizer=self.regularizers['w'],
                               group_size=self.group_size,
                               eps=self.eps,
                               name='gn_')
            output_tensor = gn_layer(output_tensor)

        if self.acti_func is not None:
            acti_layer = ActiLayer(func=self.acti_func,
                                   regularizer=self.regularizers['w'],
                                   name='acti_')
            output_tensor = acti_layer(output_tensor)

        if keep_prob is not None:
            dropout_layer = ActiLayer(func='dropout', name='dropout_')
            output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)

        return output_tensor
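
As with the convolutional variant, a short usage sketch, assuming a FullyConnectedLayer-style wrapper and illustrative arguments:

# Hypothetical usage (class name and arguments assumed).
fc_block = FullyConnectedLayer(n_output_chns=10,
                               feature_normalization='instance',
                               acti_func='relu')
# the order here is fixed: fc -> normalisation -> activation -> dropout
logits = fc_block(flat_features, keep_prob=0.5)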