Example 1
    def upsample_residual(self, x, filters, padding, sn, 
                        norm=tf.layers.batch_normalization, activation=tf.nn.relu, name=None):
        """
        Upsample a 4-D input tensor in a residual module, following this implementation:
        https://github.com/brain-research/self-attention-gan/blob/ad9612e60f6ba2b5ad3d3340ebae60f724636d75/generator.py#L78
        x:        Input tensor (4-D)
        filters:  Number of output filters
        padding:  Padding mode; must not be 'valid'
        sn:       Whether to apply spectral normalization to the convolutions
        Caution: _reset_counter should be called first if this residual module is reused
        """
        assert_colorize(padding.lower() != 'valid')
        assert_colorize(len(x.shape.as_list()) == 4, f'Input x should be a 4-D tensor, but get {x.shape.as_list()}')
        name = self.get_name(name, 'residual')

        y = x
        conv = self.snconv if sn else self.conv
        with tf.variable_scope(name):
            y = tf_utils.norm_activation(y, norm=norm, activation=activation, training=self.training, name='NormAct_1')
            y = self.upsample_conv(y, filters, 3, 1, padding=padding, sn=sn, name='UpsampleConv')
            y = tf_utils.norm_activation(y, norm=norm, activation=activation, training=self.training, name='NormAct_2')
            y = conv(y, filters, 3, 1, padding=padding, name='Conv')

            x = self.upsample_conv(x, filters, 1, 1, padding='VALID', sn=sn, name='UpsampleConv1x1')
            x = x + y

        return x
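
Every snippet on this page routes activations through a tf_utils.norm_activation helper whose implementation is not shown here. A minimal sketch, assuming it simply applies the optional normalization followed by the optional activation (forwarding the training flag to batch normalization), might look like this:

    import tensorflow as tf

    def norm_activation(x, norm=None, activation=None, training=False, name=None):
        """Hypothetical sketch of tf_utils.norm_activation: optional norm, then optional activation."""
        with tf.name_scope(name or 'norm_activation'):
            if norm is not None:
                if norm == tf.layers.batch_normalization:
                    # batch norm switches between batch and moving statistics via `training`
                    x = norm(x, training=training)
                else:
                    x = norm(x)
            if activation is not None:
                x = activation(x)
        return x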
Example 2
        def layer_imp():
            o, y = self.noisy(x, units, kernel_initializer=kernel_initializer,
                              name=name, sigma=sigma, return_noise=True)
            y = tf_utils.norm_activation(y, norm=norm, activation=activation,
                                         training=self.training)

            return o + y
Example 3
    def residual(self, x, layer, norm=tf.layers.batch_normalization, activation=tf.nn.relu, name=None):
        """
        x:      Input
        layer:  Layer function,
        Caution: _reset_counter should be called first if this residual module is reused
        """
        name = self.get_name(name, 'residual')

        y = x
        with tf.variable_scope(name):
            y = tf_utils.norm_activation(y, norm=norm, activation=activation, training=self.training, name='NormAct_1')
            y = layer(y)
            y = tf_utils.norm_activation(y, norm=norm, activation=activation, training=self.training, name='NormAct_2')
            y = layer(y)
            x = x + y

        return x
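
A typical call site for this pre-activation residual block (see Example 5 below) passes a closure that preserves the channel count, so the skip connection x + y stays shape-compatible. A hypothetical usage sketch, assuming self exposes the conv and residual methods from these examples:

    # closure over a 3x3 convolution that keeps the channel count unchanged
    layer = lambda y: self.conv(y, y.shape[-1], 3, 1, padding='same')
    x = self.residual(x, layer, norm=tf.layers.batch_normalization, name='ResBlock_0')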
Example 4
        def layer_imp():
            y = self.convtrans(x, filters, kernel_size, 
                                strides=strides, padding=padding, 
                                kernel_initializer=kernel_initializer)
            y = tf_utils.norm_activation(y, norm=norm, activation=activation,
                                         training=self.training)

            return y
Example 5
    def _st_net(self, x):
        """ Style transfer net """
        if self.norm == 'batch':
            norm = tf.layers.batch_normalization
        elif self.norm == 'layer':
            norm = tf_utils.layer_norm
        elif self.norm == 'instance':
            norm = tf_utils.instance_norm
        else:
            raise NotImplementedError

        # ConvNet
        with tf.variable_scope('net', reuse=self.reuse):
            for i, (filters, kernel_size,
                    strides) in enumerate(self.args['conv_params']):
                x = self.conv_norm_activation(x,
                                              filters,
                                              kernel_size,
                                              strides,
                                              padding=self.padding,
                                              norm=norm,
                                              name=f'Conv_{i}')

            # residual, following paper "Identity Mappings in Deep Residual Networks"
            for i in range(self.args['n_residuals']):
                layer = lambda x: self.conv(
                    x, x.shape[-1], 3, 1, padding=self.padding)
                x = self.residual(x, layer, norm=norm, name=f'ResBlock_{i}')
            x = tf_utils.norm_activation(x, norm, tf.nn.relu, training=self.training)

            for i, (filters, kernel_size,
                    strides) in enumerate(self.args['convtras_params']):
                x = self.convtrans_norm_activation(x,
                                                   filters,
                                                   kernel_size,
                                                   strides,
                                                   norm=norm,
                                                   name=f'ConvTrans_{i}')

            filters, kernel_size, strides = self.args['final_conv_params']
            x = self.conv_norm_activation(x,
                                          filters,
                                          kernel_size,
                                          strides,
                                          padding=self.padding,
                                          norm=norm,
                                          activation=tf.tanh,
                                          name='FinalConv')
            x = 127.5 * x + 127.5

        return x
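
Since the final convolution uses tf.tanh, its output lies in [-1, 1]; the affine map 127.5 * x + 127.5 then rescales it to the pixel range [0, 255]. A quick numerical check of that mapping:

    import numpy as np

    t = np.array([-1.0, 0.0, 1.0])   # tanh range endpoints and midpoint
    print(127.5 * t + 127.5)         # -> [   0.   127.5  255. ]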
Example 6
    def _deterministic_policy_net(self,
                                  state,
                                  units,
                                  action_dim,
                                  noisy_sigma,
                                  name='policy_net'):
        noisy = lambda x, u, norm: self.noisy(x, u, sigma=noisy_sigma)
        x = state
        with tf.variable_scope(name):
            for i, u in enumerate(units):
                if i < len(units) - self.args['n_noisy']:
                    x = self.dense(x, u)
                else:
                    x = self.noisy(x, u, sigma=noisy_sigma)
                x = norm_activation(x, norm=self.norm, activation=tf.nn.relu)
            x = self.dense(x, action_dim)
            x = tf.tanh(x, name='action')

        return x
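
The closing tf.tanh bounds every action dimension to [-1, 1]; if the environment uses different action bounds, the caller typically rescales the output. A hypothetical post-processing sketch, where action_low and action_high stand for the environment's per-dimension bounds:

    # rescale a tanh-bounded action from [-1, 1] to [action_low, action_high]
    scaled_action = action_low + 0.5 * (action + 1.0) * (action_high - action_low)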
Example 7
        def stochastic_policy_net(state):
            x = state
            self.reset_counter(
                'noisy'
            )  # reset noisy counter for each call to enable reuse if desirable

            with tf.variable_scope('net'):
                for i, u in enumerate(units):
                    if i < len(units) - n_noisy:
                        x = self.dense(x, u)
                    else:
                        x = self.noisy(x, u, sigma=noisy_sigma)
                    x = norm_activation(x, norm=norm, activation=tf.nn.relu)

                mean = self.dense(x, self.action_dim, name='action_mean')

                # constrain logstd to be in range [LOG_STD_MIN, LOG_STD_MAX]
                with tf.variable_scope('action_std'):
                    logstd = tf.layers.dense(x, self.action_dim)
                    # logstd = tf.tanh(logstd)
                    # logstd = LOG_STD_MIN + .5 * (LOG_STD_MAX - LOG_STD_MIN) * (logstd + 1)
                    logstd = tf.clip_by_value(logstd, LOG_STD_MIN, LOG_STD_MAX)

            return mean, logstd, mean
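
The mean/logstd pair returned here typically feeds a reparameterized sampling step, which is why logstd is clipped to [LOG_STD_MIN, LOG_STD_MAX]: it keeps the standard deviation numerically stable after exponentiation. A minimal downstream sketch, assuming the usual tanh-squashed Gaussian sampling:

    import tensorflow as tf

    def sample_action(mean, logstd):
        """Hypothetical sketch: reparameterized sample from N(mean, exp(logstd)), squashed by tanh."""
        std = tf.exp(logstd)
        raw_action = mean + std * tf.random_normal(tf.shape(mean))
        return tf.tanh(raw_action)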
Example 8
        def layer_imp():
            y = self.dense(x, units, use_bias=use_bias, kernel_initializer=kernel_initializer)
            y = tf_utils.norm_activation(y, norm=norm, activation=activation, 
                                        training=self.training)

            return y
Example 9
        def layer_imp():
            y = layer(x)
            y = tf_utils.norm_activation(y, norm=norm, activation=activation, 
                                        training=self.training)

            return y