import tensorflow as tf  # assumes TF 1.x (tf.contrib / tf.layers APIs)
# spectral_norm, instance_norm, WeightNorm, and scope_has_variables are
# assumed to be defined elsewhere in this module.

def conv2D(inputs, kernel, output_channel, stride, use_bias=False, name='conv_1', spectral_normed=True, weight_normed=False, batch_normed=False,
           layer_normed=False, instance_normed=False, lr_normed=False, stddev=0.02, padding="SAME"):

    with tf.variable_scope(name) as scope:
        
        if scope_has_variables(scope):
            scope.reuse_variables()
            
        if not weight_normed: 
            w = tf.get_variable("w", [kernel, kernel, inputs.get_shape()[-1], output_channel],
                        initializer=tf.contrib.layers.variance_scaling_initializer())
                        
            if spectral_normed:
                w = spectral_norm(w)

        if weight_normed:
            conv = WeightNorm(tf.keras.layers.Conv2D(output_channel, kernel, use_bias=use_bias, strides=stride, padding=padding))(inputs)
        elif batch_normed:
            # plain Keras conv here; batch normalization is applied further below
            conv = tf.keras.layers.Conv2D(output_channel, kernel, use_bias=use_bias, strides=stride, padding=padding)(inputs)
        else:
            conv = tf.nn.conv2d(inputs, w, strides=[1, stride, stride, 1], padding=padding)
            
        if layer_normed:
            conv = tf.contrib.layers.layer_norm(conv)
        if batch_normed:
            conv = tf.layers.batch_normalization(conv)
        if instance_normed:
            conv = instance_norm(conv)          
        if lr_normed:
            conv = tf.nn.lrn(conv, bias=0.00005)
            
        return conv
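
# Hedged usage sketch for conv2D (the `_sketch` names below are illustrative,
# not part of the original module):
def _conv2D_sketch():
    # NHWC input; with padding="SAME" and stride 2, spatial dims halve.
    x = tf.placeholder(tf.float32, [None, 64, 64, 3])
    h = conv2D(x, kernel=3, output_channel=32, stride=2,
               name='conv_sketch')  # spectral_normed=True by default
    return h  # shape: [None, 32, 32, 32]
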
def deconv2D(inputs, kernel, output_channel, strides, shape, spectral_normed=False, weight_normed=False, batch_normed=False,
             lr_normed=False, layer_normed=False, use_bias=False, instance_normed=False, name='conv'):
    s = [1, strides, strides, 1]
    with tf.variable_scope(name) as scope:
    
        if scope_has_variables(scope):
            scope.reuse_variables()
      
        if not weight_normed:  
            w = tf.get_variable("w", [kernel, kernel, output_channel, inputs.get_shape()[-1]],
                        initializer=tf.contrib.layers.variance_scaling_initializer())
            
            if spectral_normed:
                w = spectral_norm(w)

        if weight_normed:
            deconv = WeightNorm(tf.keras.layers.Conv2DTranspose(output_channel, kernel, use_bias=use_bias, strides=strides, padding='SAME'))(inputs)
        elif batch_normed:
            # plain Keras transposed conv here; batch normalization is applied further below
            deconv = tf.keras.layers.Conv2DTranspose(output_channel, kernel, use_bias=use_bias, strides=strides, padding='SAME')(inputs)
        else:
            deconv = tf.nn.conv2d_transpose(inputs, w, shape, strides=s, padding='SAME')
            
        if layer_normed:
            deconv = tf.contrib.layers.layer_norm(deconv)
        if batch_normed:
            deconv = tf.layers.batch_normalization(deconv)
        if instance_normed:
            deconv = instance_norm(deconv)
        if lr_normed:
            deconv = tf.nn.lrn(deconv, bias=0.00005)
            
    return deconv
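
# Hedged usage sketch for deconv2D; note that `shape` is the full static
# output shape required by tf.nn.conv2d_transpose (names are illustrative):
def _deconv2D_sketch():
    x = tf.placeholder(tf.float32, [8, 16, 16, 64])
    # With 'SAME' padding and stride 2, output spatial dims are input * stride.
    out_shape = [8, 32, 32, 32]
    return deconv2D(x, kernel=3, output_channel=32, strides=2, shape=out_shape,
                    name='deconv_sketch')
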
Example #3
def conv3_sn(inputs,
             kernel,
             output_channel,
             stride,
             use_bias=False,
             name='conv_1',
             spectral_normed=True,
             stddev=0.02,
             padding="SAME"):

    with tf.variable_scope(name) as scope:

        if scope_has_variables(scope):
            scope.reuse_variables()

        w = tf.get_variable(
            "w",
            [kernel, kernel, kernel,
             inputs.get_shape()[-1], output_channel],
            initializer=tf.contrib.layers.xavier_initializer())

        if spectral_normed:
            w = spectral_norm(w)

        conv = tf.nn.conv3d(inputs,
                            w,
                            strides=[1, stride, stride, stride, 1],
                            padding=padding)

        return conv
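
# Hedged usage sketch for conv3_sn on an NDHWC volume (illustrative names):
def _conv3_sn_sketch():
    vol = tf.placeholder(tf.float32, [None, 16, 16, 16, 1])
    # 3x3x3 spectrally normalized conv, stride 1, SAME padding.
    return conv3_sn(vol, kernel=3, output_channel=8, stride=1,
                    name='conv3_sketch')
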
Example #4
def deconv1D(inputs,
             kernel,
             output_channel,
             stride,
             shape,
             name='conv',
             spectral_normed=False,
             weight_normed=False,
             batch_normed=False,
             batch_renormed=False,
             lr_normed=False,
             layer_normed=False,
             use_bias=False,
             instance_normed=False,
             is_training=False):
    with tf.variable_scope(name) as scope:

        if scope_has_variables(scope):
            scope.reuse_variables()

        w = tf.get_variable("w",
                            [kernel, output_channel,
                             inputs.get_shape()[-1]],
                            initializer=tf.contrib.layers.xavier_initializer())

        if spectral_normed:
            w = spectral_norm(w)

        deconv = tf.contrib.nn.conv1d_transpose(inputs,
                                                w,
                                                shape,
                                                stride=stride,
                                                padding='SAME')

        if batch_renormed:
            clip = {
                'rmax': tf.constant(3, dtype=tf.float32),
                'rmin': tf.constant(1 / 3, dtype=tf.float32),
                'dmax': tf.constant(5, dtype=tf.float32)
            }
            deconv = tf.layers.batch_normalization(deconv,
                                                   training=is_training,
                                                   renorm=True,
                                                   renorm_clipping=clip)
        if batch_normed:
            deconv = tf.layers.batch_normalization(deconv,
                                                   training=is_training)
        if layer_normed:
            deconv = tf.contrib.layers.layer_norm(deconv)
        if instance_normed:
            deconv = instance_norm(deconv)
        if lr_normed:
            deconv = tf.nn.lrn(deconv, bias=0.00005)

    return deconv
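
# Hedged usage sketch for deconv1D; `shape` is the static output shape for
# tf.contrib.nn.conv1d_transpose (illustrative names):
def _deconv1D_sketch():
    x = tf.placeholder(tf.float32, [4, 128, 64])  # [batch, width, channels]
    # With 'SAME' padding and stride 2, output width is input width * 2.
    return deconv1D(x, kernel=5, output_channel=32, stride=2,
                    shape=[4, 256, 32], name='deconv1d_sketch')
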
Example #5
def conv1D(inputs,
           kernel,
           output_channel,
           stride,
           use_bias=False,
           name='conv_1',
           stddev=0.02,
           padding="SAME",
           spectral_normed=True,
           batch_normed=False,
           depthwise=False,
           batch_renormed=False,
           layer_normed=False,
           instance_normed=False,
           lr_normed=False,
           is_training=False):

    with tf.variable_scope(name) as scope:

        if scope_has_variables(scope):
            scope.reuse_variables()

        w = tf.get_variable(
            "w", [kernel, inputs.get_shape()[-1], output_channel],
            initializer=tf.contrib.layers.xavier_initializer())

        if spectral_normed:
            w = spectral_norm(w)

        conv = tf.nn.conv1d(inputs, w, stride=stride, padding=padding)

        if batch_renormed:
            clip = {
                'rmax': tf.constant(3, dtype=tf.float32),
                'rmin': tf.constant(1 / 3, dtype=tf.float32),
                'dmax': tf.constant(5, dtype=tf.float32)
            }
            conv = tf.layers.batch_normalization(conv,
                                                 training=is_training,
                                                 renorm=True,
                                                 renorm_clipping=clip)
        if batch_normed:
            conv = tf.layers.batch_normalization(conv, training=is_training)
        if layer_normed:
            conv = tf.contrib.layers.layer_norm(conv)
        if instance_normed:
            conv = instance_norm(conv)
        if lr_normed:
            conv = tf.nn.lrn(conv, bias=0.00005)

        return conv
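
# Hedged usage sketch for conv1D with batch renormalization enabled
# (illustrative names; is_training would come from the training loop):
def _conv1D_sketch(is_training):
    x = tf.placeholder(tf.float32, [None, 256, 16])
    return conv1D(x, kernel=5, output_channel=32, stride=2,
                  name='conv1d_sketch', spectral_normed=False,
                  batch_renormed=True, is_training=is_training)
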
def linear(input_, output_size, use_bias=False, bias_start=0.0, spectral_normed=False, batch_normed=False,
           weight_normed=False, lr_normed=False, layer_normed=False, instance_normed=False, name="linear"):
    
    shape = input_.get_shape().as_list()
   
    with tf.variable_scope(name) as scope:
        if scope_has_variables(scope):
            scope.reuse_variables()
            
        if not weight_normed: 
            weight = tf.get_variable("w", [shape[1], output_size], tf.float32, tf.contrib.layers.variance_scaling_initializer()) 
            
            if spectral_normed:
                weight = spectral_norm(weight)
        

        if weight_normed:
            mul = WeightNorm(tf.keras.layers.Dense(output_size, use_bias=use_bias))(input_)
        elif batch_normed:
            # plain Dense layer here; batch normalization is applied further below
            mul = tf.keras.layers.Dense(output_size, use_bias=use_bias)(input_)
        else:
            mul = tf.matmul(input_, weight)
        
        if use_bias and not (weight_normed or batch_normed):
            bias = tf.get_variable(name="bias", shape=[output_size],
                                   initializer=tf.constant_initializer(bias_start))
            mul += bias
            
        if batch_normed:
            mul = tf.layers.batch_normalization(mul)

        return mul
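
# Hedged usage sketch for linear; the input must already be flattened to
# [batch, features] (illustrative names):
def _linear_sketch():
    feats = tf.placeholder(tf.float32, [None, 512])
    logits = linear(feats, output_size=10, use_bias=True,
                    spectral_normed=True, name='fc_sketch')
    return logits  # shape: [None, 10]
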
Example #7
def deconv3d_layer(inputs,
                   kernel,
                   output_channel,
                   strides,
                   shape,
                   name='conv'):
    s = [1, strides, strides, strides, 1]
    with tf.variable_scope(name) as scope:

        if scope_has_variables(scope):
            scope.reuse_variables()

        w = tf.get_variable(
            "w",
            [kernel, kernel, kernel, output_channel,
             inputs.get_shape()[-1]],
            initializer=tf.contrib.layers.xavier_initializer())

        deconv = tf.nn.conv3d_transpose(inputs,
                                        w,
                                        shape,
                                        strides=s,
                                        padding='SAME')
    return deconv
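
# Hedged usage sketch for deconv3d_layer (illustrative names):
def _deconv3d_sketch():
    vol = tf.placeholder(tf.float32, [2, 8, 8, 8, 16])
    # 'SAME' padding, stride 2: each spatial dimension doubles.
    return deconv3d_layer(vol, kernel=3, output_channel=8, strides=2,
                          shape=[2, 16, 16, 16, 8], name='deconv3d_sketch')
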
Example #8
def conv2D(inputs,
           kernel,
           output_channel,
           stride,
           use_bias=False,
           name='conv_1',
           spectral_normed=True,
           weight_normed=False,
           batch_normed=False,
           depthwise=False,
           batch_renormed=False,
           layer_normed=False,
           instance_normed=False,
           lr_normed=False,
           is_training=False,
           stddev=0.02,
           padding="SAME"):

    with tf.variable_scope(name) as scope:

        if scope_has_variables(scope):
            scope.reuse_variables()

        if not weight_normed:
            w = tf.get_variable(
                "w", [kernel, kernel,
                      inputs.get_shape()[-1], output_channel],
                initializer=tf.contrib.layers.variance_scaling_initializer())

            if spectral_normed:
                w = spectral_norm(w)

        if depthwise:
            conv = tf.keras.layers.SeparableConv2D(output_channel,
                                                   kernel,
                                                   use_bias=use_bias,
                                                   strides=stride,
                                                   padding=padding)(inputs)
        elif weight_normed:
            conv = WeightNorm(
                tf.keras.layers.Conv2D(output_channel,
                                       kernel,
                                       use_bias=use_bias,
                                       strides=stride,
                                       padding=padding))(inputs)
        else:
            conv = tf.nn.conv2d(inputs,
                                w,
                                strides=[1, stride, stride, 1],
                                padding=padding)

        if batch_renormed:
            clip = {
                'rmax': tf.constant(3, dtype=tf.float32),
                'rmin': tf.constant(1 / 3, dtype=tf.float32),
                'dmax': tf.constant(5, dtype=tf.float32)
            }
            conv = tf.layers.batch_normalization(conv,
                                                 training=is_training,
                                                 renorm=True,
                                                 renorm_clipping=clip)

        if batch_normed:
            conv = tf.layers.batch_normalization(conv, training=is_training)

        if layer_normed:
            conv = tf.contrib.layers.layer_norm(conv)
        if instance_normed:
            conv = instance_norm(conv)
        if lr_normed:
            conv = tf.nn.lrn(conv, bias=0.00005)

        return conv
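
# Hedged usage sketch for the depthwise-separable path of this conv2D
# variant (illustrative names):
def _sep_conv2D_sketch():
    x = tf.placeholder(tf.float32, [None, 32, 32, 16])
    return conv2D(x, kernel=3, output_channel=32, stride=1,
                  name='sepconv_sketch', spectral_normed=False,
                  depthwise=True)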