Example No. 1
def _test_grad(self, dtype):
    # Random values in [-0.5, 0.5) so both the positive and the negative
    # branch of the leaky ReLU are exercised.
    A = np.random.rand(9).astype(dtype) - 0.5
    shape = A.shape
    data = tf.constant(A)
    output = ops.leaky_relu(input=data, leak=0.2)
    # Maximum difference between the analytic and the numerical gradient.
    err = tf.test.compute_gradient_error(data,
                                         shape,
                                         output,
                                         output.get_shape().as_list(),
                                         x_init_value=A)
    print('error', err, flush=True)
    self.assertLess(err, 1e-3)
    # Also inspect the (theoretical, numerical) Jacobian pair directly.
    grad = tf.test.compute_gradient(data,
                                    shape,
                                    output,
                                    output.get_shape().as_list(),
                                    x_init_value=A,
                                    delta=0.1)
    for g in grad:
        print(g)
        print(g.shape)
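
For reference, the same numeric-vs-analytic check can be run against TensorFlow's built-in leaky ReLU. A minimal self-contained sketch, assuming TensorFlow 1.x graph mode; it swaps the custom ops.leaky_relu for the stock tf.nn.leaky_relu purely for illustration:

import numpy as np
import tensorflow as tf

with tf.Graph().as_default(), tf.Session():
    A = np.random.rand(9).astype(np.float64) - 0.5
    data = tf.constant(A)
    output = tf.nn.leaky_relu(data, alpha=0.2)
    # compute_gradient_error uses the default session set up above.
    err = tf.test.compute_gradient_error(data, A.shape,
                                         output,
                                         output.get_shape().as_list(),
                                         x_init_value=A)
    assert err < 1e-3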
Example No. 2
def myLeakyRelu(x):
    """Leaky ReLU with leak factor 0.1"""
    # return tf.maximum(0.1*x,x)
    return sops.leaky_relu(x, leak=0.1)
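
The commented-out line shows the identity this wrapper relies on. A minimal plain-TensorFlow equivalent, assuming 0 < leak < 1:

import tensorflow as tf

def leaky_relu_plain(x, leak=0.1):
    # For leak in (0, 1): max(leak*x, x) equals x when x >= 0
    # and leak*x when x < 0.
    return tf.maximum(leak * x, x)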
Example No. 3
def _conv_bn_relu(input, **kwargs):

    k_initializer = tf.contrib.layers.variance_scaling_initializer(
        factor=2, mode='FAN_IN', uniform=False)
    k_regularizer = tf.contrib.layers.l2_regularizer(
        scale=nd.scope.weight_decay())
    b_initializer = tf.zeros_initializer()

    # for shared params
    dropout = kwargs.pop('dropout', False)
    if dropout:
        raise NotImplementedError

    kernel_size = kwargs.pop('kernel_size', False)
    num_output = kwargs.pop('num_output', False)
    stride = kwargs.pop('stride', 1)
    pad = kwargs.pop('pad', 0)
    name = kwargs.pop('name', 'conv_no_name')

    if not kernel_size:
        raise KeyError('Missing kernel_size')
    if not num_output:
        raise KeyError('Missing num_output')

    # layer
    # note: input might be a tuple, in which case weights are shared
    if not isinstance(input, tuple):
        conv_out = tf.layers.conv2d(pad_input(input, pad),
                                    num_output,
                                    kernel_size,
                                    strides=stride,
                                    data_format='channels_first',
                                    trainable=nd.scope.learn(),
                                    kernel_regularizer=k_regularizer,
                                    kernel_initializer=k_initializer,
                                    bias_initializer=b_initializer,
                                    name=name)
        bn_out = tf.layers.batch_normalization(
            conv_out,
            axis=1,
            gamma_initializer=tf.constant_initializer(1.0),
            beta_initializer=tf.constant_initializer(0.0),
            scale=True,
            center=True,
            training=bool(nd.phase == 'train'),
            trainable=nd.scope.learn(),
            beta_regularizer=k_regularizer,
            gamma_regularizer=k_regularizer,
            name=name + '_bn')
        return leaky_relu(bn_out)
    else:
        outputs = []
        for i in input:
            conv_out = tf.layers.conv2d(
                pad_input(i, pad),
                num_output,
                kernel_size,
                strides=stride,
                data_format='channels_first',
                trainable=nd.scope.learn(),
                reuse=tf.AUTO_REUSE,
                kernel_regularizer=k_regularizer,
                kernel_initializer=k_initializer,
                bias_initializer=b_initializer,
                name=name,
            )
            bn_out = tf.layers.batch_normalization(
                conv_out,
                axis=1,
                gamma_initializer=tf.constant_initializer(1.0),
                beta_initializer=tf.constant_initializer(0.0),
                scale=True,
                center=True,
                training=bool(nd.phase == 'train'),
                trainable=nd.scope.learn(),
                beta_regularizer=k_regularizer,
                gamma_regularizer=k_regularizer,
                name=name + '_bn',
                reuse=tf.AUTO_REUSE,
            )
            outputs.append(leaky_relu(bn_out))

        return outputs
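
pad_input is used above but not defined in these examples. A plausible minimal sketch of such a helper, assuming it emulates Caffe's pad parameter with symmetric zero-padding on the spatial dimensions of an NCHW ('channels_first') tensor:

import tensorflow as tf

def pad_input(x, pad):
    # Hypothetical helper: zero-pad height and width by `pad` on each side.
    if pad == 0:
        return x
    return tf.pad(x, [[0, 0], [0, 0], [pad, pad], [pad, pad]])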
Example No. 4
def _upconv_bn_relu(input, **kwargs):

    k_initializer = tf.contrib.layers.variance_scaling_initializer(
        factor=2, mode='FAN_IN', uniform=False)
    b_initializer = tf.zeros_initializer()
    # As in the Caffe version, the deconv kernels themselves are not
    # regularized; this regularizer is only applied to the batch-norm
    # beta/gamma parameters below.
    k_regularizer = tf.contrib.layers.l2_regularizer(
        scale=nd.scope.weight_decay())

    kernel_size = kwargs.pop('kernel_size', False)
    num_output = kwargs.pop('num_output', False)
    stride = kwargs.pop('stride', 1)
    kwargs.pop('pad', 0)  # consumed but ignored: the transpose conv always pads 'same'
    pad = 'same'
    name = kwargs.pop('name', 'upconv_no_name')  # a None name would break name + '_bn' below

    if not kernel_size:
        raise KeyError('Missing kernel_size')
    if not num_output:
        raise KeyError('Missing num_output')

    # layer
    # note: input might be a tuple, in which case weights are shared
    if not isinstance(input, tuple):
        deconv_out = tf.layers.conv2d_transpose(
            inputs=input,
            filters=num_output,
            kernel_size=kernel_size,
            strides=stride,
            padding=pad,
            data_format='channels_first',
            trainable=nd.scope.learn(),
            kernel_initializer=k_initializer,
            #kernel_regularizer = k_regularizer,
            bias_initializer=b_initializer,
            use_bias=True,
            name=name,
        )
        bn_out = tf.layers.batch_normalization(
            deconv_out,
            axis=1,
            gamma_initializer=tf.constant_initializer(1.0),
            beta_initializer=tf.constant_initializer(0.0),
            scale=True,
            center=True,
            training=bool(nd.phase == 'train'),
            trainable=nd.scope.learn(),
            beta_regularizer=k_regularizer,
            gamma_regularizer=k_regularizer,
            name=name + "_bn")

        return leaky_relu(bn_out)
    else:
        outputs = []
        for i in input:
            deconv_out = tf.layers.conv2d_transpose(
                inputs=i,
                filters=num_output,
                kernel_size=kernel_size,
                strides=stride,
                padding=pad,
                data_format='channels_first',
                trainable=nd.scope.learn(),
                reuse=tf.AUTO_REUSE,
                kernel_initializer=k_initializer,
                #kernel_regularizer = k_regularizer,
                bias_initializer=b_initializer,
                use_bias=True,
                name=name,
            )

            bn_out = tf.layers.batch_normalization(
                deconv_out,
                axis=1,
                gamma_initializer=tf.constant_initializer(1.0),
                beta_initializer=tf.constant_initializer(0.0),
                scale=True,
                center=True,
                training=bool(nd.phase == 'train'),
                trainable=nd.scope.learn(),
                beta_regularizer=k_regularizer,
                gamma_regularizer=k_regularizer,
                name=name + "_bn",
                reuse=tf.AUTO_REUSE,
            )

            outputs.append(leaky_relu(bn_out))

        return outputs
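
The tuple branch in both helpers relies on tf.AUTO_REUSE so that every element of the input tuple is processed by the same variables. A minimal standalone demonstration of that sharing mechanism, assuming TensorFlow 1.x:

import tensorflow as tf

left = tf.placeholder(tf.float32, [None, 3, 32, 32])
right = tf.placeholder(tf.float32, [None, 3, 32, 32])
# Same `name` plus reuse=tf.AUTO_REUSE: the second call reuses the
# variables created by the first call instead of creating new ones.
out_l = tf.layers.conv2d(left, 8, 3, data_format='channels_first',
                         name='shared_conv', reuse=tf.AUTO_REUSE)
out_r = tf.layers.conv2d(right, 8, 3, data_format='channels_first',
                         name='shared_conv', reuse=tf.AUTO_REUSE)
assert len(tf.trainable_variables()) == 2  # one shared kernel + one shared bias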