Code Example #1
import tensorflow as tf
from tensorflow.python.ops import init_ops


def embedding(vocab_size, embedding_size, name="", pretrained=None, init="normal"):
    """Create an embedding matrix, either from a pretrained array or a fresh initializer."""
    embeddings = None
    with tf.name_scope("embeddings"):
        if pretrained is not None:
            # Load the pretrained weights; the variable stays trainable so it can be fine-tuned.
            embeddings = tf.get_variable(name="Ww-%s" % name, shape=pretrained.shape,
                                         initializer=tf.constant_initializer(pretrained),
                                         trainable=True)
        elif init == "normal":
            # Variance-scaling initialization based on the fan-out of the weight tensor.
            embeddings = tf.get_variable(name="Ww-%s" % name,
                                         trainable=True,
                                         dtype=tf.float32,
                                         initializer=init_ops.VarianceScaling(mode='fan_out', seed=42),
                                         shape=[vocab_size, embedding_size])
        elif init == "uniform":
            # Uniform initialization with bounds scaled by the vocabulary size.
            embeddings = tf.get_variable(name="Ww-%s" % name,
                                         trainable=True,
                                         dtype=tf.float32,
                                         initializer=init_ops.random_uniform_initializer(
                                             minval=-1.0 / vocab_size, maxval=1.0 / vocab_size, seed=42),
                                         shape=[vocab_size, embedding_size])
        elif init == "xavier":
            # Glorot/Xavier uniform initialization.
            embeddings = tf.get_variable(name="Ww-%s" % name,
                                         trainable=True,
                                         dtype=tf.float32,
                                         initializer=init_ops.glorot_uniform_initializer(seed=42),
                                         shape=[vocab_size, embedding_size])
        else:
            raise ValueError("embedding '%s' must be either pretrained or initialized "
                             "from {normal, uniform, xavier}" % name)

    return embeddings
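
For context, a minimal usage sketch of the function above, assuming a TF1 graph/session workflow; the placeholder name and sizes are illustrative, not from the original project:

import numpy as np
import tensorflow as tf

vocab_size, embedding_size = 10000, 128  # illustrative sizes
token_ids = tf.placeholder(tf.int32, shape=[None, None], name="token_ids")
emb_matrix = embedding(vocab_size, embedding_size, name="words", init="xavier")
# Each token id is mapped to the corresponding row of the embedding matrix.
token_vectors = tf.nn.embedding_lookup(emb_matrix, token_ids)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    vecs = sess.run(token_vectors, {token_ids: np.array([[1, 2, 3]])})
    print(vecs.shape)  # (1, 3, 128)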
Code Example #2
File: signal_conv.py  Project: ywu40/M-LVC_CVPR2020
def __init__(self,
             rank,
             filters,
             kernel_support,
             corr=False,
             strides_down=1,
             strides_up=1,
             padding="valid",
             extra_pad_end=True,
             channel_separable=False,
             data_format="channels_last",
             activation=None,
             use_bias=False,
             kernel_initializer=init_ops.VarianceScaling(),
             bias_initializer=init_ops.Zeros(),
             kernel_regularizer=None,
             bias_regularizer=None,
             kernel_parameterizer=parameterizers.RDFTParameterizer(),
             bias_parameterizer=None,
             **kwargs):
    super(_SignalConv, self).__init__(**kwargs)
    self._rank = int(rank)
    self._filters = int(filters)
    # Normalize scalar or tuple arguments to rank-length tuples.
    self._kernel_support = utils.normalize_tuple(kernel_support,
                                                 self._rank,
                                                 "kernel_support")
    self._corr = bool(corr)
    self._strides_down = utils.normalize_tuple(strides_down, self._rank,
                                               "strides_down")
    self._strides_up = utils.normalize_tuple(strides_up, self._rank,
                                             "strides_up")
    self._padding = str(padding).lower()
    try:
        # Map the padding mode onto the tf.pad mode used internally.
        self._pad_mode = {
            "valid": None,
            "same_zeros": "CONSTANT",
            "same_reflect": "REFLECT",
        }[self._padding]
    except KeyError:
        raise ValueError("Unsupported padding mode: '{}'".format(padding))
    self._extra_pad_end = bool(extra_pad_end)
    self._channel_separable = bool(channel_separable)
    self._data_format = utils.normalize_data_format(data_format)
    self._activation = activation
    self._use_bias = bool(use_bias)
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._kernel_parameterizer = kernel_parameterizer
    self._bias_parameterizer = bias_parameterizer
    # The layer expects inputs of shape (batch, spatial..., channels).
    self.input_spec = base.InputSpec(ndim=self._rank + 2)
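
For context, a minimal sketch of how this constructor is typically reached, assuming the module follows the tensorflow_compression API, where a public SignalConv2D wrapper binds rank=2 and forwards the remaining arguments to _SignalConv; the sizes below are illustrative:

import tensorflow as tf
import tensorflow_compression as tfc  # assumed source of the public wrapper

# Downsample by 2 with zero-padded "same" output; corr=True selects
# correlation rather than convolution.
layer = tfc.SignalConv2D(64, (5, 5), corr=True, strides_down=2,
                         padding="same_zeros", use_bias=True)
x = tf.random.uniform([1, 32, 32, 3])
y = layer(x)  # shape (1, 16, 16, 64)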
Code Example #3
File: general.py  Project: fbcotter/tf_ops
import tensorflow as tf
from tensorflow.python.layers import convolutional, normalization
from tensorflow.python.ops import init_ops


def _residual_core(x,
                   filters,
                   kernel_size=3,
                   stride=1,
                   train=True,
                   wd=0.0,
                   bn_momentum=0.99,
                   bn_epsilon=0.001):
    """ Core function of a residual unit.

    In -> conv -> bn -> relu -> conv

    Note that a standard residual layer applies batch norm and relu before
    the first conv; that happens in the residual function which calls this
    one.

    Parameters
    ----------
    x : tf tensor
        Input to be modified
    filters : int
        Number of output filters (used for all convolutions in the
        resnet core).
    kernel_size : int
        Size of the filter kernels
    stride : int
        Conv stride
    train : bool or tf boolean tensor
        Whether we are in the train phase. Can be set to a tensorflow tensor
        so that it can be modified on the fly.
    wd : float
        Weight decay term for the convolutional weights
    bn_momentum : float
        The momentum for the batch normalization layers in the resnet
    bn_epsilon : float
        The epsilon for the batch normalization layers in the resnet
    """
    # `real_reg` and `fixed_padding` are helpers defined elsewhere in general.py.
    init = init_ops.VarianceScaling(scale=1.0, mode='fan_out')
    reg = lambda w: real_reg(w, wd, norm=2)
    bn_class = lambda name: normalization.BatchNormalization(
        name=name, momentum=bn_momentum, epsilon=bn_epsilon)
    conv_class = lambda name, stride: convolutional.Conv2D(
        filters,
        kernel_size, (stride, stride),
        use_bias=False,
        padding=('SAME' if stride == 1 else 'VALID'),
        kernel_initializer=init,
        kernel_regularizer=reg,
        name=name)

    with tf.variable_scope('sub1'):
        # Downsampling is done with strides, so pad explicitly first to keep
        # the output size in the correct format.
        if stride > 1:
            x = fixed_padding(x, kernel_size, 'channels_last')

        conv = conv_class('conv1', stride)
        x = conv.apply(x)

    with tf.variable_scope('sub2'):
        bn = bn_class('between_bn')
        x = bn.apply(x, training=train)
        x = tf.nn.relu(x)
        conv = conv_class('conv2', 1)
        x = conv.apply(x)

    return x
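
For context, a minimal sketch of the calling pattern the docstring describes; `residual_unit` is a hypothetical name for the outer residual function, not necessarily the project's actual wrapper:

import tensorflow as tf

def residual_unit(x, filters, train):
    # Pre-activation residual unit: bn -> relu run before the core's first
    # conv, then the skip connection is added back.
    with tf.variable_scope('residual'):
        shortcut = x  # assumes stride 1 and a matching channel count
        x = tf.layers.batch_normalization(x, training=train)
        x = tf.nn.relu(x)
        x = _residual_core(x, filters, train=train)
        return x + shortcut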