Code Example #1
def he_normal(seed=None, scale=2.):
    """He normal initializer.
    It draws samples from a truncated normal distribution centered on 0
    with standard deviation (after truncation) given by
    `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of
    input units in the weight tensor.
      
    Parameters
    ----------
    seed: A Python integer. Used to seed the random generator.
    scale: A positive float. Variance scaling factor; the default of 2.
        gives the He initialization intended for ReLU activations.

    Returns
    -------
    An initializer.
    
    References
    ----------
        [He et al., 2015]
        (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)
        ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
    """
    return VarianceScaling(scale=scale,
                           mode="fan_in",
                           distribution="truncated_normal",
                           seed=seed)
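
As a quick numerical check, here is a minimal sketch, assuming `VarianceScaling` is `tf.keras.initializers.VarianceScaling` under TF 2.x eager mode; the empirical standard deviation of the samples should come out close to `sqrt(2 / fan_in)`:

import tensorflow as tf
from tensorflow.keras.initializers import VarianceScaling

init = VarianceScaling(scale=2., mode="fan_in",
                       distribution="truncated_normal", seed=0)
w = init(shape=(256, 128))           # fan_in = 256 for a 256 -> 128 dense layer
print(float(tf.math.reduce_std(w)))  # ~ sqrt(2 / 256) ~ 0.088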
Code Example #2
def he_uniform(seed=None, mode='fan_in', dtype=tf.float32):
    '''He uniform variance scaling initializer (scale 2.0).

    mode: a string. 'fan_in', 'fan_out' or 'fan_avg'.
    '''
    return VarianceScaling(scale=2.0,
                           mode=mode,
                           distribution='uniform',
                           seed=seed,
                           dtype=dtype)
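
A companion sanity check (a sketch under the same tf.keras assumption): uniform variance scaling draws from [-limit, limit] with limit = sqrt(3 * scale / fan), i.e. sqrt(6 / fan_in) for scale 2.0:

import numpy as np
from tensorflow.keras.initializers import VarianceScaling

init = VarianceScaling(scale=2.0, mode='fan_in', distribution='uniform', seed=0)
w = init(shape=(100, 50)).numpy()   # fan_in = 100
limit = np.sqrt(3.0 * 2.0 / 100)    # ~ 0.245
assert np.abs(w).max() <= limit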
Code Example #3
def he_normal_fanout(seed=None):
    """He normal initializer.
  
    It draws samples from a truncated normal distribution centered on 0
    with `stddev = sqrt(2 / fan_out)`
    where `fan_in` is the number of input units in the weight tensor.
    To keep aligned with official implementation
    """
    return VarianceScaling(
        scale=2., mode="fan_out", distribution="truncated_normal", seed=seed)
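
For convolution kernels the two fans differ: a kernel of shape (kh, kw, c_in, c_out) has fan_in = kh * kw * c_in and fan_out = kh * kw * c_out. A sketch under the same tf.keras assumption:

import tensorflow as tf
from tensorflow.keras.initializers import VarianceScaling

# 3x3 kernel, 64 input channels, 128 output channels:
#   fan_in  = 3 * 3 * 64  = 576
#   fan_out = 3 * 3 * 128 = 1152
init = VarianceScaling(scale=2., mode="fan_out",
                       distribution="truncated_normal", seed=0)
w = init(shape=(3, 3, 64, 128))
print(float(tf.math.reduce_std(w)))  # ~ sqrt(2 / 1152) ~ 0.042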
Code Example #4
    def create(self):
        """Build the Q-network graph: three conv layers followed by two
        fully connected layers, mapping 84x84x4 frame stacks to Q-values."""
        # Inputs: stacked frames, TD targets, and (batch_index, action) pairs.
        X_img = tf.placeholder(tf.float32, shape=(None, 84, 84, 4))
        Y = tf.placeholder(tf.float32, shape=(None, ))
        a = tf.placeholder(tf.int32, shape=(None, 2))

        # Variance-scaling initialization for weights, zeros for biases.
        initializer = VarianceScaling(1.0)
        initializer_bias = tf.zeros_initializer()

        W_conv1 = tf.Variable(initializer([8, 8, 4, 32]))
        b_conv1 = tf.Variable(initializer_bias(shape=(32, )))
        A_conv1 = tf.nn.relu(
            tf.nn.conv2d(X_img, W_conv1, strides=[1, 4, 4, 1], padding='SAME')
            + b_conv1)

        W_conv2 = tf.Variable(initializer([4, 4, 32, 64]))
        b_conv2 = tf.Variable(initializer_bias(shape=(64, )))
        A_conv2 = tf.nn.relu(
            tf.nn.conv2d(
                A_conv1, W_conv2, strides=[1, 2, 2, 1], padding='SAME') +
            b_conv2)

        W_conv3 = tf.Variable(initializer([3, 3, 64, 64]))
        b_conv3 = tf.Variable(initializer_bias(shape=(64, )))
        A_conv3 = tf.nn.relu(
            tf.nn.conv2d(
                A_conv2, W_conv3, strides=[1, 1, 1, 1], padding='SAME') +
            b_conv3)

        # With SAME padding the 84x84 input shrinks 84 -> 21 -> 11 -> 11
        # under the strides above, hence 11 * 11 * 64 flattened features.
        flatten = tf.reshape(A_conv3, [-1, 11 * 11 * 64])

        n_neurons_1 = 512
        n_neurons_2 = self.n_actions

        W_fc1 = tf.Variable(initializer([11 * 11 * 64, n_neurons_1]))
        b_fc1 = tf.Variable(initializer_bias(n_neurons_1))
        Z_fc1 = tf.nn.relu(tf.matmul(flatten, W_fc1) + b_fc1)

        W_fc2 = tf.Variable(initializer([n_neurons_1, n_neurons_2]))
        b_fc2 = tf.Variable(initializer_bias(n_neurons_2))
        Qs = tf.matmul(Z_fc1, W_fc2) + b_fc2

        # Greedy action and maximum Q-value over the action set.
        action = tf.argmax(Qs, axis=1)
        max_q = tf.reduce_max(Qs, axis=1)

        # Mean squared TD error on the Q-values of the taken actions.
        loss = tf.reduce_mean(
            tf.square(tf.stop_gradient(Y) - tf.gather_nd(Qs, a)))
        optimizer = tf.train.RMSPropOptimizer(self.lr, self.decay)
        train = optimizer.minimize(loss)

        saver = tf.train.Saver()

        return (X_img, Y, train, loss, saver, W_conv2, action, Qs, a)
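
A hedged usage sketch for the graph above (TF1-style session API, matching the snippet): the `a` placeholder expects (batch_index, action_index) pairs so that tf.gather_nd(Qs, a) selects the Q-value of the action actually taken in each transition. The `agent` object below is hypothetical and stands in for an instance of the class defining create():

import numpy as np
import tensorflow as tf

# Hypothetical instance of the class above, providing n_actions, lr, decay.
X_img, Y, train, loss, saver, W_conv2, action, Qs, a = agent.create()

batch_size = 32
states = np.zeros((batch_size, 84, 84, 4), dtype=np.float32)   # dummy frames
actions = np.random.randint(0, agent.n_actions, size=batch_size)
targets = np.zeros(batch_size, dtype=np.float32)               # dummy TD targets

# (row, action) index pairs for tf.gather_nd(Qs, a).
indices = np.stack([np.arange(batch_size), actions], axis=1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, batch_loss = sess.run([train, loss],
                             feed_dict={X_img: states, Y: targets, a: indices})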
Code Example #5
def he_uniform(seed=None):
  """He uniform variance scaling initializer.

  It draws samples from a uniform distribution within [-limit, limit]
  where `limit` is `sqrt(6 / fan_in)`
  where `fan_in` is the number of input units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      He et al., http://arxiv.org/abs/1502.01852
  """
  return VarianceScaling(
      scale=2., mode='fan_in', distribution='uniform', seed=seed)
Code Example #6
def he_normal(seed=None):
  """He normal initializer.

  It draws samples from a truncated normal distribution centered on 0
  with `stddev = sqrt(2 / fan_in)`
  where `fan_in` is the number of input units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      He et al., http://arxiv.org/abs/1502.01852
  """
  # Note: in the TF version this snippet targets, distribution='normal'
  # draws from a truncated normal, as the docstring says; newer releases
  # call this option 'truncated_normal'.
  return VarianceScaling(
      scale=2., mode='fan_in', distribution='normal', seed=seed)
Code Example #7
def lecun_uniform(seed=None):
  """LeCun uniform initializer.

  It draws samples from a uniform distribution within [-limit, limit]
  where `limit` is `sqrt(3 / fan_in)`
  where `fan_in` is the number of input units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      LeCun 98, Efficient Backprop,
      http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
  """
  return VarianceScaling(
      scale=1., mode='fan_in', distribution='uniform', seed=seed)
Code Example #8
def glorot_normal(seed=None):
  """Glorot normal initializer, also called Xavier normal initializer.

  It draws samples from a truncated normal distribution centered on 0
  with `stddev = sqrt(2 / (fan_in + fan_out))`
  where `fan_in` is the number of input units in the weight tensor
  and `fan_out` is the number of output units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      Glorot & Bengio, AISTATS 2010
      http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
  """
  return VarianceScaling(
      scale=1., mode='fan_avg', distribution='normal', seed=seed)
Code Example #9
def glorot_uniform(seed=None):
  """Glorot uniform initializer, also called Xavier uniform initializer.

  It draws samples from a uniform distribution within [-limit, limit]
  where `limit` is `sqrt(6 / (fan_in + fan_out))`
  where `fan_in` is the number of input units in the weight tensor
  and `fan_out` is the number of output units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      Glorot & Bengio, AISTATS 2010
      http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
  """
  return VarianceScaling(
      scale=1., mode='fan_avg', distribution='uniform', seed=seed)
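
One practical note: glorot_uniform is the default kernel initializer for Keras layers, so the two Dense layers below are initialized the same way (up to the random seed). A minimal TF 2.x sketch:

import tensorflow as tf

d1 = tf.keras.layers.Dense(32)
d2 = tf.keras.layers.Dense(32, kernel_initializer="glorot_uniform")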
Code Example #10
def lecun_normal(seed=None):
  """LeCun normal initializer.

  It draws samples from a truncated normal distribution centered on 0
  with `stddev = sqrt(1 / fan_in)`
  where `fan_in` is the number of input units in the weight tensor.

  Arguments:
      seed: A Python integer. Used to seed the random generator.

  Returns:
      An initializer.

  References:
      - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
      - [Efficient
      Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """
  return VarianceScaling(
      scale=1., mode='fan_in', distribution='normal', seed=seed)
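
Since the Self-Normalizing Neural Networks paper is cited here, a one-line Keras sketch (assuming TF 2.x) of the pairing it motivates; SELU's self-normalizing property is derived under lecun_normal-initialized weights:

import tensorflow as tf

layer = tf.keras.layers.Dense(64, activation="selu",
                              kernel_initializer="lecun_normal")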
Code Example #11
File: tf_utils.py  Project: superhg2012/tf_text_cnn
def initializer(mode="FAN_IN",
                distribution="normal",
                is_relu=False,
                seed=None):
    """ create xavier initializer, include he_initializer for relu
    args:
        mode: "FAN_IN"(default) or "FAN_OUT" or "FAN_AVG"
        distribution: "normal"(default) or "uniform"
        is_relu: True, norm variance by 2
                    in case we output to a ReLU unit
    Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
               https://arxiv.org/abs/1502.01852 by Kaiming He
    return:
        initializer op

    """
    mode = mode.lower()
    scale = 2. if is_relu else 1.
    return VarianceScaling(scale=scale,
                           mode=mode,
                           distribution=distribution,
                           seed=seed)
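
A short usage sketch: with its defaults the helper gives Glorot-style scaling over fan_in, and is_relu=True doubles the variance to match He initialization; Glorot's original recipe averages the two fans:

# Glorot (Xavier) scaling as in the paper: scale 1 over the averaged fans.
glorot_like = initializer(mode="FAN_AVG", distribution="uniform")

# He initialization for a ReLU layer: scale 2 over fan_in.
he_like = initializer(is_relu=True)   # mode defaults to "FAN_IN"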
Code Example #12
def selu_normal(seed=None):
    """LeCun normal initialization, the recommended pairing for SELU
    activations (Self-Normalizing Neural Networks)."""
    return VarianceScaling(scale=1.,
                           mode='fan_in',
                           distribution='normal',
                           seed=seed)
Code Example #13
    cnn3 = tf.nn.relu(conv_layer3)

    # Flatten the conv features, then apply two fully connected layers.
    flat = tf.contrib.layers.flatten(cnn3)

    fc1 = tf.matmul(flat, weights["fc1"]) + bias["fc1"]
    fc1 = tf.nn.relu(fc1)

    Z = tf.matmul(fc1, weights["fc2"]) + bias["fc2"]
    return Z  # n x 4, one Q-value per action


# Online-network weights, all variance-scaling initialized.
weights_online = {
    "cnn1":
    tf.get_variable("o_cnn1",
                    shape=[8, 8, 4, 32],
                    initializer=VarianceScaling(),
                    trainable=True,
                    dtype=tf.float32),
    "cnn2":
    tf.get_variable("o_cnn2",
                    shape=[4, 4, 32, 64],
                    initializer=VarianceScaling(),
                    trainable=True,
                    dtype=tf.float32),
    "cnn3":
    tf.get_variable("o_cnn3",
                    shape=[3, 3, 64, 64],
                    initializer=VarianceScaling(),
                    trainable=True,
                    dtype=tf.float32),
    "fc1":
Code Example #14
def lecun_uniform(seed=None):
    return VarianceScaling(scale=1.,
                           mode='fan_in',
                           distribution='uniform',
                           seed=seed)
Code Example #15
def lecun_normal(seed=None):
    return VarianceScaling(scale=1.,
                           mode='fan_in',
                           distribution='truncated_normal',
                           seed=seed)
def glorot_normal(seed=None, dtype=tf.float32):
    return VarianceScaling(scale=1.0,
                           mode='fan_avg',
                           distribution='normal',
                           seed=seed,
                           dtype=dtype)