Code Example #1
def np_ortho(shape, random_state, scale=1.):
    """
    Builds a numpy variable filled with orthonormal random values
    Parameters
    ----------
    shape, tuple of ints or tuple of tuples
        shape of values to initialize
        tuple of ints should be single shape
        tuple of tuples is primarily for convnets and should be of form
        ((n_in_kernels, kernel_width, kernel_height),
         (n_out_kernels, kernel_width, kernel_height))
    random_state, numpy.random.RandomState() object
    scale, float (default 1.)
        default of 1. results in orthonormal random values scaled by 1.
    Returns
    -------
    initialized_ortho, array-like
        Array-like of random values the same size as shape parameter
    References
    ----------
    Exact solutions to the nonlinear dynamics of learning in deep linear
    neural networks
        A. Saxe, J. McClelland, S. Ganguli
    """
    if type(shape[0]) is tuple:
        shp = (shape[1][0], shape[0][0]) + shape[1][1:]
        flat_shp = (shp[0], np.prod(shp[1:]))
    else:
        shp = shape
        flat_shp = shape
    g = random_state.randn(*flat_shp)
    U, S, VT = linalg.svd(g, full_matrices=False)
    res = U if U.shape == flat_shp else VT  # pick one with the correct shape
    res = res.reshape(shp)
    return (scale * res).astype("float32")
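
A minimal usage sketch (not taken from any of the projects listed here; the imports, variable names, and shapes are illustrative assumptions). It exercises the two shape conventions the docstring describes: a plain 2-D weight matrix and a convnet kernel given as a tuple of tuples.

import numpy as np
from scipy import linalg  # numpy.linalg.svd would also satisfy the call inside np_ortho

rng = np.random.RandomState(0)

# dense weight matrix: shape is a tuple of ints
W_dense = np_ortho((128, 256), rng)
print(W_dense.shape)   # (128, 256)

# convnet kernel: ((n_in, kw, kh), (n_out, kw, kh)) -> array of shape (n_out, n_in, kw, kh)
W_conv = np_ortho(((3, 5, 5), (16, 5, 5)), rng, scale=0.5)
print(W_conv.shape)    # (16, 3, 5, 5)

# the flattened rows are orthonormal (up to the scale factor)
flat = W_dense.reshape(128, -1)
print(np.allclose(np.dot(flat, flat.T), np.eye(128), atol=1e-5))  # True

Initializing with the SVD of a Gaussian matrix is the orthogonal scheme analysed in the Saxe et al. reference above; it makes every singular value of the initial weight matrix exactly equal to the scale argument.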
Code Example #2
File: multi_event_rnn_lib.py  Project: danabo/magenta
def np_ortho(shp, random_state, scale=1.):
  """Builds a numpy variable filled with orthonormal random values.

  Args:
    shp: tuple of ints or tuple of tuples
      shape of values to initialize
      tuple of ints should be single shape
      tuple of tuples is primarily for convnets and should be of form
      ((n_in_kernels, kernel_width, kernel_height),
       (n_out_kernels, kernel_width, kernel_height))
    random_state: numpy.random.RandomState() object
    scale: float (default 1.)
      default of 1. results in orthonormal random values scaled by 1.

  Returns:
    initialized_ortho, array-like
      Array-like of random values the same size as shape parameter

  References
  ----------
  Exact solutions to the nonlinear dynamics of learning in deep linear
  neural networks
      A. Saxe, J. McClelland, S. Ganguli
  """
  if isinstance(shp[0], tuple):
    shp = (shp[1][0], shp[0][0]) + shp[1][1:]
    flat_shp = (shp[0], np.prod(shp[1:]))
  else:
    flat_shp = shp
  g = random_state.randn(*flat_shp)
  u, _, vt = linalg.svd(g, full_matrices=False)
  res = u if u.shape == flat_shp else vt  # pick one with the correct shape
  res = res.reshape(shp)
  return (scale * res).astype('float32')
Code Example #3
File: handwriter.py  Project: szcom/speaker
def np_ortho(shape, random_state, scale=1.):
    if type(shape[0]) is tuple:
        shp = (shape[1][0], shape[0][0]) + shape[1][1:]
        flat_shp = (shp[0], np.prod(shp[1:]))
    else:
        shp = shape
        flat_shp = shape
    g = random_state.randn(*flat_shp)
    U, S, VT = linalg.svd(g, full_matrices=False)
    res = U if U.shape == flat_shp else VT  # pick one with the correct shape
    res = res.reshape(shp)
    return (scale * res).astype(theano.config.floatX)
Code Example #4
File: bgmm.py  Project: cindeem/nipy
def Wishart_eval(n, V, W, dV=None, dW=None, piV=None):
    """
    Evaluation of the probability of W under Wishart(n, V)

    Parameters
    ----------
    n: float,
        the number of degrees of freedom (dofs)
    V: array of shape (n,n)
        the scale matrix of the Wishart density
    W: array of shape (n,n)
        the sample to be evaluated
    dV: float, optional,
        determinant of V
    dW: float, optional,
        determinant of W
    piV: array of shape (n,n), optional
        pseudo-inverse of V

    Returns
    -------
    (float) the density
    """
    # check that shape(V)==shape(W)
    p = V.shape[0]
    if dV is None:
        dV = np.prod(eigvalsh(V))
    if dW is None:
        dW = np.prod(eigvalsh(W))
    if piV is None:
        piV = inv(V)
    ldW = np.log(dW) * (n - p - 1) / 2
    ltr = - np.trace(np.dot(piV, W)) / 2
    la = (n * p * np.log(2) + np.log(dV) * n) / 2
    lg = np.log(np.pi) * p * (p - 1) / 4
    # multivariate gamma term: sum of gammaln((n - j) / 2) for j in range(p)
    lg += gammaln(np.arange(n - p + 1, n + 1).astype(float) / 2).sum()
    lt = ldW + ltr - la - lg
    return np.exp(lt)
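
A quick cross-check sketch (not part of bgmm.py; the imports and test values are assumptions). Since the docstring describes the density of W under Wishart(n, V) with scale matrix V, the value should agree with scipy.stats.wishart up to floating-point error.

import numpy as np
from numpy.linalg import inv, eigvalsh
from scipy.special import gammaln
from scipy.stats import wishart

rng = np.random.RandomState(0)
p, n = 3, 7.0                  # dimension and degrees of freedom
V = np.eye(p)                  # scale matrix
A = rng.randn(p, 2 * p)
W = np.dot(A, A.T)             # random positive-definite sample

print(Wishart_eval(n, V, W))
print(wishart(df=n, scale=V).pdf(W))  # should match to within numerical precision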