Example No. 1
def softmax_loss(x, y):
    """
  Computes the loss and gradient for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
    # Shift scores by the per-row max for numerical stability, then normalize.
    probs = np.exp(x - np.max(x, axis=1, keepdims=True))
    probs /= np.expand_dims(np.sum(probs, axis=1), axis=1)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N

    dx = probs.copy()
    dx[np.arange(N), y] -= 1
    dx /= N

    return loss, dx
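
A quick sanity check for this routine is to compare dx against a centered finite difference. This is a minimal sketch assuming plain NumPy; the seed, sizes, and the probed entry (0, 0) are arbitrary.

import numpy as np

np.random.seed(0)
N, C = 4, 5
x = np.random.randn(N, C)
y = np.random.randint(C, size=N)

loss, dx = softmax_loss(x, y)

# Centered finite difference on a single entry of x.
h = 1e-6
x_plus, x_minus = x.copy(), x.copy()
x_plus[0, 0] += h
x_minus[0, 0] -= h
num_grad = (softmax_loss(x_plus, y)[0] - softmax_loss(x_minus, y)[0]) / (2 * h)
print(abs(num_grad - dx[0, 0]))  # should be tiny, on the order of 1e-9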
Example No. 2
def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
      for the ith input.
    - y: Either of the followings:
      - One hot encoding of labels, of shape (N, C)
      - Label index of shape (N, ), each y[i] is the label of i^th example
        (0 <= y[i] < C)

    Returns:
    - loss: Scalar giving the loss
    """
    N = x.shape[0]
    C = x.shape[1]
    if len(y.shape) == 1:
        #convert it to one hot encoding
        onehot_y = np.zeros([N, C])
        np.onehot_encode(y, onehot_y)
    else:
        onehot_y = y
    probs = x - np.max(x, axis=1, keepdims=True)
    loss = -np.sum(probs * onehot_y) / N
    loss += np.sum(np.log(np.sum(np.exp(probs), axis=1, keepdims=True))) / N
    return loss
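
Note that np.onehot_encode is an in-place helper from the minpy/MXNet NumPy interface, not plain NumPy (an assumption based on its call signature). With plain NumPy the same one-hot matrix can be built with fancy indexing, a two-line sketch:

onehot_y = np.zeros((N, C))
onehot_y[np.arange(N), y] = 1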
Example No. 3
def pricepaths(S, tau, r, q, v, M, N):
    dt = tau / M
    g1 = (r - q - v / 2) * dt
    g2 = math.sqrt(v * dt)
    aux = math.log(S) + np.cumsum(
        g1 + g2 * np.random.randn(M, N, dtype=np.float32), 0)
    return np.exp(aux)
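
This simulates geometric Brownian motion: each log-price accumulates a drift (r - q - v/2) * dt plus a shock sqrt(v * dt) * Z per step. The dtype keyword to randn is not accepted by plain NumPy and looks backend-specific, so here is a plain-NumPy sketch of the same computation with made-up parameters:

import math
import numpy as np

def pricepaths_np(S, tau, r, q, v, M, N):
    dt = tau / M
    drift = (r - q - v / 2) * dt
    shock = math.sqrt(v * dt) * np.random.randn(M, N)
    log_paths = math.log(S) + np.cumsum(drift + shock, axis=0)
    return np.exp(log_paths)

paths = pricepaths_np(S=100.0, tau=1.0, r=0.05, q=0.0, v=0.04, M=252, N=1000)
print(paths.shape)       # (252, 1000): one row per time step, one column per path
print(paths[-1].mean())  # terminal prices, roughly S * exp((r - q) * tau) on average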
Example No. 4
 def forward(self, X):
     """Forward pass to obtain the action probabilities for each observation in `X`."""
     a = np.dot(self.params['w1'], X.T)
     h = np.maximum(0, a)
     logits = np.dot(h.T, self.params['w2'].T)
     p = 1.0 / (1.0 + np.exp(-logits))
     return p
Example No. 5
def softmax_loss(x, y):
  """
  Computes the loss and gradient for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
  # Shift scores by the per-row max for numerical stability, then normalize.
  probs = np.exp(x - np.max(x, axis=1, keepdims=True))
  probs /= np.expand_dims(np.sum(probs, axis=1), axis=1)
  N = x.shape[0]
  loss = -np.sum(np.log(probs[np.arange(N), y])) / N

  dx = probs.copy()
  dx[np.arange(N), y] -= 1
  dx /= N

  return loss, dx
Example No. 6
def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
      for the ith input.
    - y: Either of the followings:
      - One hot encoding of labels, of shape (N, C)
      - Label index of shape (N, ), each y[i] is the label of i^th example
        (0 <= y[i] < C)

    Returns:
    - loss: Scalar giving the loss
    """
    N = x.shape[0]
    C = x.shape[1]
    if len(y.shape) == 1:
        #convert it to one hot encoding
        onehot_y = np.zeros([N, C])
        np.onehot_encode(y, onehot_y)
    else:
        onehot_y = y
    probs = x - np.max(x, axis=1, keepdims=True)
    loss = -np.sum(probs * onehot_y) / N
    loss += np.sum(np.log(np.sum(np.exp(probs), axis=1, keepdims=True))) / N
    return loss
Example No. 7
 def _softmax_loss(self, X, y, *args):
     N = X.shape[0]
     scores = self._forward(X, *args)
     scores = np.exp(scores - np.max(scores, axis=1, keepdims=True))
     prob = scores / np.sum(scores, axis=1, keepdims=True)
     loss = np.sum(-np.log(prob[np.arange(N), y])) / float(N)
     return loss
Example No. 8
 def forward(self, X):
     """Forward pass to obtain the action probabilities for each observation in `X`."""
     a = np.dot(self.params['w1'], X.T)
     h = np.maximum(0, a)
     logits = np.dot(h.T, self.params['w2'].T)
     p = 1.0 / (1.0 + np.exp(-logits))
     return p
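
For reference, a self-contained sketch of the same two-layer forward pass with explicit shapes; the parameter shapes (w1: (H, D), w2: (A, H)) are assumptions, and the output is an element-wise sigmoid per action, not a normalized distribution.

import numpy as np

def policy_forward(params, X):
    a = np.dot(params['w1'], X.T)           # (H, N)
    h = np.maximum(0, a)                    # ReLU
    logits = np.dot(h.T, params['w2'].T)    # (N, A)
    return 1.0 / (1.0 + np.exp(-logits))    # element-wise sigmoid

params = {'w1': 0.01 * np.random.randn(8, 4), 'w2': 0.01 * np.random.randn(2, 8)}
print(policy_forward(params, np.random.randn(5, 4)).shape)  # (5, 2)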
Example No. 9
def probs(scores):
    """
    Calculates the probabilities out of a neural networks class scores.
    :param scores: The score matrix of form (N x K), where N is the number of observations and K is the number of classes.
    :return: The probabilities of the same form as the input scores.
    """
    exp_scores = np.exp(scores)
    return exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
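
Because this exponentiates the raw scores, large scores can overflow to inf. A numerically stable variant subtracts the per-row maximum first, which leaves the result mathematically unchanged; a short sketch:

def probs_stable(scores):
    shifted = scores - np.max(scores, axis=1, keepdims=True)
    exp_scores = np.exp(shifted)
    return exp_scores / np.sum(exp_scores, axis=1, keepdims=True)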
Example No. 10
def softmax(x, y):
    import numpy as np
    y = y.astype(int)
    probs = np.exp(x - np.max(x, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N
    return loss
Example No. 11
 def forward(self, X):
     a = np.dot(self.params['fc1'], X.T)
     h = np.maximum(0, a)
     logits = np.dot(h.T, self.params['policy_fc_last'].T)
     ps = np.exp(logits - np.max(logits, axis=1, keepdims=True))
     ps /= np.sum(ps, axis=1, keepdims=True)
     vs = np.dot(h.T, self.params['vf_fc_last'].T) + self.params['vf_fc_last_bias']
     return ps, vs
Example No. 12
def softmax_crossentropy(x, y):
    # x should be (batch, prob)
    # y should be (batch, )

    x_dev = x - np.max(x, axis=1, keepdims=True) # minpy doesn't support x.max()
    sm = x_dev - np.log(np.sum(np.exp(x_dev), axis=1, keepdims=True))
    ids = np.arange(0, y.shape[0])*x.shape[1] + y
    ce = -np.sum(sm.reshape((sm.shape[0]*sm.shape[1],))[ids])/(1.0*y.shape[0])  # minpy doesn't support -1 in shape inference
    return ce
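
The ids arithmetic relies on row-major flattening: element (i, y[i]) of sm sits at flat position i * C + y[i] with C = x.shape[1]. A small plain-NumPy check of that equivalence (the shapes are arbitrary):

import numpy as np

x = np.random.randn(3, 5)
y = np.array([1, 4, 0])
sm = x - np.log(np.sum(np.exp(x), axis=1, keepdims=True))  # log-softmax (max shift omitted for brevity)
ids = np.arange(y.shape[0]) * x.shape[1] + y
assert np.allclose(sm.reshape(-1)[ids], sm[np.arange(3), y])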
Example No. 13
def softmax_probability(p, channel):
    N, C = p.shape
    p -= np.max(p, axis=1).reshape((N, 1))
    code = np.zeros((N, C))
    np.onehot_encode(channel, code)
    p = np.exp(p)
    selected_p = p * code
    total_p = np.sum(p, axis=1).reshape((N, 1))
    return np.sum(selected_p / total_p, axis=1)
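
Here too np.onehot_encode is not part of plain NumPy. A plain-NumPy sketch of the same per-row selection (the softmax probability of the given channel for each row):

def softmax_probability_np(p, channel):
    shifted = p - np.max(p, axis=1, keepdims=True)
    e = np.exp(shifted)
    return e[np.arange(p.shape[0]), channel] / np.sum(e, axis=1)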
Example No. 14
 def grad(g):
     import numpy as np
     y = label.astype(int)
     probs = np.exp(x - np.max(x, axis=1, keepdims=True))
     probs /= np.sum(probs, axis=1, keepdims=True)
     N = x.shape[0]
     dx = probs.copy()
     dx[np.arange(N), y] -= 1
     dx /= N
     return dx
Example No. 15
 def _loss_function(*args):
     normal_loss = model.loss(model.forward(X, 'train'), Y)
     noisy_output = model.forward(noisy_X, 'train')
     noisy_output -= np.max(noisy_output, axis=1).reshape((K, 1))
     noisy_output = np.exp(noisy_output)
     model_p_noisy_X = noisy_output / np.sum(noisy_output, axis=1).reshape(
         (K, 1))
     kl = KL(1.0 / N_CLASSES, model_p_noisy_X)
     noisy_loss = gamma * np.sum(kl) / float(K)
     return gamma * normal_loss + (1 - gamma) * noisy_loss
Example No. 16
def sigmoid(x):
    """
    Computes the forward pass for a layer of sigmoid units.

    Input:
    - x: Inputs, of any shape

    Returns:
    - out: Output, of the same shape as x
    """

    return 1 / (1 + np.exp(-x))
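
For large negative inputs, np.exp(-x) overflows before the division. A numerically stable alternative (a sketch, not part of the snippet above; it assumes an ndarray input) evaluates the two branches separately:

def sigmoid_stable(x):
    out = np.empty_like(x, dtype=float)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    ex = np.exp(x[~pos])
    out[~pos] = ex / (1.0 + ex)
    return out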
Example No. 17
def sigmoid(x):
    """
    Computes the forward pass for a layer of sigmoid units.

    Input:
    - x: Inputs, of any shape

    Returns:
    - out: Output, of the same shape as x
    """

    return 1 / (1 + np.exp(-x))
Example No. 18
    def collectPhoton(self, x, y, z, react):

        self.x = x
        self.y = y
        self.z = z / self.axialRatio  #rescale of z coordinate
        self.react = react

        self.stepNum = len(x)
        self.moleculeNum = x.size // len(x)  # number of molecules (columns of the trajectory array)

        #print 'calculating fluorescence...'
        self.Imax = 2 * self.power / (np.pi * self.radius**2
                                      )  # the center laser intensity (uW/um^2)
        self.photonRate = self.Imax * self.Qfluor * self.react / self.ePhoton * self.Qdetect * self.sigma * 1e-12  #(us^-1)

        self.singleTrace = np.random.poisson(
            self.photonRate * self.dt) * np.exp(
                -2 * (self.x**2 + self.y**2 + self.z**2) / self.radius**2)
        self.trace = np.sum(self.singleTrace, axis=1)

        self.singleTrace_nr = self.photonRate * self.dt * np.exp(
            -2 * (self.x**2 + self.y**2 + self.z**2) / self.radius**2)
        self.trace_nr = np.sum(self.singleTrace_nr, axis=1)
Example No. 19
def softmax_crossentropy(x, y):
    EPSI = 1e-6
    batch_size, seq_len, prob_dim = x.shape
    x = x.reshape((x.shape[0] * x.shape[1], x.shape[2]))
    y = y.reshape((y.shape[0] * y.shape[1], ))

    #print x.shape, y.shape
    # x should be (batch, prob)
    # y should be (batch, )

    x_dev = x - np.max(x, axis=1,
                       keepdims=True)  # minpy doesn't support x.max()
    sm = x_dev - np.log(EPSI + np.sum(np.exp(x_dev), axis=1, keepdims=True))
    ids = np.arange(0, y.shape[0]) * prob_dim + y  # stride by prob_dim: flat index of element (i, y[i])
    ce = -np.sum(sm.reshape((sm.shape[0] * sm.shape[1], ))[ids]) / (
        1.0 * y.shape[0])  # minpy doesn't support -1 in shape inference
    return ce
Example No. 20
def softmax_loss(x, y):
  """
  Computes the loss and gradient for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns:
  - loss: Scalar giving the loss
  """
  # Shift by the per-row max for numerical stability.
  probs = np.exp(x - np.expand_dims(np.max(x, axis=1), axis = 1))
  probs = probs / np.expand_dims(np.sum(probs, axis=1), axis = 1)
  N = x.shape[0]
  loss = -np.sum(np.log(probs[np.arange(N), y])) / N

  return loss
Example No. 21
def softmax_loss(x, y):
    """
  Computes the loss and gradient for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns a tuple of:
  - loss: Scalar giving the loss
  """
    # Shift by the per-row max for numerical stability.
    probs = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))
    probs = probs / np.expand_dims(np.sum(probs, axis=1), axis=1)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N

    return loss
Example No. 22
def forward(X, y, *p):
    N, C, H, W = X.shape

    X = X.reshape((N, C * H * W))

    print('>>', X.shape)
    print('>>', p[0].shape)

    first = np.dot(X, p[0]) + p[1]

    second = np.dot(first, p[2]) + p[3]

    # Softmax over classes: normalize each row, not the whole batch.
    exp = np.exp(second)
    pred = exp / np.sum(exp, axis=1, keepdims=True)

    # Mean negative log-likelihood of the correct classes.
    loss = -np.sum(np.log(pred[np.arange(N), y])) / N

    return loss
Example No. 23
def temporal_softmax_loss(x, y, mask, verbose=False):
    """
    A temporal version of softmax loss for use in RNNs. We assume that we are
    making predictions over a vocabulary of size V for each timestep of a
    timeseries of length T, over a minibatch of size N. The input x gives scores
    for all vocabulary elements at all timesteps, and y gives the indices of the
    ground-truth element at each timestep. We use a cross-entropy loss at each
    timestep, summing the loss over all timesteps and averaging across the
    minibatch.

    As an additional complication, we may want to ignore the model output at some
    timesteps, since sequences of different length may have been combined into a
    minibatch and padded with NULL tokens. The optional mask argument tells us
    which elements should contribute to the loss.

    Inputs:
    - x: Input scores, of shape (N, T, V)
    - y: Ground-truth indices, of shape (N, T) where each element is in the range
       0 <= y[i, t] < V
    - mask: Boolean array of shape (N, T) where mask[i, t] tells whether or not
    the scores at x[i, t] should contribute to the loss.

    Returns:
    - loss: Scalar giving loss
    """
    N, T, V = x.shape

    x_flat = x.reshape(N * T, V)
    y_flat = y.reshape(N * T)
    mask_flat = mask.reshape(N * T)

    probs = np.exp(x_flat - np.max(x_flat, axis=1, keepdims=True))
    probs = probs / np.sum(probs, axis=1, keepdims=True)
    loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N

    return loss
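
A hedged usage sketch with a padded minibatch; the shapes and the mask below are made up, and the mask simply zeroes out the padded timesteps' contribution to the loss:

import numpy as np

N, T, V = 2, 4, 6
x = np.random.randn(N, T, V)
y = np.random.randint(V, size=(N, T))
mask = np.array([[1, 1, 1, 0],   # last step of sequence 0 is NULL padding
                 [1, 1, 1, 1]], dtype=bool)
print(temporal_softmax_loss(x, y, mask))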
Example No. 24
def temporal_softmax_loss(x, y, mask, verbose=False):
    """
    A temporal version of softmax loss for use in RNNs. We assume that we are
    making predictions over a vocabulary of size V for each timestep of a
    timeseries of length T, over a minibatch of size N. The input x gives scores
    for all vocabulary elements at all timesteps, and y gives the indices of the
    ground-truth element at each timestep. We use a cross-entropy loss at each
    timestep, summing the loss over all timesteps and averaging across the
    minibatch.

    As an additional complication, we may want to ignore the model output at some
    timesteps, since sequences of different length may have been combined into a
    minibatch and padded with NULL tokens. The optional mask argument tells us
    which elements should contribute to the loss.

    Inputs:
    - x: Input scores, of shape (N, T, V)
    - y: Ground-truth indices, of shape (N, T) where each element is in the range
       0 <= y[i, t] < V
    - mask: Boolean array of shape (N, T) where mask[i, t] tells whether or not
    the scores at x[i, t] should contribute to the loss.

    Returns:
    - loss: Scalar giving loss
    """
    N, T, V = x.shape

    x_flat = x.reshape(N * T, V)
    y_flat = y.reshape(N * T)
    mask_flat = mask.reshape(N * T)

    probs = np.exp(x_flat - np.max(x_flat, axis=1, keepdims=True))
    probs = probs / np.sum(probs, axis=1, keepdims=True)
    loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N

    return loss
Example No. 25
def test_ufunc():
    x = np.array([-1.2, 1.2])
    np.absolute(x)
    np.absolute(1.2 + 1j)
    x = np.linspace(start=-10, stop=10, num=101)
    np.add(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.add(x1, x2)
    np.arccos([1, -1])
    x = np.linspace(-1, 1, num=100)
    np.arccosh([np.e, 10.0])
    np.arccosh(1)
    np.arcsin(0)
    np.arcsinh(np.array([np.e, 10.0]))
    np.arctan([0, 1])
    np.pi / 4
    x = np.linspace(-10, 10)
    x = np.array([-1, +1, +1, -1])
    y = np.array([-1, -1, +1, +1])
    np.arctan2(y, x) * 180 / np.pi
    np.arctan2([1., -1.], [0., 0.])
    np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
    np.arctanh([0, -0.5])
    np.bitwise_and(13, 17)
    np.bitwise_and(14, 13)
    # np.binary_repr(12)    return str
    np.bitwise_and([14, 3], 13)
    np.bitwise_and([11, 7], [4, 25])
    np.bitwise_and(np.array([2, 5, 255]), np.array([3, 14, 16]))
    np.bitwise_and([True, True], [False, True])
    np.bitwise_or(13, 16)
    # np.binary_repr(29)
    np.bitwise_or(32, 2)
    np.bitwise_or([33, 4], 1)
    np.bitwise_or([33, 4], [1, 2])
    np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
    # np.array([2, 5, 255]) | np.array([4, 4, 4])
    np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
                  np.array([4, 4, 4, 2147483647], dtype=np.int32))
    np.bitwise_or([True, True], [False, True])
    np.bitwise_xor(13, 17)
    # np.binary_repr(28)
    np.bitwise_xor(31, 5)
    np.bitwise_xor([31, 3], 5)
    np.bitwise_xor([31, 3], [5, 6])
    np.bitwise_xor([True, True], [False, True])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.ceil(a)
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.trunc(a)
    np.cos(np.array([0, np.pi / 2, np.pi]))
    np.cosh(0)
    x = np.linspace(-4, 4, 1000)
    rad = np.arange(12.) * np.pi / 6
    np.degrees(rad)
    out = np.zeros((rad.shape))
    r = np.degrees(rad, out)
    # np.all(r == out) return bool
    np.rad2deg(np.pi / 2)
    np.divide(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.divide(2, 4)
    np.divide(2, 4.)
    np.equal([0, 1, 3], np.arange(3))
    np.equal(1, np.ones(1))
    x = np.linspace(-2 * np.pi, 2 * np.pi, 100)
    np.exp2([2, 3])
    np.expm1(1e-10)
    np.exp(1e-10) - 1
    np.fabs(-1)
    np.fabs([-1.2, 1.2])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.floor(a)
    np.floor_divide(7, 3)
    np.floor_divide([1., 2., 3., 4.], 2.5)
    np.fmod([-3, -2, -1, 1, 2, 3], 2)
    np.remainder([-3, -2, -1, 1, 2, 3], 2)
    np.fmod([5, 3], [2, 2.])
    a = np.arange(-3, 3).reshape(3, 2)
    np.fmod(a, [2, 2])
    np.greater([4, 2], [2, 2])
    a = np.array([4, 2])
    b = np.array([2, 2])
    a > b
    np.greater_equal([4, 2, 1], [2, 2, 2])
    np.hypot(3 * np.ones((3, 3)), 4 * np.ones((3, 3)))
    np.hypot(3 * np.ones((3, 3)), [4])
    np.bitwise_not is np.invert
    np.invert(np.array([13], dtype=np.uint8))
    # np.binary_repr(242, width=8)
    np.invert(np.array([13], dtype=np.uint16))
    np.invert(np.array([13], dtype=np.int8))
    # np.binary_repr(-14, width=8)
    np.invert(np.array([True, False]))
    # np.isfinite(1)
    # np.isfinite(0)
    # np.isfinite(np.nan)
    # np.isfinite(np.inf)
    # np.isfinite(np.NINF)
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    np.isfinite(x, y)
    # np.isinf(np.inf)
    # np.isinf(np.nan)
    # np.isinf(np.NINF)
    # np.isinf([np.inf, -np.inf, 1.0, np.nan])
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    # np.isinf(x, y)
    # np.isnan(np.nan)
    # np.isnan(np.inf)
    # np.binary_repr(5)
    np.left_shift(5, 2)
    # np.binary_repr(20)
    np.left_shift(5, [1, 2, 3])
    np.less([1, 2], [2, 2])
    np.less_equal([4, 2, 1], [2, 2, 2])
    x = np.array([0, 1, 2, 2**4])
    xi = np.array([0 + 1.j, 1, 2 + 0.j, 4.j])
    np.log2(xi)
    prob1 = np.log(1e-50)
    prob2 = np.log(2.5e-50)
    prob12 = np.logaddexp(prob1, prob2)
    prob12
    np.exp(prob12)
    prob1 = np.log2(1e-50)
    prob2 = np.log2(2.5e-50)
    prob12 = np.logaddexp2(prob1, prob2)
    prob1, prob2, prob12
    2**prob12
    np.log1p(1e-99)
    np.log(1 + 1e-99)
    # np.logical_and(True, False)
    # np.logical_and([True, False], [False, False])
    x = np.arange(5)
    # np.logical_and(x>1, x<4)
    # np.logical_not(3)
    # np.logical_not([True, False, 0, 1])
    x = np.arange(5)
    # np.logical_not(x<3)
    # np.logical_or(True, False)
    # np.logical_or([True, False], [False, False])
    x = np.arange(5)
    # np.logical_or(x < 1, x > 3)
    # np.logical_xor(True, False)
    # np.logical_xor([True, True, False, False], [True, False, True, False])
    x = np.arange(5)
    # np.logical_xor(x < 1, x > 3)
    # np.logical_xor(0, np.eye(2))
    np.maximum([2, 3, 4], [1, 5, 2])
    # np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
    # np.maximum(np.Inf, 1)
    np.minimum([2, 3, 4], [1, 5, 2])
    # np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
    # np.minimum(-np.Inf, 1)
    np.fmax([2, 3, 4], [1, 5, 2])
    np.fmax(np.eye(2), [0.5, 2])
    # np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.fmin([2, 3, 4], [1, 5, 2])
    np.fmin(np.eye(2), [0.5, 2])
    # np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.modf([0, 3.5])
    np.modf(-0.5)
    np.multiply(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.multiply(x1, x2)
    np.negative([1., -1.])
    np.not_equal([1., 2.], [1., 3.])
    np.not_equal([1, 2], [[1, 3], [1, 4]])
    x1 = range(6)
    np.power(x1, 3)
    x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
    np.power(x1, x2)
    x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
    np.power(x1, x2)
    deg = np.arange(12.) * 30.
    np.radians(deg)
    out = np.zeros((deg.shape))
    ret = np.radians(deg, out)
    ret is out
    np.deg2rad(180)
    np.reciprocal(2.)
    np.reciprocal([1, 2., 3.33])
    np.remainder([4, 7], [2, 3])
    np.remainder(np.arange(7), 5)
    # np.binary_repr(10)
    np.right_shift(10, 1)
    # np.binary_repr(5)
    np.right_shift(10, [1, 2, 3])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.rint(a)
    np.sign([-5., 4.5])
    np.sign(0)
    # np.sign(5-2j)
    # np.signbit(-1.2)
    np.signbit(np.array([1, -2.3, 2.1]))
    np.copysign(1.3, -1)
    np.copysign([-1, 0, 1], -1.1)
    np.copysign([-1, 0, 1], np.arange(3) - 1)
    np.sin(np.pi / 2.)
    np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
    x = np.linspace(-np.pi, np.pi, 201)
    np.sinh(0)
    # np.sinh(np.pi*1j/2)
    np.sqrt([1, 4, 9])
    np.sqrt([4, -1, -3 + 4J])
    np.cbrt([1, 8, 27])
    np.square([-1j, 1])
    np.subtract(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.subtract(x1, x2)
    np.tan(np.array([-np.pi, np.pi / 2, np.pi]))
    np.tanh((0, np.pi * 1j, np.pi * 1j / 2))
    x = np.arange(5)
    np.true_divide(x, 4)
    x = np.arange(9)
    y1, y2 = np.frexp(x)
    y1 * 2**y2
    np.ldexp(5, np.arange(4))
    x = np.arange(6)
    np.ldexp(*np.frexp(x))
Example No. 26
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
Example No. 27
def sigmoid(x):
    return 1.0/(1.0 + mp.exp(-x))
Example No. 28
 def U(beta):
     # Negative log-posterior: logistic log-likelihood plus a Gaussian prior on beta.
     return mp.sum(mp.log(1 + mp.exp(mp.dot(X, beta)))) - mp.dot(y.T, mp.dot(X, beta)) + (0.5 / alpha) * mp.sum(beta ** 2)
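
U is the negative log-posterior of logistic regression with a Gaussian prior, as used in HMC-style samplers. A self-contained plain-NumPy sketch with hypothetical data shows its value at beta = 0:

import numpy as np

X = np.random.randn(20, 3)
y = (np.random.rand(20) < 0.5).astype(float)
alpha, beta = 1.0, np.zeros(3)

U_val = (np.sum(np.log(1 + np.exp(X @ beta)))
         - y @ (X @ beta)
         + (0.5 / alpha) * np.sum(beta ** 2))
print(U_val)  # 20 * log(2) ~ 13.86 at beta = 0, since the prior term vanishes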
Example No. 29
def logsumexp(X, axis=1):
    max_X = np.max(X)
    return max_X + np.log(np.sum(np.exp(X - max_X), axis=axis, keepdims=True))
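
Here max_X is the global maximum, which is valid (any constant shift cancels in the identity) but can be loose when rows differ widely in scale. A per-row variant is a one-line change; a sketch assuming a 2-D input:

def logsumexp_rowwise(X, axis=1):
    max_X = np.max(X, axis=axis, keepdims=True)
    return max_X + np.log(np.sum(np.exp(X - max_X), axis=axis, keepdims=True))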
Example No. 30
def logsumexp(X, axis=1):
    max_X = np.max(X)
    return max_X + np.log(np.sum(np.exp(X - max_X), axis=axis, keepdims=True))
Example No. 31
def sigmoid(X):
    return 1 / (1 + np.exp(-X))
Example No. 32
def logsoftmax(x, valid_idx):
    x[np.array(valid_idx)] += 1e6
    x_max = np.max(x)
    return x - x_max - np.log(np.sum(np.exp(x - x_max)))
Example No. 33
 def forward(self, inputs, *args):
     return 1 / (1 + np.exp(-inputs))
Example No. 34
def logsumexp(X, axis, keepdims=False):
    max_X = np.max(X)
    return max_X + np.log(np.sum(np.exp(X - max_X), axis=axis, keepdims=keepdims))
Example No. 35
def sigmoid(x):
    return 1/(1+np.exp(-x))
Example No. 36
def test_ufunc():
    x = np.array([-1.2, 1.2])
    np.absolute(x)
    np.absolute(1.2 + 1j)
    x = np.linspace(start=-10, stop=10, num=101)
    np.add(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.add(x1, x2)
    np.arccos([1, -1])
    x = np.linspace(-1, 1, num=100)
    np.arccosh([np.e, 10.0])
    np.arccosh(1)
    np.arcsin(0)
    np.arcsinh(np.array([np.e, 10.0]))
    np.arctan([0, 1])
    np.pi/4
    x = np.linspace(-10, 10)
    x = np.array([-1, +1, +1, -1])
    y = np.array([-1, -1, +1, +1])
    np.arctan2(y, x) * 180 / np.pi
    np.arctan2([1., -1.], [0., 0.])
    np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
    np.arctanh([0, -0.5])
    np.bitwise_and(13, 17)
    np.bitwise_and(14, 13)
    # np.binary_repr(12)    return str
    np.bitwise_and([14,3], 13)
    np.bitwise_and([11,7], [4,25])
    np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
    np.bitwise_and([True, True], [False, True])
    np.bitwise_or(13, 16)
    # np.binary_repr(29)
    np.bitwise_or(32, 2)
    np.bitwise_or([33, 4], 1)
    np.bitwise_or([33, 4], [1, 2])
    np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
    # np.array([2, 5, 255]) | np.array([4, 4, 4])
    np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
                  np.array([4, 4, 4, 2147483647], dtype=np.int32))
    np.bitwise_or([True, True], [False, True])
    np.bitwise_xor(13, 17)
    # np.binary_repr(28)
    np.bitwise_xor(31, 5)
    np.bitwise_xor([31,3], 5)
    np.bitwise_xor([31,3], [5,6])
    np.bitwise_xor([True, True], [False, True])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.ceil(a)
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.trunc(a)
    np.cos(np.array([0, np.pi/2, np.pi]))
    np.cosh(0)
    x = np.linspace(-4, 4, 1000)
    rad = np.arange(12.)*np.pi/6
    np.degrees(rad)
    out = np.zeros((rad.shape))
    r = np.degrees(rad, out)
    # np.all(r == out) return bool
    np.rad2deg(np.pi/2)
    np.divide(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.divide(2, 4)
    np.divide(2, 4.)
    np.equal([0, 1, 3], np.arange(3))
    np.equal(1, np.ones(1))
    x = np.linspace(-2*np.pi, 2*np.pi, 100)
    np.exp2([2, 3])
    np.expm1(1e-10)
    np.exp(1e-10) - 1
    np.fabs(-1)
    np.fabs([-1.2, 1.2])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.floor(a)
    np.floor_divide(7,3)
    np.floor_divide([1., 2., 3., 4.], 2.5)
    np.fmod([-3, -2, -1, 1, 2, 3], 2)
    np.remainder([-3, -2, -1, 1, 2, 3], 2)
    np.fmod([5, 3], [2, 2.])
    a = np.arange(-3, 3).reshape(3, 2)
    np.fmod(a, [2,2])
    np.greater([4,2],[2,2])
    a = np.array([4,2])
    b = np.array([2,2])
    a > b
    np.greater_equal([4, 2, 1], [2, 2, 2])
    np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
    np.hypot(3*np.ones((3, 3)), [4])
    np.bitwise_not is np.invert
    np.invert(np.array([13], dtype=np.uint8))
    # np.binary_repr(242, width=8)
    np.invert(np.array([13], dtype=np.uint16))
    np.invert(np.array([13], dtype=np.int8))
    # np.binary_repr(-14, width=8)
    np.invert(np.array([True, False]))
    # np.isfinite(1)
    # np.isfinite(0)
    # np.isfinite(np.nan)
    # np.isfinite(np.inf)
    # np.isfinite(np.NINF)
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    np.isfinite(x, y)
    # np.isinf(np.inf)
    # np.isinf(np.nan)
    # np.isinf(np.NINF)
    # np.isinf([np.inf, -np.inf, 1.0, np.nan])
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    # np.isinf(x, y)
    # np.isnan(np.nan)
    # np.isnan(np.inf)
    # np.binary_repr(5)
    np.left_shift(5, 2)
    # np.binary_repr(20)
    np.left_shift(5, [1,2,3])
    np.less([1, 2], [2, 2])
    np.less_equal([4, 2, 1], [2, 2, 2])
    x = np.array([0, 1, 2, 2**4])
    xi = np.array([0+1.j, 1, 2+0.j, 4.j])
    np.log2(xi)
    prob1 = np.log(1e-50)
    prob2 = np.log(2.5e-50)
    prob12 = np.logaddexp(prob1, prob2)
    prob12
    np.exp(prob12)
    prob1 = np.log2(1e-50)
    prob2 = np.log2(2.5e-50)
    prob12 = np.logaddexp2(prob1, prob2)
    prob1, prob2, prob12
    2**prob12
    np.log1p(1e-99)
    np.log(1 + 1e-99)
    # np.logical_and(True, False)
    # np.logical_and([True, False], [False, False])
    x = np.arange(5)
    # np.logical_and(x>1, x<4)
    # np.logical_not(3)
    # np.logical_not([True, False, 0, 1])
    x = np.arange(5)
    # np.logical_not(x<3)
    # np.logical_or(True, False)
    # np.logical_or([True, False], [False, False])
    x = np.arange(5)
    # np.logical_or(x < 1, x > 3)
    # np.logical_xor(True, False)
    # np.logical_xor([True, True, False, False], [True, False, True, False])
    x = np.arange(5)
    # np.logical_xor(x < 1, x > 3)
    # np.logical_xor(0, np.eye(2))
    np.maximum([2, 3, 4], [1, 5, 2])
    # np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
    # np.maximum(np.Inf, 1)
    np.minimum([2, 3, 4], [1, 5, 2])
    # np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
    # np.minimum(-np.Inf, 1)
    np.fmax([2, 3, 4], [1, 5, 2])
    np.fmax(np.eye(2), [0.5, 2])
    # np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.fmin([2, 3, 4], [1, 5, 2])
    np.fmin(np.eye(2), [0.5, 2])
    # np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.modf([0, 3.5])
    np.modf(-0.5)
    np.multiply(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.multiply(x1, x2)
    np.negative([1.,-1.])
    np.not_equal([1.,2.], [1., 3.])
    np.not_equal([1, 2], [[1, 3],[1, 4]])
    x1 = range(6)
    np.power(x1, 3)
    x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
    np.power(x1, x2)
    x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
    np.power(x1, x2)
    deg = np.arange(12.) * 30.
    np.radians(deg)
    out = np.zeros((deg.shape))
    ret = np.radians(deg, out)
    ret is out
    np.deg2rad(180)
    np.reciprocal(2.)
    np.reciprocal([1, 2., 3.33])
    np.remainder([4, 7], [2, 3])
    np.remainder(np.arange(7), 5)
    # np.binary_repr(10)
    np.right_shift(10, 1)
    # np.binary_repr(5)
    np.right_shift(10, [1,2,3])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.rint(a)
    np.sign([-5., 4.5])
    np.sign(0)
    # np.sign(5-2j)
    # np.signbit(-1.2)
    np.signbit(np.array([1, -2.3, 2.1]))
    np.copysign(1.3, -1)
    np.copysign([-1, 0, 1], -1.1)
    np.copysign([-1, 0, 1], np.arange(3)-1)
    np.sin(np.pi/2.)
    np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
    x = np.linspace(-np.pi, np.pi, 201)
    np.sinh(0)
    # np.sinh(np.pi*1j/2)
    np.sqrt([1,4,9])
    np.sqrt([4, -1, -3+4J])
    np.cbrt([1,8,27])
    np.square([-1j, 1])
    np.subtract(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.subtract(x1, x2)
    np.tan(np.array([-np.pi, np.pi/2, np.pi]))
    np.tanh((0, np.pi*1j, np.pi*1j/2))
    x = np.arange(5)
    np.true_divide(x, 4)
    x = np.arange(9)
    y1, y2 = np.frexp(x)
    y1 * 2**y2
    np.ldexp(5, np.arange(4))
    x = np.arange(6)
    np.ldexp(*np.frexp(x))
Example No. 37
 def sigmoid(x):
     return 1 / (1 + np.exp(-x))
Example No. 38
 def nonlinearity(self, x):
     return 1 / (1 + np.exp(-x))
Example No. 39
 def dU(beta):
     # Gradient of U: X^T (sigmoid(X beta) - y) plus the prior term beta / alpha.
     return mp.dot(X.T, (mp.exp(mp.dot(X, beta)) / (1 + mp.exp(mp.dot(X, beta))) - y)) + beta / alpha
Example No. 40
 def forward(self, inputs, *args):
     return 1 / (1 + np.exp(-inputs))
Example No. 41
 def predict(w, x):
     a = np.exp(np.dot(x, w))
     a_sum = np.sum(a, axis=1, keepdims=True)
     prob = a / a_sum
     return prob
Example No. 42
 def check_fn(x):
     y = x + 1
     print(mp.exp(y))
     return mp.sum(2 * y)
Example No. 43
def predict(w, x):
    a = np.exp(np.dot(x, w))
    a_sum = np.sum(a, axis=1, keepdims=True)
    prob = a / a_sum
    return prob