Example #1
def activate(inputs, mode, lower, upper):
    if mode == 'relu':
        return np.maximum(Number(0.0), inputs)
    elif mode == 'drelu':
        return np.minimum(upper, np.maximum(lower, inputs))
    else:
        print('Not Supported')
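A quick way to exercise the 'drelu' branch (which does not touch the framework's Number wrapper) is a small sketch like the one below; the sample array and the bounds are made up for illustration.

x = np.array([-2.0, -0.5, 0.5, 2.0])
print(activate(x, 'drelu', lower=-1.0, upper=1.0))  # values clipped to [-1.0, 1.0]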
Example #2
 def forward(self, X):
     """Forward pass to obtain the action probabilities for each observation in `X`."""
     a = np.dot(self.params['w1'], X.T)
     h = np.maximum(0, a)
     logits = np.dot(h.T, self.params['w2'].T)
     p = 1.0 / (1.0 + np.exp(-logits))
     return p
Example #3
def svm_loss(x, y):
  """
  Computes the loss and gradient for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns:
  - loss: Scalar giving the loss (this variant does not compute dx)
  """

  N = x.shape[0]
  correct_class_scores = x[np.arange(N), y]
  
  #TODO: Support broadcast case: (X,) (X, Y)
  #shape(x) is (d0, d1)
  #shape(correct_class_scores) is (d0,)
  #margins = np.maximum(0, x - correct_class_scores + 1.0)
  margins = np.transpose(np.maximum(0, np.transpose(x) - np.transpose(correct_class_scores) + 1.0))

  loss = (np.sum(margins) - np.sum(margins[np.arange(N), y])) / N

  return loss
Example #4
 def forward(self, X):
     """Forward pass to obtain the action probabilities for each observation in `X`."""
     a = np.dot(self.params['w1'], X.T)
     h = np.maximum(0, a)
     logits = np.dot(h.T, self.params['w2'].T)
     p = 1.0 / (1.0 + np.exp(-logits))
     return p
Example #5
 def loss(self, ps, as_, vs, rs, advs):
     ps = np.maximum(1.0e-5, np.minimum(1.0 - 1e-5, ps))
     policy_grad_loss = -np.sum(np.log(ps) * as_ * advs)
     vf_loss = 0.5*np.sum((vs - rs)**2)
     entropy = -np.sum(ps*np.log(ps))
     loss_ = policy_grad_loss + self.config.vf_wt*vf_loss - self.config.entropy_wt*entropy
     return loss_
Example #6
def svm_loss(x, y):
    """
  Computes the loss and gradient for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns:
  - loss: Scalar giving the loss (this variant does not compute dx)
  """

    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]

    #TODO: Support broadcast case: (X,) (X, Y)
    #shape(x) is (d0, d1)
    #shape(correct_class_scores) is (d0,)
    #margins = np.maximum(0, x - correct_class_scores + 1.0)
    margins = np.transpose(
        np.maximum(0,
                   np.transpose(x) - np.transpose(correct_class_scores) + 1.0))

    loss = (np.sum(margins) - np.sum(margins[np.arange(N), y])) / N

    return loss
Example #7
def svm_loss(x, y, mode):
  """
  Computes the loss and gradient for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C
  - mode: 'cpu' forces the pure-NumPy policy; any other value prefers the MXNet policy

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
  if mode == 'cpu':
    np.set_policy(policy.OnlyNumpyPolicy())
  else:
    np.set_policy(policy.PreferMXNetPolicy())

  N = x.shape[0]
  correct_class_scores = x[np.arange(N), y]
  
  #margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
  margins = np.maximum(0, x - np.expand_dims(correct_class_scores, axis = 1) + 1.0)

  margins[np.arange(N), y] = 0
  loss = np.sum(margins) / N
  num_pos = np.sum(margins > 0, axis=1)
  dx = np.zeros_like(x)
  dx[margins > 0] = 1
  dx[np.arange(N), y] -= num_pos
  dx /= N

  return loss, dx
Example #8
def rel_error(x, y):
    """Returns relative error"""
    if isinstance(x, (int, float, Number)):
        x = float(x)
        y = float(y)
        return abs(x - y) / max(1e-8, abs(x) + abs(y))
    else:
        return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
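rel_error is typically paired with a numerical gradient when checking analytic gradients such as the dx returned by svm_loss above. The centered-difference helper and the sum-of-squares toy function below are illustrative assumptions, not part of the original snippets.

def numerical_gradient(f, x, h=1e-5):
    # Hypothetical helper: centered finite differences, one element at a time.
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        old = x[idx]
        x[idx] = old + h
        fp = f(x)
        x[idx] = old - h
        fm = f(x)
        x[idx] = old
        grad[idx] = (fp - fm) / (2.0 * h)
        it.iternext()
    return grad

x = np.random.randn(4, 3)
analytic = 2.0 * x                                         # gradient of sum(x**2)
numeric = numerical_gradient(lambda t: np.sum(t ** 2), x)
print(rel_error(analytic, numeric))                        # expected to be well below 1e-6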
Example #9
 def forward(self, X):
     a = np.dot(self.params['fc1'], X.T)
     h = np.maximum(0, a)
     logits = np.dot(h.T, self.params['policy_fc_last'].T)
     ps = np.exp(logits - np.max(logits, axis=1, keepdims=True))
     ps /= np.sum(ps, axis=1, keepdims=True)
     vs = np.dot(h.T, self.params['vf_fc_last'].T) + self.params['vf_fc_last_bias']
     return ps, vs
Example #10
def test_operations():
    a = np.ones((2, 3))
    b = np.ones((2, 3))
    c = a + b
    d = -c
    print(d)
    e = np.sin(c**2).T
    print(e)
    f = np.maximum(a, c)
    print(f)
Example #11
def relu(x):
    """
    Computes the forward pass for a layer of rectified linear units (ReLUs).

    Input:
    - x: Inputs, of any shape

    Returns:
    - out: Output, of the same shape as x
    """
    out = np.maximum(0, x)
    return out
Example #12
def relu(x):
    """
    Computes the forward pass for a layer of rectified linear units (ReLUs).

    Input:
    - x: Inputs, of any shape

    Returns:
    - out: Output, of the same shape as x
    """
    out = np.maximum(0, x)
    return out
Example #13
def svm_loss(x, y, mode):
    """
  Computes the loss and gradient for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C
  - mode: 'cpu' forces the pure-NumPy policy; any other value prefers the MXNet policy

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
    if mode == 'cpu':
        np.set_policy(policy.OnlyNumpyPolicy())
    else:
        np.set_policy(policy.PreferMXNetPolicy())

    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]

    #TODO: Support broadcast case: (X,) (X, Y)
    #margins = np.maximum(0, x - correct_class_scores + 1.0)
    margins = np.transpose(
        np.maximum(0,
                   np.transpose(x) - np.transpose(correct_class_scores) + 1.0))

    #margins[np.arange(N), y] = 0
    #loss = np.sum(margins) / N
    loss = (np.sum(margins) - np.sum(margins[np.arange(N), y])) / N
    margins[np.arange(N), y] = 0

    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N

    return loss, dx
Example #14
 def initial_point_reward(self, loc):
     line_width = int(config['target_line_width'] / 2)
     distance = config['forward_distance']
     long_width = line_width + distance
     left_idx = [-line_width, long_width, -line_width, line_width]
     right_idx = [-long_width, line_width, -line_width, line_width]
     down_idx = [-line_width, line_width, -line_width, long_width]
     up_idx = [-line_width, line_width, -long_width, line_width]
     base = np.array([loc[0], loc[0], loc[1], loc[1]])
     idxs = np.array([left_idx, right_idx, down_idx, up_idx])
     for i in range(len(idxs)):
         idxs[i] = np.minimum(
             np.maximum(idxs[i] + base, 0),
             [self.width, self.width, self.height, self.height])
     target_direction = self.count_not_target_points(idxs)
     if target_direction == 0:
         return -300
     elif target_direction == 1:
         return 300
     elif target_direction == 2:
         return 0
     elif target_direction > 2:
         return -200
Example #15
 def loss(self, ps, ys, rs):
     # Prevent log of zero.
     ps = np.maximum(1.0e-5, np.minimum(1.0 - 1e-5, ps))
     step_losses = ys * np.log(ps) + (1.0 - ys) * np.log(1.0 - ps)
     return -np.sum(step_losses * rs)
Example #16
def clip(X, lower, upper):
    return np.minimum(upper, np.maximum(lower, X))
Example #17
 def loss(self, ps, ys, rs):
     # Prevent log of zero.
     ps = np.maximum(1.0e-5, np.minimum(1.0 - 1e-5, ps))
     step_losses = ys * np.log(ps) + (1.0 - ys) * np.log(1.0 - ps)
     return -np.sum(step_losses * rs)
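The np.maximum/np.minimum clamp in the loss functions above exists only to keep np.log finite when a probability hits exactly 0 or 1; a small illustrative check (the sample array is made up):

ps = np.array([0.0, 0.5, 1.0])
clipped = np.maximum(1.0e-5, np.minimum(1.0 - 1e-5, ps))
print(np.log(clipped), np.log(1.0 - clipped))  # all finite, unlike np.log(ps)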
Example #18
def test_ufunc():
    x = np.array([-1.2, 1.2])
    np.absolute(x)
    np.absolute(1.2 + 1j)
    x = np.linspace(start=-10, stop=10, num=101)
    np.add(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.add(x1, x2)
    np.arccos([1, -1])
    x = np.linspace(-1, 1, num=100)
    np.arccosh([np.e, 10.0])
    np.arccosh(1)
    np.arcsin(0)
    np.arcsinh(np.array([np.e, 10.0]))
    np.arctan([0, 1])
    np.pi/4
    x = np.linspace(-10, 10)
    x = np.array([-1, +1, +1, -1])
    y = np.array([-1, -1, +1, +1])
    np.arctan2(y, x) * 180 / np.pi
    np.arctan2([1., -1.], [0., 0.])
    np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
    np.arctanh([0, -0.5])
    np.bitwise_and(13, 17)
    np.bitwise_and(14, 13)
    # np.binary_repr(12)    return str
    np.bitwise_and([14,3], 13)
    np.bitwise_and([11,7], [4,25])
    np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
    np.bitwise_and([True, True], [False, True])
    np.bitwise_or(13, 16)
    # np.binary_repr(29)
    np.bitwise_or(32, 2)
    np.bitwise_or([33, 4], 1)
    np.bitwise_or([33, 4], [1, 2])
    np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
    # np.array([2, 5, 255]) | np.array([4, 4, 4])
    np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
                  np.array([4, 4, 4, 2147483647], dtype=np.int32))
    np.bitwise_or([True, True], [False, True])
    np.bitwise_xor(13, 17)
    # np.binary_repr(28)
    np.bitwise_xor(31, 5)
    np.bitwise_xor([31,3], 5)
    np.bitwise_xor([31,3], [5,6])
    np.bitwise_xor([True, True], [False, True])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.ceil(a)
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.trunc(a)
    np.cos(np.array([0, np.pi/2, np.pi]))
    np.cosh(0)
    x = np.linspace(-4, 4, 1000)
    rad = np.arange(12.)*np.pi/6
    np.degrees(rad)
    out = np.zeros((rad.shape))
    r = np.degrees(rad, out)
    # np.all(r == out) return bool
    np.rad2deg(np.pi/2)
    np.divide(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.divide(2, 4)
    np.divide(2, 4.)
    np.equal([0, 1, 3], np.arange(3))
    np.equal(1, np.ones(1))
    x = np.linspace(-2*np.pi, 2*np.pi, 100)
    np.exp2([2, 3])
    np.expm1(1e-10)
    np.exp(1e-10) - 1
    np.fabs(-1)
    np.fabs([-1.2, 1.2])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.floor(a)
    np.floor_divide(7,3)
    np.floor_divide([1., 2., 3., 4.], 2.5)
    np.fmod([-3, -2, -1, 1, 2, 3], 2)
    np.remainder([-3, -2, -1, 1, 2, 3], 2)
    np.fmod([5, 3], [2, 2.])
    a = np.arange(-3, 3).reshape(3, 2)
    np.fmod(a, [2,2])
    np.greater([4,2],[2,2])
    a = np.array([4,2])
    b = np.array([2,2])
    a > b
    np.greater_equal([4, 2, 1], [2, 2, 2])
    np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
    np.hypot(3*np.ones((3, 3)), [4])
    np.bitwise_not is np.invert
    np.invert(np.array([13], dtype=np.uint8))
    # np.binary_repr(242, width=8)
    np.invert(np.array([13], dtype=np.uint16))
    np.invert(np.array([13], dtype=np.int8))
    # np.binary_repr(-14, width=8)
    np.invert(np.array([True, False]))
    # np.isfinite(1)
    # np.isfinite(0)
    # np.isfinite(np.nan)
    # np.isfinite(np.inf)
    # np.isfinite(np.NINF)
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    np.isfinite(x, y)
    # np.isinf(np.inf)
    # np.isinf(np.nan)
    # np.isinf(np.NINF)
    # np.isinf([np.inf, -np.inf, 1.0, np.nan])
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    # np.isinf(x, y)
    # np.isnan(np.nan)
    # np.isnan(np.inf)
    # np.binary_repr(5)
    np.left_shift(5, 2)
    # np.binary_repr(20)
    np.left_shift(5, [1,2,3])
    np.less([1, 2], [2, 2])
    np.less_equal([4, 2, 1], [2, 2, 2])
    x = np.array([0, 1, 2, 2**4])
    xi = np.array([0+1.j, 1, 2+0.j, 4.j])
    np.log2(xi)
    prob1 = np.log(1e-50)
    prob2 = np.log(2.5e-50)
    prob12 = np.logaddexp(prob1, prob2)
    prob12
    np.exp(prob12)
    prob1 = np.log2(1e-50)
    prob2 = np.log2(2.5e-50)
    prob12 = np.logaddexp2(prob1, prob2)
    prob1, prob2, prob12
    2**prob12
    np.log1p(1e-99)
    np.log(1 + 1e-99)
    # np.logical_and(True, False)
    # np.logical_and([True, False], [False, False])
    x = np.arange(5)
    # np.logical_and(x>1, x<4)
    # np.logical_not(3)
    # np.logical_not([True, False, 0, 1])
    x = np.arange(5)
    # np.logical_not(x<3)
    # np.logical_or(True, False)
    # np.logical_or([True, False], [False, False])
    x = np.arange(5)
    # np.logical_or(x < 1, x > 3)
    # np.logical_xor(True, False)
    # np.logical_xor([True, True, False, False], [True, False, True, False])
    x = np.arange(5)
    # np.logical_xor(x < 1, x > 3)
    # np.logical_xor(0, np.eye(2))
    np.maximum([2, 3, 4], [1, 5, 2])
    # np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
    # np.maximum(np.Inf, 1)
    np.minimum([2, 3, 4], [1, 5, 2])
    # np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
    # np.minimum(-np.Inf, 1)
    np.fmax([2, 3, 4], [1, 5, 2])
    np.fmax(np.eye(2), [0.5, 2])
    # np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.fmin([2, 3, 4], [1, 5, 2])
    np.fmin(np.eye(2), [0.5, 2])
    # np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.modf([0, 3.5])
    np.modf(-0.5)
    np.multiply(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.multiply(x1, x2)
    np.negative([1.,-1.])
    np.not_equal([1.,2.], [1., 3.])
    np.not_equal([1, 2], [[1, 3],[1, 4]])
    x1 = range(6)
    np.power(x1, 3)
    x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
    np.power(x1, x2)
    x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
    np.power(x1, x2)
    deg = np.arange(12.) * 30.
    np.radians(deg)
    out = np.zeros((deg.shape))
    ret = np.radians(deg, out)
    ret is out
    np.deg2rad(180)
    np.reciprocal(2.)
    np.reciprocal([1, 2., 3.33])
    np.remainder([4, 7], [2, 3])
    np.remainder(np.arange(7), 5)
    # np.binary_repr(10)
    np.right_shift(10, 1)
    # np.binary_repr(5)
    np.right_shift(10, [1,2,3])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.rint(a)
    np.sign([-5., 4.5])
    np.sign(0)
    # np.sign(5-2j)
    # np.signbit(-1.2)
    np.signbit(np.array([1, -2.3, 2.1]))
    np.copysign(1.3, -1)
    np.copysign([-1, 0, 1], -1.1)
    np.copysign([-1, 0, 1], np.arange(3)-1)
    np.sin(np.pi/2.)
    np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
    x = np.linspace(-np.pi, np.pi, 201)
    np.sinh(0)
    # np.sinh(np.pi*1j/2)
    np.sqrt([1,4,9])
    np.sqrt([4, -1, -3+4J])
    np.cbrt([1,8,27])
    np.square([-1j, 1])
    np.subtract(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.subtract(x1, x2)
    np.tan(np.array([-np.pi, np.pi/2, np.pi]))
    np.tanh((0, np.pi*1j, np.pi*1j/2))
    x = np.arange(5)
    np.true_divide(x, 4)
    x = np.arange(9)
    y1, y2 = np.frexp(x)
    y1 * 2**y2
    np.ldexp(5, np.arange(4))
    x = np.arange(6)
    np.ldexp(*np.frexp(x))
Example #19
 def forward(self, inputs, parameters):
     lower = parameters[self._lower]
     upper = parameters[self._upper]
     return np.minimum(upper, np.maximum(lower, inputs))
Example #20
def test_ufunc():
    x = np.array([-1.2, 1.2])
    np.absolute(x)
    np.absolute(1.2 + 1j)
    x = np.linspace(start=-10, stop=10, num=101)
    np.add(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.add(x1, x2)
    np.arccos([1, -1])
    x = np.linspace(-1, 1, num=100)
    np.arccosh([np.e, 10.0])
    np.arccosh(1)
    np.arcsin(0)
    np.arcsinh(np.array([np.e, 10.0]))
    np.arctan([0, 1])
    np.pi / 4
    x = np.linspace(-10, 10)
    x = np.array([-1, +1, +1, -1])
    y = np.array([-1, -1, +1, +1])
    np.arctan2(y, x) * 180 / np.pi
    np.arctan2([1., -1.], [0., 0.])
    np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
    np.arctanh([0, -0.5])
    np.bitwise_and(13, 17)
    np.bitwise_and(14, 13)
    # np.binary_repr(12)    return str
    np.bitwise_and([14, 3], 13)
    np.bitwise_and([11, 7], [4, 25])
    np.bitwise_and(np.array([2, 5, 255]), np.array([3, 14, 16]))
    np.bitwise_and([True, True], [False, True])
    np.bitwise_or(13, 16)
    # np.binary_repr(29)
    np.bitwise_or(32, 2)
    np.bitwise_or([33, 4], 1)
    np.bitwise_or([33, 4], [1, 2])
    np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
    # np.array([2, 5, 255]) | np.array([4, 4, 4])
    np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
                  np.array([4, 4, 4, 2147483647], dtype=np.int32))
    np.bitwise_or([True, True], [False, True])
    np.bitwise_xor(13, 17)
    # np.binary_repr(28)
    np.bitwise_xor(31, 5)
    np.bitwise_xor([31, 3], 5)
    np.bitwise_xor([31, 3], [5, 6])
    np.bitwise_xor([True, True], [False, True])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.ceil(a)
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.trunc(a)
    np.cos(np.array([0, np.pi / 2, np.pi]))
    np.cosh(0)
    x = np.linspace(-4, 4, 1000)
    rad = np.arange(12.) * np.pi / 6
    np.degrees(rad)
    out = np.zeros((rad.shape))
    r = np.degrees(rad, out)
    # np.all(r == out) return bool
    np.rad2deg(np.pi / 2)
    np.divide(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.divide(2, 4)
    np.divide(2, 4.)
    np.equal([0, 1, 3], np.arange(3))
    np.equal(1, np.ones(1))
    x = np.linspace(-2 * np.pi, 2 * np.pi, 100)
    np.exp2([2, 3])
    np.expm1(1e-10)
    np.exp(1e-10) - 1
    np.fabs(-1)
    np.fabs([-1.2, 1.2])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.floor(a)
    np.floor_divide(7, 3)
    np.floor_divide([1., 2., 3., 4.], 2.5)
    np.fmod([-3, -2, -1, 1, 2, 3], 2)
    np.remainder([-3, -2, -1, 1, 2, 3], 2)
    np.fmod([5, 3], [2, 2.])
    a = np.arange(-3, 3).reshape(3, 2)
    np.fmod(a, [2, 2])
    np.greater([4, 2], [2, 2])
    a = np.array([4, 2])
    b = np.array([2, 2])
    a > b
    np.greater_equal([4, 2, 1], [2, 2, 2])
    np.hypot(3 * np.ones((3, 3)), 4 * np.ones((3, 3)))
    np.hypot(3 * np.ones((3, 3)), [4])
    np.bitwise_not is np.invert
    np.invert(np.array([13], dtype=np.uint8))
    # np.binary_repr(242, width=8)
    np.invert(np.array([13], dtype=np.uint16))
    np.invert(np.array([13], dtype=np.int8))
    # np.binary_repr(-14, width=8)
    np.invert(np.array([True, False]))
    # np.isfinite(1)
    # np.isfinite(0)
    # np.isfinite(np.nan)
    # np.isfinite(np.inf)
    # np.isfinite(np.NINF)
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    np.isfinite(x, y)
    # np.isinf(np.inf)
    # np.isinf(np.nan)
    # np.isinf(np.NINF)
    # np.isinf([np.inf, -np.inf, 1.0, np.nan])
    x = np.array([-np.inf, 0., np.inf])
    y = np.array([2, 2, 2])
    # np.isinf(x, y)
    # np.isnan(np.nan)
    # np.isnan(np.inf)
    # np.binary_repr(5)
    np.left_shift(5, 2)
    # np.binary_repr(20)
    np.left_shift(5, [1, 2, 3])
    np.less([1, 2], [2, 2])
    np.less_equal([4, 2, 1], [2, 2, 2])
    x = np.array([0, 1, 2, 2**4])
    xi = np.array([0 + 1.j, 1, 2 + 0.j, 4.j])
    np.log2(xi)
    prob1 = np.log(1e-50)
    prob2 = np.log(2.5e-50)
    prob12 = np.logaddexp(prob1, prob2)
    prob12
    np.exp(prob12)
    prob1 = np.log2(1e-50)
    prob2 = np.log2(2.5e-50)
    prob12 = np.logaddexp2(prob1, prob2)
    prob1, prob2, prob12
    2**prob12
    np.log1p(1e-99)
    np.log(1 + 1e-99)
    # np.logical_and(True, False)
    # np.logical_and([True, False], [False, False])
    x = np.arange(5)
    # np.logical_and(x>1, x<4)
    # np.logical_not(3)
    # np.logical_not([True, False, 0, 1])
    x = np.arange(5)
    # np.logical_not(x<3)
    # np.logical_or(True, False)
    # np.logical_or([True, False], [False, False])
    x = np.arange(5)
    # np.logical_or(x < 1, x > 3)
    # np.logical_xor(True, False)
    # np.logical_xor([True, True, False, False], [True, False, True, False])
    x = np.arange(5)
    # np.logical_xor(x < 1, x > 3)
    # np.logical_xor(0, np.eye(2))
    np.maximum([2, 3, 4], [1, 5, 2])
    # np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
    # np.maximum(np.Inf, 1)
    np.minimum([2, 3, 4], [1, 5, 2])
    # np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
    # np.minimum(-np.Inf, 1)
    np.fmax([2, 3, 4], [1, 5, 2])
    np.fmax(np.eye(2), [0.5, 2])
    # np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.fmin([2, 3, 4], [1, 5, 2])
    np.fmin(np.eye(2), [0.5, 2])
    # np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
    np.modf([0, 3.5])
    np.modf(-0.5)
    np.multiply(2.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.multiply(x1, x2)
    np.negative([1., -1.])
    np.not_equal([1., 2.], [1., 3.])
    np.not_equal([1, 2], [[1, 3], [1, 4]])
    x1 = range(6)
    np.power(x1, 3)
    x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
    np.power(x1, x2)
    x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
    np.power(x1, x2)
    deg = np.arange(12.) * 30.
    np.radians(deg)
    out = np.zeros((deg.shape))
    ret = np.radians(deg, out)
    ret is out
    np.deg2rad(180)
    np.reciprocal(2.)
    np.reciprocal([1, 2., 3.33])
    np.remainder([4, 7], [2, 3])
    np.remainder(np.arange(7), 5)
    # np.binary_repr(10)
    np.right_shift(10, 1)
    # np.binary_repr(5)
    np.right_shift(10, [1, 2, 3])
    a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    np.rint(a)
    np.sign([-5., 4.5])
    np.sign(0)
    # np.sign(5-2j)
    # np.signbit(-1.2)
    np.signbit(np.array([1, -2.3, 2.1]))
    np.copysign(1.3, -1)
    np.copysign([-1, 0, 1], -1.1)
    np.copysign([-1, 0, 1], np.arange(3) - 1)
    np.sin(np.pi / 2.)
    np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
    x = np.linspace(-np.pi, np.pi, 201)
    np.sinh(0)
    # np.sinh(np.pi*1j/2)
    np.sqrt([1, 4, 9])
    np.sqrt([4, -1, -3 + 4J])
    np.cbrt([1, 8, 27])
    np.square([-1j, 1])
    np.subtract(1.0, 4.0)
    x1 = np.arange(9.0).reshape((3, 3))
    x2 = np.arange(3.0)
    np.subtract(x1, x2)
    np.tan(np.array([-np.pi, np.pi / 2, np.pi]))
    np.tanh((0, np.pi * 1j, np.pi * 1j / 2))
    x = np.arange(5)
    np.true_divide(x, 4)
    x = np.arange(9)
    y1, y2 = np.frexp(x)
    y1 * 2**y2
    np.ldexp(5, np.arange(4))
    x = np.arange(6)
    np.ldexp(*np.frexp(x))
Example #21
    def MTNE(self):
        # dictionary
        D = np.random.rand(self.p, self.m)

        lambda_ff_list = []

        # Author
        Aprime_list = []
        # weight
        W_list = []
        # dense vector
        F_list = []
        # similarity local
        X_list = []
        X_mask_list = []

        # all sparse embeddings across all timestamps

        # F_big=np.zeros((self.q,self.k))
        X_big = np.zeros((self.q, self.q))
        X_mask_big = np.zeros((self.q, self.q))

        S = np.random.rand(self.q, self.q)

        indexDict_local2global = collections.OrderedDict()
        indexDict_global2local = dict()
        globalIndex = 0

        for key in self.edgeDict:

            A = self.edgeDict[key]

            X = self.initX(A, self.theta)
            X = X / (np.amax(X) - np.amin(X))
            X_list.append(X)

            X_mask = np.zeros((self.q, self.q))
            # number of nodes in the current time
            n = A.shape[0]

            Aprime = np.random.rand(n, self.p)
            Aprime_list.append(Aprime)

            indexDict = dict()
            for i in range(n):
                indexDict[i] = globalIndex + i
                indexDict_global2local[globalIndex + i] = (key, i)
            indexDict_local2global[key] = indexDict

            for i in range(n):
                i_big = indexDict[i]
                for j in range(n):
                    j_big = indexDict[j]
                    X_big[i_big, j_big] = X[i, j]
                    X_mask[i_big, j_big] = 1.
                    X_mask_big[i_big, j_big] = 1.
            X_mask_list.append(X_mask)

            globalIndex += n

            W = np.random.rand(n, self.p)
            W_list.append(W)

            F = np.random.rand(n, self.m)
            F_list.append(F)

            lambda_ff_list.append(random.random())

        F_big = self.concatenateMatrixInList(F_list, self.m, 0)

        loss_t1 = 1000000000.0

        print(loss_t1)
        loss_t = self.epsilon + loss_t1 + 1
        while abs(loss_t - loss_t1) >= self.epsilon:

            #% optimize each element in randomized sequence
            nita = 1. / math.sqrt(self.t)
            self.t = self.t + 1
            loss_t = loss_t1
            loss_t1 = 0.0

            counter = 0

            for key in self.edgeDict:

                X = X_list[counter]
                W = W_list[counter]
                F = F_list[counter]
                Aprime = Aprime_list[counter]
                indexDict = indexDict_local2global[key]
                lambda_ff = lambda_ff_list[counter]
                X_mask = X_mask_list[counter]

                P = self.getP(X_mask, F_big, X_big)

                n = X.shape[0]

                for i in range(n):
                    # for A
                    z = Aprime[i] - nita * (
                        np.dot(np.dot(Aprime[i], W.T) - X[i], W) +
                        self.beta * np.dot(np.dot(Aprime[i], D) - F[i], D.T))
                    update = np.maximum(np.zeros(np.shape(z)),
                                        np.abs(z) - nita * self.lamda_pgd)
                    Aprime[i] = np.sign(z) * update

                    # for F
                    lf_part1 = self.beta * F[i] - np.dot(Aprime[i], D)
                    lf_part2 = np.zeros(self.m)
                    i_big_index = indexDict[i]
                    for j in range(self.q):
                        lf_part2 += self.rho * (F[i] -
                                                F_big[j]) * S[i_big_index, j]

                    val1 = np.dot(F[i], F_big.T)
                    val2 = val1 - np.ones(self.q)
                    val3 = np.dot(val2, F_big)
                    lf_part3 = 0.01 * val3
                    F[i] = F[i] - nita * (lf_part1 + lf_part2 + lf_part3)
                    F_big[i_big_index] = F[i]

                    # vec=np.dot(F[i],F_big.T)-np.ones(self.q)
                    # # print vec.shape
                    # lambda_ff=lambda_ff-nita*np.linalg.norm(vec)

                    # for S
                    ls = (S[i_big_index] -
                          P[i_big_index]) - self.epsilon * np.ones(
                              self.q) - self.alpha * S[i_big_index]
                    S[i_big_index] = S[i_big_index] - nita * ls

                Aprime = self.chechnegtive(Aprime, None, None)

                LW = np.dot((np.dot(W, Aprime.T) - X), Aprime) + self.lamda * W
                W = W - nita * LW
                W = self.chechnegtive(W, None, None)

                p1 = np.dot(Aprime, D) - F
                p2 = self.beta * np.dot(Aprime.T, p1)
                LD = p2 + self.gamma * D
                D = D - nita * LD

                W_list[counter] = W
                F_list[counter] = F
                Aprime_list[counter] = Aprime
                lambda_ff_list[counter] = lambda_ff

                loss_t1_part = self.lossfuction(X, W, Aprime, F, D)
                loss_t1 += loss_t1_part
                counter += 1

            # loss_last=self.laplacianLoss(simM,F)
            simMD, simML = self.getLaplacian(S)
            trval = np.trace(np.dot(np.dot(F_big.T, simML), F_big))
            gapval = self.norm(X_mask_big * (S - X_big))

            loss_t1 += self.rho * trval + self.eta * gapval

            if loss_t < loss_t1 and loss_t != 0:
                break
        # print loss_t
            print(loss_t1)
        return [Aprime_list, F_list, S]
Example #22
    cumulative_update = np.zeros_like(net_params)  # initialize update values
    for ui, k_id in enumerate(kids_rank):
        np.random.seed(noise_seed[k_id])  # reconstruct noise using seed
        cumulative_update += utility[ui] * sign(k_id) * np.random.randn(
            net_params.size)

    gradients = optimizer.get_gradients(cumulative_update /
                                        (2 * N_KID * SIGMA))
    return net_params + gradients, rewards


if __name__ == "__main__":
    # utility instead reward for update parameters (rank transformation)
    base = N_KID * 2  # *2 for mirrored sampling
    rank = np.arange(1, base + 1)
    util_ = np.maximum(0, np.log(base / 2 + 1) - np.log(rank))
    utility = util_ / sum(util_) - 1 / base

    # training
    net_shapes, net_params = build_net()
    env = gym.make(CONFIG['game']).unwrapped
    optimizer = SGD(net_params, LR)
    pool = mp.Pool(processes=N_CORE)
    mar = None  # moving average reward
    for g in range(N_GENERATION):
        t0 = time.time()
        net_params, kid_rewards = train(net_shapes, net_params, optimizer,
                                        utility, pool)

        # test trained net without noise
        net_r = get_reward(
Example #23
def barrier(S0, K, B, tau, r, q, v, M, N):
    S = pricepaths(S0, tau, r, q, v, M, N)
    l = np.min(S, 0) > B
    payoffs = l * np.maximum(S[-1, :] - K, 0)
    return math.exp(-r * tau) * np.mean(payoffs)
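pricepaths is not included in the snippet above; under a geometric-Brownian-motion assumption it could be sketched as follows, with the (M + 1, N) shape convention inferred from how barrier indexes S. The parameter values in the example call are arbitrary.

import math
import numpy as np

def pricepaths(S0, tau, r, q, v, M, N):
    # Hypothetical helper: N simulated GBM paths with M steps over horizon tau;
    # returns an (M + 1, N) array so S[-1, :] are terminal prices and
    # np.min(S, 0) is the per-path minimum, as barrier() expects.
    dt = tau / M
    z = np.random.randn(M, N)
    steps = (r - q - 0.5 * v ** 2) * dt + v * math.sqrt(dt) * z
    log_paths = np.vstack([np.zeros(N), np.cumsum(steps, axis=0)])
    return S0 * np.exp(log_paths)

# Example: a down-and-out call, knocked out if any simulated path touches B or below.
print(barrier(S0=100.0, K=100.0, B=80.0, tau=1.0, r=0.03, q=0.0, v=0.2, M=252, N=10000))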