Example #1
def affine_backward(dout, cache):
    """
  Computes the backward pass for an affine layer.

  Inputs:
  - dout: Upstream derivative, of shape (N, M)
  - cache: Tuple of:
    - x: Input data, of shape (N, d_1, ... d_k)
    - w: Weights, of shape (D, M)

  Returns a tuple of:
  - dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
  - dw: Gradient with respect to w, of shape (D, M)
  - db: Gradient with respect to b, of shape (M,)
  """
    x, w, b = cache
    x_plain = np.reshape(x, (x.shape[0], -1))

    db = np.sum(dout, axis=0)

    dx_plain = np.dot(dout, np.transpose(w))

    dx = np.reshape(dx_plain, x.shape)
    dw = np.dot(np.transpose(x_plain), dout)

    return dx, dw, db
Example #2
def affine_backward(dout, cache):
  """
  Computes the backward pass for an affine layer.

  Inputs:
  - dout: Upstream derivative, of shape (N, M)
  - cache: Tuple of:
    - x: Input data, of shape (N, d_1, ..., d_k)
    - w: Weights, of shape (D, M)

  Returns a tuple of:
  - dx: Gradient with respect to x, of shape (N, d_1, ..., d_k)
  - dw: Gradient with respect to w, of shape (D, M)
  - db: Gradient with respect to b, of shape (M,)
  """
  x, w, b = cache
  x_plain = np.reshape(x, (x.shape[0], -1))

  db = np.sum(dout, axis=0)

  dx_plain = np.dot(dout, np.transpose(w))

  dx = np.reshape(dx_plain, x.shape)
  dw = np.dot(np.transpose(x_plain), dout)

  return dx, dw, db
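A minimal shape check for affine_backward, assuming plain NumPy imported as np; the sizes below are illustrative:

import numpy as np

x = np.random.randn(4, 3, 5)   # N=4, d_1=3, d_2=5, so D=15
w = np.random.randn(15, 6)     # (D, M)
b = np.random.randn(6)         # (M,)
dout = np.random.randn(4, 6)   # upstream derivative, (N, M)

dx, dw, db = affine_backward(dout, (x, w, b))
assert dx.shape == (4, 3, 5)
assert dw.shape == (15, 6)
assert db.shape == (6,)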
Example #3
    def forward(self, inputs, params):
        N, C, W, H = inputs.shape
        inputs = np.transpose(inputs, (0, 2, 3, 1))
        inputs = np.reshape(inputs, (N * W * H, C))

        outputs, running_mean, running_variance = layers.batchnorm(
            inputs, params[self.gamma], params[self.beta],
            params['__training_in_progress__'], self.epsilon, self.momentum,
            self.running_mean, self.running_variance)
        self.running_mean, self.running_variance = running_mean, running_variance
        outputs = np.reshape(outputs, (N, W, H, C))
        outputs = np.transpose(outputs, (0, 3, 1, 2))
        return outputs
Example #4
    def forward(self, inputs, params):
        N, C, W, H = inputs.shape
        inputs = np.transpose(inputs, (0, 2, 3, 1))
        inputs = np.reshape(inputs, (N * W * H, C))

        outputs, running_mean, running_variance = layers.batchnorm(
            inputs, params[self.gamma], params[self.beta],
            params['__training_in_progress__'], self.epsilon, self.momentum,
            self.running_mean, self.running_variance)
        self.running_mean, self.running_variance = running_mean, running_variance
        outputs = np.reshape(outputs, (N, W, H, C))
        outputs = np.transpose(outputs, (0, 3, 1, 2))
        return outputs
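The transpose-then-reshape above turns an (N, C, H, W) blob into an (N*H*W, C) matrix, so an ordinary batchnorm sees one row per spatial location and one column per channel. A quick sanity check of that property, assuming plain NumPy:

import numpy as np

X = np.random.randn(2, 3, 4, 4)                      # (N, C, H, W)
rows = np.transpose(X, (0, 2, 3, 1)).reshape(-1, 3)  # one row per pixel
assert np.allclose(rows.mean(axis=0), X.mean(axis=(0, 2, 3)))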
Example #5
def affine_forward(x, w, b):
    """
  Computes the forward pass for an affine (fully-connected) layer.

  The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
  examples, where each example x[i] has shape (d_1, ..., d_k). We will
  reshape each input into a vector of dimension D = d_1 * ... * d_k, and
  then transform it to an output vector of dimension M.

  Inputs:
  - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
  - w: A numpy array of weights, of shape (D, M)
  - b: A numpy array of biases, of shape (M,)
  
  Returns a tuple of:
  - out: output, of shape (N, M)
  - cache: (x, w, b)
  """

    x_plain = np.reshape(x, (x.shape[0], -1))
    out = np.dot(x_plain, w) + b

    cache = (x, w, b)

    return out, cache
Example #6
def affine_forward(x, w, b):
  """
  Computes the forward pass for an affine (fully-connected) layer.

  The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
  examples, where each example x[i] has shape (d_1, ..., d_k). We will
  reshape each input into a vector of dimension D = d_1 * ... * d_k, and
  then transform it to an output vector of dimension M.

  Inputs:
  - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
  - w: A numpy array of weights, of shape (D, M)
  - b: A numpy array of biases, of shape (M,)
  
  Returns a tuple of:
  - out: output, of shape (N, M)
  - cache: (x, w, b)
  """

  x_plain = np.reshape(x, (x.shape[0], -1))
  out = np.dot(x_plain, w) + b

  cache = (x, w, b) 
  
  return out, cache
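With both halves defined, the backward pass can be verified against centered finite differences. A minimal sketch, assuming plain NumPy imported as np and the affine_forward/affine_backward definitions above:

import numpy as np

def numeric_gradient(f, arr, h=1e-6):
    # Centered-difference gradient of the scalar-valued f with respect to arr.
    grad = np.zeros_like(arr)
    it = np.nditer(arr, flags=['multi_index'])
    while not it.finished:
        i = it.multi_index
        old = arr[i]
        arr[i] = old + h
        fp = f()
        arr[i] = old - h
        fm = f()
        arr[i] = old
        grad[i] = (fp - fm) / (2.0 * h)
        it.iternext()
    return grad

x = np.random.randn(4, 3, 5)
w = np.random.randn(15, 6)
b = np.random.randn(6)
dout = np.random.randn(4, 6)

out, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)

# sum(out * dout) is a scalar whose gradient w.r.t. w is exactly dw.
dw_num = numeric_gradient(lambda: np.sum(affine_forward(x, w, b)[0] * dout), w)
print(np.max(np.abs(dw - dw_num)))  # should be tiny, around 1e-8 or less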
Example #7
def test_index_slice():
    a = np.arange(6)
    a = np.reshape(a, (3, 2))
    print(a[:])
    a[1:2] = -1
    print(a)
    d = np.slice_axis(a, axis=1, begin=1, end=2)
    print(d)
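slice_axis is an MXNet-backed operator that minpy exposes through its np namespace (an assumption about the backend, not shown in the snippet); in plain NumPy the equivalent column slice is simply:

import numpy as np

a = np.arange(6).reshape(3, 2)
a[1:2] = -1
d = a[:, 1:2]  # same result as slice_axis(a, axis=1, begin=1, end=2)
print(d)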
Example #8
def blob_normalization(X,
                       settings,
                       gamma,
                       beta,
                       mode='train',
                       epsilon=1e-5,
                       momentum=0.9,
                       running_mean=None,
                       running_variance=None):
    N, D = map(int, X.shape)
    size = N * D

    if running_mean is None:
        running_mean = np.zeros(1)
    if running_variance is None:
        running_variance = np.zeros(1)

    if mode == 'train':
        if 'shared_mean' in settings:
            mean = np.sum(X) / size
        else:
            mean = np.sum(X, axis=0) / N
            mean = np.reshape(mean, (1, D))

        centered_X = X - mean

        if 'shared_deviation' in settings:
            variance = np.sum(centered_X**2) / size
        else:
            variance = np.sum(centered_X**2, axis=0) / N
            variance = np.reshape(variance, (1, D))

        # Add epsilon before the square root for numerical stability,
        # matching the test branch below.
        deviation = (variance + epsilon)**0.5
        rescaled_X = centered_X / deviation

        out = gamma * rescaled_X + beta

        running_mean = momentum * running_mean + (1.0 - momentum) * mean
        running_variance = momentum * running_variance + (1.0 -
                                                          momentum) * variance

    elif mode == 'test':
        X_hat = (X - running_mean) / np.sqrt(running_variance + epsilon)
        out = gamma * X_hat + beta

    return out, running_mean, running_variance
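A minimal usage sketch for blob_normalization, assuming plain NumPy imported as np; settings is treated as a container of flags, so an empty dict selects per-feature statistics:

import numpy as np

X = np.random.randn(8, 4)
gamma = np.ones((1, 4))
beta = np.zeros((1, 4))

# Training step: returns the normalized output plus updated running statistics.
out, run_mean, run_var = blob_normalization(X, {}, gamma, beta, mode='train')

# Test step: reuses the running statistics instead of batch statistics.
out_test, _, _ = blob_normalization(X, {}, gamma, beta, mode='test',
                                    running_mean=run_mean,
                                    running_variance=run_var)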
Example #9
  def loss_and_derivative(self, X, y=None):
    """
    Compute loss and gradient for the fully-connected net.

    Input / output: Same as TwoLayerNet above.
    """

    X_plain = np.reshape(X, (X.shape[0], -1))
    mode = 'test' if y is None else 'train'

    if self.dropout_param is not None:
      self.dropout_param['mode'] = mode   

    if self.use_batchnorm:
      for bn_param in self.bn_params:
        bn_param['mode'] = mode

    params_array = self.pack_params()

    def train_loss(*args):
      X = args[0]
      y = args[1]

      res = X
      for l in xrange(self.num_layers):
        prev_res = res
        res = affine_forward(prev_res, args[self.w_idx(l)], args[self.b_idx(l)])

        if l < (self.num_layers - 1):
          if self.use_batchnorm:
            res = batchnorm_forward(res, args[self.bn_ga_idx(l)],
                                    args[self.bn_bt_idx(l)], self.bn_params[l])
          res = relu_forward(res)
          if self.use_dropout:
            res = dropout_forward(res, self.dropout_param)

      scores = res

      if mode == 'test':
        return scores

      #loss, _ = softmax_loss(scores, y)
      loss = svm_loss(scores, y)
      return loss

    if y is None:
      return train_loss(X_plain, y, *params_array)

    grad_function = grad_and_loss(train_loss, range(self.data_target_cnt, self.data_target_cnt + len(params_array)))
    grads_array, loss = grad_function(X_plain, y, *params_array)

    grads = {}

    for i, grad in enumerate(grads_array):
      grads[self.param_keys[i]] = grad
    return loss, grads
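grad_and_loss is minpy's autograd helper: given a loss function and the positions of the arguments to differentiate, it returns a function that yields (gradients, loss). A tiny standalone sketch of the pattern, assuming the minpy.core import used by the project:

import minpy.numpy as np
from minpy.core import grad_and_loss

def f(x, w):
    return np.sum(np.dot(x, w) ** 2)

g = grad_and_loss(f, 1)  # differentiate with respect to argument 1 (w) only
dw, loss = g(np.ones((2, 3)), np.ones((3, 4)))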
Example #10
 def forward(self, X, mode):
     # Flatten the input data to matrix. batch_size and flattened_input_size
     # are module-level constants in the original file.
     X = np.reshape(X, (batch_size, flattened_input_size))
     # First affine layer (fully-connected layer).
     y1 = layers.affine(X, self.params['w1'], self.params['b1'])
     # ReLU activation.
     y2 = layers.relu(y1)
     # Second affine layer.
     y3 = layers.affine(y2, self.params['w2'], self.params['b2'])
     return y3
Example #11
    def loss_and_derivative(self, X, y=None):
        """
    Compute loss and gradient for a minibatch of data.

    Inputs:
    - X: Array of input data of shape (N, d_1, ..., d_k)
    - y: Array of labels, of shape (N,). y[i] gives the label for X[i].

    Returns:
    If y is None, then run a test-time forward pass of the model and return:
    - scores: Array of shape (N, C) giving classification scores, where
      scores[i, c] is the classification score for X[i] and class c.

    If y is not None, then run a training-time forward and backward pass and
    return a tuple of:
    - loss: Scalar value giving the loss
    - grads: Dictionary with the same keys as self.params, mapping parameter
      names to gradients of the loss with respect to those parameters.
    """

        # Note: types of X, y are mxnet.ndarray
        def train_loss(X, y, W1, W2, b1, b2):
            l1 = affine_relu_forward(X, W1, b1)
            l2 = affine_forward(l1, W2, b2)
            scores = l2

            if y is None:
                return scores

            #[TODO]: softmax is not supported yet
            # loss, d_scores = softmax_loss(scores, y)
            loss = svm_loss(scores, y)
            loss_with_reg = loss + np.sum(W1**2) * 0.5 * self.reg + np.sum(
                W2**2) * 0.5 * self.reg

            return loss_with_reg

        self.params_array = []
        params_list_name = ['W1', 'W2', 'b1', 'b2']
        for param_name in params_list_name:
            self.params_array.append(self.params[param_name])

        X_plain = np.reshape(X, (X.shape[0], -1))
        if y is None:
            return train_loss(X_plain, y, *self.params_array)

        # Differentiate train_loss w.r.t. arguments 2..5, i.e. W1, W2, b1, b2
        # (arguments 0 and 1 are the data X and the labels y).
        grad_function = grad_and_loss(train_loss, range(2, 6))

        grads_array, loss = grad_function(X_plain, y, *self.params_array)

        grads = {}
        for i in range(len(params_list_name)):
            grads[params_list_name[i]] = grads_array[i]

        return loss, grads
Example #12
  def loss_and_derivative(self, X, y=None):
    """
    Compute loss and gradient for a minibatch of data.

    Inputs:
    - X: Array of input data of shape (N, d_1, ..., d_k)
    - y: Array of labels, of shape (N,). y[i] gives the label for X[i].

    Returns:
    If y is None, then run a test-time forward pass of the model and return:
    - scores: Array of shape (N, C) giving classification scores, where
      scores[i, c] is the classification score for X[i] and class c.

    If y is not None, then run a training-time forward and backward pass and
    return a tuple of:
    - loss: Scalar value giving the loss
    - grads: Dictionary with the same keys as self.params, mapping parameter
      names to gradients of the loss with respect to those parameters.
    """  
    # Note: types of X, y are mxnet.ndarray
    def train_loss(X, y, W1, W2, b1, b2):
      l1 = affine_relu_forward(X, W1, b1)
      l2 = affine_forward(l1, W2, b2)
      scores = l2

      if y is None:
        return scores
   
      #[TODO]: softmax is not supported yet
      # loss, d_scores = softmax_loss(scores, y)
      loss = svm_loss(scores, y)
      loss_with_reg = loss + np.sum(W1 ** 2) * 0.5 * self.reg + np.sum(W2 ** 2) * 0.5 * self.reg

      return loss_with_reg 

    self.params_array = []
    params_list_name = ['W1', 'W2', 'b1', 'b2']
    for param_name in params_list_name:
      self.params_array.append(self.params[param_name])

    X_plain = np.reshape(X, (X.shape[0], -1))
    if y is None:
      return train_loss(X_plain, y, *self.params_array)

    grad_function = grad_and_loss(train_loss, range(2, 6))

    grads_array, loss = grad_function(X_plain, y, *self.params_array)

    grads = {}
    for i in range(len(params_list_name)):
      grads[params_list_name[i]] = grads_array[i]

    return loss, grads
Example #13
 def forward(self, X, mode):
     # Flatten the input data to matrix.
     X = np.reshape(X, (self.batch_size, 784))
     # First affine layer (fully-connected layer).
     y1 = layers.affine(X, self.params['wi'], self.params['bi'])
     # ReLU activation.
     y2 = layers.relu(y1)
     # Hidden layers.
     for i in range(self.num_hidden - 1):
         y2 = layers.affine(y2, self.params['w%d' % i], self.params['b%d' % i])
         y2 = layers.relu(y2)
     # Second affine layer.
     y3 = layers.affine(y2, self.params['wo'], self.params['bo'])
     return y3
Example #14
 def forward(self, X, mode):
     # Flatten the input data to matrix.
     X = np.reshape(X, (batch_size, 3 * 32 * 32))
     # First affine layer (fully-connected layer).
     y1 = layers.affine(X, self.params['w1'], self.params['b1'])
     # ReLU activation.
     y2 = layers.relu(y1)
     # Batch normalization
     y3, self.aux_params['running_mean'], self.aux_params['running_var'] = layers.batchnorm(
         y2, self.params['gamma'], self.params['beta'], running_mean=self.aux_params['running_mean'], \
         running_var=self.aux_params['running_var'])
     # Second affine layer.
     y4 = layers.affine(y3, self.params['w2'], self.params['b2'])
     # Dropout
     y5 = layers.dropout(y4, 0.5, mode=mode)
     return y5
Example #15
 def forward(self, X, mode):
     # Flatten the input data to matrix.
     X = np.reshape(X, (batch_size, 3 * 32 * 32))
     # First affine layer (fully-connected layer).
     y1 = layers.affine(X, self.params['w1'], self.params['b1'])
     # ReLU activation.
     y2 = layers.relu(y1)
     # Batch normalization
     y3, self.aux_params['running_mean'], self.aux_params['running_var'] = layers.batchnorm(
         y2, self.params['gamma'], self.params['beta'], running_mean=self.aux_params['running_mean'], \
         running_var=self.aux_params['running_var'])
     # Second affine layer.
     y4 = layers.affine(y3, self.params['w2'], self.params['b2'])
     # Dropout
     y5 = layers.dropout(y4, 0.5, mode=mode)
     return y5
Example #16
def test_sum_forward():

    np_x = py_np.zeros((2, 10))
    np_w = py_np.zeros((10, 3))
    np_b = py_np.zeros(3)

    x = NumpyVarToMinpy(np_x)
    w = NumpyVarToMinpy(np_w)
    b = NumpyVarToMinpy(np_b)

    x_plain = np.reshape(x, (x.shape[0], -1))
    out0 = np.dot(x_plain, w)
    out = out0 + b

    np_out = MinpyVarToNumpy(out)

    var = py_np.random.randn(2, 3)
    tmp = NumpyVarToMinpy(var)
    sum_tmp = np.sum(tmp, axis=0)

    sum_py = MinpyVarToNumpy(sum_tmp)
Example #17
def test_sum_forward():

  np_x = py_np.zeros((2, 10))
  np_w = py_np.zeros((10, 3))
  np_b = py_np.zeros(3)

  x = NumpyVarToMinpy(np_x)
  w = NumpyVarToMinpy(np_w)
  b = NumpyVarToMinpy(np_b)

  x_plain = np.reshape(x, (x.shape[0], -1))
  out0 = np.dot(x_plain, w)
  out = out0 + b

  np_out = MinpyVarToNumpy(out)

  var = py_np.random.randn(2, 3)
  tmp = NumpyVarToMinpy(var)
  sum_tmp = np.sum(tmp, axis=0)

  sum_py = MinpyVarToNumpy(sum_tmp)
Example #18
 def forward(self, inputs, *args):
     shape = (inputs.shape[0], int(np.prod(np.array(inputs.shape[1:]))))
     return np.reshape(inputs, shape)
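The flatten step keeps the batch dimension and collapses everything else. Standalone, with illustrative shapes and plain NumPy:

import numpy as np

inputs = np.zeros((4, 3, 8, 8))
shape = (inputs.shape[0], int(np.prod(np.array(inputs.shape[1:]))))
print(np.reshape(inputs, shape).shape)  # (4, 192)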
Example #19
File: test.py  Project: xlong88/minpy
def loss(w, x):
    # predict and t (the targets) are defined elsewhere in the original file.
    prob = predict(w, x)
    return np.reshape(-np.sum(np.log(prob) * t) / 10000, (1, 1)) + 0.5 * w * w
Example #20
def test_numeric():
    # 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
    # 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
    # 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer',
    # 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose',
    # 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types',
    # 'min_scalar_type', 'result_type', 'asarray', 'asanyarray',
    # 'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
    # 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
    # 'einsum', 'outer', 'vdot', 'alterdot', 'restoredot', 'roll',
    # 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string',
    # 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str',
    # 'set_string_function', 'little_endian', 'require', 'fromiter',
    # 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load',
    # 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity',
    # 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr',
    # 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate',
    # 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_',
    # 'True_', 'bitwise_not', 'full', 'full_like', 'matmul'
    x = np.arange(6)
    x = x.reshape((2, 3))
    np.zeros_like(x)
    y = np.arange(3, dtype=np.float)
    np.zeros_like(y)
    np.ones(5)
    np.ones((5,), dtype=np.int)
    np.ones((2, 1))
    s = (2,2)
    np.ones(s)
    x = np.arange(6)
    x = x.reshape((2, 3))
    np.ones_like(x)
    y = np.arange(3, dtype=np.float)
    np.ones_like(y)
    np.full((2, 2), np.inf)
    x = np.arange(6, dtype=np.int)
    np.full_like(x, 1)
    np.full_like(x, 0.1)
    np.full_like(y, 0.1)
    np.count_nonzero(np.eye(4))
    np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
    np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=0)
    np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]], axis=1)
    a = [1, 2]
    np.asarray(a)
    a = np.array([1, 2])
    np.asarray(a) is a
    a = np.array([1, 2], dtype=np.float32)
    np.asarray(a, dtype=np.float32) is a
    np.asarray(a, dtype=np.float64) is a
    np.asarray(a) is a
    np.asanyarray(a) is a
    a = [1, 2]
    np.asanyarray(a)
    np.asanyarray(a) is a
    x = np.arange(6).reshape(2,3)
    np.ascontiguousarray(x, dtype=np.float32)
    x = np.arange(6).reshape(2,3)
    y = np.asfortranarray(x)
    x = np.arange(6).reshape(2,3)
    y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
    a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
    np.isfortran(a)
    b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
    np.isfortran(b)
    a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
    np.isfortran(a)
    b = a.T
    np.isfortran(b)
    np.isfortran(np.array([1, 2], order='FORTRAN'))
    x = np.arange(6).reshape(2,3)
    np.argwhere(x>1)
    x = np.arange(-2, 3)
    np.flatnonzero(x)
    np.correlate([1, 2, 3], [0, 1, 0.5])
    np.correlate([1, 2, 3], [0, 1, 0.5], "same")
    np.correlate([1, 2, 3], [0, 1, 0.5], "full")
    np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
    np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
    np.convolve([1, 2, 3], [0, 1, 0.5])
    np.convolve([1,2,3],[0,1,0.5], 'same')
    np.convolve([1,2,3],[0,1,0.5], 'valid')
    rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
    # im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
    # grid = rl + im
    x = np.array(['a', 'b', 'c'], dtype=object)
    np.outer(x, [1, 2, 3])
    a = np.arange(60.).reshape(3,4,5)
    b = np.arange(24.).reshape(4,3,2)
    c = np.tensordot(a,b, axes=([1,0],[0,1]))
    c.shape
    # A slower but equivalent way of computing the same...
    d = np.zeros((5,2))
    a = np.array(range(1, 9))
    A = np.array(('a', 'b', 'c', 'd'), dtype=object)
    x = np.arange(10)
    np.roll(x, 2)
    x2 = np.reshape(x, (2,5))
    np.roll(x2, 1)
    np.roll(x2, 1, axis=0)
    np.roll(x2, 1, axis=1)
    a = np.ones((3,4,5,6))
    np.rollaxis(a, 3, 1).shape
    np.rollaxis(a, 2).shape
    np.rollaxis(a, 1, 4).shape
    x = np.zeros((3, 4, 5))
    np.moveaxis(x, 0, -1).shape
    np.moveaxis(x, -1, 0).shape
    np.transpose(x).shape
    np.moveaxis(x, [0, 1], [-1, -2]).shape
    np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
    x = [1, 2, 3]
    y = [4, 5, 6]
    np.cross(x, y)
    x = [1, 2]
    y = [4, 5, 6]
    np.cross(x, y)
    x = [1, 2, 0]
    y = [4, 5, 6]
    np.cross(x, y)
    x = [1,2]
    y = [4,5]
    np.cross(x, y)
    x = np.array([[1,2,3], [4,5,6]])
    y = np.array([[4,5,6], [1,2,3]])
    np.cross(x, y)
    np.cross(x, y, axisc=0)
    x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
    y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
    np.cross(x, y)
    np.cross(x, y, axisa=0, axisb=0)
    # np.array_repr(np.array([1,2]))
    # np.array_repr(np.ma.array([0.]))
    # np.array_repr(np.array([], np.int32))
    x = np.array([1e-6, 4e-7, 2, 3])
    # np.array_repr(x, precision=6, suppress_small=True)
    # np.array_str(np.arange(3))
    a = np.arange(10)
    x = np.arange(4)
    np.set_string_function(lambda x:'random', repr=False)
    grid = np.indices((2, 3))
    grid.shape
    grid[0]        # row indices
    grid[1]        # column indices
    x = np.arange(20).reshape(5, 4)
    row, col = np.indices((2, 3))
    x[row, col]
    np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
    np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
    np.isscalar(3.1)
    np.isscalar([3.1])
    np.isscalar(False)
    # np.binary_repr(3)
    # np.binary_repr(-3)
    # np.binary_repr(3, width=4)
    # np.binary_repr(-3, width=3)
    # np.binary_repr(-3, width=5)
    # np.base_repr(5)
    # np.base_repr(6, 5)
    # np.base_repr(7, base=5, padding=3)
    # np.base_repr(10, base=16)
    # np.base_repr(32, base=16)
    np.identity(3)
    np.allclose([1e10,1e-7], [1.00001e10,1e-8])
    np.allclose([1e10,1e-8], [1.00001e10,1e-9])
    np.allclose([1e10,1e-8], [1.0001e10,1e-9])
    # np.allclose([1.0, np.nan], [1.0, np.nan])
    # np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    np.isclose([1e10,1e-7], [1.00001e10,1e-8])
    np.isclose([1e10,1e-8], [1.00001e10,1e-9])
    np.isclose([1e10,1e-8], [1.0001e10,1e-9])
    # np.isclose([1.0, np.nan], [1.0, np.nan])
    # np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    np.array_equal([1, 2], [1, 2])
    np.array_equal(np.array([1, 2]), np.array([1, 2]))
    np.array_equal([1, 2], [1, 2, 3])
    np.array_equal([1, 2], [1, 4])
    np.array_equiv([1, 2], [1, 2])
    np.array_equiv([1, 2], [1, 3])
    np.array_equiv([1, 2], [[1, 2], [1, 2]])
    np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
    np.array_equiv([1, 2], [[1, 2], [1, 3]])
Example #21
    def loss_and_derivative(self, X, y=None):
        """
    Compute loss and gradient for the fully-connected net.

    Input / output: Same as TwoLayerNet above.
    """

        X_plain = np.reshape(X, (X.shape[0], -1))
        mode = 'test' if y is None else 'train'

        if self.dropout_param is not None:
            self.dropout_param['mode'] = mode

        if self.use_batchnorm:
            for bn_param in self.bn_params:
                bn_param['mode'] = mode

        params_array = self.pack_params()

        def train_loss(*args):
            X = args[0]
            y = args[1]

            res = X
            for l in xrange(self.num_layers):
                prev_res = res
                res = affine_forward(prev_res, args[self.w_idx(l)],
                                     args[self.b_idx(l)])

                if l < (self.num_layers - 1):
                    if self.use_batchnorm:
                        res = batchnorm_forward(res, args[self.bn_ga_idx(l)],
                                                args[self.bn_bt_idx(l)],
                                                self.bn_params[l])
                    res = relu_forward(res)
                    if self.use_dropout:
                        res = dropout_forward(res, self.dropout_param)

            scores = res

            if mode == 'test':
                return scores

            #loss, _ = softmax_loss(scores, y)
            loss = svm_loss(scores, y)
            return loss

        if y is None:
            return train_loss(X_plain, y, *params_array)

        grad_function = grad_and_loss(
            train_loss,
            range(self.data_target_cnt,
                  self.data_target_cnt + len(params_array)))
        grads_array, loss = grad_function(X_plain, y, *params_array)

        grads = {}

        for i, grad in enumerate(grads_array):
            grads[self.param_keys[i]] = grad
        return loss, grads
Example #22
def test_fromnumeric():
    # Functions
    # 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
    # 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
    # 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
    # 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
    # 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
    # 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
    # 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
    a = [4, 3, 5, 7, 6, 8]
    indices = [0, 1, 4]
    np.take(a, indices)
    a = np.array(a)
    # a[indices]
    np.take(a, [[0, 1], [2, 3]])
    a = np.zeros((10, 2))
    b = a.T
    a = np.arange(6).reshape((3, 2))
    np.reshape(a, (2, 3)) # C-like index ordering
    np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
    np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
    np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
    a = np.array([[1,2,3], [4,5,6]])
    np.reshape(a, 6)
    np.reshape(a, 6, order='F')
    np.reshape(a, (3,-1))       # the unspecified value is inferred to be 2
    choices = [[0, 1, 2, 3], [10, 11, 12, 13],
               [20, 21, 22, 23], [30, 31, 32, 33]]
    np.choose([2, 3, 1, 0], choices)
    np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
    np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
    a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
    choices = [-10, 10]
    np.choose(a, choices)
    a = np.array([0, 1]).reshape((2,1,1))
    c1 = np.array([1, 2, 3]).reshape((1,3,1))
    c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
    np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
    np.repeat(3, 4)
    x = np.array([[1,2],[3,4]])
    np.repeat(x, 2)
    np.repeat(x, 3, axis=1)
    np.repeat(x, [1, 2], axis=0)
    a = np.arange(5)
    np.put(a, [0, 2], [-44, -55])
    a = np.arange(5)
    np.put(a, 22, -5, mode='clip')
    x = np.array([[1,2,3]])
    np.swapaxes(x,0,1)
    x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
    np.swapaxes(x,0,2)
    x = np.arange(4).reshape((2,2))
    np.transpose(x)
    x = np.ones((1, 2, 3))
    np.transpose(x, (1, 0, 2)).shape
    a = np.array([3, 4, 2, 1])
    np.partition(a, 3)
    np.partition(a, (1, 3))
    x = np.array([3, 4, 2, 1])
    x[np.argpartition(x, 3)]
    x[np.argpartition(x, (1, 3))]
    x = [3, 4, 2, 1]
    np.array(x)[np.argpartition(x, 3)]
    a = np.array([[1,4],[3,1]])
    np.sort(a)                # sort along the last axis
    np.sort(a, axis=None)     # sort the flattened array
    np.sort(a, axis=0)        # sort along the first axis
    dtype = [('name', 'S10'), ('height', float), ('age', int)]
    values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
              ('Galahad', 1.7, 38)]
    a = np.array(values, dtype=dtype)       # create a structured array
    np.sort(a, order='height')                        # doctest: +SKIP
    np.sort(a, order=['age', 'height'])               # doctest: +SKIP
    x = np.array([3, 1, 2])
    np.argsort(x)
    x = np.array([[0, 3], [2, 2]])
    np.argsort(x, axis=0)
    np.argsort(x, axis=1)
    x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
    np.argsort(x, order=('x','y'))
    np.argsort(x, order=('y','x'))
    a = np.arange(6).reshape(2,3)
    np.argmax(a)
    np.argmax(a, axis=0)
    np.argmax(a, axis=1)
    b = np.arange(6)
    b[1] = 5
    np.argmax(b) # Only the first occurrence is returned.
    a = np.arange(6).reshape(2,3)
    np.argmin(a)
    np.argmin(a, axis=0)
    np.argmin(a, axis=1)
    b = np.arange(6)
    b[4] = 0
    np.argmin(b) # Only the first occurrence is returned.
    np.searchsorted([1,2,3,4,5], 3)
    np.searchsorted([1,2,3,4,5], 3, side='right')
    np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
    a=np.array([[0,1],[2,3]])
    np.resize(a,(2,3))
    np.resize(a,(1,4))
    np.resize(a,(2,4))
    x = np.array([[[0], [1], [2]]])
    x.shape
    np.squeeze(x).shape
    np.squeeze(x, axis=(2,)).shape
    a = np.arange(4).reshape(2,2)
    a = np.arange(8).reshape(2,2,2); a
    a[:,:,0] # main diagonal is [0 6]
    a[:,:,1] # main diagonal is [1 7]
    np.trace(np.eye(3))
    a = np.arange(8).reshape((2,2,2))
    np.trace(a)
    a = np.arange(24).reshape((2,2,2,3))
    np.trace(a).shape
    x = np.array([[1, 2, 3], [4, 5, 6]])
    np.ravel(x)
    x.reshape(-1)
    np.ravel(x, order='F')
    np.ravel(x.T)
    np.ravel(x.T, order='A')
    a = np.arange(3)[::-1]; a
    # a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
    x = np.eye(3)
    np.nonzero(x)
    x[np.nonzero(x)]
    np.transpose(np.nonzero(x))
    a = np.array([[1,2,3],[4,5,6],[7,8,9]])
    a > 3
    np.nonzero(a > 3)
    np.shape(np.eye(3))
    np.shape([[1, 2]])
    np.shape([0])
    np.shape(0)
    a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
    np.shape(a)
    a.shape
    a = np.array([[1, 2], [3, 4], [5, 6]])
    np.compress([0, 1], a, axis=0)
    np.compress([False, True, True], a, axis=0)
    np.compress([False, True], a, axis=1)
    np.compress([False, True], a)
    a = np.arange(10)
    np.clip(a, 1, 8)
    np.clip(a, 3, 6, out=a)
    a = np.arange(10)
    np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
    np.sum([])
    np.sum([0.5, 1.5])
    np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
    np.sum([[0, 1], [0, 5]])
    np.sum([[0, 1], [0, 5]], axis=0)
    np.sum([[0, 1], [0, 5]], axis=1)
    # np.ones(128, dtype=np.int8).sum(dtype=np.int8)
    # np.any([[True, False], [True, True]])
    # np.any([[True, False], [False, False]], axis=0)
    # np.any([-1, 0, 5])
    # np.any(np.nan)
    # np.all([[True,False],[True,True]])
    # np.all([[True,False],[True,True]], axis=0)
    # np.all([-1, 4, 5])
    # np.all([1.0, np.nan])
    a = np.array([[1,2,3], [4,5,6]])
    np.cumsum(a)
    np.cumsum(a, dtype=float)     # specifies type of output value(s)
    np.cumsum(a,axis=0)      # sum over rows for each of the 3 columns
    np.cumsum(a,axis=1)      # sum over columns for each of the 2 rows
    x = np.arange(4).reshape((2,2))
    np.ptp(x, axis=0)
    np.ptp(x, axis=1)
    a = np.arange(4).reshape((2,2))
    np.amax(a)           # Maximum of the flattened array
    np.amax(a, axis=0)   # Maxima along the first axis
    np.amax(a, axis=1)   # Maxima along the second axis
    b = np.arange(5, dtype=np.float)
    # b[2] = np.NaN
    np.amax(b)
    np.nanmax(b)
    a = np.arange(4).reshape((2,2))
    np.amin(a)           # Minimum of the flattened array
    np.amin(a, axis=0)   # Minima along the first axis
    np.amin(a, axis=1)   # Minima along the second axis
    b = np.arange(5, dtype=np.float)
    # b[2] = np.NaN
    np.amin(b)
    np.nanmin(b)
    a = np.zeros((7,4,5))
    a.shape[0]
    np.alen(a)
    x = np.array([536870910, 536870910, 536870910, 536870910])
    np.prod(x) #random
    np.prod([])
    np.prod([1.,2.])
    np.prod([[1.,2.],[3.,4.]])
    np.prod([[1.,2.],[3.,4.]], axis=1)
    x = np.array([1, 2, 3], dtype=np.uint8)
    # np.prod(x).dtype == np.uint
    x = np.array([1, 2, 3], dtype=np.int8)
    # np.prod(x).dtype == np.int
    a = np.array([1,2,3])
    np.cumprod(a) # intermediate results 1, 1*2
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.cumprod(a, dtype=float) # specify type of output
    np.cumprod(a, axis=0)
    np.cumprod(a,axis=1)
    np.ndim([[1,2,3],[4,5,6]])
    np.ndim(np.array([[1,2,3],[4,5,6]]))
    np.ndim(1)
    a = np.array([[1,2,3],[4,5,6]])
    np.size(a)
    np.size(a,1)
    np.size(a,0)
    np.around([0.37, 1.64])
    np.around([0.37, 1.64], decimals=1)
    np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
    np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
    np.around([1,2,3,11], decimals=-1)
    a = np.array([[1, 2], [3, 4]])
    np.mean(a)
    np.mean(a, axis=0)
    np.mean(a, axis=1)
    a = np.zeros((2, 512*512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.mean(a)
    np.mean(a, dtype=np.float64)
    a = np.array([[1, 2], [3, 4]])
    np.std(a)
    np.std(a, axis=0)
    np.std(a, axis=1)
    a = np.zeros((2, 512*512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.std(a)
    np.std(a, dtype=np.float64)
    a = np.array([[1, 2], [3, 4]])
    np.var(a)
    np.var(a, axis=0)
    np.var(a, axis=1)
    a = np.zeros((2, 512*512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.var(a)
    np.var(a, dtype=np.float64)
Example #23
File: lstm.py  Project: pombredanne/minpy
 def get(self, vect, name):
     idxs, shape = self.idxs_and_shapes[name]
     return np.reshape(vect[idxs], shape)
Example #24
def test_fromnumeric():
    # Functions
    # 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
    # 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
    # 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
    # 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
    # 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
    # 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
    # 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
    a = [4, 3, 5, 7, 6, 8]
    indices = [0, 1, 4]
    np.take(a, indices)
    a = np.array(a)
    # a[indices]
    np.take(a, [[0, 1], [2, 3]])
    a = np.zeros((10, 2))
    b = a.T
    a = np.arange(6).reshape((3, 2))
    np.reshape(a, (2, 3))  # C-like index ordering
    np.reshape(np.ravel(a), (2, 3))  # equivalent to C ravel then C reshape
    np.reshape(a, (2, 3), order='F')  # Fortran-like index ordering
    np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.reshape(a, 6)
    np.reshape(a, 6, order='F')
    np.reshape(a, (3, -1))  # the unspecified value is inferred to be 2
    choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23],
               [30, 31, 32, 33]]
    np.choose([2, 3, 1, 0], choices)
    np.choose([2, 4, 1, 0], choices, mode='clip')  # 4 goes to 3 (4-1)
    np.choose([2, 4, 1, 0], choices, mode='wrap')  # 4 goes to (4 mod 4)
    a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
    choices = [-10, 10]
    np.choose(a, choices)
    a = np.array([0, 1]).reshape((2, 1, 1))
    c1 = np.array([1, 2, 3]).reshape((1, 3, 1))
    c2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5))
    np.choose(a, (c1, c2))  # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
    np.repeat(3, 4)
    x = np.array([[1, 2], [3, 4]])
    np.repeat(x, 2)
    np.repeat(x, 3, axis=1)
    np.repeat(x, [1, 2], axis=0)
    a = np.arange(5)
    np.put(a, [0, 2], [-44, -55])
    a = np.arange(5)
    np.put(a, 22, -5, mode='clip')
    x = np.array([[1, 2, 3]])
    np.swapaxes(x, 0, 1)
    x = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
    np.swapaxes(x, 0, 2)
    x = np.arange(4).reshape((2, 2))
    np.transpose(x)
    x = np.ones((1, 2, 3))
    np.transpose(x, (1, 0, 2)).shape
    a = np.array([3, 4, 2, 1])
    np.partition(a, 3)
    np.partition(a, (1, 3))
    x = np.array([3, 4, 2, 1])
    x[np.argpartition(x, 3)]
    x[np.argpartition(x, (1, 3))]
    x = [3, 4, 2, 1]
    np.array(x)[np.argpartition(x, 3)]
    a = np.array([[1, 4], [3, 1]])
    np.sort(a)  # sort along the last axis
    np.sort(a, axis=None)  # sort the flattened array
    np.sort(a, axis=0)  # sort along the first axis
    dtype = [('name', 'S10'), ('height', float), ('age', int)]
    values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), ('Galahad', 1.7, 38)]
    a = np.array(values, dtype=dtype)  # create a structured array
    np.sort(a, order='height')  # doctest: +SKIP
    np.sort(a, order=['age', 'height'])  # doctest: +SKIP
    x = np.array([3, 1, 2])
    np.argsort(x)
    x = np.array([[0, 3], [2, 2]])
    np.argsort(x, axis=0)
    np.argsort(x, axis=1)
    x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
    np.argsort(x, order=('x', 'y'))
    np.argsort(x, order=('y', 'x'))
    a = np.arange(6).reshape(2, 3)
    np.argmax(a)
    np.argmax(a, axis=0)
    np.argmax(a, axis=1)
    b = np.arange(6)
    b[1] = 5
    np.argmax(b)  # Only the first occurrence is returned.
    a = np.arange(6).reshape(2, 3)
    np.argmin(a)
    np.argmin(a, axis=0)
    np.argmin(a, axis=1)
    b = np.arange(6)
    b[4] = 0
    np.argmin(b)  # Only the first occurrence is returned.
    np.searchsorted([1, 2, 3, 4, 5], 3)
    np.searchsorted([1, 2, 3, 4, 5], 3, side='right')
    np.searchsorted([1, 2, 3, 4, 5], [-10, 10, 2, 3])
    a = np.array([[0, 1], [2, 3]])
    np.resize(a, (2, 3))
    np.resize(a, (1, 4))
    np.resize(a, (2, 4))
    x = np.array([[[0], [1], [2]]])
    x.shape
    np.squeeze(x).shape
    np.squeeze(x, axis=(2, )).shape
    a = np.arange(4).reshape(2, 2)
    a = np.arange(8).reshape(2, 2, 2)
    a
    a[:, :, 0]  # main diagonal is [0 6]
    a[:, :, 1]  # main diagonal is [1 7]
    np.trace(np.eye(3))
    a = np.arange(8).reshape((2, 2, 2))
    np.trace(a)
    a = np.arange(24).reshape((2, 2, 2, 3))
    np.trace(a).shape
    x = np.array([[1, 2, 3], [4, 5, 6]])
    np.ravel(x)
    x.reshape(-1)
    np.ravel(x, order='F')
    np.ravel(x.T)
    np.ravel(x.T, order='A')
    a = np.arange(3)[::-1]
    a
    # a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
    x = np.eye(3)
    np.nonzero(x)
    x[np.nonzero(x)]
    np.transpose(np.nonzero(x))
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    a > 3
    np.nonzero(a > 3)
    np.shape(np.eye(3))
    np.shape([[1, 2]])
    np.shape([0])
    np.shape(0)
    a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
    np.shape(a)
    a.shape
    a = np.array([[1, 2], [3, 4], [5, 6]])
    np.compress([0, 1], a, axis=0)
    np.compress([False, True, True], a, axis=0)
    np.compress([False, True], a, axis=1)
    np.compress([False, True], a)
    a = np.arange(10)
    np.clip(a, 1, 8)
    np.clip(a, 3, 6, out=a)
    a = np.arange(10)
    np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
    np.sum([])
    np.sum([0.5, 1.5])
    np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
    np.sum([[0, 1], [0, 5]])
    np.sum([[0, 1], [0, 5]], axis=0)
    np.sum([[0, 1], [0, 5]], axis=1)
    # np.ones(128, dtype=np.int8).sum(dtype=np.int8)
    # np.any([[True, False], [True, True]])
    # np.any([[True, False], [False, False]], axis=0)
    # np.any([-1, 0, 5])
    # np.any(np.nan)
    # np.all([[True,False],[True,True]])
    # np.all([[True,False],[True,True]], axis=0)
    # np.all([-1, 4, 5])
    # np.all([1.0, np.nan])
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.cumsum(a)
    np.cumsum(a, dtype=float)  # specifies type of output value(s)
    np.cumsum(a, axis=0)  # sum over rows for each of the 3 columns
    np.cumsum(a, axis=1)  # sum over columns for each of the 2 rows
    x = np.arange(4).reshape((2, 2))
    np.ptp(x, axis=0)
    np.ptp(x, axis=1)
    a = np.arange(4).reshape((2, 2))
    np.amax(a)  # Maximum of the flattened array
    np.amax(a, axis=0)  # Maxima along the first axis
    np.amax(a, axis=1)  # Maxima along the second axis
    b = np.arange(5, dtype=np.float)
    # b[2] = np.NaN
    np.amax(b)
    np.nanmax(b)
    a = np.arange(4).reshape((2, 2))
    np.amin(a)  # Minimum of the flattened array
    np.amin(a, axis=0)  # Minima along the first axis
    np.amin(a, axis=1)  # Minima along the second axis
    b = np.arange(5, dtype=np.float)
    # b[2] = np.NaN
    np.amin(b)
    np.nanmin(b)
    a = np.zeros((7, 4, 5))
    a.shape[0]
    np.alen(a)
    x = np.array([536870910, 536870910, 536870910, 536870910])
    np.prod(x)  #random
    np.prod([])
    np.prod([1., 2.])
    np.prod([[1., 2.], [3., 4.]])
    np.prod([[1., 2.], [3., 4.]], axis=1)
    x = np.array([1, 2, 3], dtype=np.uint8)
    # np.prod(x).dtype == np.uint
    x = np.array([1, 2, 3], dtype=np.int8)
    # np.prod(x).dtype == np.int
    a = np.array([1, 2, 3])
    np.cumprod(a)  # intermediate results 1, 1*2
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.cumprod(a, dtype=float)  # specify type of output
    np.cumprod(a, axis=0)
    np.cumprod(a, axis=1)
    np.ndim([[1, 2, 3], [4, 5, 6]])
    np.ndim(np.array([[1, 2, 3], [4, 5, 6]]))
    np.ndim(1)
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.size(a)
    np.size(a, 1)
    np.size(a, 0)
    np.around([0.37, 1.64])
    np.around([0.37, 1.64], decimals=1)
    np.around([.5, 1.5, 2.5, 3.5, 4.5])  # rounds to nearest even value
    np.around([1, 2, 3, 11], decimals=1)  # ndarray of ints is returned
    np.around([1, 2, 3, 11], decimals=-1)
    a = np.array([[1, 2], [3, 4]])
    np.mean(a)
    np.mean(a, axis=0)
    np.mean(a, axis=1)
    a = np.zeros((2, 512 * 512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.mean(a)
    np.mean(a, dtype=np.float64)
    a = np.array([[1, 2], [3, 4]])
    np.std(a)
    np.std(a, axis=0)
    np.std(a, axis=1)
    a = np.zeros((2, 512 * 512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.std(a)
    np.std(a, dtype=np.float64)
    a = np.array([[1, 2], [3, 4]])
    np.var(a)
    np.var(a, axis=0)
    np.var(a, axis=1)
    a = np.zeros((2, 512 * 512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.var(a)
    np.var(a, dtype=np.float64)
Example #25
 def forward(self, inputs, *args):
     shape = tuple([inputs.shape[0]] + list(self.shape))
     return np.reshape(inputs, shape)
Example #26
 def set(self, vect, name, new):
     """Set the value of name in vect as new and return new array
     """
     idxs, shape = self.idxs_and_shapes[name]
     vect[idxs] = np.reshape(new, -1)
     return vect
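get and set rely on an idxs_and_shapes table mapping each parameter name to a slice of one flat vector plus its original shape. A minimal sketch of how such a table could be built (the helper name and layout are illustrative, not from the original project):

import numpy as np

def build_idxs_and_shapes(param_shapes):
    # param_shapes: dict of name -> shape tuple.
    idxs_and_shapes = {}
    start = 0
    for name, shape in param_shapes.items():
        size = int(np.prod(shape))
        idxs_and_shapes[name] = (slice(start, start + size), shape)
        start += size
    return idxs_and_shapes, start  # table plus total vector length

table, total = build_idxs_and_shapes({'w': (3, 2), 'b': (2,)})
vect = np.zeros(total)
idxs, shape = table['w']
vect[idxs] = np.reshape(np.ones((3, 2)), -1)  # mirrors set() above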
Example #27
 def forward(self, inputs, *args):
     shape = tuple([inputs.shape[0]] + list(self.shape))
     return np.reshape(inputs, shape)
Example #28
def test_numeric():
    # 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
    # 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
    # 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer',
    # 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose',
    # 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types',
    # 'min_scalar_type', 'result_type', 'asarray', 'asanyarray',
    # 'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
    # 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
    # 'einsum', 'outer', 'vdot', 'alterdot', 'restoredot', 'roll',
    # 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string',
    # 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str',
    # 'set_string_function', 'little_endian', 'require', 'fromiter',
    # 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load',
    # 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity',
    # 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr',
    # 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate',
    # 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_',
    # 'True_', 'bitwise_not', 'full', 'full_like', 'matmul'
    x = np.arange(6)
    x = x.reshape((2, 3))
    np.zeros_like(x)
    y = np.arange(3, dtype=np.float)
    np.zeros_like(y)
    np.ones(5)
    np.ones((5, ), dtype=np.int)
    np.ones((2, 1))
    s = (2, 2)
    np.ones(s)
    x = np.arange(6)
    x = x.reshape((2, 3))
    np.ones_like(x)
    y = np.arange(3, dtype=np.float)
    np.ones_like(y)
    np.full((2, 2), np.inf)
    x = np.arange(6, dtype=np.int)
    np.full_like(x, 1)
    np.full_like(x, 0.1)
    np.full_like(y, 0.1)
    np.count_nonzero(np.eye(4))
    np.count_nonzero([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
    np.count_nonzero([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]], axis=0)
    np.count_nonzero([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]], axis=1)
    a = [1, 2]
    np.asarray(a)
    a = np.array([1, 2])
    np.asarray(a) is a
    a = np.array([1, 2], dtype=np.float32)
    np.asarray(a, dtype=np.float32) is a
    np.asarray(a, dtype=np.float64) is a
    np.asarray(a) is a
    np.asanyarray(a) is a
    a = [1, 2]
    np.asanyarray(a)
    np.asanyarray(a) is a
    x = np.arange(6).reshape(2, 3)
    np.ascontiguousarray(x, dtype=np.float32)
    x = np.arange(6).reshape(2, 3)
    y = np.asfortranarray(x)
    x = np.arange(6).reshape(2, 3)
    y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
    a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
    np.isfortran(a)
    b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
    np.isfortran(b)
    a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
    np.isfortran(a)
    b = a.T
    np.isfortran(b)
    np.isfortran(np.array([1, 2], order='FORTRAN'))
    x = np.arange(6).reshape(2, 3)
    np.argwhere(x > 1)
    x = np.arange(-2, 3)
    np.flatnonzero(x)
    np.correlate([1, 2, 3], [0, 1, 0.5])
    np.correlate([1, 2, 3], [0, 1, 0.5], "same")
    np.correlate([1, 2, 3], [0, 1, 0.5], "full")
    np.correlate([1 + 1j, 2, 3 - 1j], [0, 1, 0.5j], 'full')
    np.correlate([0, 1, 0.5j], [1 + 1j, 2, 3 - 1j], 'full')
    np.convolve([1, 2, 3], [0, 1, 0.5])
    np.convolve([1, 2, 3], [0, 1, 0.5], 'same')
    np.convolve([1, 2, 3], [0, 1, 0.5], 'valid')
    rl = np.outer(np.ones((5, )), np.linspace(-2, 2, 5))
    # im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
    # grid = rl + im
    x = np.array(['a', 'b', 'c'], dtype=object)
    np.outer(x, [1, 2, 3])
    a = np.arange(60.).reshape(3, 4, 5)
    b = np.arange(24.).reshape(4, 3, 2)
    c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
    c.shape
    # A slower but equivalent way of computing the same...
    d = np.zeros((5, 2))
    a = np.array(range(1, 9))
    A = np.array(('a', 'b', 'c', 'd'), dtype=object)
    x = np.arange(10)
    np.roll(x, 2)
    x2 = np.reshape(x, (2, 5))
    np.roll(x2, 1)
    np.roll(x2, 1, axis=0)
    np.roll(x2, 1, axis=1)
    a = np.ones((3, 4, 5, 6))
    np.rollaxis(a, 3, 1).shape
    np.rollaxis(a, 2).shape
    np.rollaxis(a, 1, 4).shape
    x = np.zeros((3, 4, 5))
    np.moveaxis(x, 0, -1).shape
    np.moveaxis(x, -1, 0).shape
    np.transpose(x).shape
    np.moveaxis(x, [0, 1], [-1, -2]).shape
    np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
    x = [1, 2, 3]
    y = [4, 5, 6]
    np.cross(x, y)
    x = [1, 2]
    y = [4, 5, 6]
    np.cross(x, y)
    x = [1, 2, 0]
    y = [4, 5, 6]
    np.cross(x, y)
    x = [1, 2]
    y = [4, 5]
    np.cross(x, y)
    x = np.array([[1, 2, 3], [4, 5, 6]])
    y = np.array([[4, 5, 6], [1, 2, 3]])
    np.cross(x, y)
    np.cross(x, y, axisc=0)
    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    y = np.array([[7, 8, 9], [4, 5, 6], [1, 2, 3]])
    np.cross(x, y)
    np.cross(x, y, axisa=0, axisb=0)
    # np.array_repr(np.array([1,2]))
    # np.array_repr(np.ma.array([0.]))
    # np.array_repr(np.array([], np.int32))
    x = np.array([1e-6, 4e-7, 2, 3])
    # np.array_repr(x, precision=6, suppress_small=True)
    # np.array_str(np.arange(3))
    a = np.arange(10)
    x = np.arange(4)
    np.set_string_function(lambda x: 'random', repr=False)
    grid = np.indices((2, 3))
    grid.shape
    grid[0]  # row indices
    grid[1]  # column indices
    x = np.arange(20).reshape(5, 4)
    row, col = np.indices((2, 3))
    x[row, col]
    np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
    np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
    np.isscalar(3.1)
    np.isscalar([3.1])
    np.isscalar(False)
    # np.binary_repr(3)
    # np.binary_repr(-3)
    # np.binary_repr(3, width=4)
    # np.binary_repr(-3, width=3)
    # np.binary_repr(-3, width=5)
    # np.base_repr(5)
    # np.base_repr(6, 5)
    # np.base_repr(7, base=5, padding=3)
    # np.base_repr(10, base=16)
    # np.base_repr(32, base=16)
    np.identity(3)
    np.allclose([1e10, 1e-7], [1.00001e10, 1e-8])
    np.allclose([1e10, 1e-8], [1.00001e10, 1e-9])
    np.allclose([1e10, 1e-8], [1.0001e10, 1e-9])
    # np.allclose([1.0, np.nan], [1.0, np.nan])
    # np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    np.isclose([1e10, 1e-7], [1.00001e10, 1e-8])
    np.isclose([1e10, 1e-8], [1.00001e10, 1e-9])
    np.isclose([1e10, 1e-8], [1.0001e10, 1e-9])
    # np.isclose([1.0, np.nan], [1.0, np.nan])
    # np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    np.array_equal([1, 2], [1, 2])
    np.array_equal(np.array([1, 2]), np.array([1, 2]))
    np.array_equal([1, 2], [1, 2, 3])
    np.array_equal([1, 2], [1, 4])
    np.array_equiv([1, 2], [1, 2])
    np.array_equiv([1, 2], [1, 3])
    np.array_equiv([1, 2], [[1, 2], [1, 2]])
    np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
    np.array_equiv([1, 2], [[1, 2], [1, 3]])
Example #29
 def get(self, vect, name):
     """Return the values of the variable with name in vect
     with original shape"""
     idxs, shape = self.idxs_and_shapes[name]
     return np.reshape(vect[idxs], shape)  # reshape to original shape
Example #30
 def forward(self, inputs, *args):
     shape = (inputs.shape[0], int(np.prod(np.array(inputs.shape[1:]))))
     return np.reshape(inputs, shape)