import minpy.numpy as np
import minpy.dispatch.policy as policy


def svm_loss(x, y, mode):
  """
  Computes the loss and gradient for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C
  - mode: 'cpu' to force the NumPy-only dispatch policy; any other value
    selects the prefer-MXNet policy.

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
  if mode == 'cpu':
    np.set_policy(policy.OnlyNumPyPolicy())
  else:
    np.set_policy(policy.PreferMXNetPolicy())

  N = x.shape[0]
  correct_class_scores = x[np.arange(N), y]
  
  #margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
  margins = np.maximum(0, x - np.expand_dims(correct_class_scores, axis = 1) + 1.0)

  margins[np.arange(N), y] = 0
  loss = np.sum(margins) / N
  num_pos = np.sum(margins > 0, axis=1)
  dx = np.zeros_like(x)
  dx[margins > 0] = 1
  dx[np.arange(N), y] -= num_pos
  dx /= N

  return loss, dx
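
The code above implements the standard multiclass hinge loss: for each example i it sums max(0, x[i, j] - x[i, y[i]] + 1) over the incorrect classes j, then averages over the N examples. A minimal usage sketch (the shapes, label values, and the 'cpu' mode argument are arbitrary choices for illustration):

import minpy.numpy as np
import minpy.numpy.random as random

scores = random.rand(4, 5)         # N=4 inputs, C=5 classes
labels = np.array([0, 2, 1, 4])    # one correct class per input
loss, dx = svm_loss(scores, labels, 'cpu')
print(loss)                        # scalar average hinge loss
print(dx.shape)                    # (4, 5), same shape as scores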
Example #2
import minpy
import minpy.numpy as np
from minpy.context import set_context, cpu


def test_policy_2():
    with minpy.OnlyNumPyPolicy():
        print(np.policy)
        print(np.random.policy)
    np.set_policy(minpy.PreferMXNetPolicy())
    set_context(cpu())
    print(np.policy)
    print(np.random.policy)
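
Note: OnlyNumPyPolicy routes every operator to its NumPy implementation, while PreferMXNetPolicy dispatches to MXNet whenever an MXNet implementation exists and falls back to NumPy otherwise. Used as a context manager, a policy applies only inside the with block; np.set_policy changes the global default.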
Example #3
import minpy.numpy as np
import minpy.dispatch.policy as policy


def svm_loss(x, y, mode):
    """
  Computes the loss and gradient for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C
  - mode: 'cpu' to force the NumPy-only dispatch policy; any other value
    selects the prefer-MXNet policy.

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
    if mode == 'cpu':
        np.set_policy(policy.OnlyNumPyPolicy())
    else:
        np.set_policy(policy.PreferMXNetPolicy())

    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]

    #TODO: Support broadcast case: (X,) (X, Y)
    #margins = np.maximum(0, x - correct_class_scores + 1.0)
    margins = np.transpose(
        np.maximum(0,
                   np.transpose(x) - np.transpose(correct_class_scores) + 1.0))

    #margins[np.arange(N), y] = 0
    #loss = np.sum(margins) / N
    loss = (np.sum(margins) - np.sum(margins[np.arange(N), y])) / N
    margins[np.arange(N), y] = 0

    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N

    return loss, dx
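
The transposed form above works around the unsupported (N,) against (N, C) broadcast noted in the TODO: after transposing x to shape (C, N), the length-N vector of correct-class scores broadcasts along the last axis, and transposing back recovers the margins. A quick equivalence check in plain NumPy (illustrative shapes and labels):

import numpy as real_np

x = real_np.random.randn(4, 5)
y = real_np.array([0, 2, 1, 4])
s = x[real_np.arange(4), y]
direct = real_np.maximum(0, x - s[:, real_np.newaxis] + 1.0)
via_t = real_np.transpose(real_np.maximum(0, real_np.transpose(x) - s + 1.0))
assert real_np.allclose(direct, via_t)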
Example #4
"""Simple multi-layer perception neural network on MNIST."""
import argparse
import os.path
import struct
import numpy as real_numpy

import minpy.numpy as np
from minpy.nn import io
from minpy.nn import layers
import minpy.nn.model
import minpy.nn.solver
import minpy.dispatch.policy
np.set_policy(minpy.dispatch.policy.OnlyNumPyPolicy())

# import logging
# logging.getLogger('minpy.array').setLevel(logging.DEBUG)
# logging.getLogger('minpy.core').setLevel(logging.DEBUG)
# logging.getLogger('minpy.primitive').setLevel(logging.DEBUG)

batch_size = 256
flattened_input_size = 784
hidden_size = 256
num_classes = 10


class TwoLayerNet(minpy.nn.model.ModelBase):
    def __init__(self):
        super(TwoLayerNet, self).__init__()
        self.add_param(name='w1', shape=(flattened_input_size, hidden_size)) \
            .add_param(name='b1', shape=(hidden_size,)) \
            .add_param(name='w2', shape=(hidden_size, num_classes)) \
            .add_param(name='b2', shape=(num_classes,))
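
    # The original snippet is truncated at this point. A plausible
    # continuation, based on the two-layer pattern that the w1/b1/w2/b2
    # parameters suggest (a sketch, not the author's verbatim code):
    def forward(self, X, mode):
        # affine -> ReLU -> affine produces the class scores.
        y = layers.affine(X, self.params['w1'], self.params['b1'])
        y = layers.relu(y)
        return layers.affine(y, self.params['w2'], self.params['b2'])

    def loss(self, predict, y):
        # Softmax cross-entropy loss over the predicted scores.
        return layers.softmax_loss(predict, y)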
Example #5
import minpy
import minpy.numpy as np
import minpy.core
import minpy.array
from minpy.array_variants import ArrayType
import minpy.dispatch.policy as policy
import minpy.numpy.random as random

np.set_policy(policy.OnlyNumPyPolicy())
#np.set_policy(policy.PreferMXNetPolicy())


def affine_forward(x, w, b):
    """
  Computes the forward pass for an affine (fully-connected) layer.

  The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
  examples, where each example x[i] has shape (d_1, ..., d_k). We will
  reshape each input into a vector of dimension D = d_1 * ... * d_k, and
  then transform it to an output vector of dimension M.

  Inputs:
  - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
  - w: A numpy array of weights, of shape (D, M)
  - b: A numpy array of biases, of shape (M,)
  
  Returns a tuple of:
  - out: output, of shape (N, M)
  - cache: (x, w, b)
  """
Example #6
import logging
import minpy.numpy as np
import minpy.numpy.random as random
from minpy import core
from minpy.dispatch import policy

logging.getLogger('minpy.array').setLevel(logging.DEBUG)
logging.getLogger('minpy.core').setLevel(logging.DEBUG)
logging.getLogger('minpy.primitive').setLevel(logging.DEBUG)
np.set_policy(policy.OnlyNumPyPolicy())


def f(x, y):
    return np.multiply(x, y)


def main():
    x = random.rand(3, 3)
    y = random.rand(3, 3)
    print('x: {}'.format(x.asnumpy()))
    print('y: {}'.format(y.asnumpy()))
    g = core.grad(f, argnum=[0, 1])
    gr = g(x, y)
    print('grad_x: {}'.format(gr[0].asnumpy()))
    print('grad_y: {}'.format(gr[1].asnumpy()))


if __name__ == '__main__':
    main()
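
Since f multiplies its inputs elementwise, the expected gradients are grad_x == y and grad_y == x. A quick check that could be appended to main() (a sketch; plain NumPy is assumed only for the comparison):

import numpy

# For the elementwise product, d(x*y)/dx = y and d(x*y)/dy = x.
numpy.testing.assert_allclose(gr[0].asnumpy(), y.asnumpy())
numpy.testing.assert_allclose(gr[1].asnumpy(), x.asnumpy())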