Code example #1
def softmax_loss(x, y):
  """
  Computes the loss and gradient for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
  # Numerically stable softmax: subtract the per-row max before exponentiating.
  probs = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))
  probs /= np.expand_dims(np.sum(probs, axis=1), axis=1)
  N = x.shape[0]
  loss = -np.sum(np.log(probs[np.arange(N), y])) / N

  dx = probs.copy()
  dx[np.arange(N), y] -= 1
  dx /= N

  return loss, dx
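
In these snippets np is typically minpy.numpy, but the function runs unchanged on plain NumPy. A minimal sanity check of the analytic gradient against a central-difference estimate (plain NumPy assumed) could look like this:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(5, 4)              # scores for 5 inputs, 4 classes
y = rng.randint(0, 4, size=5)    # integer labels in [0, 4)

loss, dx = softmax_loss(x, y)

# Central-difference check on a single entry of dx.
h = 1e-6
i, j = 2, 1
x_pos, x_neg = x.copy(), x.copy()
x_pos[i, j] += h
x_neg[i, j] -= h
num_grad = (softmax_loss(x_pos, y)[0] - softmax_loss(x_neg, y)[0]) / (2 * h)
assert abs(num_grad - dx[i, j]) < 1e-5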
Code example #2
def softmax_loss(x, y):
    """
  Computes the loss and gradient for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
    # Numerically stable softmax: subtract the per-row max before exponentiating.
    # (The original line subtracted np.max(x, axis=1) without restoring the class
    # axis, which mis-broadcasts against the (N, C) scores.)
    probs = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))
    probs /= np.expand_dims(np.sum(probs, axis=1), axis=1)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N

    dx = probs.copy()
    dx[np.arange(N), y] -= 1
    dx /= N

    return loss, dx
Code example #3
def affine_forward(x, w, b):
  """
  Computes the forward pass for an affine (fully-connected) layer.

  The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
  examples, where each example x[i] has shape (d_1, ..., d_k). We will
  reshape each input into a vector of dimension D = d_1 * ... * d_k, and
  then transform it to an output vector of dimension M.

  Inputs:
  - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
  - w: A numpy array of weights, of shape (D, M)
  - b: A numpy array of biases, of shape (M,)
  
  Returns a tuple of:
  - out: output, of shape (N, M)
  - cache: (x, w, b)
  """

  # Flatten each example into a row vector of dimension D = d_1 * ... * d_k.
  x_plain = np.reshape(x, (x.shape[0], -1))

  # Tile the bias explicitly instead of relying on broadcasting (the original
  # note suggests the GPU backend may not broadcast automatically).
  out = np.dot(x_plain, w) + np.repeat(np.expand_dims(b, axis=0), x_plain.shape[0], axis=0)

  cache = (x, w, b) 
  
  return out, cache
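
On plain NumPy the bias broadcasts without the explicit np.repeat; a quick check (plain NumPy assumed) that the two formulations agree and that the multi-dimensional input is flattened as documented:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(4, 2, 3)    # N=4 examples, each of shape (2, 3), so D=6
w = rng.randn(6, 5)       # maps D=6 to M=5
b = rng.randn(5)

out, cache = affine_forward(x, w, b)

expected = x.reshape(4, -1).dot(w) + b   # same result via plain broadcasting
assert out.shape == (4, 5)
assert np.allclose(out, expected)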
Code example #4
def svm_loss(x, y, mode):
  """
  Computes the loss and gradient for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C
  - mode: 'cpu' forces the pure-NumPy policy; any other value prefers the MXNet policy

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
  if mode == 'cpu':
    np.set_policy(policy.OnlyNumpyPolicy())
  else:
    np.set_policy(policy.PreferMXNetPolicy())

  N = x.shape[0]
  correct_class_scores = x[np.arange(N), y]
  
  # Hinge margins relative to the correct class score (delta = 1.0).
  margins = np.maximum(0, x - np.expand_dims(correct_class_scores, axis=1) + 1.0)

  margins[np.arange(N), y] = 0
  loss = np.sum(margins) / N
  num_pos = np.sum(margins > 0, axis=1)
  dx = np.zeros_like(x)
  dx[margins > 0] = 1
  dx[np.arange(N), y] -= num_pos
  dx /= N

  return loss, dx
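
Setting aside the minpy policy switch, the math above is the standard multiclass hinge loss with delta = 1.0. A loop-based plain-NumPy reference for cross-checking the vectorized version (a sketch, not part of the original project) would be:

import numpy as np

def svm_loss_naive(x, y, delta=1.0):
    """Multiclass hinge loss and gradient computed with explicit loops."""
    N, C = x.shape
    loss = 0.0
    dx = np.zeros_like(x)
    for i in range(N):
        correct = x[i, y[i]]
        for j in range(C):
            if j == y[i]:
                continue
            margin = x[i, j] - correct + delta
            if margin > 0:
                loss += margin
                dx[i, j] += 1
                dx[i, y[i]] -= 1
    return loss / N, dx / N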
Code example #5
File: layers.py  Project: dsqx71/minpy
def softmax_loss(x, y):
    """
  Computes the loss for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns:
  - loss: Scalar giving the loss
  """
    # Numerically stable softmax: subtract the per-row max before exponentiating.
    probs = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))
    probs = probs / np.expand_dims(np.sum(probs, axis=1), axis=1)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N

    return loss
Code example #6
def softmax_loss(x, y):
  """
  Computes the loss for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns:
  - loss: Scalar giving the loss
  """
  # Numerically stable softmax: subtract the per-row max before exponentiating.
  probs = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))
  probs = probs / np.expand_dims(np.sum(probs, axis=1), axis=1)
  N = x.shape[0]
  loss = -np.sum(np.log(probs[np.arange(N), y])) / N

  return loss
Code example #7
File: pong_model.py  Project: HrWangChengdu/minpy
 def preprocess(self, img):
     """ Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size) float vector."""
     # Crop, down-sample, erase background and set foreground to 1.
     # Ref: https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
     img = img[35:195]
     img = img[::2, ::2, 0]
     img[img == 144] = 0
     img[img == 109] = 0
     img[img != 0] = 1
     # numpy.float was removed in modern NumPy; the builtin float is equivalent.
     curr = np.expand_dims(img.astype(float).ravel(), axis=0)
     # Subtract the last preprocessed image.
     diff = curr - self.prev if self.prev is not None else np.zeros((1, curr.shape[1]))
     self.prev = curr
     return diff
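
For a quick standalone check of the preprocessor (assuming the method above is available as a plain function, and that both np and numpy refer to plain NumPy here, with a minimal holder object standing in for the model's preprocessor state), a dummy Atari-sized frame should come out as a 1 x 6400 row vector:

import numpy
import numpy as np

class _State:
    """Minimal stand-in holding the previous preprocessed frame."""
    prev = None

state = _State()
frame = numpy.zeros((210, 160, 3), dtype=numpy.uint8)
first = preprocess(state, frame)    # no previous frame yet: returns zeros
second = preprocess(state, frame)   # difference against the stored frame
assert first.shape == (1, 6400)
assert second.shape == (1, 6400)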
Code example #8
    def run_episode(self):
        """Run an episode using the current model to generate training data.

        Specifically, this involves repeatedly getting an observation from the environment,
        performing a forward pass using the single observation to get a distribution over actions
        (in binary case a probability of a single action), and choosing an action.
        Finally, rewards are discounted when the episode completes.

        Returns
        -------
        (xs, ys, rs) : tuple
            The N x input_size observations, N x 1 action labels, and N x 1 discounted rewards
            obtained from running the episode's N steps.
        """
        observation = self.env.reset()
        self.model.preprocessor.reset()
        self.episode_reward = 0

        xs, ys, rs = [], [], []
        done = False
        game_number = 1
        game_start = time.time()
        while not done:
            if self.render:
                self.env.render()
            x = self.model.preprocessor.preprocess(observation)
            p = self.model.forward(x)
            a, y = self.model.choose_action(p.asnumpy().ravel()[0])
            observation, r, done, info = self.env.step(a)

            xs.append(x.asnumpy().ravel())
            ys.append(y)
            rs.append(r)
            self.episode_reward += r
            if self._game_complete(r):
                game_time = time.time() - game_start
                if self.verbose:
                    print('game %d complete (%.2fs), reward: %f' %
                          (game_number, game_time, r))
                game_number += 1
                game_start = time.time()

        # Episode finished.
        self.running_reward = self.episode_reward if self.running_reward is None else (
            0.99 * self.running_reward + 0.01 * self.episode_reward)
        xs = np.vstack(xs)
        ys = np.vstack(ys)
        rs = np.expand_dims(self.model.discount_rewards(rs), axis=1)
        return xs, ys, rs
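
self.model.discount_rewards(rs) is not part of this excerpt. A common implementation for Pong-style episodes (following Karpathy's policy-gradient example; a sketch with an assumed gamma parameter, not the project's actual code) resets the running sum at game boundaries, where the reward is nonzero:

import numpy as np

def discount_rewards(rs, gamma=0.99):
    """Discounted returns, resetting at nonzero rewards (game boundaries in Pong)."""
    discounted = np.zeros(len(rs), dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(rs))):
        if rs[t] != 0:
            running = 0.0          # a game just ended; start a fresh sum
        running = running * gamma + rs[t]
        discounted[t] = running
    return discounted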
Code example #9
 def preprocess(self, img):
     """ Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size) float vector."""
     # Crop, down-sample, erase background and set foreground to 1.
     # Ref: https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
     img = img[35:195]
     img = img[::2, ::2, 0]
     img[img == 144] = 0
     img[img == 109] = 0
     img[img != 0] = 1
     # numpy.float was removed in modern NumPy; the builtin float is equivalent.
     curr = np.expand_dims(img.astype(float).ravel(), axis=0)
     # Subtract the last preprocessed image.
     diff = curr - self.prev if self.prev is not None else np.zeros(
         (1, curr.shape[1]))
     self.prev = curr
     return diff
Code example #10
    def run_episode(self):
        """Run an episode using the current model to generate training data.

        Specifically, this involves repeatedly getting an observation from the environment,
        performing a forward pass using the single observation to get a distribution over actions
        (in binary case a probability of a single action), and choosing an action.
        Finally, rewards are discounted when the episode completes.

        Returns
        -------
        (xs, ys, rs) : tuple
            The N x input_size observations, N x 1 action labels, and N x 1 discounted rewards
            obtained from running the episode's N steps.
        """
        observation = self.env.reset()
        self.model.preprocessor.reset()
        self.episode_reward = 0

        xs, ys, rs = [], [], []
        done = False
        game_number = 1
        game_start = time.time()
        while not done:
            if self.render:
                self.env.render()
            x = self.model.preprocessor.preprocess(observation)
            p = self.model.forward(x)
            a, y = self.model.choose_action(p.asnumpy().ravel()[0])
            observation, r, done, info = self.env.step(a)

            xs.append(x.asnumpy().ravel())
            ys.append(y)
            rs.append(r)
            self.episode_reward += r
            if self._game_complete(r):
                game_time = time.time() - game_start
                if self.verbose:
                    print('game %d complete (%.2fs), reward: %f' % (game_number, game_time, r))
                game_number += 1
                game_start = time.time()

        # Episode finished.
        self.running_reward = self.episode_reward if self.running_reward is None else (
            0.99*self.running_reward + 0.01*self.episode_reward)
        xs = np.vstack(xs)
        ys = np.vstack(ys)
        rs = np.expand_dims(self.model.discount_rewards(rs), axis=1)
        return xs, ys, rs
Code example #11
def test_sum_forward():

  np_x = py_np.zeros((2, 10))
  np_w = py_np.zeros((10, 3))
  np_b = py_np.zeros(3)

  x = NumpyVarToMinpy(np_x)
  w = NumpyVarToMinpy(np_w)
  b = NumpyVarToMinpy(np_b)

  x_plain = np.reshape(x, (x.shape[0], -1))
  out0 = np.dot(x_plain, w)
  out = out0 + np.repeat(np.expand_dims(b, axis=0), out0.shape[0], axis=0)

  np_out = MinpyVarToNumpy(out)

  var = py_np.random.randn(2, 3)
  tmp = NumpyVarToMinpy(var)
  sum_tmp = np.sum(tmp, axis=0)

  sum_py = MinpyVarToNumpy(sum_tmp)
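
  # The test above never asserts anything; a minimal check against plain
  # NumPy (an added suggestion, not part of the original test) could be:
  assert py_np.allclose(np_out, py_np.zeros((2, 3)))
  assert py_np.allclose(sum_py, var.sum(axis=0))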
Code example #12
def batchnorm(x,
              gamma,
              beta,
              mode='train',
              eps=1e-5,
              momentum=0.9,
              running_mean=None,
              running_var=None):
    """
    Forward pass for batch normalization.

    During training the sample mean and (uncorrected) sample variance are
    computed from minibatch statistics and used to normalize the incoming data.
    During training we also keep an exponentially decaying running mean of the mean
    and variance of each feature, and these averages are used to normalize data
    at test-time.

    At each timestep we update the running averages for mean and variance using
    an exponential decay based on the momentum parameter:

    running_mean = momentum * running_mean + (1 - momentum) * sample_mean
    running_var = momentum * running_var + (1 - momentum) * sample_var

    Note that the batch normalization paper suggests a different test-time
    behavior: they compute sample mean and variance for each feature using a
    large number of training images rather than using a running average. For
    this implementation we have chosen to use running averages instead since
    they do not require an additional estimation step; the torch7 implementation
    of batch normalization also uses running averages.

    Input:
    - x: Data of shape (N, D)
    - gamma: Scale parameter of shape (D,)
    - beta: Shift parameter of shape (D,)
    - mode: 'train' or 'test'
    - eps: Constant for numeric stability
    - momentum: Constant for running mean / variance.
    - running_mean: Array of shape (D,) giving running mean of features
    - running_var: Array of shape (D,) giving running variance of features

    Returns a tuple of:
    - out: of shape (N, D)
    - running_mean: updated running_mean
    - running_var: updated running_var
    """
    # TODO: fix NDArray type system
    N, D = x.shape
    N, D = int(N), int(D)
    if running_mean is None:
        running_mean = np.zeros(D)
    if running_var is None:
        running_var = np.zeros(D)

    out = None
    if mode == 'train':
        mean = np.sum(x, axis=0) / N
        x_mean = (x - np.expand_dims(mean, axis=0))
        sqr_x_mean = x_mean**2
        var = np.sum(sqr_x_mean, axis=0) / N
        sqrt_var = np.sqrt(var + eps)
        inv_sqrt_var = 1.0 / sqrt_var
        x_hat = x_mean * np.expand_dims(inv_sqrt_var, axis=0)
        out = gamma * x_hat + beta

        running_mean = momentum * running_mean + (1.0 - momentum) * mean
        running_var = momentum * running_var + (1.0 - momentum) * var
    elif mode == 'test':
        x_hat = (x - running_mean) / np.sqrt(running_var + eps)
        out = gamma * x_hat + beta
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)

    # return the updated running means
    return out, running_mean, running_var
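
A short usage sketch (plain NumPy assumed in place of minpy.numpy): a few training batches accumulate the running statistics, which are then used to normalize a held-out batch in test mode.

import numpy as np

rng = np.random.RandomState(0)
gamma, beta = np.ones(3), np.zeros(3)
running_mean, running_var = None, None

# Training batches drawn far from zero mean / unit variance.
for _ in range(50):
    x = rng.randn(32, 3) * 2.0 + 5.0
    out, running_mean, running_var = batchnorm(
        x, gamma, beta, mode='train',
        running_mean=running_mean, running_var=running_var)

# Test mode normalizes with the accumulated running statistics.
x_test = rng.randn(16, 3) * 2.0 + 5.0
out_test, _, _ = batchnorm(
    x_test, gamma, beta, mode='test',
    running_mean=running_mean, running_var=running_var)
assert out_test.shape == (16, 3)   # values come out roughly standardized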
Code example #13
File: layers.py  Project: ZihengJiang/minpy
def batchnorm(x,
              gamma,
              beta,
              mode='train',
              eps=1e-5,
              momentum=0.9,
              running_mean=None,
              running_var=None):
    """
    Forward pass for batch normalization.

    During training the sample mean and (uncorrected) sample variance are
    computed from minibatch statistics and used to normalize the incoming data.
    During training we also keep an exponentially decaying running mean of the mean
    and variance of each feature, and these averages are used to normalize data
    at test-time.

    At each timestep we update the running averages for mean and variance using
    an exponential decay based on the momentum parameter:

    running_mean = momentum * running_mean + (1 - momentum) * sample_mean
    running_var = momentum * running_var + (1 - momentum) * sample_var

    Note that the batch normalization paper suggests a different test-time
    behavior: they compute sample mean and variance for each feature using a
    large number of training images rather than using a running average. For
    this implementation we have chosen to use running averages instead since
    they do not require an additional estimation step; the torch7 implementation
    of batch normalization also uses running averages.

    Input:
    - x: Data of shape (N, D)
    - gamma: Scale parameter of shape (D,)
    - beta: Shift parameter of shape (D,)
    - mode: 'train' or 'test'
    - eps: Constant for numeric stability
    - momentum: Constant for running mean / variance.
    - running_mean: Array of shape (D,) giving running mean of features
    - running_var: Array of shape (D,) giving running variance of features

    Returns a tuple of:
    - out: of shape (N, D)
    - running_mean: updated running_mean
    - running_var: updated running_var
    """
    # TODO: fix NDArray type system
    N, D = x.shape
    N, D = int(N), int(D)
    if running_mean is None:
        running_mean = np.zeros(D)
    if running_var is None:
        running_var = np.zeros(D)

    out = None
    if mode == 'train':
        mean = np.sum(x, axis=0) / N
        x_mean = (x - np.expand_dims(mean, axis=0))
        sqr_x_mean = x_mean ** 2
        var = np.sum(sqr_x_mean, axis=0) / N
        sqrt_var = np.sqrt(var + eps)
        inv_sqrt_var = 1.0 / sqrt_var
        x_hat = x_mean * np.expand_dims(inv_sqrt_var, axis=0)
        out = gamma * x_hat + beta

        running_mean = momentum * running_mean + (1.0 - momentum) * mean
        running_var = momentum * running_var + (1.0 - momentum) * var
    elif mode == 'test':
        x_hat = (x - running_mean) / np.sqrt(running_var + eps)
        out = gamma * x_hat + beta
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)

    # return the updated running means
    return out, running_mean, running_var
Code example #14
    w3 = np.random.randn(150, 1)

    v1, v2, v3 = 0, 0, 0

    # 2.8 Open a figure
    plt.figure()

    # 3. Create the model and train
    for iter in range(number_of_epoch):

        current_x, current_label = sklearn.utils.shuffle(data_x, label_x)

        for i in range(0, current_x.shape[0], 100):

            current_x_batch = current_x[i:i + 100]
            current_label_batch = np.expand_dims(current_label[i:i + 100],
                                                 axis=1)

            layer_1 = current_x_batch.dot(w1)
            layer_1_act = tanh(layer_1)

            layer_2 = layer_1_act.dot(w2)
            layer_2_act = tanh(layer_2)

            layer_3 = layer_2_act.dot(w3)
            layer_3_act = sigmoid(layer_3)

            cost = np.square(current_label_batch -
                             layer_3_act) / (current_x.shape[0] * 2)

            grad_3_part_1 = current_label_batch - layer_3_act
            grad_3_part_2 = d_sigmoid(layer_3)
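            # The excerpt stops here. Under its naming pattern (part_1 = upstream
            # error, part_2 = activation derivative, part_3 = layer input), the
            # remaining backward pass and the momentum update would continue
            # roughly as below; d_tanh, alpha (momentum) and lr (learning rate)
            # are assumed names not shown above, so treat this as a sketch.
            grad_3_part_3 = layer_2_act
            grad_3 = grad_3_part_3.T.dot(grad_3_part_1 * grad_3_part_2)

            grad_2_part_1 = (grad_3_part_1 * grad_3_part_2).dot(w3.T)
            grad_2_part_2 = d_tanh(layer_2)
            grad_2_part_3 = layer_1_act
            grad_2 = grad_2_part_3.T.dot(grad_2_part_1 * grad_2_part_2)

            grad_1_part_1 = (grad_2_part_1 * grad_2_part_2).dot(w2.T)
            grad_1_part_2 = d_tanh(layer_1)
            grad_1_part_3 = current_x_batch
            grad_1 = grad_1_part_3.T.dot(grad_1_part_1 * grad_1_part_2)

            # Because grad_3_part_1 = label - prediction, these grads already point
            # in the descent direction of the squared-error cost, so the momentum
            # update adds them to the weights.
            v3 = alpha * v3 + lr * grad_3
            v2 = alpha * v2 + lr * grad_2
            v1 = alpha * v1 + lr * grad_1
            w3 = w3 + v3
            w2 = w2 + v2
            w1 = w1 + v1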
Code example #15
File: hmc-minpy.py  Project: AndreyKolev/MLLibMark
    def dU(beta):
        return mp.dot(X.T, (mp.exp(mp.dot(X, beta)) / (1 + mp.exp(mp.dot(X, beta))) - y)) + beta / alpha

    D = X.shape[1]
    q = mp.zeros((D, 1), dtype=mp.float32)
    out = mp.zeros((n_iter, D), dtype=mp.float32)
    for i in range(n_iter):
        q = hmc(U, dU, epsilon, L, q)
        out[i,:] = mp.ravel(q)
    return out

with cpu() if args.mode == 'cpu' else gpu(0):
    with open('params.json') as params_file:
        out = {}
        params = json.load(params_file)
        X_train, y_train, X_test, y_test = get_data()
        X_train = mp.array(X_train)
        y_train = mp.array(y_train)
        X_test = mp.array(X_test)
        y_test = mp.array(y_test)
        y_train = mp.expand_dims(y_train, 1)
        z = lr_hmc(y_train, X_train, params['epsilon'], params['n_leaps'], params['alpha'], 1)  # Warm-up
        t = time.perf_counter()
        z = lr_hmc(y_train, X_train, params['epsilon'], params['n_leaps'], params['alpha'], params['n_iter'])  
        t = time.perf_counter() - t
        out[f'minpy-{args.mode}'] = t
        coef_ = mp.mean(z[params['burn_in']:], 0)
        acc = mp.mean((sigmoid(mp.dot(X_test, coef_)) > 0.5) == y_test)[0]
        assert acc > 0.8
        print(json.dumps(out))
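
The hmc() helper called inside lr_hmc is not included in the excerpt. A standard leapfrog HMC proposal (Neal's formulation; a plain-NumPy sketch using numpy rather than mp, not the original project code) looks roughly like this:

import numpy

def hmc(U, dU, epsilon, L, current_q):
    """One HMC proposal: L leapfrog steps followed by a Metropolis test."""
    q = current_q
    p = numpy.random.randn(*q.shape)        # resample the momentum
    current_p = p

    p = p - epsilon * dU(q) / 2             # initial half step for momentum
    for i in range(L):
        q = q + epsilon * p                 # full step for position
        if i != L - 1:
            p = p - epsilon * dU(q)         # full step for momentum
    p = p - epsilon * dU(q) / 2             # final half step for momentum

    # Metropolis accept/reject on the change in the Hamiltonian.
    current_H = float(U(current_q)) + float(numpy.sum(current_p ** 2)) / 2
    proposed_H = float(U(q)) + float(numpy.sum(p ** 2)) / 2
    if numpy.random.rand() < numpy.exp(current_H - proposed_H):
        return q
    return current_q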