Example #1
# Imports assumed to make the snippet self-contained; AutoBlacklistPolicy is
# exposed at the minpy top level (see Example #11).
import minpy
import minpy.numpy as np
from minpy import AutoBlacklistPolicy


def test_autoblacklist_policy():
    # Try MXNet first; on failure fall back to NumPy and record a blacklist rule.
    p = AutoBlacklistPolicy(gen_rule=True, append_rule=True)

    minpy.set_global_policy(p)

    a = np.array([100, 100])
    print(a)
    b = np.array([50, 50])
    c = np.array([0, 0])
    np.add(a, b, c)        # positional `out` argument
    print(c)
    np.add(a, b, out=c)    # keyword `out` argument
    np.add(a, b, out=c)
    print(c)
import minpy.numpy as np
from minpy.core import convert_args
import minpy
minpy.set_global_policy(minpy.OnlyNumPyPolicy())


# This function returns a MinPy array.
@convert_args
def sigmoid(x):
    return 1 / (1 + np.exp(-x))


@convert_args
def affine_forward(x, w, b):
    """
  Computes the forward pass for an affine (fully-connected) layer.
  The input x has shape (N, d_1, ..., d_k) where x[i] is the ith input.
  We multiply this against a weight matrix of shape (D, M) where
  D = \prod_i d_i
  Inputs:
  x - Input data, of shape (N, d_1, ..., d_k)
  w - Weights, of shape (D, M)
  b - Biases, of shape (M,)

  Returns a tuple of:
  - out: output, of shape (N, M)
  - cache: (x, w, b)
  """
    out = x.reshape(x.shape[0], -1).dot(w) + b
    return out
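A minimal usage sketch for affine_forward; the shapes below are illustrative placeholders (N=2 inputs of size 3x4, so D=12, with M=3 outputs) and are not taken from the original example:

x = np.zeros((2, 3, 4))        # N=2 inputs, each 3x4, so D = 12 after flattening
w = np.zeros((12, 3))          # weights of shape (D, M) = (12, 3)
b = np.zeros((3,))             # biases of shape (M,) = (3,)
out = affine_forward(x, w, b)  # out has shape (N, M) = (2, 3)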
Example #4
    # `plc` and `func` come from the enclosing decorator (see the sketch below).
    def policy_wrapper(*args, **kwargs):
        old_policy = minpy.Config['default_policy']
        minpy.set_global_policy(plc)
        result = func(*args, **kwargs)
        minpy.set_global_policy(old_policy)
        return result
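For context, a sketch of the decorator factory such a wrapper is typically nested in; the outer names (wrap_policy, policy_decorator) are assumptions for illustration, since the excerpt only shows the innermost function:

def wrap_policy(plc):
    """Hypothetical decorator factory: run the wrapped function under policy `plc`."""
    def policy_decorator(func):
        def policy_wrapper(*args, **kwargs):
            old_policy = minpy.Config['default_policy']
            minpy.set_global_policy(plc)
            result = func(*args, **kwargs)
            minpy.set_global_policy(old_policy)
            return result
        return policy_wrapper
    return policy_decorator


# Usage sketch:
# @wrap_policy(minpy.OnlyNumPyPolicy())
# def compute(x):
#     ...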
Example #5
    np.std(a)
    np.std(a, dtype=np.float64)
    a = np.array([[1, 2], [3, 4]])
    np.var(a)
    np.var(a, axis=0)
    np.var(a, axis=1)
    a = np.zeros((2, 512 * 512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.var(a)
    np.var(a, dtype=np.float64)


def generate_default_blacklist():
    p = AutoBlacklistPolicy(gen_rule=True, append_rule=True)
    with p:
        test_ufunc()
        test_numeric()
        test_fromnumeric()
    p.save_rules()


if __name__ == '__main__':
    p = AutoBlacklistPolicy(gen_rule=True, append_rule=True)
    minpy.set_global_policy(p)
    logging.getLogger('minpy.dispatch.policy').setLevel(logging.DEBUG)
    test_ufunc()
    test_numeric()
    test_fromnumeric()
    logging.getLogger('minpy.dispatch.policy').setLevel(logging.WARN)
Example #6
    def __exit__(self, ptype, value, trace):
        # Restore the policy that was active before entering the `with` block.
        minpy.set_global_policy(self._old_policy)
Example #7
    def __enter__(self):
        # Remember the current global policy, then install this policy.
        self._old_policy = minpy.Config['default_policy']
        minpy.set_global_policy(self)
        return self
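Together with the __exit__ method in Example #6, this lets a policy object be used as a context manager. A minimal usage sketch, assuming the imports from the earlier examples (the same with-block pattern appears in generate_default_blacklist in Example #5):

p = AutoBlacklistPolicy(gen_rule=True, append_rule=True)
with p:                       # __enter__ saves the old policy and installs p
    c = np.add(np.array([1, 2]), np.array([3, 4]))
# __exit__ has restored the previous global policy at this point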
Example #8
    # Same pattern as Example #4, with the policy supplied as `policy`.
    def policy_wrapper(*args, **kwargs):
        old_policy = minpy.Config['default_policy']
        minpy.set_global_policy(policy)
        result = func(*args, **kwargs)
        minpy.set_global_policy(old_policy)
        return result
Example #9
"""Benchmark vanilla RNN using MinPy CPU."""
import argparse
import time

import minpy
import minpy.core as core
import minpy.numpy as np
from minpy.nn import io
from minpy.nn import layers
from minpy.nn.model import ModelBase
import minpy.dispatch.policy
minpy.set_global_policy('only_numpy')

# import logging
# logging.getLogger('minpy.array').setLevel(logging.DEBUG)
# logging.getLogger('minpy.core').setLevel(logging.DEBUG)
# logging.getLogger('minpy.primitive').setLevel(logging.DEBUG)

num_cold = 5

class RNNNet(ModelBase):
    def __init__(self, args):
        super(RNNNet, self).__init__()
        self.add_param(name='Wx', shape=(args.input_size, args.hidden_size)) \
            .add_param(name='Wh', shape=(args.hidden_size, args.hidden_size))\
            .add_param(name='b', shape=(args.hidden_size,))                  \
            .add_param(name='Wa', shape=(args.hidden_size, args.num_classes))\
            .add_param(name='ba', shape=(args.num_classes,))
        self.num_unroll_steps = args.num_unroll_steps
        self.hshape = (args.batch_size, args.hidden_size)
Example #10
import logging

import minpy
import minpy.numpy as np
import minpy.numpy.random as random
from minpy import core

logging.getLogger('minpy.array').setLevel(logging.DEBUG)
logging.getLogger('minpy.core').setLevel(logging.DEBUG)
logging.getLogger('minpy.primitive').setLevel(logging.DEBUG)
minpy.set_global_policy('only_numpy')


def f(x, y):
    return np.multiply(x, y)


def main():
    x = random.rand(3, 3)
    y = random.rand(3, 3)
    print('x: {}'.format(x.asnumpy()))
    print('y: {}'.format(y.asnumpy()))
    g = core.grad(f, argnum=[0, 1])
    gr = g(x, y)
    print('grad_x: {}'.format(gr[0].asnumpy()))
    print('grad_y: {}'.format(gr[1].asnumpy()))


if __name__ == '__main__':
    main()
Example #11
import minpy
# minpy.set_global_policy(minpy.AutoBlacklistPolicy())
# minpy.set_global_policy(minpy.OnlyNumPyPolicy())
minpy.set_global_policy(minpy.PreferMXNetPolicy())
# minpy.set_global_policy(minpy.OnlyMXNetPolicy())
from minpy.context import set_context, cpu, gpu
set_context(cpu())
# set_context(gpu(0))

from facility import *
from solver_primitives import *
from rnn import FastWeightRNN

import sys
sys.path.append('../../mnist/utilities')
from data_utility import load_mnist

INPUT_SIZE = 4 * 28
N_HIDDEN = 200
N_CLASSES = 10  # 10 digits
INNER_LENGTH = 1
model = FastWeightRNN(INPUT_SIZE, N_HIDDEN, N_CLASSES, INNER_LENGTH)
initialize(model)

LEARNING_RATE = float(sys.argv[1])
# updater = Updater(model, 'sgd', {'learning_rate' : LEARNING_RATE})
updater = Updater(model, 'adam', {'learning_rate': LEARNING_RATE})

training_X, training_Y, validation_X, validation_Y, test_X, test_Y = \
  load_mnist('../../mnist/utilities', shape=(7, 4 * 28))
validation_X, validation_Y = validation_X[:1000], validation_Y[:1000]
Example #12
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.std(a)
    np.std(a, dtype=np.float64)
    a = np.array([[1, 2], [3, 4]])
    np.var(a)
    np.var(a, axis=0)
    np.var(a, axis=1)
    a = np.zeros((2, 512*512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    np.var(a)
    np.var(a, dtype=np.float64)

def generate_default_blacklist():
    p = AutoBlacklistPolicy(gen_rule=True, append_rule=True)
    with p:
        test_ufunc()
        test_numeric()
        test_fromnumeric()
    p.save_rules()

if __name__ == '__main__':
    p = AutoBlacklistPolicy(gen_rule=True, append_rule=True)
    minpy.set_global_policy(p)
    logging.getLogger('minpy.dispatch.policy').setLevel(logging.DEBUG)
    test_ufunc()
    test_numeric()
    test_fromnumeric()
    logging.getLogger('minpy.dispatch.policy').setLevel(logging.WARN)