Example #1
def pushSomething(lua, something):
    if isinstance(something, int):
        lua.pushNumber(something)
        return

    if isinstance(something, float):
        lua.pushNumber(something)
        return

    if isinstance(something, str):
        lua.pushString(something)
        return

    if isinstance(something, dict):
        pushTable(lua, something)
        return

    if isinstance(something, list):
        pushArray(lua, something)
        return

    for pythonClass in pushFunctionByPythonClass:
        if isinstance(something, pythonClass):
            pushFunctionByPythonClass[pythonClass](something)
            return

    if type(something) in luaClassesReverse:
        pushObject(lua, something)
        return

    typestring = str(type(something))
    if typestring in ["<class 'numpy.ndarray'>", "<type 'numpy.ndarray'>"]:
        dtypestr = str(something.dtype)
        if dtypestr == 'float32':
            pushSomething(lua, PyTorch._asFloatTensor(something))
            return
        if dtypestr == 'float64':
            pushSomething(lua, PyTorch._asDoubleTensor(something))
            return
        if dtypestr == 'uint8':
            pushSomething(lua, PyTorch._asByteTensor(something))
            return
        raise Exception('pushing numpy array with elements of type ' + dtypestr + ' is not currently implemented')

    raise Exception('pushing type ' + str(type(something)) + ' not implemented, value ', something)
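
A subtlety in the dispatch order above: in Python, bool is a subclass of int, so isinstance(True, int) is true and booleans fall into the first branch, reaching Lua as numbers rather than as Lua booleans. A minimal plain-Python sketch of how the branch order resolves (no PyTorch needed):

def classify(something):
    # mirrors the dispatch order of pushSomething, returning which branch fires
    if isinstance(something, int):
        return 'number'
    if isinstance(something, float):
        return 'number'
    if isinstance(something, str):
        return 'string'
    return 'other'

assert classify(True) == 'number'  # bool is a subclass of int
assert classify(1.5) == 'number'
assert classify('x') == 'string'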
Example #2
import sys
import os
import PyTorch
import PyTorchHelpers
import numpy as np

Luabit = PyTorchHelpers.load_lua_class('luabit.lua', 'Luabit')

batchSize = 2
numFrames = 4
inSize = 3
outSize = 3
kernelSize = 3

luabit = Luabit('green')
print(luabit.getName())
print('type(luabit)', type(luabit))

inTensor = np.random.randn(batchSize, numFrames, inSize).astype('float32')
luain = PyTorch.asFloatTensor(inTensor)

luaout = luabit.getOut(luain, outSize, kernelSize)

outTensor = luaout.asNumpyTensor()
print('outTensor', outTensor)

res = luabit.printTable({'color': 'red', 'weather': 'sunny', 'anumber': 10, 'afloat': 1.234}, 'mistletoe', {
  'row1': 'col1', 'meta': 'data'})
print('res', res)

Example #3
def test_double_tensor():
    PyTorch.manualSeed(123)
    LongTensor = PyTorch.LongTensor
    DoubleTensor = PyTorch.DoubleTensor
    LongStorage = PyTorch.LongStorage
    print('LongStorage', LongStorage)
    print('LongTensor', LongTensor)
    print('DoubleTensor', DoubleTensor)
    print('dir(G)', dir())
    print('test_double_tensor')
    a = PyTorch.DoubleTensor(3, 2)
    print('got double a')
    myeval('a.dims()')
    a.uniform()
    myeval('a')
    myexec('a[1][1] = 9')
    myeval('a')
    myeval('a.size()')
    myeval('a + 2')
    myexec('a.resize2d(3,3).fill(1)')
    myeval('a')
    myexec('size = LongStorage(2)')
    myexec('size[0] = 4')
    myexec('size[1] = 2')
    myexec('a.resize(size)')
    myeval('a')
    myeval('DoubleTensor(3,4).uniform()')
    myeval('DoubleTensor(3,4).bernoulli()')
    myeval('DoubleTensor(3,4).normal()')
    myeval('DoubleTensor(3,4).cauchy()')
    myeval('DoubleTensor(3,4).exponential()')
    myeval('DoubleTensor(3,4).logNormal()')
    myeval('DoubleTensor(3,4).geometric()')
Example #4
def test_nnx():
    # net = nn.Minus()
    inputTensor = PyTorch.DoubleTensor(2, 3).uniform()
    print('inputTensor', inputTensor)

    PyTorch.require('nnx')
    net = nn.Minus()
    print(net.forward(inputTensor))
Example #5
 def toTorch(self, numpy_array):
     if numpy_array.dtype == 'float32':
         return PyTorch.asFloatTensor(numpy_array)
     elif numpy_array.dtype == 'float64':
         return PyTorch.asDoubleTensor(numpy_array)
     elif numpy_array.dtype == 'uint8':
         return PyTorch.asByteTensor(numpy_array)
     else:
         return PyTorch.asDoubleTensor(numpy_array)
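
The final else above silently assumes asDoubleTensor can handle whatever dtype remains. A table-driven sketch of the same dispatch, using the PyTorch.as*Tensor converters shown elsewhere in these examples (the permissive fallback is kept here, though raising on unknown dtypes may be safer):

import PyTorch

_CONVERTERS = {
    'float32': PyTorch.asFloatTensor,
    'float64': PyTorch.asDoubleTensor,
    'uint8': PyTorch.asByteTensor,
}

def to_torch(numpy_array):
    # look up a converter by dtype name, defaulting to the double converter
    convert = _CONVERTERS.get(str(numpy_array.dtype), PyTorch.asDoubleTensor)
    return convert(numpy_array)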
Example #6
def test_long_tensor():
    PyTorch.manualSeed(123)
    print('test_long_tensor')
    a = PyTorch.LongTensor(3, 2).geometric()
    myeval('a')
    myexec('a[1][1] = 9')
    myeval('a')
    myeval('a.size()')
    myeval('a + 2')
    myexec('a.resize2d(3,3).fill(1)')
    myeval('a')
Example #7
def test_byte_tensor():
    PyTorch.manualSeed(123)
    print('test_byte_tensor')
    a = PyTorch.ByteTensor(3,2).geometric()
    myeval('a')
    myexec('a[1][1] = 9')
    myeval('a')
    myeval('a.size()')
    myeval('a + 2')
    myexec('a.resize2d(3,3).fill(1)')
    myeval('a')
Example #8
def test_float_tensor():
    PyTorch.manualSeed(123)
    print('dir(G)', dir())
    print('test_float_tensor')
    a = PyTorch.FloatTensor(3, 2)
    print('got float a')
    myeval('a.dims()')
    a.uniform()
    myeval('a')
    myexec('a[1][1] = 9')
    myeval('a')
    myeval('a.size()')
Example #9
def pushSomething(lua, something):
    if isinstance(something, int):
        lua.pushNumber(something)
        return

    if isinstance(something, float):
        lua.pushNumber(something)
        return

    if isinstance(something, str):
        lua.pushString(something)
        return

    if isinstance(something, dict):
        pushTable(lua, something)
        return

    if isinstance(something, (list, tuple)):
        pushTable(lua, OrderedDict(zip(range(1,
                                             len(something) + 1), something)))
        return

    for pythonClass in pushFunctionByPythonClass:
        if isinstance(something, pythonClass):
            pushFunctionByPythonClass[pythonClass](something)
            return

    if type(something) in luaClassesReverse:
        pushObject(lua, something)
        return

    typestring = str(type(something))
    if typestring in ["<class 'numpy.ndarray'>", "<type 'numpy.ndarray'>"]:
        dtypestr = str(something.dtype)
        if dtypestr == 'float32':
            pushSomething(lua, PyTorch._asFloatTensor(something))
            return
        if dtypestr == 'float64':
            pushSomething(lua, PyTorch._asDoubleTensor(something))
            return
        if dtypestr == 'uint8':
            pushSomething(lua, PyTorch._asByteTensor(something))
            return
        raise Exception('pushing numpy array with elements of type ' +
                        dtypestr + ' is not currently implemented')

    raise Exception(
        'pushing type ' + str(type(something)) + ' not implemented, value ',
        something)
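
The list/tuple branch above re-keys sequences to 1-based integer keys, matching Lua's table conventions; Example #13 below asserts exactly this key layout. A plain-Python illustration:

from collections import OrderedDict

seq = ['a', 'b', 'c']
as_table = OrderedDict(zip(range(1, len(seq) + 1), seq))
assert list(as_table.keys()) == [1, 2, 3]  # Lua-style 1-based indices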
Example #10
def run(model, im, box):
    # scale pixel intensities from [0, 255] into [0, 1] before predicting
    input = im.astype(np.double) / 255
    result = model.predict(input, box)

    return result.asNumpyTensor()
Example #11
def load_lua_class(lua_filename, lua_classname):
    module = lua_filename.replace('.lua', '')
    PyTorch.require(module)
    splitName = lua_classname.split('.')
    class LuaWrapper(PyTorchAug.LuaClass):
        def __init__(self, *args):
            _fromLua = False
            if len(args) >= 1:
                if args[0] == '__FROMLUA__':
                   _fromLua = True
                   args = args[1:]
#            print('LuaWrapper.__init__', lua_classname, 'fromLua', _fromLua, 'args', args)
            self.luaclass = lua_classname
            if not _fromLua:
                PyTorchAug.LuaClass.__init__(self, splitName, *args)
            else:
                self.__dict__['__objectId'] = PyTorchAug.getNextObjectId()
    renamedClass = PyTorchLua.renameClass(LuaWrapper, module, lua_classname)
    return renamedClass
Example #12
def loadModuleClass(module, lua_classname, load_module=True, makeLookupKey=lambda m, c: m + '.' + c):
    if load_module:
        PyTorch.require(module)
    class LuaWrapper(LuaClass):
        def __init__(self, *args):
            _fromLua = False
            if len(args) >= 1:
                if args[0] == '__FROMLUA__':
                   _fromLua = True
                   args = args[1:]
            #print('LuaWrapper.__init__', lua_classname, 'fromLua', _fromLua, 'args', args)
            #print([module,lua_classname])
            self.luaclass = makeLookupKey(module,lua_classname)
            if not _fromLua:
                splitNames = self.luaclass.split('.')
                LuaClass.__init__(self, splitNames, *args)                
            else:
                self.__dict__['__objectId'] = getNextObjectId()
    renamedClass = PyTorchLua.renameClass(LuaWrapper, module, lua_classname)
    return renamedClass
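
loadModuleClass generalises the load_lua_class helper above: load_module=False skips the require when the Lua module is already loaded, and makeLookupKey controls how the Lua-side constructor name is built. A hedged usage sketch (mylib and Widget are illustrative names, not part of any real module):

# hypothetical names, for illustration only: requires mylib.lua, then
# resolves the Lua global mylib.Widget as the constructor
Widget = loadModuleClass('mylib', 'Widget')
w = Widget(42)  # constructor args are pushed through to Lua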
Example #13
def test_call_lua():
    TestCallLua = PyTorchHelpers.load_lua_class('test/test_call_lua.lua', 'TestCallLua')

    batchSize = 2
    numFrames = 4
    inSize = 3
    outSize = 3
    kernelSize = 3

    luabit = TestCallLua('green')
    print(luabit.getName())
    assert luabit.getName() == 'green'
    print('type(luabit)', type(luabit))
    assert str(type(luabit)) == '<class \'PyTorchLua.TestCallLua\'>'

    np.random.seed(123)
    inTensor = np.random.randn(batchSize, numFrames, inSize).astype('float32')
    luain = PyTorch.asFloatTensor(inTensor)

    luaout = luabit.getOut(luain, outSize, kernelSize)

    outTensor = luaout.asNumpyTensor()
    print('outTensor', outTensor)
    # I guess we just assume that if we got to this point, with no exceptions, then that's a good thing...
    # let's add some new tests...

    outTensor = luabit.addThree(luain).asNumpyTensor()
    assert isinstance(outTensor, np.ndarray)
    assert inTensor.shape == outTensor.shape
    assert np.abs((inTensor + 3) - outTensor).max() < 1e-4

    res = luabit.printTable({'color': 'red', 'weather': 'sunny', 'anumber': 10, 'afloat': 1.234}, 'mistletoe', {
        'row1': 'col1', 'meta': 'data'})
    print('res', res)
    assert res == {'foo': 'bar', 'result': 12.345, 'bear': 'happy'}

    # List and tuple support by conversion to dictionary
    reslist = luabit.modifyList([3.1415, r'~Python\omega', 42])
    restuple = luabit.modifyList((3.1415, r'~Python\omega', 42))
    assert len(reslist) == len(restuple) == 4
    assert list(reslist.keys()) == list(restuple.keys()) == [1, 2, 3, 4]
    assert reslist[1] == restuple[1]
    assert abs(reslist[1] - 3.1415) < 1e-7
    reslist.pop(1)
    restuple.pop(1)
    assert reslist == restuple == {2: r'~Python\omega', 3: 42, 4: 'Lorem Ipsum'}

    # Get an object created from scratch by Lua
    res = luabit.getList()
    assert res[1] == 3.1415
    res.pop(1)
    assert res == {2: 'Lua', 3: 123}
Example #14
def test_save_load():
    np.random.seed(123)
    a_np = np.random.randn(3, 2).astype(np.float32)
    a = PyTorch.asFloatTensor(a_np)
    print('a', a)

    filename = '/tmp/foo.t7'  # TODO: should use tempfile to get this
    PyTorchAug.save(filename, a)

    b = PyTorchAug.load(filename)
    print('type(b)', type(b))
    print('b', b)

    assert np.abs(a_np - b.asNumpyTensor()).max() < 1e-4
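
The TODO above can be addressed with Python's tempfile module; a minimal sketch, assuming the same PyTorchAug.save/load API as in the surrounding examples:

import os
import tempfile
import numpy as np
import PyTorch
import PyTorchAug

def roundtrip(tensor):
    # write to a unique temporary .t7 file instead of a hard-coded path
    fd, filename = tempfile.mkstemp(suffix='.t7')
    os.close(fd)
    try:
        PyTorchAug.save(filename, tensor)
        return PyTorchAug.load(filename)
    finally:
        os.remove(filename)

a = PyTorch.asFloatTensor(np.random.randn(3, 2).astype(np.float32))
b = roundtrip(a)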
Example #15
def save(filepath, target):
    lua = PyTorch.getGlobalState().getLua()

    topStart = lua.getTop()

    pushGlobal(lua, 'torch', 'saveobj')
    pushSomething(lua, filepath)
    pushSomething(lua, target)
    res = lua.pcall(2, 0, 1)
    if res != 0:
        errorMessage = popString(lua)
        raise Exception(errorMessage)

    topEnd = lua.getTop()
    assert topStart == topEnd
Example #16
def load(filepath):
    lua = PyTorch.getGlobalState().getLua()
    topStart = lua.getTop()

    pushGlobal(lua, 'torch', 'loadobj')
    pushSomething(lua, filepath)

    res = lua.pcall(1, 1, 1)
    if res != 0:
        errorMessage = popString(lua)
        raise Exception(errorMessage)

    res = popSomething(lua)

    topEnd = lua.getTop()
    assert topStart == topEnd

    return res
Example #17
    def __init__(self, nameList, *args):
        lua = PyTorch.getGlobalState().getLua()
        self.__dict__['__objectId'] = getNextObjectId()
        topStart = lua.getTop()
        pushGlobalFromList(lua, nameList)
        for arg in args:
            pushSomething(lua, arg)
#        print('nameList', nameList)
#        print('args', args)
        res = lua.pcall(len(args), 1)
        if res != 0:
            errorMessage = popString(lua)
            raise Exception(errorMessage)
#        lua.call(len(args), 1)
        registerObject(lua, self)

        topEnd = lua.getTop()
        assert topStart == topEnd
Example #18
    def __init__(self, nameList, *args):
        lua = PyTorch.getGlobalState().getLua()
        self.__dict__['__objectId'] = getNextObjectId()
        topStart = lua.getTop()
        pushGlobalFromList(lua, nameList)
        for arg in args:
            if isinstance(arg, int):
                lua.pushNumber(arg)
            else:
                raise Exception('arg type ' + str(type(arg)) + ' not implemented')
        lua.call(len(args), 1)
        registerObject(lua, self)

#        nameList = nameList[:]
#        nameList.append('float')
#        pushGlobalFromList(lua, nameList)
#        pushObject(lua, self)
#        lua.call(1, 0)

        topEnd = lua.getTop()
        assert topStart == topEnd
Example #19
import sys
import os
import PyTorchAug
import PyTorch
import numpy as np

PyTorch.require('luabit')
class Luabit(PyTorchAug.LuaClass):
    def __init__(self, _fromLua=False):
        self.luaclass = 'Luabit'
        if not _fromLua:
            name = self.__class__.__name__
            super(self.__class__, self).__init__([name])
        else:
            self.__dict__['__objectId'] = PyTorchAug.getNextObjectId()

batchSize = 2
numFrames = 4
inSize = 3
outSize = 3
kernelSize = 3

luabit = Luabit()

inTensor = np.random.randn(batchSize, numFrames, inSize).astype('float32')
luain = PyTorch.asFloatTensor(inTensor)

luaout = luabit.getOut(luain, outSize, kernelSize)

outTensor = luaout.asNumpyTensor()
print('outTensor', outTensor)
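
This hand-written wrapper does by hand what the PyTorchHelpers.load_lua_class helper in Example #11 generates automatically; with the helper, the class above reduces to a sketch like this (assuming the same luabit.lua is on the Lua path):

import PyTorchHelpers

Luabit = PyTorchHelpers.load_lua_class('luabit.lua', 'Luabit')
luabit = Luabit('green')  # constructor args are forwarded to the Lua constructor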
Example #20

{%- set types = [
    {'Real': 'Long','real': 'long'},
    {'Real': 'Float', 'real': 'float'}, 
    {'Real': 'Double', 'real': 'double'},
    {'Real': 'Byte', 'real': 'unsigned char'}
]
%}


{% for typedict in types -%}
{%- set Real = typedict['Real'] -%}
{%- set real = typedict['real'] -%}
def test_pytorch{{Real}}():
    PyTorch.manualSeed(123)
    numpy.random.seed(123)

    {{Real}}Tensor = PyTorch.{{Real}}Tensor

    {% if Real == 'Float' -%}
    A = numpy.random.rand(6).reshape(3, 2).astype(numpy.float32)
    B = numpy.random.rand(8).reshape(2, 4).astype(numpy.float32)

    C = A.dot(B)
    print('C', C)

    print('calling .asTensor...')
    tensorA = PyTorch.asFloatTensor(A)
    tensorB = PyTorch.asFloatTensor(B)
    print(' ... asTensor called')
Example #21
from __future__ import print_function
import PyTorch
from PyTorchAug import nn
PyTorch.require('rnn')

if __name__ == '__main__':
    lstm = nn.LSTM(3, 4)
    print('lstm', lstm)
Example #22
import ipdb
import PyTorchAug
import PyTorch
nn = PyTorch.Nn()

lua = PyTorchAug.lua
for module in ['modules/LinearCR', 'modules/Reparametrize',
               'modules/SelectiveOutputClamp', 'modules/SelectiveGradientFilter']:
    lua.getGlobal("require")
    lua.pushString(module)
    lua.call(1, 0)

dim_hidden = 200
feature_maps = 96

filter_size = 5
colorchannels = 1
Example #23
from __future__ import print_function
import threading
import PyTorch
import PyTorchLua
import PyTorchHelpers

lua = PyTorch.getGlobalState().getLua()

# this is so we can ctrl-c lua functions.  we have to run them in a separate thread
# for this, so that the python event loop can continue ('event loop' might not be quite
# the right technical term, but the concept is approximately right.  I think)
def interruptableCall(function, args):
    mythread = threading.Thread(target=function, args=args)
    mythread.daemon = True
    mythread.start()
    while mythread.is_alive():
        mythread.join(0.1)
        #print('join timed out')

nextObjectId = 1
def getNextObjectId():
    global nextObjectId
    res = nextObjectId
    nextObjectId += 1
    return res

def pushGlobal(lua, name1, name2=None, name3=None):
    lua.getGlobal(name1)
    if name2 is None:
        return
    lua.getField(-1, name2)
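
The interruptableCall helper above joins with a short timeout because a bare join() can block KeyboardInterrupt delivery (notably on Python 2); polling lets ctrl-c land between joins. A self-contained illustration of the same pattern with a plain function:

import threading
import time

def interruptable_call(function, args):
    worker = threading.Thread(target=function, args=args)
    worker.daemon = True
    worker.start()
    while worker.is_alive():
        worker.join(0.1)  # short timeout keeps the main thread responsive to ctrl-c

interruptable_call(time.sleep, (0.5,))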
Example #24
class Nn(object):
    def __init__(self):
        self.classes = {}

    def __getattr__(self, name):
        if name not in self.classes:
            self.classes[name] = setupNnClass(name)
        thisClass = self.classes[name]
        return thisClass


def populateLuaClassesReverse():
    luaClassesReverse.clear()
    for name in luaClasses:
        classtype = luaClasses[name]
        luaClassesReverse[classtype] = name

lua = PyTorch.getGlobalState().getLua()
nn = Nn()

cythonClasses = {}
cythonClasses['torch.FloatTensor'] = {'popFunction': PyTorch._popFloatTensor}
cythonClasses['torch.DoubleTensor'] = {'popFunction': PyTorch._popDoubleTensor}
cythonClasses['torch.ByteTensor'] = {'popFunction': PyTorch._popByteTensor}

pushFunctionByPythonClass = {}
pushFunctionByPythonClass[PyTorch._FloatTensor] = PyTorch._pushFloatTensor
pushFunctionByPythonClass[PyTorch._DoubleTensor] = PyTorch._pushDoubleTensor
pushFunctionByPythonClass[PyTorch._ByteTensor] = PyTorch._pushByteTensor
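
These two registries are the symmetric halves of the bridge: cythonClasses maps a Lua type name to a pop (Lua-to-Python) function, while pushFunctionByPythonClass maps a Python class to a push (Python-to-Lua) function consulted by pushSomething in Examples #1 and #9. Supporting another tensor type would plausibly mean one entry in each; a hypothetical sketch (the _LongTensor converter names below are assumptions for illustration, not confirmed API):

# hypothetical: wiring an additional tensor type into both registries
cythonClasses['torch.LongTensor'] = {'popFunction': PyTorch._popLongTensor}
pushFunctionByPythonClass[PyTorch._LongTensor] = PyTorch._pushLongTensor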
Example #25
# PyClTorch.newfunction(123)

import PyTorch
import PyClTorch
from PyTorchAug import *


def myeval(expr):
    print(expr, ':', eval(expr))


if __name__ == '__main__':
    # a = PyTorch.foo(3,2)
    # print('a', a)
    # print(PyTorch.FloatTensor(3,2))

    a = PyTorch.FloatTensor(4, 3).uniform()
    print('a', a)
    a = a.cl()
    print(type(a))

    print('a.dims()', a.dims())
    print('a.size()', a.size())
    print('a', a)

    print('sum:', a.sum())
    myeval('a + 1')
    b = PyClTorch.ClTensor()
    print('got b')
    myeval('b')
    b.resizeAs(a)
    myeval('b')
Example #26
def asFloatTensor(myarray):
    f1 = PyTorch._asFloatTensor(myarray)
    #    print('type(f1)', type(f1))
    return FloatTensor(f1)
Example #27
def asByteTensor(myarray):
    f1 = PyTorch._asByteTensor(myarray)
    #    print('type(f1)', type(f1))
    return ByteTensor(f1)
Example #28
def test_pytorchByte():
    PyTorch.manualSeed(123)
    numpy.random.seed(123)

    ByteTensor = PyTorch.ByteTensor

    D = PyTorch.ByteTensor(5, 3).fill(1)
    print('D', D)

    D[2][2] = 4
    print('D', D)

    D[3].fill(9)
    print('D', D)

    D.narrow(1, 2, 1).fill(0)
    print('D', D)

    print(PyTorch.ByteTensor(3, 4).bernoulli())
    print(PyTorch.ByteTensor(3, 4).geometric())
    print(PyTorch.ByteTensor(3, 4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.ByteTensor(3, 4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.ByteTensor(3, 4).geometric())

    print(type(PyTorch.ByteTensor(2, 3)))

    size = PyTorch.LongStorage(2)
    size[0] = 4
    size[1] = 3
    D.resize(size)
    print('D after resize:\n', D)

    print('resize1d', PyTorch.ByteTensor().resize1d(3).fill(1))
    print('resize2d', PyTorch.ByteTensor().resize2d(2, 3).fill(1))
    print('resize', PyTorch.ByteTensor().resize(size).fill(1))

    D = PyTorch.ByteTensor(size).geometric()

    #    def myeval(expr):
    #        print(expr, ':', eval(expr))

    #    def myexec(expr):
    #        print(expr)
    #        exec(expr)

    myeval('ByteTensor(3,2).nElement()')
    myeval('ByteTensor().nElement()')
    myeval('ByteTensor(1).nElement()')

    A = ByteTensor(3, 4).geometric(0.9)
    myeval('A')
    myexec('A += 3')
    myeval('A')
    myexec('A *= 3')
    myeval('A')
    myeval('A')
    print('A //= 3')
    A //= 3
    myeval('A')

    myeval('A + 5')
    myeval('A * 5')
    print('A // 2')
    A // 2
    B = ByteTensor().resizeAs(A).geometric(0.9)
    myeval('B')
    myeval('A + B')
    myexec('A += B')
    myeval('A')
Example #29
def test_pynn():
    PyTorch.manualSeed(123)
    linear = nn.Linear(3, 5)
    linear
    print('linear', linear)
    print('linear.weight', linear.weight)
    print('linear.output', linear.output)
    print('linear.gradInput', linear.gradInput)

    input = PyTorch.DoubleTensor(4, 3).uniform()
    print('input', input)
    output = linear.updateOutput(input)
    print('output', output)

    gradInput = linear.updateGradInput(input, output)
    print('gradInput', gradInput)

    criterion = nn.ClassNLLCriterion()
    print('criterion', criterion)

    print('dir(linear)', dir(linear))

    mlp = nn.Sequential()
    mlp.add(linear)

    output = mlp.forward(input)
    print('output', output)

#    import sys
#    sys.path.append('thirdparty/python-mnist')
    from mnist import MNIST
    import numpy
    import array

    numpy.random.seed(123)

    mlp = nn.Sequential()

    mlp.add(nn.SpatialConvolutionMM(1, 16, 5, 5, 1, 1, 2, 2))
    res = mlp.add(nn.ReLU())
    print('res', res)
    mlp.add(nn.SpatialMaxPooling(3, 3, 3, 3))

    mlp.add(nn.SpatialConvolutionMM(16, 32, 3, 3, 1, 1, 1, 1))
    mlp.add(nn.ReLU())
    mlp.add(nn.SpatialMaxPooling(2, 2, 2, 2))

    mlp.add(nn.Reshape(32 * 4 * 4))
    mlp.add(nn.Linear(32 * 4 * 4, 150))
    mlp.add(nn.Tanh())
    mlp.add(nn.Linear(150, 10))
    mlp.add(nn.LogSoftMax())

    criterion = nn.ClassNLLCriterion()
    print('got criterion')

    learningRate = 0.02

    mndata = MNIST('data/mnist')
    imagesList, labelsB = mndata.load_training()
    images = numpy.array(imagesList).astype(numpy.float64)
    # print('imagesArray', images.shape)

    # print(images[0].shape)

    labelsf = array.array('d', labelsB.tolist())
    imagesTensor = PyTorch.asDoubleTensor(images)

    # imagesTensor = PyTorch.FloatTensor(100,784)
    # labels = numpy.array(20,).astype(numpy.int32)
    # labelsTensor = PyTorch.FloatTensor(100).fill(1)
    # print('labels', labels)
    # print(imagesTensor.size())

    def printStorageAddr(name, tensor):
        print('printStorageAddr START')
        storage = tensor.storage()
        if storage is None:
            print(name, 'storage is None')
        else:
            print(name, 'storage is ', hex(storage.dataAddr()))
        print('printStorageAddr END')

    labelsTensor = PyTorch.asDoubleTensor(labelsf)
    labelsTensor += 1
    # print('calling size on imagestensor...')
    # print('   (called size)')

    desiredN = 128
    maxN = int(imagesTensor.size()[0])
    desiredN = min(maxN, desiredN)
    imagesTensor = imagesTensor.narrow(0, 0, desiredN)
    labelsTensor = labelsTensor.narrow(0, 0, desiredN)
    print('imagesTensor.size()', imagesTensor.size())
    print('labelsTensor.size()', labelsTensor.size())
    N = int(imagesTensor.size()[0])
    print('type(imagesTensor)', type(imagesTensor))
    size = PyTorch.LongStorage(4)
    size[0] = N
    size[1] = 1
    size[2] = 28
    size[3] = 28
    imagesTensor.resize(size)
    imagesTensor /= 255.0
    imagesTensor -= 0.2
    print('imagesTensor.size()', imagesTensor.size())

    print('start training...')
    for epoch in range(4):
        numRight = 0
        for n in range(N):
            # print('n', n)
            input = imagesTensor[n]
            label = labelsTensor[n]
            labelTensor = PyTorch.DoubleTensor(1)
            labelTensor[0] = label
    #        print('label', label)
            output = mlp.forward(input)
            prediction = PyTorch.getDoublePrediction(output)
    #        print('prediction', prediction)
            if prediction == label:
                numRight += 1
            criterion.forward(output, labelTensor)
            mlp.zeroGradParameters()
            gradOutput = criterion.backward(output, labelTensor)
            mlp.backward(input, gradOutput)
            mlp.updateParameters(learningRate)
    #        PyTorch.collectgarbage()
    #        if n % 100 == 0:
    #            print('n=', n)
        print('epoch ' + str(epoch) + ' accuracy: ' + str(numRight * 100.0 / N) + '%')
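
The inner loop above follows the classic Torch nn training step. Distilled into one function (a sketch using the same mlp/criterion calls as the example):

def train_step(mlp, criterion, input, labelTensor, learningRate):
    # forward pass: prediction, then loss
    output = mlp.forward(input)
    loss = criterion.forward(output, labelTensor)
    # backward pass: clear old gradients, backprop the loss, apply SGD update
    mlp.zeroGradParameters()
    gradOutput = criterion.backward(output, labelTensor)
    mlp.backward(input, gradOutput)
    mlp.updateParameters(learningRate)
    return loss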
Example #30
def load_lua_buffer(chunkbuffer,chunkname):
    "Compile and run the Lua code in chunkbuffer and put objects in module chunkname"
    lua = PyTorch.getGlobalState().getLua()
    return lua.loadBufferAndCall(chunkbuffer,chunkname)
Example #31
def test_pytorchLong():
    PyTorch.manualSeed(123)
    numpy.random.seed(123)

    LongTensor = PyTorch.LongTensor

    

    D = PyTorch.LongTensor(5,3).fill(1)
    print('D', D)

    D[2][2] = 4
    print('D', D)

    D[3].fill(9)
    print('D', D)

    D.narrow(1,2,1).fill(0)
    print('D', D)

    
    print(PyTorch.LongTensor(3,4).bernoulli())
    print(PyTorch.LongTensor(3,4).geometric())
    print(PyTorch.LongTensor(3,4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.LongTensor(3,4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.LongTensor(3,4).geometric())

    print(type(PyTorch.LongTensor(2,3)))

    size = PyTorch.LongStorage(2)
    size[0] = 4
    size[1] = 3
    D.resize(size)
    print('D after resize:\n', D)

    print('resize1d', PyTorch.LongTensor().resize1d(3).fill(1))
    print('resize2d', PyTorch.LongTensor().resize2d(2, 3).fill(1))
    print('resize', PyTorch.LongTensor().resize(size).fill(1))

    D = PyTorch.LongTensor(size).geometric()

#    def myeval(expr):
#        print(expr, ':', eval(expr))

#    def myexec(expr):
#        print(expr)
#        exec(expr)

    myeval('LongTensor(3,2).nElement()')
    myeval('LongTensor().nElement()')
    myeval('LongTensor(1).nElement()')

    A = LongTensor(3,4).geometric(0.9)
    myeval('A')
    myexec('A += 3')
    myeval('A')
    myexec('A *= 3')
    myeval('A')
    
    myexec('A -= 3')
    
    myeval('A')
    myexec('A /= 3')
    myeval('A')

    myeval('A + 5')
    
    myeval('A - 5')
    
    myeval('A * 5')
    myeval('A / 2')

    B = LongTensor().resizeAs(A).geometric(0.9)
    myeval('B')
    myeval('A + B')
    
    myeval('A - B')
    
    myexec('A += B')
    myeval('A')
    
    myexec('A -= B')
    myeval('A')
Example #32
from __future__ import print_function
import PyTorch
import array
import numpy

A = numpy.random.rand(6).reshape(3, 2).astype(numpy.float32)
B = numpy.random.rand(8).reshape(2, 4).astype(numpy.float32)

C = A.dot(B)
print('C', C)

print('calling .asTensor...')
tensorA = PyTorch.asTensor(A)
tensorB = PyTorch.asTensor(B)
print(' ... asTensor called')

print('tensorA', tensorA)

tensorA.set2d(1, 1, 56.4)
tensorA.set2d(2, 0, 76.5)
print('tensorA', tensorA)
print('A', A)

tensorA += 5
print('tensorA', tensorA)
print('A', A)

tensorA2 = tensorA + 7
print('tensorA2', tensorA2)
print('tensorA', tensorA)
Example #33
def test_cltorch():
    if "ALLOW_NON_GPUS" in os.environ:
        PyClTorch.setAllowNonGpus(True)

    # a = PyTorch.foo(3,2)
    # print('a', a)
    # print(PyTorch.FloatTensor(3,2))

    a = PyClTorch.ClTensor([3, 4, 9])
    assert a[0] == 3
    assert a[1] == 4
    assert a[2] == 9
    print("a", a)

    a = PyClTorch.ClTensor([[3, 5, 7], [9, 2, 4]])
    print("a", a)
    print("a[0]", a[0])
    print("a[0][0]", a[0][0])
    assert a[0][0] == 3
    assert a[1][0] == 9
    assert a[1][2] == 4

    PyTorch.manualSeed(123)
    a = PyTorch.FloatTensor(4, 3).uniform()
    print("a", a)
    a_cl = a.cl()
    print(type(a_cl))
    assert str(type(a_cl)) == "<class 'PyClTorch.ClTensor'>"
    print("a_cl[0]", a_cl[0])
    print("a_cl[0][0]", a_cl[0][0])
    assert a[0][0] == a_cl[0][0]
    assert a[0][1] == a_cl[0][1]
    assert a[1][1] == a_cl[1][1]

    print("a.dims()", a.dims())
    print("a.size()", a.size())
    print("a", a)
    assert a.dims() == 2
    assert a.size()[0] == 4
    assert a.size()[1] == 3

    a_sum = a.sum()
    a_cl_sum = a_cl.sum()
    assert abs(a_sum - a_cl_sum) < 1e-4
    a_cl2 = a_cl + 3.2
    assert abs(a_cl2[1][0] - a[1][0] - 3.2) < 1e-4

    b = PyClTorch.ClTensor()
    print("got b")
    myeval("b")
    assert b.dims() == -1
    b.resizeAs(a)
    myeval("b")
    assert b.dims() == 2
    assert b.size()[0] == 4
    assert b.size()[1] == 3
    print("run uniform")
    b.uniform()
    myeval("b")

    print("create new b")
    b = PyClTorch.ClTensor()
    print("b.dims()", b.dims())
    print("b.size()", b.size())
    print("b", b)

    c = PyTorch.FloatTensor().cl()
    print("c.dims()", c.dims())
    print("c.size()", c.size())
    print("c", c)
    assert b.dims() == -1
    assert b.size() is None

    print("creating Linear...")
    linear = nn.Linear(3, 5).float()
    print("created linear")
    print("linear:", linear)
    myeval("linear.output")
    myeval("linear.output.dims()")
    myeval("linear.output.size()")
    myeval("linear.output.nElement()")

    linear_cl = linear.clone().cl()
    print("type(linear.output)", type(linear.output))
    print("type(linear_cl.output)", type(linear_cl.output))
    assert str(type(linear.output)) == "<class 'PyTorch._FloatTensor'>"
    assert str(type(linear_cl.output)) == "<class 'PyClTorch.ClTensor'>"
    # myeval('type(linear)')
    # myeval('type(linear.output)')
    myeval("linear_cl.output.dims()")
    myeval("linear_cl.output.size()")
    # myeval('linear.output')
    assert str(type(linear)) == "<class 'PyTorchAug.Linear'>"
    assert str(type(linear_cl)) == "<class 'PyTorchAug.Linear'>"
    # assert str(type(linear.output)) == '<class \'PyClTorch.ClTensor\'>'
    # assert linear.output.dims() == -1  # why is this 0? should be -1???
    # assert linear.output.size() is None  # again, should be None?

    a_cl = PyClTorch.ClTensor(4, 3).uniform()
    # print('a_cl', a_cl)
    output_cl = linear_cl.forward(a_cl)
    # print('output', output)
    assert str(type(output_cl)) == "<class 'PyClTorch.ClTensor'>"
    assert output_cl.dims() == 2
    assert output_cl.size()[0] == 4
    assert output_cl.size()[1] == 5

    a = a_cl.float()
    output = linear.forward(a)
    assert str(type(output)) == "<class 'PyTorch._FloatTensor'>"
    assert output.dims() == 2
    assert output.size()[0] == 4
    assert output.size()[1] == 5
    print("a.size()", a.size())
    print("a_cl.size()", a_cl.size())
    assert a[1][0] == a_cl[1][0]
    assert a[2][1] == a_cl[2][1]

    mlp = nn.Sequential()
    mlp.add(nn.SpatialConvolutionMM(1, 16, 5, 5, 1, 1, 2, 2))
    mlp.add(nn.ReLU())
    mlp.add(nn.SpatialMaxPooling(3, 3, 3, 3))
    mlp.add(nn.SpatialConvolutionMM(16, 32, 5, 5, 1, 1, 2, 2))
    mlp.add(nn.ReLU())
    mlp.add(nn.SpatialMaxPooling(2, 2, 2, 2))
    mlp.add(nn.Reshape(32 * 4 * 4))
    mlp.add(nn.Linear(32 * 4 * 4, 150))
    mlp.add(nn.Tanh())
    mlp.add(nn.Linear(150, 10))
    mlp.add(nn.LogSoftMax())
    mlp.float()

    mlp_cl = mlp.clone().cl()

    print("mlp_cl", mlp_cl)
    # myeval('mlp.output')
    input = PyTorch.FloatTensor(128, 1, 28, 28).uniform()
    input_cl = PyClTorch.FloatTensorToClTensor(input.clone())  # This is a bit hacky...

    output = mlp.forward(input)
    # myeval('input[0]')
    output_cl = mlp_cl.forward(input_cl)
    # myeval('output[0]')

    assert (output_cl.float() - output).abs().max() < 1e-4
Example #34
def test_pytorchFloat():
    PyTorch.manualSeed(123)
    numpy.random.seed(123)

    FloatTensor = PyTorch.FloatTensor

    
    A = numpy.random.rand(6).reshape(3,2).astype(numpy.float32)
    B = numpy.random.rand(8).reshape(2,4).astype(numpy.float32)

    C = A.dot(B)
    print('C', C)

    print('calling .asTensor...')
    tensorA = PyTorch.asFloatTensor(A)
    tensorB = PyTorch.asFloatTensor(B)
    print(' ... asTensor called')

    print('tensorA', tensorA)

    tensorA.set2d(1, 1, 56.4)
    tensorA.set2d(2, 0, 76.5)
    print('tensorA', tensorA)
    print('A', A)

    print('add 5 to tensorA')
    tensorA += 5
    print('tensorA', tensorA)
    print('A', A)

    print('add 7 to tensorA')
    tensorA2 = tensorA + 7
    print('tensorA2', tensorA2)
    print('tensorA', tensorA)

    tensorAB = tensorA * tensorB
    print('tensorAB', tensorAB)

    print('A.dot(B)', A.dot(B))

    print('tensorA[2]', tensorA[2])
    

    D = PyTorch.FloatTensor(5,3).fill(1)
    print('D', D)

    D[2][2] = 4
    print('D', D)

    D[3].fill(9)
    print('D', D)

    D.narrow(1,2,1).fill(0)
    print('D', D)

    
    print(PyTorch.FloatTensor(3,4).uniform())
    print(PyTorch.FloatTensor(3,4).normal())
    print(PyTorch.FloatTensor(3,4).cauchy())
    print(PyTorch.FloatTensor(3,4).exponential())
    print(PyTorch.FloatTensor(3,4).logNormal())
    
    print(PyTorch.FloatTensor(3,4).bernoulli())
    print(PyTorch.FloatTensor(3,4).geometric())
    print(PyTorch.FloatTensor(3,4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.FloatTensor(3,4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.FloatTensor(3,4).geometric())

    print(type(PyTorch.FloatTensor(2,3)))

    size = PyTorch.LongStorage(2)
    size[0] = 4
    size[1] = 3
    D.resize(size)
    print('D after resize:\n', D)

    print('resize1d', PyTorch.FloatTensor().resize1d(3).fill(1))
    print('resize2d', PyTorch.FloatTensor().resize2d(2, 3).fill(1))
    print('resize', PyTorch.FloatTensor().resize(size).fill(1))

    D = PyTorch.FloatTensor(size).geometric()

#    def myeval(expr):
#        print(expr, ':', eval(expr))

#    def myexec(expr):
#        print(expr)
#        exec(expr)

    myeval('FloatTensor(3,2).nElement()')
    myeval('FloatTensor().nElement()')
    myeval('FloatTensor(1).nElement()')

    A = FloatTensor(3,4).geometric(0.9)
    myeval('A')
    myexec('A += 3')
    myeval('A')
    myexec('A *= 3')
    myeval('A')
    
    myexec('A -= 3')
    
    myeval('A')
    myexec('A /= 3')
    myeval('A')

    myeval('A + 5')
    
    myeval('A - 5')
    
    myeval('A * 5')
    myeval('A / 2')

    B = FloatTensor().resizeAs(A).geometric(0.9)
    myeval('B')
    myeval('A + B')
    
    myeval('A - B')
    
    myexec('A += B')
    myeval('A')
    
    myexec('A -= B')
    myeval('A')
Example #35
def asDoubleTensor(myarray):
    return DoubleTensor(PyTorch._asDoubleTensor(myarray))
Example #36
from __future__ import print_function
import PyTorch
import array
import numpy
import sys

A = numpy.random.rand(6).reshape(2,3).astype(numpy.float32)

tensorA = PyTorch.asTensor(A)

nn = PyTorch.Nn()
linear = nn.Linear(3, 8)
output = linear.updateOutput(tensorA)
print('output', output)
print('weight', linear.weight)

#dataset = nn.Dataset()

#criterion = nn.MSECriterion()
#trainer = nn.StochasticGradient(linear, criterion)

sys.path.append('thirdparty/python-mnist')
from mnist import MNIST

mlp = nn.Sequential()
mlp.add(nn.Linear(784, 10))
mlp.add(nn.LogSoftMax())

criterion = nn.ClassNLLCriterion()

learningRate = 0.0001
Example #37
def test_pynn():
    PyTorch.manualSeed(123)
    linear = Linear(3, 5)
    linear
    print('linear', linear)
    print('linear.weight', linear.weight)
    print('linear.output', linear.output)
    print('linear.gradInput', linear.gradInput)

    input = PyTorch.DoubleTensor(4, 3).uniform()
    print('input', input)
    output = linear.updateOutput(input)
    print('output', output)

    gradInput = linear.updateGradInput(input, output)
    print('gradInput', gradInput)

    criterion = ClassNLLCriterion()
    print('criterion', criterion)

    print('dir(linear)', dir(linear))

    mlp = Sequential()
    mlp.add(linear)

    output = mlp.forward(input)
    print('output', output)

    import sys
    sys.path.append('thirdparty/python-mnist')
    from mnist import MNIST
    import numpy
    import array

    numpy.random.seed(123)

    mlp = Sequential()
    linear = Linear(784, 10)
    mlp.add(linear)
    logSoftMax = LogSoftMax()
    mlp.add(logSoftMax)
    mlp

    criterion = ClassNLLCriterion()
    print('got criterion')

    learningRate = 0.0001

    mndata = MNIST('/norep/data/mnist')
    imagesList, labelsB = mndata.load_training()
    images = numpy.array(imagesList).astype(numpy.float64)
    #print('imagesArray', images.shape)

    #print(images[0].shape)

    labelsf = array.array('d', labelsB.tolist())
    imagesTensor = PyTorch.asDoubleTensor(images)

    #imagesTensor = PyTorch.FloatTensor(100,784)
    #labels = numpy.array(20,).astype(numpy.int32)
    #labelsTensor = PyTorch.FloatTensor(100).fill(1)
    #print('labels', labels)
    #print(imagesTensor.size())

    def printStorageAddr(name, tensor):
        print('printStorageAddr START')
        storage = tensor.storage()
        if storage is None:
            print(name, 'storage is None')
        else:
            print(name, 'storage is ', hex(storage.dataAddr()))
        print('printStorageAddr END')

    labelsTensor = PyTorch.asDoubleTensor(labelsf)
    labelsTensor += 1
    #print('calling size on imagestensor...')
    #print('   (called size)')

    desiredN = 128
    maxN = int(imagesTensor.size()[0])
    desiredN = min(maxN, desiredN)
    imagesTensor = imagesTensor.narrow(0, 0, desiredN)
    labelsTensor = labelsTensor.narrow(0, 0, desiredN)
    print('imagesTensor.size()', imagesTensor.size())
    print('labelsTensor.size()', labelsTensor.size())
    N = int(imagesTensor.size()[0])
    print('type(imagesTensor)', type(imagesTensor))

    print('start training...')
    for epoch in range(4):
        numRight = 0
        for n in range(N):
    #        print('n', n)
            input = imagesTensor[n]
            label = labelsTensor[n]
            labelTensor = PyTorch.DoubleTensor(1)
            labelTensor[0] = label
    #        print('label', label)
            output = mlp.forward(input)
            prediction = PyTorch.getDoublePrediction(output)
    #        print('prediction', prediction)
            if prediction == label:
                numRight += 1
            criterion.forward(output, labelTensor)
            mlp.zeroGradParameters()
            gradOutput = criterion.backward(output, labelTensor)
            mlp.backward(input, gradOutput)
            mlp.updateParameters(learningRate)
    #        PyTorch.collectgarbage()
    #        if n % 100 == 0:
    #            print('n=', n)
        print('epoch ' + str(epoch) + ' accuracy: ' + str(numRight * 100.0 / N) + '%')
Example #38
def test_pycudann():
#    PyTorch.manualSeed(123)
    linear = Linear(3, 5).cuda()
    print('linear', linear)
    print('linear.weight', linear.weight)
    print('linear.output', linear.output)
    print('linear.gradInput', linear.gradInput)

    input = PyTorch.DoubleTensor(4, 3).uniform().cuda()
    print('input', input)
    output = linear.updateOutput(input)
    print('output', output)

    gradInput = linear.updateGradInput(input, output)
    print('gradInput', gradInput)

    criterion = ClassNLLCriterion().cuda()
    print('criterion', criterion)

    print('dir(linear)', dir(linear))

    mlp = Sequential()
    mlp.add(linear)
    mlp.cuda()

    output = mlp.forward(input)
    print('output', output)

    import sys
    sys.path.append('../pytorch/thirdparty/python-mnist')
    from mnist import MNIST
    import numpy
    import array

#    numpy.random.seed(123)

    mlp = Sequential()

    mlp.add(SpatialConvolutionMM(1, 16, 5, 5, 1, 1, 2, 2))
    mlp.add(ReLU())
    mlp.add(SpatialMaxPooling(3, 3, 3, 3))

    mlp.add(SpatialConvolutionMM(16, 32, 3, 3, 1, 1, 1, 1))
    mlp.add(ReLU())
    mlp.add(SpatialMaxPooling(2, 2, 2, 2))

    mlp.add(Reshape(32 * 4 * 4))
    mlp.add(Linear(32 * 4 * 4, 150))
    mlp.add(Tanh())
    mlp.add(Linear(150, 10))
    mlp.add(LogSoftMax())
    mlp.cuda()

    criterion = ClassNLLCriterion().cuda()
    print('got criterion')

    learningRate = 0.02

    mndata = MNIST('/norep/data/mnist')
    imagesList, labelsB = mndata.load_training()
    images = numpy.array(imagesList).astype(numpy.float64)
    #print('imagesArray', images.shape)

    #print(images[0].shape)

    labelsf = array.array('d', labelsB.tolist())
    imagesTensor = PyTorch.asDoubleTensor(images).cuda()

    #imagesTensor = PyTorch.FloatTensor(100,784)
    #labels = numpy.array(20,).astype(numpy.int32)
    #labelsTensor = PyTorch.FloatTensor(100).fill(1)
    #print('labels', labels)
    #print(imagesTensor.size())

    def printStorageAddr(name, tensor):
        print('printStorageAddr START')
        storage = tensor.storage()
        if storage is None:
            print(name, 'storage is None')
        else:
            print(name, 'storage is ', hex(storage.dataAddr()))
        print('printStorageAddr END')

    labelsTensor = PyTorch.asDoubleTensor(labelsf).cuda()
    labelsTensor += 1
    #print('calling size on imagestensor...')
    #print('   (called size)')

    desiredN = 1280
    maxN = int(imagesTensor.size()[0])
    desiredN = min(maxN, desiredN)
    imagesTensor = imagesTensor.narrow(0, 0, desiredN)
    labelsTensor = labelsTensor.narrow(0, 0, desiredN)
    print('imagesTensor.size()', imagesTensor.size())
    print('labelsTensor.size()', labelsTensor.size())
    N = int(imagesTensor.size()[0])
    print('type(imagesTensor)', type(imagesTensor))
    size = PyTorch.LongStorage(4)
    size[0] = N
    size[1] = 1
    size[2] = 28
    size[3] = 28
    imagesTensor.resize(size)
    imagesTensor /= 255.0
    imagesTensor -= 0.2
    print('imagesTensor.size()', imagesTensor.size())

    print('start training...')
    for epoch in range(12):
        numRight = 0
        for n in range(N):
    #        print('n', n)
            input = imagesTensor[n]
            label = labelsTensor[n]
            labelTensor = PyTorch.DoubleTensor(1).cuda()
            labelTensor[0] = label
    #        print('label', label)
            output = mlp.forward(input)
            prediction = PyCudaTorch.getPrediction(output)
    #        print('prediction', prediction)
            if prediction == label:
                numRight += 1
            criterion.forward(output, labelTensor)
            mlp.zeroGradParameters()
            gradOutput = criterion.backward(output, labelTensor)
            mlp.backward(input, gradOutput)
            mlp.updateParameters(learningRate)
    #        PyTorch.collectgarbage()
    #        if n % 100 == 0:
    #            print('n=', n)
        print('epoch ' + str(epoch) + ' accuracy: ' + str(numRight * 100.0 / N) + '%')
Example #39
from __future__ import print_function, division
import PyTorch

from PyTorchAug import nn

#net = nn.Minus()
inputTensor = PyTorch.DoubleTensor(2, 3).uniform()
print('inputTensor', inputTensor)

PyTorch.require('nnx')
net = nn.Minus()
print(net.forward(inputTensor))
Example #40
from __future__ import print_function
import PyTorch

PyTorch.require('rnn')

from PyTorchAug import nn

lstm = nn.LSTM(3,4)
print('lstm', lstm)