def test_float_tensor():
    PyTorch.manualSeed(123)
    print('dir(G)', dir())
    print('test_float_tensor')
    a = PyTorch.FloatTensor(3, 2)
    print('got float a')
    myeval('a.dims()')
    a.uniform()
    myeval('a')
    myexec('a[1][1] = 9')
    myeval('a')
    myeval('a.size()')
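# The tests in this section call `myeval`/`myexec` print helpers that are not
# defined in this excerpt (a simpler `myeval` appears in the standalone
# PyClTorch script below, and commented-out versions appear inside
# test_pytorchFloat). A minimal sketch, assuming the helpers are meant to
# evaluate the expression in the *caller's* frame so that test-local variables
# such as `a` and `D` are visible (a plain eval/exec would only see globals):
import inspect


def myeval(expr):
    # print the expression and its value, evaluated in the caller's frame
    caller = inspect.stack()[1][0]
    print(expr, ':', eval(expr, caller.f_globals, caller.f_locals))


def myexec(code):
    # print the statement, then execute it in the caller's frame; in-place
    # tensor ops like 'A += 3' mutate the same object the caller holds
    caller = inspect.stack()[1][0]
    print(code)
    exec(code, caller.f_globals, caller.f_locals)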
# PyClTorch.newfunction(123)
import PyTorch
import PyClTorch
from PyTorchAug import *


def myeval(expr):
    print(expr, ':', eval(expr))


if __name__ == '__main__':
    # a = PyTorch.foo(3,2)
    # print('a', a)
    # print(PyTorch.FloatTensor(3,2))
    a = PyTorch.FloatTensor(4, 3).uniform()
    print('a', a)
    a = a.cl()
    print(type(a))
    print('a.dims()', a.dims())
    print('a.size()', a.size())
    print('a', a)
    print('sum:', a.sum())
    myeval('a + 1')

    b = PyClTorch.ClTensor()
    print('got b')
    myeval('b')
    b.resizeAs(a)
    myeval('b')
def test_cltorch():
    if 'ALLOW_NON_GPUS' in os.environ:
        PyClTorch.setAllowNonGpus(True)

    # a = PyTorch.foo(3,2)
    # print('a', a)
    # print(PyTorch.FloatTensor(3,2))
    a = PyClTorch.ClTensor([3, 4, 9])
    assert a[0] == 3
    assert a[1] == 4
    assert a[2] == 9
    print('a', a)

    a = PyClTorch.ClTensor([[3, 5, 7], [9, 2, 4]])
    print('a', a)
    print('a[0]', a[0])
    print('a[0][0]', a[0][0])
    assert a[0][0] == 3
    assert a[1][0] == 9
    assert a[1][2] == 4

    PyTorch.manualSeed(123)
    a = PyTorch.FloatTensor(4, 3).uniform()
    print('a', a)
    a_cl = a.cl()
    print(type(a_cl))
    assert str(type(a_cl)) == '<class \'PyClTorch.ClTensor\'>'
    print('a_cl[0]', a_cl[0])
    print('a_cl[0][0]', a_cl[0][0])
    assert a[0][0] == a_cl[0][0]
    assert a[0][1] == a_cl[0][1]
    assert a[1][1] == a_cl[1][1]

    print('a.dims()', a.dims())
    print('a.size()', a.size())
    print('a', a)
    assert a.dims() == 2
    assert a.size()[0] == 4
    assert a.size()[1] == 3

    a_sum = a.sum()
    a_cl_sum = a_cl.sum()
    assert abs(a_sum - a_cl_sum) < 1e-4

    a_cl2 = a_cl + 3.2
    assert abs(a_cl2[1][0] - a[1][0] - 3.2) < 1e-4

    b = PyClTorch.ClTensor()
    print('got b')
    myeval('b')
    assert b.dims() == -1
    b.resizeAs(a)
    myeval('b')
    assert b.dims() == 2
    assert b.size()[0] == 4
    assert b.size()[1] == 3

    print('run uniform')
    b.uniform()
    myeval('b')

    print('create new b')
    b = PyClTorch.ClTensor()
    print('b.dims()', b.dims())
    print('b.size()', b.size())
    print('b', b)

    c = PyTorch.FloatTensor().cl()
    print('c.dims()', c.dims())
    print('c.size()', c.size())
    print('c', c)
    assert b.dims() == -1
    assert b.size() is None

    print('creating Linear...')
    linear = nn.Linear(3, 5).float()
    print('created linear')
    print('linear:', linear)
    myeval('linear.output')
    myeval('linear.output.dims()')
    myeval('linear.output.size()')
    myeval('linear.output.nElement()')

    linear_cl = linear.clone().cl()
    print('type(linear.output)', type(linear.output))
    print('type(linear_cl.output)', type(linear_cl.output))
    assert str(type(linear.output)) == '<class \'PyTorch._FloatTensor\'>'
    assert str(type(linear_cl.output)) == '<class \'PyClTorch.ClTensor\'>'
    # myeval('type(linear)')
    # myeval('type(linear.output)')
    myeval('linear_cl.output.dims()')
    myeval('linear_cl.output.size()')
    # myeval('linear.output')
    assert str(type(linear)) == '<class \'PyTorchAug.Linear\'>'
    assert str(type(linear_cl)) == '<class \'PyTorchAug.Linear\'>'
    # assert str(type(linear.output)) == '<class \'PyClTorch.ClTensor\'>'
    # assert linear.output.dims() == -1  # why is this 0? should be -1???
    # assert linear.output.size() is None  # again, should be None?
    a_cl = PyClTorch.ClTensor(4, 3).uniform()
    # print('a_cl', a_cl)
    output_cl = linear_cl.forward(a_cl)
    # print('output', output)
    assert str(type(output_cl)) == '<class \'PyClTorch.ClTensor\'>'
    assert output_cl.dims() == 2
    assert output_cl.size()[0] == 4
    assert output_cl.size()[1] == 5

    a = a_cl.float()
    output = linear.forward(a)
    assert str(type(output)) == '<class \'PyTorch._FloatTensor\'>'
    assert output.dims() == 2
    assert output.size()[0] == 4
    assert output.size()[1] == 5

    print('a.size()', a.size())
    print('a_cl.size()', a_cl.size())
    assert a[1][0] == a_cl[1][0]
    assert a[2][1] == a_cl[2][1]

    mlp = nn.Sequential()
    mlp.add(nn.SpatialConvolutionMM(1, 16, 5, 5, 1, 1, 2, 2))
    mlp.add(nn.ReLU())
    mlp.add(nn.SpatialMaxPooling(3, 3, 3, 3))
    mlp.add(nn.SpatialConvolutionMM(16, 32, 5, 5, 1, 1, 2, 2))
    mlp.add(nn.ReLU())
    mlp.add(nn.SpatialMaxPooling(2, 2, 2, 2))
    mlp.add(nn.Reshape(32 * 4 * 4))
    mlp.add(nn.Linear(32 * 4 * 4, 150))
    mlp.add(nn.Tanh())
    mlp.add(nn.Linear(150, 10))
    mlp.add(nn.LogSoftMax())
    mlp.float()

    mlp_cl = mlp.clone().cl()
    print('mlp_cl', mlp_cl)
    # myeval('mlp.output')

    input = PyTorch.FloatTensor(128, 1, 28, 28).uniform()
    input_cl = PyClTorch.FloatTensorToClTensor(input.clone())  # This is a bit hacky...
    output = mlp.forward(input)
    # myeval('input[0]')
    output_cl = mlp_cl.forward(input_cl)
    # myeval('output[0]')
    assert (output_cl.float() - output).abs().max() < 1e-4
def test_pytorchFloat():
    PyTorch.manualSeed(123)
    numpy.random.seed(123)

    FloatTensor = PyTorch.FloatTensor

    A = numpy.random.rand(6).reshape(3, 2).astype(numpy.float32)
    B = numpy.random.rand(8).reshape(2, 4).astype(numpy.float32)

    C = A.dot(B)
    print('C', C)

    print('calling .asTensor...')
    tensorA = PyTorch.asFloatTensor(A)
    tensorB = PyTorch.asFloatTensor(B)
    print(' ... asTensor called')

    print('tensorA', tensorA)

    tensorA.set2d(1, 1, 56.4)
    tensorA.set2d(2, 0, 76.5)
    print('tensorA', tensorA)
    print('A', A)

    print('add 5 to tensorA')
    tensorA += 5
    print('tensorA', tensorA)
    print('A', A)

    print('add 7 to tensorA')
    tensorA2 = tensorA + 7
    print('tensorA2', tensorA2)
    print('tensorA', tensorA)

    tensorAB = tensorA * tensorB
    print('tensorAB', tensorAB)
    print('A.dot(B)', A.dot(B))

    print('tensorA[2]', tensorA[2])

    D = PyTorch.FloatTensor(5, 3).fill(1)
    print('D', D)

    D[2][2] = 4
    print('D', D)

    D[3].fill(9)
    print('D', D)

    D.narrow(1, 2, 1).fill(0)
    print('D', D)

    print(PyTorch.FloatTensor(3, 4).uniform())
    print(PyTorch.FloatTensor(3, 4).normal())
    print(PyTorch.FloatTensor(3, 4).cauchy())
    print(PyTorch.FloatTensor(3, 4).exponential())
    print(PyTorch.FloatTensor(3, 4).logNormal())
    print(PyTorch.FloatTensor(3, 4).bernoulli())
    print(PyTorch.FloatTensor(3, 4).geometric())
    print(PyTorch.FloatTensor(3, 4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.FloatTensor(3, 4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.FloatTensor(3, 4).geometric())

    print(type(PyTorch.FloatTensor(2, 3)))

    size = PyTorch.LongStorage(2)
    size[0] = 4
    size[1] = 3
    D.resize(size)
    print('D after resize:\n', D)
    print('resize1d', PyTorch.FloatTensor().resize1d(3).fill(1))
    print('resize2d', PyTorch.FloatTensor().resize2d(2, 3).fill(1))
    print('resize', PyTorch.FloatTensor().resize(size).fill(1))

    D = PyTorch.FloatTensor(size).geometric()

    # def myeval(expr):
    #     print(expr, ':', eval(expr))

    # def myexec(expr):
    #     print(expr)
    #     exec(expr)

    myeval('FloatTensor(3,2).nElement()')
    myeval('FloatTensor().nElement()')
    myeval('FloatTensor(1).nElement()')

    A = FloatTensor(3, 4).geometric(0.9)
    myeval('A')
    myexec('A += 3')
    myeval('A')
    myexec('A *= 3')
    myeval('A')
    myexec('A -= 3')
    myeval('A')
    print('A /= 3')
    A /= 3
    myeval('A')

    myeval('A + 5')
    myeval('A - 5')
    myeval('A * 5')
    print('A / 2')
    A / 2

    B = FloatTensor().resizeAs(A).geometric(0.9)
    myeval('B')
    myeval('A + B')
    myeval('A - B')
    myexec('A += B')
    myeval('A')
    myexec('A -= B')
    myeval('A')
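# A small aliasing check, assuming (as the tensorA/A prints above suggest) that
# PyTorch.asFloatTensor wraps the numpy array's memory rather than copying it.
# The test name is illustrative, not part of the original suite:
def test_asFloatTensor_shares_memory():
    X = numpy.zeros((2, 2), dtype=numpy.float32)
    tensorX = PyTorch.asFloatTensor(X)
    # write through the tensor view, observe the change in the numpy array
    tensorX.set2d(0, 1, 3.5)
    assert X[0][1] == 3.5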
def test_refcount():
    D = PyTorch.FloatTensor(1000, 1000).fill(1)
    myeval('D.isContiguous()')
    myeval('D.refCount')
    assert D.refCount == 1

    print('\nget storage into Ds')
    Ds = D.storage()
    myeval('D.refCount')
    myeval('Ds.refCount')
    assert D.refCount == 1
    assert Ds.refCount == 2

    print('\nget E')
    E = D.narrow(1, 100, 800)
    myeval('Ds.refCount')
    myeval('E.isContiguous()')
    myeval('D.refCount')
    myeval('E.refCount')
    assert Ds.refCount == 3
    assert E.refCount == 1
    assert D.refCount == 1

    print('\nget Es')
    Es = E.storage()
    myeval('Ds.refCount')
    myeval('Es.refCount')
    myeval('E.isContiguous()')
    myeval('D.refCount')
    myeval('E.refCount')
    assert Es.refCount == 4
    assert Ds.refCount == 4
    assert E.refCount == 1
    assert D.refCount == 1

    print('\nget Ec')
    Ec = E.contiguous()
    myeval('Ds.refCount')
    myeval('Es.refCount')
    myeval('D.refCount')
    myeval('E.refCount')
    myeval('Ec.refCount')
    assert Es.refCount == 4
    assert Ds.refCount == 4
    assert E.refCount == 1
    assert D.refCount == 1
    assert Ec.refCount == 1

    print('\nget Ecs')
    Ecs = Ec.storage()
    myeval('Ds.refCount')
    myeval('Es.refCount')
    myeval('D.refCount')
    myeval('E.refCount')
    myeval('Ec.refCount')
    myeval('Ecs.refCount')
    assert Es.refCount == 4
    assert Ds.refCount == 4
    assert E.refCount == 1
    assert D.refCount == 1
    assert Ec.refCount == 1
    assert Ecs.refCount == 2

    Dc = D.contiguous()
    print('\nafter creating Dc')
    myeval('D.refCount')
    myeval('Dc.refCount')
    myeval('Ds.refCount')
    assert D.refCount == 2
    assert Dc.refCount == 2
    assert Ds.refCount == 4

    Dcs = Dc.storage()
    print('\n after get Dcs')
    assert D.refCount == 2
    assert Dc.refCount == 2
    myeval('Ds.refCount')
    myeval('Dcs.refCount')
    assert Ds.refCount == 5
    assert Dcs.refCount == 5

    D = None
    E = None
    Ec = None
    Dc = None
    gc.collect()
    print('\n after setting tensors to None')
    myeval('Ds.refCount')
    myeval('Es.refCount')
    myeval('Ecs.refCount')
    assert Dcs.refCount == 3
    assert Ds.refCount == 3
    assert Es.refCount == 3
    assert Ecs.refCount == 1
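# One way to run these tests directly, rather than through a test runner such
# as py.test; this assumes the module-level imports used above (PyTorch,
# PyClTorch, PyTorchAug's nn, numpy, os, gc) are present in the file:
if __name__ == '__main__':
    test_float_tensor()
    test_pytorchFloat()
    test_refcount()
    test_cltorch()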