def test_relu(self):
    top = hm.zeros((4, 12, 15, 15))
    bottom = hm.random((4, 12, 15, 15), _range=(-1, 1))

    @compose
    def fn(bottom, top):
        top = ReluForward(bottom)
        return top

    fn(bottom, top)
    top.sync_host()
    expected = np.copy(bottom)
    expected[expected < 0] = 0
    self._check(top, expected)

    top_diff = hm.random(top.shape)
    bottom_diff = hmarray(top.shape)

    @compose
    def fn(top_diff, bottom, bottom_diff):
        bottom_diff = ReluBackward(bottom, top_diff)
        return bottom_diff

    fn(top_diff, bottom, bottom_diff)
    bottom_diff.sync_host()
    expected = np.copy(top_diff)
    expected[bottom < 0] = 0
    self._check(bottom_diff, expected)
def visit_Assign(self, node):
    node.value = self.visit(node.value)
    # Only handle simple `name = ...` assignments whose target is not
    # already bound; everything else passes through unchanged.
    if not isinstance(node.targets[0], ast.Name) or \
            node.targets[0].id in self.symbol_table:
        return node
    # The original had a dead branch that required node.value to be both
    # an ast.BinOp and an ast.Call; the two branches collapse to this one
    # check for a call to an HMOperation subclass.
    if isinstance(node.value, ast.Call) and len(node.targets) == 1 and \
            inspect.isclass(self.symbol_table[node.value.func.id]) and \
            issubclass(self.symbol_table[node.value.func.id], HMOperation):
        # TODO: Operations should specify an output generator
        self.symbol_table[node.targets[0].id] = hm.zeros(
            self.symbol_table[node.value.args[0].id].shape)
    return node
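# A minimal, self-contained sketch of the allocation behavior above: when the
# composer sees `name = SomeOp(arg)` and `name` is not yet bound, it allocates
# an output buffer matching the first argument's shape. `HMOp`, `AllocDemo`,
# and `toy_symbol_table` are hypothetical stand-ins for illustration only,
# not part of hindemith's API.
import ast
import inspect
import numpy as np


class HMOp(object):
    """Stand-in for HMOperation."""


class ReluForward(HMOp):
    pass


toy_symbol_table = {
    'ReluForward': ReluForward,
    'bottom': np.zeros((4, 12, 15, 15), np.float32),
}


class AllocDemo(ast.NodeTransformer):
    def visit_Assign(self, node):
        target = node.targets[0]
        if isinstance(target, ast.Name) and \
                target.id not in toy_symbol_table and \
                isinstance(node.value, ast.Call) and \
                inspect.isclass(toy_symbol_table[node.value.func.id]) and \
                issubclass(toy_symbol_table[node.value.func.id], HMOp):
            # Allocate an output the same shape as the first argument.
            toy_symbol_table[target.id] = np.zeros(
                toy_symbol_table[node.value.args[0].id].shape, np.float32)
        return node


AllocDemo().visit(ast.parse("top = ReluForward(bottom)"))
assert toy_symbol_table['top'].shape == (4, 12, 15, 15)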
def test_pool(self):
    shape = (3, 16, 24, 24)
    a = hm.random(shape, _range=(0, 255))
    actual_mask = hmarray((3, 16, 12, 12))
    actual = hmarray((3, 16, 12, 12))
    expected_mask = hmarray((3, 16, 12, 12))
    expected = hmarray((3, 16, 12, 12))
    expected.fill(float('-inf'))

    @compose
    def fn(bottom, mask, top):
        top, mask = PoolForward(bottom, kernel_size=(2, 2),
                                padding=(0, 0), stride=(2, 2))
        return top, mask

    fn(a, actual_mask, actual)
    actual.sync_host()
    actual_mask.sync_host()
    reference_pool(a, expected, expected_mask, (2, 2), (2, 2), (0, 0))
    self._check(actual, expected)
    self._check(actual_mask, expected_mask)

    bottom_diff = hm.zeros(shape)
    expected_bottom_diff = hm.zeros(shape)
    mask = actual_mask
    top_diff = hm.random((3, 16, 12, 12))

    @compose
    def fn(top_diff, mask, bottom_diff):
        bottom_diff = PoolBackward(top_diff, mask, kernel_size=(2, 2),
                                   padding=(0, 0), stride=(2, 2))
        return bottom_diff

    fn(top_diff, mask, bottom_diff)
    bottom_diff.sync_host()
    reference_pool_backward(top_diff, mask, expected_bottom_diff,
                            (2, 2), (2, 2), (0, 0))
    self._check(bottom_diff, expected_bottom_diff)
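# reference_pool / reference_pool_backward are not shown in this section; the
# following is a plausible NumPy sketch inferred from the call sites above,
# assuming the mask stores each window's argmax as a flat index into the
# (H, W) plane of the input, and ignoring padding (the tests pass (0, 0)).
def reference_pool(data, out, mask, kernel_size, stride, padding):
    kh, kw = kernel_size
    sh, sw = stride
    for n in range(out.shape[0]):
        for c in range(out.shape[1]):
            for y in range(out.shape[2]):
                for x in range(out.shape[3]):
                    window = data[n, c, y * sh:y * sh + kh,
                                  x * sw:x * sw + kw]
                    dy, dx = np.unravel_index(np.argmax(window),
                                              window.shape)
                    out[n, c, y, x] = window[dy, dx]
                    mask[n, c, y, x] = \
                        (y * sh + dy) * data.shape[3] + (x * sw + dx)


def reference_pool_backward(top_diff, mask, bottom_diff, kernel_size,
                            stride, padding):
    # Assumes bottom_diff is zero-initialized, as in the test above.
    for n in range(top_diff.shape[0]):
        for c in range(top_diff.shape[1]):
            for y in range(top_diff.shape[2]):
                for x in range(top_diff.shape[3]):
                    dy, dx = divmod(int(mask[n, c, y, x]),
                                    bottom_diff.shape[3])
                    bottom_diff[n, c, dy, dx] += top_diff[n, c, y, x]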
def test_conv_backward(self):
    # TODO: Check bias diff
    @compose
    def fn(top_diff, weights, bottom, bottom_diff, weights_diff,
           bias_diff):
        bottom_diff, weights_diff, bias_diff = \
            ConvBackward(bottom, top_diff, weights,
                         kernel_size=(11, 11), padding=(0, 0),
                         stride=(4, 4))
        return bottom_diff, weights_diff, bias_diff

    top_diff = hm.random((3, 12, 25), _range=(0, 5))
    bottom = hm.random((3, 3, 27, 27), _range=(0, 255))
    weights = hm.random((12, 363), _range=(-.2, .2))
    weights_diff = hm.zeros((12, 363))
    bias_diff = hm.zeros((12, ))
    bottom_diff = hm.zeros((3, 3, 27, 27))
    fn(top_diff, weights, bottom, bottom_diff, weights_diff, bias_diff)

    expected_weights_diff = np.zeros_like(weights)
    expected_bottom_diff = np.zeros_like(bottom_diff)
    for i in range(top_diff.shape[0]):
        col_data = reference_im2col(bottom[i], (11, 11), (4, 4), (0, 0))
        expected_weights_diff += top_diff[i].dot(col_data.T)
        expected_bottom_diff[i] = reference_col2im(
            weights.T.dot(top_diff[i]), (11, 11), (4, 4), (0, 0),
            expected_bottom_diff[i].shape)

    weights_diff.sync_host()
    np.testing.assert_array_almost_equal(weights_diff,
                                         expected_weights_diff, decimal=2)
    bottom_diff.sync_host()
    np.testing.assert_array_almost_equal(bottom_diff,
                                         expected_bottom_diff, decimal=2)
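# reference_im2col / reference_col2im are likewise not shown; a minimal NumPy
# sketch consistent with the shapes in the test above (padding is accepted
# but not handled, since the tests pass (0, 0)). Each column of the col
# buffer is one flattened (channels * kh * kw) receptive field, matching the
# (12, 363)-shaped weights.
def reference_im2col(data, kernel_size, stride, padding):
    channels, height, width = data.shape
    kh, kw = kernel_size
    sh, sw = stride
    out_h = (height - kh) // sh + 1
    out_w = (width - kw) // sw + 1
    col = np.zeros((channels * kh * kw, out_h * out_w), np.float32)
    for y in range(out_h):
        for x in range(out_w):
            patch = data[:, y * sh:y * sh + kh, x * sw:x * sw + kw]
            col[:, y * out_w + x] = patch.ravel()
    return col


def reference_col2im(col, kernel_size, stride, padding, shape):
    channels, height, width = shape
    kh, kw = kernel_size
    sh, sw = stride
    out_h = (height - kh) // sh + 1
    out_w = (width - kw) // sw + 1
    data = np.zeros(shape, np.float32)
    for y in range(out_h):
        for x in range(out_w):
            patch = col[:, y * out_w + x].reshape(channels, kh, kw)
            data[:, y * sh:y * sh + kh, x * sw:x * sw + kw] += patch
    return data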
def test_forward(self):
    @compose
    def fn(a, b, c, d):
        d = ConcatForward(a, b, c)
        return d

    a = hm.random((16, 12, 55, 55))
    b = hm.random((16, 12, 55, 55))
    c = hm.random((16, 12, 55, 55))
    d = hm.zeros((16, 36, 55, 55))
    d = fn(a, b, c, d)
    d.sync_host()
    np.testing.assert_array_almost_equal(d[:16, 0:12, ...], a)
    np.testing.assert_array_almost_equal(d[:16, 12:24, ...], b)
    np.testing.assert_array_almost_equal(d[:16, 24:36, ...], c)
def test_conv_forward(self):
    @compose
    def fn(a, weights, out, bias):
        out = ConvForward(a, weights, bias, kernel_size=(11, 11),
                          padding=(0, 0), stride=(4, 4))
        return out

    a = hm.random((3, 3, 27, 27), _range=(0, 255))
    weights = hm.random((12, 363), _range=(-.2, .2))
    out = hm.zeros((3, 12, 5, 5))
    bias = hm.random((12, ))
    fn(a, weights, out, bias)

    weights = weights.reshape(12, 3, 11, 11)
    expected = np.zeros((3, 12, 5, 5), np.float32)
    reference_conv(a, weights, bias, expected, (4, 4), (0, 0))
    out.sync_host()
    np.testing.assert_array_almost_equal(out, expected, decimal=1)
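# reference_conv is also inferred from its call site; a sketch built on the
# reference_im2col above. Weights arrive already reshaped to
# (num_filters, channels, kh, kw), as in the test.
def reference_conv(data, weights, bias, out, stride, padding):
    w_mat = weights.reshape(weights.shape[0], -1)
    for i in range(data.shape[0]):
        col = reference_im2col(data[i], weights.shape[2:], stride, padding)
        out[i] = (w_mat.dot(col) + bias[:, None]).reshape(out[i].shape)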
from hindemith.core import compose
# `hm` and `hmarray` are used below but were not imported in the original
# snippet; these import paths are assumptions:
import hindemith as hm
from hindemith.types import hmarray
import caffe
import numpy as np
import time

prototxt = "models/alexnet-ng/deploy.prototxt"
caffemodel = "models/alexnet-ng/alexnet-ng.caffemodel"

# caffe.set_mode_gpu()
# caffe.set_device(2)
# caffe.set_mode_cpu()
caffe_net = caffe.Net(prototxt, caffemodel, caffe.TEST)

conv1_filters = caffe_net.params['conv1'][0].data.view(hmarray)
conv1_bias = caffe_net.params['conv1'][1].data.view(hmarray)
conv1 = hm.zeros(caffe_net.blobs['conv1'].data.shape)
norm1 = hm.zeros(caffe_net.blobs['norm1'].data.shape)
norm1_scale = hm.zeros(norm1.shape)
pool1 = hm.zeros(caffe_net.blobs['pool1'].data.shape)
pool1_mask = hm.zeros(pool1.shape)

conv2_filters = caffe_net.params['conv2'][0].data.view(hmarray)
conv2_bias = caffe_net.params['conv2'][1].data.view(hmarray)
conv2 = hm.zeros(caffe_net.blobs['conv2'].data.shape)
norm2 = hm.zeros(caffe_net.blobs['norm2'].data.shape)
norm2_scale = hm.zeros(norm2.shape)
pool2 = hm.zeros(caffe_net.blobs['pool2'].data.shape)
import pycl as cl
# `hm` and `hmarray` are used below but were not imported in the original
# snippet; these import paths are assumptions:
import hindemith as hm
from hindemith.types import hmarray
import caffe
import numpy as np
import time

prototxt = "benchmarks/alexnet.prototxt"
caffemodel = "models/alexnet-ng/alexnet-ng.caffemodel"

caffe.set_mode_gpu()
caffe.set_device(2)
# caffe.set_mode_cpu()
caffe_net = caffe.Net(prototxt, caffemodel, caffe.TEST)

conv1_filters = caffe_net.params['conv1'][0].data.view(hmarray)
conv1_bias = caffe_net.params['conv1'][1].data.view(hmarray)
conv1 = hm.zeros(caffe_net.blobs['conv1'].data.shape)
norm1 = hm.zeros(caffe_net.blobs['norm1'].data.shape)
norm1_scale = hm.zeros(norm1.shape)
pool1 = hm.zeros(caffe_net.blobs['pool1'].data.shape)
pool1_mask = hm.zeros(pool1.shape)

conv2_filters = caffe_net.params['conv2'][0].data.view(hmarray)
conv2_bias = caffe_net.params['conv2'][1].data.view(hmarray)
conv2 = hm.zeros(caffe_net.blobs['conv2'].data.shape)
norm2 = hm.zeros(caffe_net.blobs['norm2'].data.shape)
norm2_scale = hm.zeros(norm2.shape)
pool2 = hm.zeros(caffe_net.blobs['pool2'].data.shape)