def injectFaultConv2D(a, b):
    """Run conv2d on (a, b), apply conditional fault perturbation, and return the result."""
    logging.debug("Calling Operator conv2D " + getArgs(a, b))
    # FIXME: Make this work with any type, not just float32
    result = np.float32(conv2d.conv2d(a, b))
    # Possibly perturb the result to emulate a fault in the CONV2D operator.
    result = condPerturb(Ops.CONV2D, result)
    if logReturn:
        logging.debug("\tReturning from Conv2D " + str(result))
    return result
def test(x, w, pad='SAME', stride=(1, 1)):
    """Compare the local conv2d forward pass against tf.nn.conv2d (eager mode)."""
    expected = conv2d(x, w, pad=pad, stride=stride).ravel()
    # Reference computation in TensorFlow on the same inputs.
    x_t = tf.constant(x, dtype='float32')
    w_t = tf.constant(w, dtype='float32')
    out = tf.nn.conv2d(x_t, w_t,
                       strides=[1, stride[0], stride[1], 1],
                       padding=pad)
    np.testing.assert_almost_equal(expected, out.numpy().ravel(), decimal=3)
def test_gradx(x, w, pad='SAME', stride=(1, 1)):
    """Compare conv2d_gradx (gradient w.r.t. the input x) against TensorFlow's autodiff.

    NOTE(review): this is the TF1 graph-mode version (tf.gradients + tf.Session,
    both removed in TF2). A later definition of test_gradx in this file rebinds
    the name with an eager GradientTape implementation, so this version is
    effectively dead code when both are present — consider removing it.
    """
    # [N, H, W, K]
    y = conv2d(x, w, pad=pad, stride=stride)
    # Random upstream gradient with the same shape as the forward output.
    dy = np.random.rand(*y.shape)
    # x.shape[1:3] supplies the spatial (H, W) size of the input to reconstruct.
    dx = conv2d_gradx(w, dy, x.shape[1:3], pad=pad, stride=stride)
    # Tensorflow checks
    xx = tf.constant(x, dtype='float32')
    ww = tf.constant(w, dtype='float32')
    dyy = tf.constant(dy, dtype='float32')
    yy = tf.nn.conv2d(xx, ww, strides=[1, stride[0], stride[1], 1], padding=pad)
    # NOTE(review): misleading name — this is the gradient w.r.t. xx (dL/dx),
    # not w.r.t. the weights. tf.gradients returns a list; squeeze axis 0
    # extracts the single tensor.
    dww = tf.squeeze(tf.gradients(yy, xx, dyy), [0])
    with tf.Session() as sess:
        dx_tf = dww.eval()
    np.testing.assert_almost_equal(dx.ravel(), dx_tf.ravel(), decimal=3)
def test_gradx(x, w, pad='SAME', stride=(1, 1)):
    """Compare conv2d_gradx (gradient w.r.t. the input x) against TF's GradientTape."""
    # Forward pass with the library implementation; output is [N, H, W, K].
    y = conv2d(x, w, pad=pad, stride=stride)
    # Random upstream gradient matching the forward output's shape.
    upstream = np.random.rand(*y.shape)
    # x.shape[1:3] supplies the spatial (H, W) size of the input being reconstructed.
    dx = conv2d_gradx(w, upstream, x.shape[1:3], pad=pad, stride=stride)

    # TensorFlow reference gradient via eager autodiff.
    with tf.GradientTape() as tape:
        x_t = tf.constant(x, dtype='float32')
        tape.watch(x_t)  # constants are not watched automatically
        w_t = tf.constant(w, dtype='float32')
        g_t = tf.constant(upstream, dtype='float32')
        y_t = tf.nn.conv2d(x_t, w_t,
                           strides=[1, stride[0], stride[1], 1],
                           padding=pad)
        # Gradient of y_t w.r.t. x_t, seeded with the same upstream gradient.
        grad_x = tape.gradient(y_t, x_t, output_gradients=g_t)
    np.testing.assert_almost_equal(dx.ravel(), grad_x.numpy().ravel(), decimal=3)