def testWhenSideEffect(self):
  """A function with a stateful op must not be deduplicated between branches.

  Both branches of the cond call f_1, which contains random_uniform — a
  stateful (side-effecting) op — so the compiler must keep two separate
  copies of f_1 rather than caching one.
  """
  with self.session() as sess:

    def f_1(x):
      # random_uniform is stateful, which should prevent function caching.
      rand_num = 10 * random_ops.random_uniform(
          shape=[2, 2], minval=1, maxval=9, dtype=dtypes.int32, name="namef1")
      return rand_num * x

    def f_cond(x1, z):
      # Both branches are the same function on purpose.
      cond_1 = control_flow_ops.cond(math_ops.less(z[0], z[1]),
                                     lambda: f_1(x1), lambda: f_1(x1))
      return cond_1

    with ops.device('cpu'):
      x1 = array_ops.placeholder(dtypes.int32, [2, 2])
      z = array_ops.placeholder(dtypes.int32, [2])

    with ipu.scopes.ipu_scope("/device:IPU:0"):
      r1 = ipu.ipu_compiler.compile(f_cond, inputs=[x1, z])

    i_x1 = np.full((2, 2), 10)
    i_z = np.full((2), 8)

    report = ReportJSON(self, sess)
    sess.run(r1, {x1: i_x1, z: i_z})

    report.parse_log()
    # Two compute-set matches => f_1 was instantiated twice (not cached).
    report.assert_compute_sets_matches(
        '*namef1*', 2,
        "f1 should be on the list twice as it should not be cached "
        "due to SideEffect.")
def testSimpleCaching(self):
  """A pure function used in both cond branches should be cached once.

  f_1 is side-effect free, so when both branches of the cond call it the
  compiler should emit a single shared instance of its compute set.
  """
  with self.session() as sess:

    def f_1(x):
      # Pure function: eligible for caching/deduplication.
      return math_ops.square(x, name="namef1")

    def f_cond(x1, z):
      cond_1 = control_flow_ops.cond(math_ops.less(z[0], z[1]),
                                     lambda: f_1(x1), lambda: f_1(x1))
      return cond_1

    with ops.device('cpu'):
      x1 = array_ops.placeholder(dtypes.int32, [2, 2])
      z = array_ops.placeholder(dtypes.int32, [2])

    with ipu.scopes.ipu_scope("/device:IPU:0"):
      r1 = ipu.ipu_compiler.compile(f_cond, inputs=[x1, z])

    i_x1 = np.full((2, 2), 10)
    i_z = np.full((2), 8)

    report = ReportJSON(self, sess)
    sess.run(r1, {x1: i_x1, z: i_z})

    report.parse_log()
    # Exactly one compute-set match => the second call reused the cached copy.
    report.assert_compute_sets_matches(
        '*namef1*', 1, "There should be only one f_1 due to caching.")
def testGRUNotCached(self):
  """Two GRU layers with different input lengths must not share a cached copy.

  The second GRU consumes a sequence twice as long as the first, so the two
  layers are not interchangeable and the compiler has to instantiate each
  one separately (forward and backward).
  """
  with self.session() as sess:
    # Note here the second GRU is larger.
    pinputs1 = array_ops.placeholder(dataType,
                                     [seq_len, batch_size, input_size],
                                     name="inputs1")
    pinputs2 = array_ops.placeholder(dataType,
                                     [seq_len * 2, batch_size, input_size],
                                     name="inputs2")
    plabels = array_ops.placeholder(np.int32, [batch_size], name="labels")

    with ops.device("/device:IPU:0"):

      def gru_layer(inputs, name):
        # Each layer owns its initial-state variable within its own scope.
        initial_state = _get_variable(
            "initial_state",
            shape=[batch_size, num_channels],
            initializer=init_ops.constant_initializer(0.1, dataType))
        return self._GRULayer(inputs=inputs,
                              weights_value=1.,
                              initial_state=initial_state,
                              training=True,
                              name=name)

      with variable_scope.variable_scope("gru_layer1", use_resource=True):
        logits1 = gru_layer(pinputs1, "layer1")
      with variable_scope.variable_scope("gru_layer2", use_resource=True):
        logits2 = gru_layer(pinputs2, "layer2")

      # Combine both layers so a single loss drives gradients through each.
      logits = (math_ops.reduce_mean(logits1, axis=0) +
                math_ops.reduce_mean(logits2, axis=0))
      softmax = nn.sparse_softmax_cross_entropy_with_logits_v2(
          logits=logits, labels=array_ops.stop_gradient(plabels))
      loss = math_ops.reduce_mean(softmax)
      train = gradient_descent.GradientDescentOptimizer(0.01).minimize(loss)

    report = ReportJSON(self, sess)
    sess.run(variables.global_variables_initializer())
    report.reset()

    feed = {
        pinputs1: _createGRUInput(0.5, batch_size, seq_len, input_size),
        pinputs2: _createGRUInput(1.5, batch_size, seq_len * 2, input_size),
        plabels: np.ones(shape=[batch_size], dtype=np.int32),
    }
    sess.run([loss, train], feed)

    report.parse_log()
    report.assert_compute_sets_matches(
        '*BasicGruCell/ProcessUnits/Weight/Conv*/Convolve', 4,
        "There should be four fwd GRUs")
    report.assert_compute_sets_matches('*/MulOGate/Op/Multiply', 2,
                                       "There should be two bwd GRUs")
def testSameFunctions(self):
  """Structurally identical pure functions should share one cached instance.

  f_1 and f_2 have different names but identical bodies, so the compiler
  should keep only one compute set (under f_1's name) and reuse it for
  every call site of both functions.
  """
  # f_1, f_2 are the same
  with self.session() as sess:

    def f_1(x):
      return math_ops.square(x, name="namef1")

    def f_2(x):
      return math_ops.square(x, name="namef2")

    def f_cond(x1):
      cond_1 = control_flow_ops.cond(math_ops.less(1, 0), lambda: f_1(x1),
                                     lambda: f_1(x1))
      cond_2 = control_flow_ops.cond(math_ops.less(1, 0), lambda: f_2(x1),
                                     lambda: f_2(x1))
      return cond_1 + cond_2

    with ops.device('cpu'):
      x1 = array_ops.placeholder(dtypes.int32, [2, 2])

    with ipu.scopes.ipu_scope("/device:IPU:0"):
      r1 = ipu.ipu_compiler.compile(f_cond, inputs=[x1])

    i_x1 = np.full((2, 2), 10)

    report = ReportJSON(self, sess)
    sess.run(r1, {x1: i_x1})

    report.parse_log()
    # One instance of f_1, zero of f_2: f_2 was deduplicated onto f_1.
    report.assert_compute_sets_matches(
        '*namef1*', 1, "There should be only one f_1 due to caching.")
    report.assert_compute_sets_matches(
        '*namef2*', 0,
        "There should not be f_2, as it is the same as f_1, due to caching.")