def testArcCosineFeatureMap(self, batch_size, input_dims, output_dims, s,
                            scale):
    """End-to-end check of the ArcCosineFeatureMap layer.

    Verifies the output schema, the fill ops emitted into the init net,
    the degree-dependent op sequence in the train/eval/predict nets, and
    the numerical output against a NumPy reference (via
    self._arc_cosine_hypothesis_test).
    """
    X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
    input_record = self.new_record(
        schema.Scalar((np.float32, (input_dims, ))))
    schema.FeedRecord(input_record, [X])
    input_blob = input_record.field_blobs()[0]
    ac_output = self.model.ArcCosineFeatureMap(input_record,
                                               output_dims,
                                               s=s,
                                               scale=scale)
    self.model.output_schema = schema.Struct()
    self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                     ac_output)

    # The layer adds a GaussianFill (random weight) and a UniformFill
    # (random bias) to the init net.
    init_ops_list = [
        OpSpec("GaussianFill", None, None),
        OpSpec("UniformFill", None, None),
    ]
    train_init_net, train_net = self.get_training_nets()

    # Init net assertions
    init_ops = self._test_net(train_init_net, init_ops_list)
    workspace.RunNetOnce(self.model.param_init_net)
    W = workspace.FetchBlob(self.model.layers[0].random_w)
    b = workspace.FetchBlob(self.model.layers[0].random_b)

    # Operation specifications
    fc_spec = OpSpec("FC", [input_blob,
                            init_ops[0].output[0],
                            init_ops[1].output[0]],
                     None)
    gt_spec = OpSpec("GT", None, None, {'broadcast': 1})
    cast_spec = OpSpec("Cast", None, ac_output.field_blobs())
    relu_spec = OpSpec("Relu", None, None)
    relu_spec_output = OpSpec("Relu", None, ac_output.field_blobs())
    pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
    mul_spec = OpSpec("Mul", None, ac_output.field_blobs())

    # Expected op sequence depends on the degree parameter s:
    #   s == 0: step function (GT + Cast)
    #   s == 1: plain ReLU
    #   otherwise: ReLU gated by x^(s-1) (Relu, Pow, Mul)
    if s == 0:
        ops_list = [
            fc_spec,
            gt_spec,
            cast_spec,
        ]
    elif s == 1:
        ops_list = [
            fc_spec,
            relu_spec_output,
        ]
    else:
        ops_list = [
            fc_spec,
            relu_spec,
            pow_spec,
            mul_spec,
        ]

    # Train net assertions
    self._test_net(train_net, ops_list)
    self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

    # Eval net assertions
    eval_net = self.get_eval_net()
    self._test_net(eval_net, ops_list)
    self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

    # Predict net assertions
    predict_net = self.get_predict_net()
    self._test_net(predict_net, ops_list)
    self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
def testSamplingTrain(self):
    """Checks the SamplingTrain wrapper around an FC prediction layer.

    At training time the wrapped FC must operate on parameter rows
    gathered by `indices` and subtract log(sampling_prob) from its
    output; the predict net must use the full (ungathered) parameters
    with no sampling correction.
    """
    output_dims = 1000

    indices = self.new_record(schema.Scalar((np.int32, (10, ))))
    sampling_prob = self.new_record(schema.Scalar((np.float32, (10, ))))

    sampled_fc = self.model.SamplingTrain(
        schema.Struct(
            ('input', self.model.input_feature_schema.float_features),
            ('indices', indices),
            ('sampling_prob', sampling_prob),
        ),
        "FC",
        output_dims,
    )

    # Check that we don't add prediction layer into the model
    self.assertEqual(1, len(self.model.layers))
    self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                     sampled_fc)

    train_init_net, train_net = self.get_training_nets()

    # Weight and bias are both initialized with UniformFill.
    init_ops = self.assertNetContainOps(train_init_net, [
        OpSpec("UniformFill", None, None),
        OpSpec("UniformFill", None, None),
    ])

    sampled_fc_layer = self.model.layers[0]

    # Train net must gather the sampled rows of W and b ...
    gather_w_spec = OpSpec("Gather", [
        init_ops[0].output[0],
        indices(),
    ], [sampled_fc_layer._prediction_layer.train_param_blobs[0]])
    gather_b_spec = OpSpec("Gather", [
        init_ops[1].output[0],
        indices(),
    ], [sampled_fc_layer._prediction_layer.train_param_blobs[1]])
    # ... run FC on the gathered parameters ...
    train_fc_spec = OpSpec("FC", [
        self.model.input_feature_schema.float_features(),
    ] + sampled_fc_layer._prediction_layer.train_param_blobs,
        sampled_fc.field_blobs())
    # ... and correct the output by subtracting log(sampling_prob).
    log_spec = OpSpec("Log", [sampling_prob()], [None])
    sub_spec = OpSpec("Sub", [sampled_fc.field_blobs()[0], None],
                      sampled_fc.field_blobs())

    train_ops = self.assertNetContainOps(
        train_net,
        [gather_w_spec, gather_b_spec, train_fc_spec, log_spec, sub_spec])

    # The Log output must feed the second input of Sub.
    self.assertEqual(train_ops[3].output[0], train_ops[4].input[1])

    predict_net = self.get_predict_net()
    # Prediction runs a plain FC on the full parameter blobs.
    self.assertNetContainOps(predict_net, [
        OpSpec("FC", [
            self.model.input_feature_schema.float_features(),
            init_ops[0].output[0],
            init_ops[1].output[0],
        ], sampled_fc.field_blobs())
    ])
def testSemiRandomFeatures(self, batch_size, input_dims, output_dims, s,
                           scale, set_weight_as_global_constant,
                           use_struct_input):
    """End-to-end check of the SemiRandomFeatures layer (struct output).

    Verifies the (full, random) output schema, the fill ops emitted into
    the init net (skipped for global-constant weights), the degree-
    dependent op sequence in train/eval/predict nets, and the numerical
    output against a NumPy reference computed in the nested helper.
    """
    def _semi_random_hypothesis_test(srf_output, X_full, X_random,
                                     rand_w, rand_b, s):
        """
        Runs hypothesis test for Semi Random Features layer.

        Inputs:
            srf_output -- output of net after running semi random
                          features layer
            X_full -- full input data
            X_random -- random-output input data
            rand_w -- random-initialized weight parameter from
                      train_init_net
            rand_b -- random-initialized bias parameter from
                      train_init_net
            s -- degree parameter
        """
        # Get output from net
        net_output = workspace.FetchBlob(srf_output)

        # Fetch learned parameter blobs
        learned_w = workspace.FetchBlob(self.model.layers[0].learned_w)
        learned_b = workspace.FetchBlob(self.model.layers[0].learned_b)

        # Computing output directly
        x_rand = np.matmul(X_random, np.transpose(rand_w)) + rand_b
        x_learn = np.matmul(X_full, np.transpose(learned_w)) + learned_b
        x_pow = np.power(x_rand, s)
        # Gate: step function for s > 0, x / (1 + x) on the positive
        # part for s == 0.
        if s > 0:
            h_rand_features = np.piecewise(x_rand,
                                           [x_rand <= 0, x_rand > 0],
                                           [0, 1])
        else:
            h_rand_features = np.piecewise(x_rand,
                                           [x_rand <= 0, x_rand > 0],
                                           [0, lambda x: x / (1 + x)])
        output_ref = np.multiply(np.multiply(x_pow, h_rand_features),
                                 x_learn)

        # Comparing net output and computed output
        npt.assert_allclose(net_output, output_ref, rtol=1e-3, atol=1e-3)

    X_full = np.random.normal(size=(batch_size, input_dims)).astype(
        np.float32)
    if use_struct_input:
        # Separate blobs feed the random and the learned branch.
        X_random = np.random.normal(size=(batch_size, input_dims)).astype(
            np.float32)
        input_data = [X_full, X_random]
        input_record = self.new_record(
            schema.Struct(
                ('full', schema.Scalar((np.float32, (input_dims, )))),
                ('random', schema.Scalar((np.float32, (input_dims, ))))))
    else:
        # Scalar input: both branches read the same blob.
        X_random = X_full
        input_data = [X_full]
        input_record = self.new_record(
            schema.Scalar((np.float32, (input_dims, ))))

    schema.FeedRecord(input_record, input_data)

    srf_output = self.model.SemiRandomFeatures(
        input_record,
        output_dims,
        s=s,
        scale_random=scale,
        scale_learned=scale,
        set_weight_as_global_constant=set_weight_as_global_constant)

    self.model.output_schema = schema.Struct()

    self.assertEqual(
        schema.Struct(
            ('full', schema.Scalar((np.float32, (output_dims, )))),
            ('random', schema.Scalar((np.float32, (output_dims, ))))),
        srf_output)

    # Random FC params (Gaussian W, uniform b) then learned FC params.
    init_ops_list = [
        OpSpec("GaussianFill", None, None),
        OpSpec("UniformFill", None, None),
        OpSpec("GaussianFill", None, None),
        OpSpec("UniformFill", None, None),
    ]
    train_init_net, train_net = self.get_training_nets()
    # Need to run to initialize the global constants for layer
    workspace.RunNetOnce(self.model.create_init_net(name='init_net'))

    if set_weight_as_global_constant:
        # If weight params are global constants, they won't be in
        # train_init_net
        init_ops = self._test_net(train_init_net, init_ops_list[:2])
        rand_w = workspace.FetchBlob(
            self.model.
            global_constants['semi_random_features_fixed_rand_W'])
        rand_b = workspace.FetchBlob(
            self.model.
            global_constants['semi_random_features_fixed_rand_b'])

        # Operation specifications
        fc_random_spec = OpSpec("FC", [None, None, None], None)
        fc_learned_spec = OpSpec(
            "FC", [None, init_ops[0].output[0], init_ops[1].output[0]],
            None)
    else:
        init_ops = self._test_net(train_init_net, init_ops_list)
        rand_w = workspace.FetchBlob(self.model.layers[0].random_w)
        rand_b = workspace.FetchBlob(self.model.layers[0].random_b)

        # Operation specifications
        fc_random_spec = OpSpec(
            "FC", [None, init_ops[0].output[0], init_ops[1].output[0]],
            None)
        fc_learned_spec = OpSpec(
            "FC", [None, init_ops[2].output[0], init_ops[3].output[0]],
            None)

    softsign_spec = OpSpec("Softsign", None, None)
    relu_spec = OpSpec("Relu", None, None)
    relu_output_spec = OpSpec("Relu", None, srf_output.random.field_blobs())
    pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
    mul_interim_spec = OpSpec("Mul", None, srf_output.random.field_blobs())
    mul_spec = OpSpec("Mul", None, srf_output.full.field_blobs())

    # Expected op sequence depends on the degree parameter s.
    if s == 0:
        ops_list = [
            fc_learned_spec,
            fc_random_spec,
            softsign_spec,
            relu_output_spec,
            mul_spec,
        ]
    elif s == 1:
        ops_list = [
            fc_learned_spec,
            fc_random_spec,
            relu_output_spec,
            mul_spec,
        ]
    else:
        ops_list = [
            fc_learned_spec,
            fc_random_spec,
            relu_spec,
            pow_spec,
            mul_interim_spec,
            mul_spec,
        ]

    # Train net assertions
    self._test_net(train_net, ops_list)
    _semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
                                 rand_w, rand_b, s)

    # Eval net assertions
    eval_net = self.get_eval_net()
    self._test_net(eval_net, ops_list)
    _semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
                                 rand_w, rand_b, s)

    # Predict net assertions
    predict_net = self.get_predict_net()
    self._test_net(predict_net, ops_list)
    _semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
                                 rand_w, rand_b, s)
def testBatchNormalization(self, X):
    """Validates the BatchNormalization layer.

    Checks the output schema, the four ConstantFill init ops, the
    SpatialBN op in training mode (is_test=0, updates running stats
    in place) versus test mode (is_test=1) in eval/predict nets, and
    finally smoke-runs every net.
    """
    input_record = self.new_record(schema.Scalar((np.float32, (5, ))))
    schema.FeedRecord(input_record, [X])
    bn_output = self.model.BatchNormalization(input_record)
    self.assertEqual(schema.Scalar((np.float32, (5, ))), bn_output)
    self.model.output_schema = schema.Struct()

    train_init_net, train_net = self.get_training_nets()

    # Four ConstantFill outputs: scale, bias, and the two running stats.
    fill_ops = self.assertNetContainOps(
        train_init_net,
        [OpSpec("ConstantFill", None, None) for _ in range(4)])

    x_blob = input_record.field_blobs()[0]
    y_blob = bn_output.field_blobs()[0]

    # SpatialBN consumes the input plus all four filled parameter blobs.
    bn_inputs = [x_blob] + [op.output[0] for op in fill_ops]
    common_attrs = {'order': 'NCHW', 'momentum': 0.9}

    expand_spec = OpSpec("ExpandDims", [x_blob], [x_blob])
    squeeze_spec = OpSpec("Squeeze", [y_blob], [y_blob])
    bn_train_spec = OpSpec(
        "SpatialBN",
        bn_inputs,
        # Training mode also rewrites the running mean/var blobs.
        [y_blob, fill_ops[2].output[0], fill_ops[3].output[0], None, None],
        dict(common_attrs, is_test=0),
    )
    bn_test_spec = OpSpec(
        "SpatialBN",
        bn_inputs,
        [y_blob],
        dict(common_attrs, is_test=1),
    )

    self.assertNetContainOps(
        train_net, [expand_spec, bn_train_spec, squeeze_spec])

    eval_net = self.get_eval_net()
    self.assertNetContainOps(
        eval_net, [expand_spec, bn_test_spec, squeeze_spec])

    predict_net = self.get_predict_net()
    self.assertNetContainOps(
        predict_net, [expand_spec, bn_test_spec, squeeze_spec])

    # Smoke-run every net; the input is re-fed before eval/predict
    # because ExpandDims/Squeeze rewrite the input blob in place.
    workspace.RunNetOnce(train_init_net)
    workspace.RunNetOnce(train_net)

    schema.FeedRecord(input_record, [X])
    workspace.RunNetOnce(eval_net)

    schema.FeedRecord(input_record, [X])
    workspace.RunNetOnce(predict_net)
def testArcCosineFeatureMap(self, batch_size, input_dims, output_dims, s,
                            scale, set_weight_as_global_constant):
    """End-to-end check of ArcCosineFeatureMap with optionally fixed
    (global-constant) random parameters.

    Verifies the output schema, fetches W/b either from the model's
    global constants or from the layer's own init blobs, asserts the
    degree-dependent op sequence in train/eval/predict nets, and
    compares the numerical output against a NumPy reference.
    """
    def _arc_cosine_hypothesis_test(ac_output, X, W, b, s):
        """
        Runs hypothesis test for Arc Cosine layer.

        Inputs:
            ac_output -- output of net after running arc cosine layer
            X -- input data
            W -- weight parameter from train_init_net
            b -- bias parameter from train_init_net
            s -- degree parameter
        """
        # Get output from net
        net_output = workspace.FetchBlob(ac_output)

        # Computing output directly
        x_rand = np.matmul(X, np.transpose(W)) + b
        x_pow = np.power(x_rand, s)
        # Gate: step function for s > 0, x / (1 + x) on the positive
        # part for s == 0.
        if s > 0:
            h_rand_features = np.piecewise(x_rand,
                                           [x_rand <= 0, x_rand > 0],
                                           [0, 1])
        else:
            h_rand_features = np.piecewise(x_rand,
                                           [x_rand <= 0, x_rand > 0],
                                           [0, lambda x: x / (1 + x)])
        output_ref = np.multiply(x_pow, h_rand_features)

        # Comparing net output and computed output
        npt.assert_allclose(net_output, output_ref, rtol=1e-3, atol=1e-3)

    X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
    input_record = self.new_record(
        schema.Scalar((np.float32, (input_dims, ))))
    schema.FeedRecord(input_record, [X])
    input_blob = input_record.field_blobs()[0]

    ac_output = self.model.ArcCosineFeatureMap(
        input_record,
        output_dims,
        s=s,
        scale=scale,
        set_weight_as_global_constant=set_weight_as_global_constant)
    self.model.output_schema = schema.Struct()
    self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                     ac_output)

    train_init_net, train_net = self.get_training_nets()

    # Run create_init_net to initialize the global constants, and W and b
    workspace.RunNetOnce(train_init_net)
    workspace.RunNetOnce(self.model.create_init_net(name='init_net'))

    if set_weight_as_global_constant:
        # Fixed random parameters live in the model's global constants.
        W = workspace.FetchBlob(
            self.model.
            global_constants['arc_cosine_feature_map_fixed_rand_W'])
        b = workspace.FetchBlob(
            self.model.
            global_constants['arc_cosine_feature_map_fixed_rand_b'])
    else:
        W = workspace.FetchBlob(self.model.layers[0].random_w)
        b = workspace.FetchBlob(self.model.layers[0].random_b)

    # Operation specifications
    fc_spec = OpSpec("FC", [input_blob, None, None], None)
    softsign_spec = OpSpec("Softsign", None, None)
    relu_spec = OpSpec("Relu", None, None)
    relu_spec_output = OpSpec("Relu", None, ac_output.field_blobs())
    pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
    mul_spec = OpSpec("Mul", None, ac_output.field_blobs())

    # Expected op sequence depends on the degree parameter s.
    if s == 0:
        ops_list = [
            fc_spec,
            softsign_spec,
            relu_spec_output,
        ]
    elif s == 1:
        ops_list = [
            fc_spec,
            relu_spec_output,
        ]
    else:
        ops_list = [
            fc_spec,
            relu_spec,
            pow_spec,
            mul_spec,
        ]

    # Train net assertions
    self._test_net(train_net, ops_list)
    _arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

    # Eval net assertions
    eval_net = self.get_eval_net()
    self._test_net(eval_net, ops_list)
    _arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

    # Predict net assertions
    predict_net = self.get_predict_net()
    self._test_net(predict_net, ops_list)
    _arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
def testRandomFourierFeatures(self, batch_size, input_dims, output_dims,
                              bandwidth):
    """End-to-end check of the RandomFourierFeatures layer (FC variant).

    Verifies the output schema, the init-net fill ops, the FC/Cos/Scale
    op sequence in train/eval/predict nets, and the numerical output
    scale * cos(X W^T + b) against a NumPy reference.
    """
    def _rff_hypothesis_test(rff_output, X, W, b, scale):
        """
        Runs hypothesis test for the Random Fourier Features layer.

        Inputs:
            rff_output -- output of net after running random fourier
                          features layer
            X -- input data
            W -- weight parameter from train_init_net
            b -- bias parameter from train_init_net
            scale -- value by which to scale the output vector
        """
        output = workspace.FetchBlob(rff_output)
        output_ref = scale * np.cos(np.dot(X, np.transpose(W)) + b)
        npt.assert_allclose(output, output_ref, rtol=1e-3, atol=1e-3)

    X = np.random.random((batch_size, input_dims)).astype(np.float32)
    scale = np.sqrt(2.0 / output_dims)
    input_record = self.new_record(
        schema.Scalar((np.float32, (input_dims, ))))
    schema.FeedRecord(input_record, [X])
    input_blob = input_record.field_blobs()[0]
    rff_output = self.model.RandomFourierFeatures(input_record,
                                                  output_dims,
                                                  bandwidth)
    self.model.output_schema = schema.Struct()
    self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                     rff_output)

    train_init_net, train_net = self.get_training_nets()

    # Init net assertions
    init_ops_list = [
        OpSpec("GaussianFill", None, None),
        OpSpec("UniformFill", None, None),
    ]
    init_ops = self._test_net(train_init_net, init_ops_list)
    W = workspace.FetchBlob(self.model.layers[0].w)
    b = workspace.FetchBlob(self.model.layers[0].b)

    # Operation specifications
    fc_spec = OpSpec("FC", [input_blob,
                            init_ops[0].output[0],
                            init_ops[1].output[0]],
                     None)
    cosine_spec = OpSpec("Cos", None, None)
    scale_spec = OpSpec("Scale", None, rff_output.field_blobs(),
                        {'scale': scale})
    ops_list = [fc_spec, cosine_spec, scale_spec]

    # Train net assertions
    self._test_net(train_net, ops_list)
    _rff_hypothesis_test(rff_output(), X, W, b, scale)

    # Eval net assertions
    eval_net = self.get_eval_net()
    self._test_net(eval_net, ops_list)
    _rff_hypothesis_test(rff_output(), X, W, b, scale)

    # Predict net assertions
    predict_net = self.get_predict_net()
    self._test_net(predict_net, ops_list)
    _rff_hypothesis_test(rff_output(), X, W, b, scale)
def testRandomFourierFeatures(self, batch_size, input_dims, output_dims,
                              bandwidth):
    """Validates the RandomFourierFeatures layer (MatMul variant).

    Checks the output schema, the GaussianFill/UniformFill init ops,
    the MatMul/Add/Cos/Scale op sequence in every net, and that each
    net's output equals scale * cos(X.W + b).
    """
    X = np.random.random((batch_size, input_dims)).astype(np.float32)
    scale = np.sqrt(2.0 / output_dims)

    input_record = self.new_record(
        schema.Scalar((np.float32, (input_dims, ))))
    schema.FeedRecord(input_record, [X])
    input_blob = input_record.field_blobs()[0]

    rff_output = self.model.RandomFourierFeatures(
        input_record, output_dims, bandwidth)
    self.model.output_schema = schema.Struct()
    self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                     rff_output)

    train_init_net, train_net = self.get_training_nets()

    # Init net: Gaussian weights, uniform bias.
    init_ops = self.assertNetContainOps(train_init_net, [
        OpSpec("GaussianFill", None, None),
        OpSpec("UniformFill", None, None),
    ])

    # The same op sequence is expected in train, eval and predict nets.
    expected_ops = [
        OpSpec("MatMul", [input_blob, init_ops[0].output[0]], None),
        OpSpec("Add", [None, init_ops[1].output[0]], None,
               {'broadcast': 1, 'axis': 1}),
        OpSpec("Cos", None, None),
        OpSpec("Scale", None, rff_output.field_blobs(), {'scale': scale}),
    ]

    # Train net assertions
    self.assertNetContainOps(train_net, expected_ops)
    workspace.RunNetOnce(train_init_net)
    W = workspace.FetchBlob(self.model.layers[0].w)
    b = workspace.FetchBlob(self.model.layers[0].b)
    reference = scale * np.cos(np.dot(X, W) + b)

    def _check_output():
        # Compare the layer's current output blob to the NumPy reference.
        npt.assert_almost_equal(workspace.FetchBlob(rff_output()),
                                reference)

    workspace.RunNetOnce(train_net)
    _check_output()

    # Eval net assertions
    eval_net = self.get_eval_net()
    self.assertNetContainOps(eval_net, expected_ops)
    schema.FeedRecord(input_record, [X])
    workspace.RunNetOnce(eval_net)
    _check_output()

    # Predict net assertions
    predict_net = self.get_predict_net()
    self.assertNetContainOps(predict_net, expected_ops)
    schema.FeedRecord(input_record, [X])
    workspace.RunNetOnce(predict_net)
    _check_output()
def testArcCosineFeatureMap(self, batch_size, input_dims, output_dims, s,
                            scale, set_weight_as_global_constant):
    """Checks ArcCosineFeatureMap (GT/Cast variant for s == 0) with
    optionally fixed (global-constant) random parameters.

    Verifies the output schema, the W/b source (global constants vs.
    layer blobs), the degree-dependent op sequence in train/eval/predict
    nets, and the numerical output (via
    self._arc_cosine_hypothesis_test).
    """
    X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
    input_record = self.new_record(
        schema.Scalar((np.float32, (input_dims,))))
    schema.FeedRecord(input_record, [X])
    input_blob = input_record.field_blobs()[0]
    ac_output = self.model.ArcCosineFeatureMap(
        input_record,
        output_dims,
        s=s,
        scale=scale,
        set_weight_as_global_constant=set_weight_as_global_constant
    )
    self.model.output_schema = schema.Struct()
    self.assertEqual(
        schema.Scalar((np.float32, (output_dims, ))),
        ac_output
    )
    train_init_net, train_net = self.get_training_nets()

    # Run create_init_net to initialize the global constants, and W and b
    workspace.RunNetOnce(train_init_net)
    workspace.RunNetOnce(self.model.create_init_net(name='init_net'))

    if set_weight_as_global_constant:
        # Fixed random parameters come from the model's global constants.
        W = workspace.FetchBlob(
            self.model.global_constants[
                'arc_cosine_feature_map_fixed_rand_W']
        )
        b = workspace.FetchBlob(
            self.model.global_constants[
                'arc_cosine_feature_map_fixed_rand_b']
        )
    else:
        W = workspace.FetchBlob(self.model.layers[0].random_w)
        b = workspace.FetchBlob(self.model.layers[0].random_b)

    # Operation specifications
    fc_spec = OpSpec("FC", [input_blob, None, None], None)
    gt_spec = OpSpec("GT", None, None, {'broadcast': 1})
    cast_spec = OpSpec("Cast", None, ac_output.field_blobs())
    relu_spec = OpSpec("Relu", None, None)
    relu_spec_output = OpSpec("Relu", None, ac_output.field_blobs())
    pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
    mul_spec = OpSpec("Mul", None, ac_output.field_blobs())

    # Expected op sequence depends on the degree parameter s:
    #   s == 0: step function (GT + Cast)
    #   s == 1: plain ReLU
    #   otherwise: ReLU gated by x^(s-1) (Relu, Pow, Mul)
    if s == 0:
        ops_list = [
            fc_spec,
            gt_spec,
            cast_spec,
        ]
    elif s == 1:
        ops_list = [
            fc_spec,
            relu_spec_output,
        ]
    else:
        ops_list = [
            fc_spec,
            relu_spec,
            pow_spec,
            mul_spec,
        ]

    # Train net assertions
    self._test_net(train_net, ops_list)
    self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

    # Eval net assertions
    eval_net = self.get_eval_net()
    self._test_net(eval_net, ops_list)
    self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

    # Predict net assertions
    predict_net = self.get_predict_net()
    self._test_net(predict_net, ops_list)
    self._arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
def testSemiRandomFeatures(self, batch_size, input_dims, output_dims, s,
                           scale, set_weight_as_global_constant):
    """Checks the (scalar-output) SemiRandomFeatures layer.

    Verifies the output schema, the init-net fill ops (only two remain
    when weights are global constants), the degree-dependent op sequence
    in train/eval/predict nets, and the numerical output (via
    self._semi_random_hypothesis_test).
    """
    X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
    input_record = self.new_record(
        schema.Scalar((np.float32, (input_dims,))))
    schema.FeedRecord(input_record, [X])
    input_blob = input_record.field_blobs()[0]
    srf_output = self.model.SemiRandomFeatures(
        input_record,
        output_dims,
        s=s,
        scale=scale,
        set_weight_as_global_constant=set_weight_as_global_constant
    )
    self.model.output_schema = schema.Struct()
    self.assertEqual(
        schema.Scalar((np.float32, (output_dims, ))),
        srf_output
    )

    # Random FC params (Gaussian W, uniform b) then learned FC params.
    init_ops_list = [
        OpSpec("GaussianFill", None, None),
        OpSpec("UniformFill", None, None),
        OpSpec("GaussianFill", None, None),
        OpSpec("UniformFill", None, None),
    ]
    train_init_net, train_net = self.get_training_nets()
    # Need to run to initialize the global constants for layer
    workspace.RunNetOnce(self.model.create_init_net(name='init_net'))

    if set_weight_as_global_constant:
        # If weight params are global constants, they won't be in
        # train_init_net
        init_ops = self._test_net(train_init_net, init_ops_list[:2])
        rand_w = workspace.FetchBlob(
            self.model.global_constants[
                'semi_random_features_fixed_rand_W']
        )
        rand_b = workspace.FetchBlob(
            self.model.global_constants[
                'semi_random_features_fixed_rand_b']
        )
        # Operation specifications
        fc_random_spec = OpSpec("FC", [input_blob, None, None], None)
        fc_learned_spec = OpSpec("FC", [input_blob,
                                        init_ops[0].output[0],
                                        init_ops[1].output[0]],
                                 None)
    else:
        init_ops = self._test_net(train_init_net, init_ops_list)
        rand_w = workspace.FetchBlob(self.model.layers[0].random_w)
        rand_b = workspace.FetchBlob(self.model.layers[0].random_b)
        # Operation specifications
        fc_random_spec = OpSpec("FC", [input_blob,
                                       init_ops[0].output[0],
                                       init_ops[1].output[0]],
                                None)
        fc_learned_spec = OpSpec("FC", [input_blob,
                                        init_ops[2].output[0],
                                        init_ops[3].output[0]],
                                 None)

    gt_spec = OpSpec("GT", None, None)
    cast_spec = OpSpec("Cast", None, None)
    relu_spec = OpSpec("Relu", None, None)
    pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
    mul_interim_spec = OpSpec("Mul", None, None)
    mul_spec = OpSpec("Mul", None, srf_output.field_blobs())

    # Expected op sequence depends on the degree parameter s.
    if s == 0:
        ops_list = [
            fc_random_spec,
            fc_learned_spec,
            gt_spec,
            cast_spec,
            mul_spec,
        ]
    elif s == 1:
        ops_list = [
            fc_random_spec,
            fc_learned_spec,
            relu_spec,
            mul_spec,
        ]
    else:
        ops_list = [
            fc_random_spec,
            fc_learned_spec,
            relu_spec,
            pow_spec,
            mul_interim_spec,
            mul_spec,
        ]

    # Train net assertions
    self._test_net(train_net, ops_list)
    self._semi_random_hypothesis_test(srf_output(), X, rand_w, rand_b, s)

    # Eval net assertions
    eval_net = self.get_eval_net()
    self._test_net(eval_net, ops_list)
    self._semi_random_hypothesis_test(srf_output(), X, rand_w, rand_b, s)

    # Predict net assertions
    predict_net = self.get_predict_net()
    self._test_net(predict_net, ops_list)
    self._semi_random_hypothesis_test(srf_output(), X, rand_w, rand_b, s)