def test_penalty_update(self):
    """Checks 'update_penalty_elastic_error' against a numpy reference,
    first with OnnxInference, then with onnxruntime."""
    data = numpy.random.randn(10, 1).astype(numpy.float32)

    def penalty(v):
        return numpy.sign(v) * 0.1 + (v * 0.9 * 2)

    expected = data - penalty(data)
    onx = function_onnx_graph(
        'update_penalty_elastic_error', target_opset=get_max_opset(),
        dtype=numpy.float32, l1=0.1, l2=0.9)
    # python runtime
    res = OnnxInference(onx).run({'X': data})
    self.assertEqualArray(expected, res['Y'], decimal=5)
    # onnxruntime
    opts = SessionOptions()
    opts.log_severity_level = 4
    sess = InferenceSession(onx.SerializeToString(), opts,
                            providers=device_to_providers('cpu'))
    res2 = sess.run(None, {'X': data})
    self.assertEqualArray(expected, res2[0], decimal=5)
def build_onnx_function(self, opset, device, n_tensors):
    """Creates the ONNX graphs and InferenceSessions needed to compute
    the elastic penalty added to the loss and the penalized weight
    update, plus one io_binding per weight tensor."""
    sess_options = SessionOptions()
    sess_options.log_severity_level = 4
    providers = device_to_providers(device)
    # penalty added to the loss
    self.penalty_onnx_ = function_onnx_graph(
        "n_penalty_elastic_error", target_opset=opset,
        n_tensors=n_tensors, loss_shape=None,
        l1_weight=self.l1, l2_weight=self.l2)
    self.penalty_sess_ = InferenceSession(
        self.penalty_onnx_.SerializeToString(), sess_options,
        providers=providers)
    self.penalty_sess_bind_ = self.penalty_sess_.io_binding()._iobinding
    self.names_ = [inp.name for inp in self.penalty_onnx_.graph.input]
    # penalty applied to every weight tensor
    self.penalty_grad_onnx_ = function_onnx_graph(
        "update_penalty_elastic_error", target_opset=opset,
        l1=self.l1, l2=self.l2)
    self.penalty_grad_sess_ = InferenceSession(
        self.penalty_grad_onnx_.SerializeToString(), sess_options,
        providers=providers)
    self.penalty_grad_sess_binds_ = [
        self.penalty_grad_sess_.io_binding()._iobinding
        for _ in range(n_tensors)]
def create_onnxruntime_session(onnx_model_path, use_gpu, verbose):
    """
    Creates an onnxruntime InferenceSession for *onnx_model_path*.

    :param onnx_model_path: path to the ONNX model to load
    :param use_gpu: if True, try CUDA first with CPU fallback
    :param verbose: if True, lowers onnxruntime log severity to 0 (verbose)
    :return: an InferenceSession or None if the creation failed
    """
    session = None
    try:
        from onnxruntime import SessionOptions, InferenceSession

        sess_options = SessionOptions()
        if not use_gpu:
            # use all logical cores for intra-op parallelism on CPU
            sess_options.intra_op_num_threads = psutil.cpu_count(logical=True)
            logger.debug(
                f"Session option: intra_op_num_threads="
                f"{sess_options.intra_op_num_threads}")
        if verbose:
            sess_options.log_severity_level = 0
        logger.debug(f"Create session for onnx model: {onnx_model_path}")
        execution_providers = ['CPUExecutionProvider'] if not use_gpu else [
            'CUDAExecutionProvider', 'CPUExecutionProvider']
        session = InferenceSession(onnx_model_path, sess_options,
                                   providers=execution_providers)
    except Exception:
        # narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt and SystemExit
        logger.error("Exception", exc_info=True)
    return session
def create_onnxruntime_session(onnx_model_path, use_gpu,
                               enable_all_optimization=True,
                               num_threads=-1, verbose=False):
    """
    Creates an onnxruntime InferenceSession.

    :param onnx_model_path: path to the ONNX model to load
    :param use_gpu: if True, use CUDA with CPU fallback
    :param enable_all_optimization: ORT_ENABLE_ALL vs ORT_ENABLE_BASIC
        graph optimization level
    :param num_threads: intra-op thread count; <= 0 keeps the default,
        except for old CPU-only onnxruntime (see below)
    :param verbose: if True, most verbose onnxruntime logging (severity 0)
    :return: an InferenceSession or None if the creation failed
    """
    session = None
    try:
        from onnxruntime import SessionOptions, InferenceSession, \
            GraphOptimizationLevel, __version__ as onnxruntime_version
        sess_options = SessionOptions()
        if enable_all_optimization:
            sess_options.graph_optimization_level = \
                GraphOptimizationLevel.ORT_ENABLE_ALL
        else:
            sess_options.graph_optimization_level = \
                GraphOptimizationLevel.ORT_ENABLE_BASIC
        if num_threads > 0:
            sess_options.intra_op_num_threads = num_threads
            logger.debug(
                f"Session option: intra_op_num_threads="
                f"{sess_options.intra_op_num_threads}")
        elif (not use_gpu) and (version.parse(onnxruntime_version)
                                < version.parse('1.3.0')):
            # Set intra_op_num_threads = 1 to enable OpenMP for
            # onnxruntime 1.2.0 (cpu). onnxruntime-gpu is not built with
            # openmp so it is better to use default (0) or cpu_count.
            sess_options.intra_op_num_threads = 1
        if verbose:
            sess_options.log_severity_level = 0
        logger.debug(f"Create session for onnx model: {onnx_model_path}")
        execution_providers = ['CPUExecutionProvider'] if not use_gpu else \
            ['CUDAExecutionProvider', 'CPUExecutionProvider']
        session = InferenceSession(onnx_model_path, sess_options,
                                   providers=execution_providers)
    except Exception:
        # narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt and SystemExit
        logger.error("Exception", exc_info=True)
    return session
def __setstate__(self, state):
    """
    Restores the object from *state* (counterpart of ``__getstate__``),
    recreating the :epkg:`InferenceSession` objects (and their
    io_bindings) which could not be pickled.
    """
    # First pass: restore plain attributes; 'ro_' gets a fresh
    # RunOptions, serialized onnx/sessions are handled below.
    for k, v in state.items():
        if k == 'ro_':
            self.ro_ = RunOptions()
        elif not k.endswith('_onnx_') and not k.endswith('_sess_'):
            setattr(self, k, v)
    so = SessionOptions()
    so.log_severity_level = 4
    # Second pass: reload every serialized ONNX graph ('*_onnx_') and
    # rebuild its InferenceSession ('*_sess_') with the providers
    # stored under the matching key.
    for k, v in state.items():
        if k.endswith('_onnx_'):
            setattr(self, k, onnx.load(BytesIO(v)))
            k2 = k.replace("onnx", "sess")
            prov = state[k2]
            setattr(
                self, k2,
                InferenceSession(getattr(self, k).SerializeToString(),
                                 so, providers=prov))
    # Third pass: recreate io_bindings from the freshly built sessions.
    # '*_bind_' maps to one binding on '*_' (strip 'bind_', 5 chars);
    # '*_binds_' stores the count of bindings to recreate on '*_'
    # (strip 'binds_', 6 chars).
    for k, v in state.items():
        if k.endswith('_bind_'):
            k2 = k[:-5]
            setattr(self, k, getattr(self, k2).io_binding()._iobinding)
        elif k.endswith('_binds_'):
            k2 = k[:-6]
            n = v
            setattr(self, k, [
                getattr(self, k2).io_binding()._iobinding
                for i in range(n)])
    # caches are transient and always start empty
    self.cache_in_ = {}
    self.cache_out_ = {}
    return self
def test_penalty_3w(self):
    """Penalty over a loss and two weight tensors: numpy reference
    versus OnnxInference and onnxruntime."""
    base_loss = numpy.random.randn(1, 1).astype(numpy.float32)
    t1 = numpy.random.randn(10, 1).astype(numpy.float32)
    t2 = numpy.random.randn(5, 1).astype(numpy.float32)

    def penalty(v):
        return numpy.abs(v).sum() * 0.1 + ((v) ** 2).sum() * 0.9

    expected = (base_loss + penalty(t1) + penalty(t2)).reshape((-1, ))
    onx = function_onnx_graph(
        'n_penalty_elastic_error', target_opset=get_max_opset(),
        dtype=numpy.float32, n_tensors=2, l1_weight=0.1, l2_weight=0.9,
        weight_name='weight')
    feeds = {'loss': base_loss, 'W0': t1, 'W1': t2}
    # python runtime
    res = OnnxInference(onx).run(feeds)
    self.assertEqualArray(expected, res['Y'], decimal=5)
    # onnxruntime
    opts = SessionOptions()
    opts.log_severity_level = 4
    sess = InferenceSession(onx.SerializeToString(), opts,
                            providers=device_to_providers('cpu'))
    self.assertEqualArray(expected, sess.run(None, feeds)[0], decimal=5)
def build_onnx_function(self):
    """
    Creates ONNX graph and *InferenceSession* related to
    any operations applying on *OrtValue*.
    """
    opset = get_onnx_opset(self.model_onnx)
    sess_options = SessionOptions()
    sess_options.log_severity_level = 4
    n_weights = len(self.weights_to_train)
    # loss and its gradient
    self.learning_loss.build_onnx_function(
        opset, self.device, self.weight_name)
    # weight update
    self.learning_rate.build_onnx_function(opset, self.device, n_weights)
    # regularization
    self.learning_penalty.build_onnx_function(opset, self.device, n_weights)
    # graph producing a zero tensor
    self.zero_onnx_ = function_onnx_graph("zero")
    self.zero_sess_ = InferenceSession(
        self.zero_onnx_.SerializeToString(), sess_options,
        providers=device_to_providers(self.device))
    # logging
    self._logger = (logging.getLogger("onnxcustom")
                    if self.enable_logging else None)
def create_ort_session(onnx_model_path, use_gpu=True):
    """Builds an onnxruntime InferenceSession with all graph
    optimizations disabled, two intra-op threads and warning-level
    logging; CUDA with CPU fallback when *use_gpu* is True."""
    from onnxruntime import SessionOptions, InferenceSession, \
        GraphOptimizationLevel, __version__ as onnxruntime_version
    opts = SessionOptions()
    opts.graph_optimization_level = GraphOptimizationLevel.ORT_DISABLE_ALL
    opts.intra_op_num_threads = 2
    opts.log_severity_level = 2
    if use_gpu:
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
    else:
        providers = ['CPUExecutionProvider']
    return InferenceSession(onnx_model_path, opts, providers=providers)
def create_onnxruntime_session(onnx_model_path, use_gpu,
                               enable_all_optimization=True,
                               num_threads=-1, enable_profiling=False,
                               verbose=False, use_dml=False):
    """
    Creates an onnxruntime InferenceSession.

    :param onnx_model_path: path to the ONNX model to load
    :param use_gpu: if True, use a GPU provider with CPU fallback
    :param enable_all_optimization: ORT_ENABLE_ALL vs ORT_ENABLE_BASIC
        graph optimization level
    :param num_threads: intra-op thread count; <= 0 keeps the default
    :param enable_profiling: enables onnxruntime profiling
    :param verbose: severity 0 (verbose) if True, 4 (fatal only) otherwise
    :param use_dml: together with *use_gpu*, picks DirectML instead of CUDA
    :return: an InferenceSession or None if the creation failed
    """
    session = None
    try:
        from onnxruntime import SessionOptions, InferenceSession, \
            GraphOptimizationLevel
        sess_options = SessionOptions()
        if enable_all_optimization:
            sess_options.graph_optimization_level = \
                GraphOptimizationLevel.ORT_ENABLE_ALL
        else:
            sess_options.graph_optimization_level = \
                GraphOptimizationLevel.ORT_ENABLE_BASIC
        if enable_profiling:
            sess_options.enable_profiling = True
        if num_threads > 0:
            sess_options.intra_op_num_threads = num_threads
            logger.debug(
                f"Session option: intra_op_num_threads="
                f"{sess_options.intra_op_num_threads}")
        if verbose:
            sess_options.log_severity_level = 0
        else:
            sess_options.log_severity_level = 4
        logger.debug(f"Create session for onnx model: {onnx_model_path}")
        if use_gpu:
            if use_dml:
                execution_providers = ['DmlExecutionProvider',
                                       'CPUExecutionProvider']
            else:
                execution_providers = ['CUDAExecutionProvider',
                                       'CPUExecutionProvider']
        else:
            execution_providers = ['CPUExecutionProvider']
        session = InferenceSession(onnx_model_path, sess_options,
                                   providers=execution_providers)
    except Exception:
        # narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt and SystemExit; also dropped the unused
        # '__version__' import binding
        logger.error("Exception", exc_info=True)
    return session
def build_onnx_function(self, opset, device, n_tensors):
    """Creates the 'axpy' ONNX graph, its InferenceSession, one
    io_binding per weight tensor, and the array holding coefficient
    alpha with the graph's own element type."""
    sess_options = SessionOptions()
    sess_options.log_severity_level = 4
    self.axpy_onnx_ = function_onnx_graph("axpy")
    self.axpy_sess_ = InferenceSession(
        self.axpy_onnx_.SerializeToString(), sess_options,
        providers=device_to_providers(device))
    self.axpy_sess_binds_ = [
        self.axpy_sess_.io_binding()._iobinding
        for _ in range(n_tensors)]
    # alpha_ shares the element type of the graph's first input
    elem_type = self.axpy_onnx_.graph.input[0].type.tensor_type.elem_type
    self.alpha_ = numpy.array([0], dtype=TENSOR_TYPE_TO_NP_TYPE[elem_type])
def build_onnx_function(self, opset, device, weight_name):
    """Builds the graph and session computing the gradient of the
    absolute error, then delegates to the score function builder."""
    sess_options = SessionOptions()
    sess_options.log_severity_level = 4
    # gradient of the absolute error, possibly weighted
    self.loss_grad_onnx_ = function_onnx_graph(
        "grad_loss_absolute_error", target_opset=opset,
        weight_name=weight_name)
    self.loss_grad_sess_ = InferenceSession(
        self.loss_grad_onnx_.SerializeToString(), sess_options,
        providers=device_to_providers(device))
    self.loss_grad_sess_bind_ = \
        self.loss_grad_sess_.io_binding()._iobinding
    # score
    self.build_onnx_score_function(opset, device, weight_name)
def common_check(self, name, fct, weight_name=None, output_name='Y'):
    """Compares the ONNX graph *name* with its numpy implementation
    *fct* on random data, with OnnxInference then onnxruntime. When
    *weight_name* is given, also checks that the graph falls back on a
    default weight of 1 when the weight input is not fed."""
    onx = function_onnx_graph(name, target_opset=get_max_opset(),
                              dtype=numpy.float32,
                              weight_name=weight_name)
    expected = numpy.random.randn(10, 1).astype(numpy.float32)
    predicted = numpy.random.randn(10, 1).astype(numpy.float32)
    w = numpy.random.rand(10).astype(numpy.float32)
    feeds = {'X1': expected, 'X2': predicted}
    if weight_name is None:
        fin = fct(expected, predicted)
    else:
        fin = fct(expected, predicted, w)
        feeds['weight'] = w
    # python runtime
    oinf = OnnxInference(onx)
    got = oinf.run(feeds)
    self.assertEqualArray(fin, got[output_name], decimal=5)
    if weight_name is not None:
        # missing weight input defaults to 1
        got = oinf.run({'X1': expected, 'X2': predicted})
        fin1 = fct(expected, predicted,
                   numpy.array([1], dtype=expected.dtype))
        self.assertEqualArray(fin1, got[output_name], decimal=5)
    # onnxruntime
    opts = SessionOptions()
    opts.log_severity_level = 4
    sess = InferenceSession(onx.SerializeToString(), opts,
                            providers=device_to_providers('cpu'))
    got = sess.run(None, feeds)
    self.assertEqualArray(fin, got[0], decimal=5)
    if weight_name is not None:
        got = sess.run(None, {'X1': expected, 'X2': predicted})
        fin1 = fct(expected, predicted,
                   numpy.array([1], dtype=expected.dtype))
        self.assertEqualArray(fin1, got[0], decimal=5)
def test_251(self):
    """Compares OnnxInference and onnxruntime on grad_loss_square_error
    when the optional weight input is not fed."""
    onx = function_onnx_graph(
        "grad_loss_square_error", target_opset=get_max_opset(),
        dtype=numpy.float32, weight_name='weight')
    x1 = numpy.random.randn(25, 1).astype(numpy.float32)
    x2 = numpy.random.randn(25, 1).astype(numpy.float32)
    ref = OnnxInference(onx).run({'X1': x1, 'X2': x2})
    opts = SessionOptions()
    opts.log_severity_level = 4
    sess = InferenceSession(onx.SerializeToString(), opts,
                            providers=['CPUExecutionProvider'])
    ort_res = sess.run(None, {'X1': x1, 'X2': x2})
    self.assertEqualArray(ref['Y'], ort_res[0], decimal=5)
    self.assertEqualArray(ref['Y_grad'], ort_res[1])
def common_unary(self, name, fct):
    """Runs graph *name* on a random input and compares the output of
    both runtimes with the numpy implementation *fct*."""
    onx = function_onnx_graph(name, target_opset=get_max_opset(),
                              dtype=numpy.float32)
    data = numpy.random.randn(10, 1).astype(numpy.float32)
    expected = fct(data)
    # python runtime
    res = OnnxInference(onx).run({'X': data})
    self.assertEqualArray(expected, res['Y'], decimal=5)
    # onnxruntime
    opts = SessionOptions()
    opts.log_severity_level = 4
    sess = InferenceSession(onx.SerializeToString(), opts,
                            providers=device_to_providers('cpu'))
    self.assertEqualArray(expected, sess.run(None, {'X': data})[0],
                          decimal=5)
def build_onnx_function(self, opset, device, weight_name):
    """Builds the graph and session computing the gradient of the
    negative log loss, then delegates to the score function builder."""
    sess_options = SessionOptions()
    sess_options.log_severity_level = 4
    # gradient of the negative log loss, possibly weighted
    fct_name = f"grad_{self.probability_function}_neg_log_loss_error"
    self.loss_grad_onnx_ = function_onnx_graph(
        fct_name, target_opset=opset, weight_name=weight_name,
        eps=self.eps)
    self.loss_grad_sess_ = InferenceSession(
        self.loss_grad_onnx_.SerializeToString(), sess_options,
        providers=device_to_providers(device))
    self.loss_grad_sess_bind_ = \
        self.loss_grad_sess_.io_binding()._iobinding
    # score
    self.build_onnx_score_function(opset, device, weight_name)
def build_onnx_score_function(self, opset, device, weight_name):
    """
    Assuming the loss function was created. This one takes the onnx graph
    and generate the onnx graph for the method `loss_score`.
    """
    if not hasattr(self, 'loss_grad_onnx_'):
        raise RuntimeError(  # pragma: no cover
            "Missing attribute 'loss_grad_onnx_'. "
            "Method 'build_onnx_function' should be called first.")
    # derive the unreduced (per-observation) score graph from the
    # loss gradient graph
    sess_options = SessionOptions()
    sess_options.log_severity_level = 4
    self.loss_score_onnx_ = unreduced_onnx_loss(  # pylint: disable=E1101
        self.loss_grad_onnx_, 'Y')
    self.loss_score_sess_ = InferenceSession(
        self.loss_score_onnx_.SerializeToString(), sess_options,
        providers=device_to_providers(device))
    self.loss_score_sess_bind_ = \
        self.loss_score_sess_.io_binding()._iobinding
def common_check_1(self, name, fct, weight_name=None, **kwargs):
    """Checks a graph returning a loss and its gradient against the
    numpy implementation *fct*, with both runtimes."""
    onx = function_onnx_graph(name, target_opset=get_max_opset(),
                              dtype=numpy.float32,
                              weight_name=weight_name, **kwargs)
    data = numpy.random.randn(10, 1).astype(numpy.float32)
    exp_loss, exp_grad = fct(data)
    # python runtime
    res = OnnxInference(onx).run({'X': data})
    self.assertEqualArray(exp_loss, res['Y'], decimal=5)
    self.assertEqualArray(exp_grad, res['Y_grad'], decimal=5)
    # onnxruntime
    opts = SessionOptions()
    opts.log_severity_level = 4
    sess = InferenceSession(onx.SerializeToString(), opts,
                            providers=device_to_providers('cpu'))
    res2 = sess.run(None, {'X': data})
    self.assertEqualArray(exp_loss, res2[0], decimal=5)
    self.assertEqualArray(exp_grad, res2[1], decimal=5)
def common_check_alpha_beta(self, name, fct):
    """Checks graph *name* computing (Y, Z) = fct(X1, X2, G, alpha,
    beta) against its numpy implementation with both runtimes."""
    onx = function_onnx_graph(name, target_opset=get_max_opset(),
                              dtype=numpy.float32)
    t1 = numpy.random.randn(10, 1).astype(numpy.float32)
    t2 = numpy.random.randn(10, 1).astype(numpy.float32)
    grad = numpy.random.randn(10, 1).astype(numpy.float32)
    alpha = numpy.random.randn(1).astype(numpy.float32)
    beta = numpy.random.randn(1).astype(numpy.float32)
    exp_y, exp_z = fct(t1, t2, grad, alpha, beta)
    feeds = {'X1': t1, 'X2': t2, 'alpha': alpha, 'beta': beta, 'G': grad}
    # python runtime
    res = OnnxInference(onx).run(feeds)
    self.assertEqualArray(exp_y, res['Y'], decimal=5)
    self.assertEqualArray(exp_z, res['Z'], decimal=5)
    # onnxruntime
    opts = SessionOptions()
    opts.log_severity_level = 4
    sess = InferenceSession(onx.SerializeToString(), opts,
                            providers=device_to_providers('cpu'))
    res2 = sess.run(None, feeds)
    self.assertEqualArray(exp_y, res2[0], decimal=5)
    self.assertEqualArray(exp_z, res2[1], decimal=5)
def common_check_3(self, name, fct):
    """Checks graph *name* computing Y = fct(X, A, B) with both runtimes
    and verifies the exported python code mentions OnnxAdd."""
    onx = function_onnx_graph(name, target_opset=get_max_opset(),
                              dtype=numpy.float32)
    x = numpy.random.randn(10, 1).astype(numpy.float32)
    a = numpy.random.randn(10, 1).astype(numpy.float32).T
    b = numpy.random.randn(10, 1).astype(numpy.float32)
    expected = fct(x, a, b)
    # the exported python code must rely on OnnxAdd
    self.assertIn("'OnnxAdd'", export2onnx(onx))
    feeds = {'X': x, 'A': a, 'B': b}
    # python runtime
    res = OnnxInference(onx).run(feeds)
    self.assertEqualArray(expected, res['Y'], decimal=5)
    # onnxruntime
    opts = SessionOptions()
    opts.log_severity_level = 4
    sess = InferenceSession(onx.SerializeToString(), opts,
                            providers=device_to_providers('cpu'))
    self.assertEqualArray(expected, sess.run(None, feeds)[0], decimal=5)
def _create_training_session(self, training_onnx, weights_to_train,
                             loss_output_name='loss',
                             training_optimizer_name='SGDOptimizer',
                             device='cpu'):
    """
    Creates an instance of :epkg:`TrainingSession`.

    :param training_onnx: an ONNX graph with a loss function
    :param weights_to_train: list of initializer names to optimize
    :param loss_output_name: output name for the loss
    :param training_optimizer_name: optimizer name
    :param device: one :epkg:`C_OrtDevice` or a string
    :return: an instance of :epkg:`TrainingSession`
    """
    if training_optimizer_name != 'SGDOptimizer':
        raise NotImplementedError(
            "Only the SGDOptimizer is implemented not %r."
            "" % training_optimizer_name)
    ort_parameters = TrainingParameters()
    ort_parameters.loss_output_name = loss_output_name
    ort_parameters.use_mixed_precision = False
    # ort_parameters.world_rank = -1
    # ort_parameters.world_size = 1
    # ort_parameters.gradient_accumulation_steps = 1
    # ort_parameters.allreduce_post_accumulation = False
    # ort_parameters.deepspeed_zero_stage = 0
    # ort_parameters.enable_grad_norm_clip = False
    # ort_parameters.set_gradients_as_graph_outputs = False
    # ort_parameters.use_memory_efficient_gradient = False
    # ort_parameters.enable_adasum = False
    if self.saved_gradient is not None:
        # dump both intermediate graphs next to the requested path
        name = self.saved_gradient
        name2 = name + ".training.onnx"
        ort_parameters.model_with_gradient_graph_path = name
        ort_parameters.model_with_training_graph_path = name2
    # NOTE: a dict mapping output names to their tensor types was built
    # here but never used; it was removed.
    ort_parameters.weights_to_train = set(weights_to_train)
    ort_parameters.training_optimizer_name = training_optimizer_name
    # ort_parameters.lr_params_feed_name = lr_params_feed_name
    # no per-weight optimizer attributes
    ort_parameters.optimizer_attributes_map = {
        name: {} for name in weights_to_train}
    ort_parameters.optimizer_int_attributes_map = {
        name: {} for name in weights_to_train}
    session_options = SessionOptions()
    session_options.log_severity_level = 4
    session_options.log_verbosity_level = 4
    # session_options.use_deterministic_compute = True
    providers = device_to_providers(self.device)
    session = TrainingSession(
        training_onnx.SerializeToString(), ort_parameters,
        session_options, providers=providers)
    return session
def create_onnxruntime_session(
    onnx_model_path,
    use_gpu,
    provider=None,
    enable_all_optimization=True,
    num_threads=-1,
    enable_profiling=False,
    verbose=False,
    provider_options=None,  # map execution provider name to its option
):
    """
    Creates an onnxruntime InferenceSession.

    :param onnx_model_path: path to the ONNX model to load
    :param use_gpu: if True, select a GPU provider (see *provider*)
        with CPU fallback
    :param provider: one of None/'dml'/'rocm'/'migraphx'/'cuda'/
        'tensorrt'; only used when *use_gpu* is True, default is CUDA
    :param enable_all_optimization: ORT_ENABLE_ALL vs ORT_ENABLE_BASIC
        graph optimization level
    :param num_threads: intra-op thread count; <= 0 keeps the default
    :param enable_profiling: enables onnxruntime profiling
    :param verbose: severity 0 (verbose) if True, 4 (fatal only) otherwise
    :param provider_options: optional map from execution provider name
        to its options
    :return: an InferenceSession or None if the creation failed
    """
    # the mutable default argument ({}) was replaced by None
    if provider_options is None:
        provider_options = {}
    session = None
    try:
        from onnxruntime import GraphOptimizationLevel, InferenceSession, \
            SessionOptions

        sess_options = SessionOptions()
        if enable_all_optimization:
            sess_options.graph_optimization_level = \
                GraphOptimizationLevel.ORT_ENABLE_ALL
        else:
            sess_options.graph_optimization_level = \
                GraphOptimizationLevel.ORT_ENABLE_BASIC
        if enable_profiling:
            sess_options.enable_profiling = True
        if num_threads > 0:
            sess_options.intra_op_num_threads = num_threads
            logger.debug(
                f"Session option: intra_op_num_threads="
                f"{sess_options.intra_op_num_threads}")
        if verbose:
            sess_options.log_severity_level = 0
        else:
            sess_options.log_severity_level = 4
        logger.debug(f"Create session for onnx model: {onnx_model_path}")
        if use_gpu:
            if provider == "dml":
                providers = ["DmlExecutionProvider", "CPUExecutionProvider"]
            elif provider == "rocm":
                providers = ["ROCMExecutionProvider", "CPUExecutionProvider"]
            elif provider == "migraphx":
                providers = [
                    "MIGraphXExecutionProvider",
                    "ROCMExecutionProvider",
                    "CPUExecutionProvider",
                ]
            elif provider == "cuda":
                providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
            elif provider == "tensorrt":
                providers = [
                    "TensorrtExecutionProvider",
                    "CUDAExecutionProvider",
                    "CPUExecutionProvider",
                ]
            else:
                providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        else:
            providers = ["CPUExecutionProvider"]
        if provider_options:
            # attach options to the providers that have some
            providers = [
                (name, provider_options[name]) if name in provider_options
                else name
                for name in providers
            ]
        session = InferenceSession(onnx_model_path, sess_options,
                                   providers=providers)
    except Exception:
        # narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt and SystemExit
        logger.error("Exception", exc_info=True)
    return session
def common_check_2(self, name, fct, weight_name=None, verbose=0,
                   classification=False, rnd=True,
                   second_name='Y_grad', **kwargs):
    """
    Checks a graph returning a loss and its gradient against the numpy
    implementation *fct*, with OnnxInference and onnxruntime, then
    checks the unreduced (per-observation) score graph produced by
    ``unreduced_onnx_loss`` when *name* contains 'grad'.

    Fixed typo in the AssertionError message ("Isse" -> "Issue").

    :param name: graph name passed to ``function_onnx_graph``
    :param fct: numpy implementation returning (loss, gradient)
    :param weight_name: optional weight input name
    :param verbose: > 0 dumps graphs on disk and prints traces
    :param classification: builds label/probability inputs instead of
        random regression inputs
    :param rnd: random inputs if True, fixed values otherwise
    :param second_name: name of the gradient output
    :param kwargs: additional arguments for ``function_onnx_graph``
    """
    onx = function_onnx_graph(name, target_opset=get_max_opset(),
                              dtype=numpy.float32,
                              weight_name=weight_name, **kwargs)
    if verbose > 0:
        with open(name + ".onnx", "wb") as f:
            f.write(onx.SerializeToString())
    if classification:
        # hand-picked probabilities covering extreme logits
        N = 10
        p = numpy.random.randn(N, 1).astype(numpy.float32)
        p[0, :] = 0
        p[1, :] = 100
        p[2, :] = -100
        p[3, :] = 1
        p[4, :] = -1
        y = (numpy.random.randn(N, 1).astype(numpy.float32) > 0).astype(
            numpy.int64)
        x2 = p
        x1 = y
    else:
        if rnd:
            x1 = numpy.random.randn(10, 1).astype(numpy.float32)
            x2 = numpy.random.randn(10, 1).astype(numpy.float32)
        else:
            x1 = numpy.zeros((10, 1), dtype=numpy.float32)
            x2 = numpy.zeros((10, 1), dtype=numpy.float32) + 1
    if rnd:
        w = numpy.random.rand(10).astype(numpy.float32)
    else:
        w = numpy.zeros(10, dtype=numpy.float32) + 0.2
    if weight_name is None:
        exp_loss, exp_grad = fct(x1, x2)
    else:
        exp_loss, exp_grad = fct(x1, x2, w.reshape((-1, 1)))
    # python runtime
    oinf = OnnxInference(onx)
    run_params = dict(verbose=verbose, fLOG=print) if verbose > 0 else {}
    if verbose > 0:
        print(f"\n+++++ name(1)={name!r}")
    if weight_name is None:
        got = oinf.run({'X1': x1, 'X2': x2}, **run_params)
    else:
        got = oinf.run({'X1': x1, 'X2': x2, 'weight': w}, **run_params)
    self.assertEqual(len(exp_grad.shape), 2)
    self.assertEqual(exp_grad.shape[-1], 1)
    self.assertEqualArray(exp_grad, got[second_name], decimal=5)
    self.assertEqualArray(exp_loss, got['Y'], decimal=5)
    # onnxruntime
    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 0 if verbose > 0 else 4
    so.log_verbosity_level = 0 if verbose > 0 else 4
    sess = InferenceSession(onx.SerializeToString(), so,
                            providers=providers)
    if verbose > 0:
        print("+++ run")
    if weight_name is None:
        got = sess.run(None, {'X1': x1, 'X2': x2})
    else:
        got = sess.run(None, {'X1': x1, 'X2': x2, 'weight': w})
    self.assertEqualArray(exp_loss, got[0], decimal=5)
    self.assertEqualArray(exp_grad, got[1], decimal=5)
    if weight_name is not None:
        # missing weight input defaults to 1
        if verbose > 0:
            print("+++ run*")
        got = sess.run(None, {'X1': x1, 'X2': x2})
        exp_loss2, exp_grad2 = fct(x1, x2,
                                   numpy.array([1], dtype=x1.dtype))
        self.assertEqualArray(exp_loss2, got[0], decimal=5)
        self.assertEqualArray(exp_grad2, got[1], decimal=5)
    if 'grad' in name:
        # the unreduced graph must not sum the scores anymore
        rew = unreduced_onnx_loss(onx)
        if 'ReduceSum' in str(rew):
            raise AssertionError(f"Issue with:\n{rew!r}")
        if verbose > 0:
            with open(name + ".unreduced.onnx", "wb") as f:
                f.write(rew.SerializeToString())
        if verbose > 0:
            print(f"\n+++++ name(2)={name!r}")
        oinf = OnnxInference(rew)
        if weight_name is None:
            got = oinf.run({'X1': x1, 'X2': x2}, **run_params)
        else:
            got = oinf.run({'X1': x1, 'X2': x2, 'weight': w},
                           **run_params)
        score = got['score']
        self.assertEqual(len(score.shape), 2)
        self.assertEqual(score.shape[0], 10)
        self.assertEqual(score.shape[1], 1)
        # per-observation scores must sum back to the reduced loss
        self.assertEqualFloat(exp_loss, score.sum())
        sess = InferenceSession(rew.SerializeToString(), so,
                                providers=providers)
        if verbose > 0:
            print("+++ run")
        if weight_name is None:
            got = sess.run(None, {'X1': x1, 'X2': x2})
        else:
            got = sess.run(None, {'X1': x1, 'X2': x2, 'weight': w})
        score = got[0]
        self.assertEqual(len(score.shape), 2)
        self.assertEqual(score.shape[0], 10)
        self.assertEqual(score.shape[1], 1)
        self.assertEqualFloat(exp_loss, score.sum())