def test_scobit_utility_transform_without_intercepts(self):
    """
    Ensures that `_scobit_utility_transform()` returns correct results when
    the intercept parameters are excluded (i.e. passed as None).
    """
    # Create a set of systematic utilities that will test the function for
    # correct calculations, for proper dealing with overflow, and for
    # proper dealing with underflow.
    # The first and third elements test the general calculation.
    # The second element of index_array should lead to the transformation
    # equaling the intercept for alternative 2.
    # The fourth element should test what happens with underflow and should
    # lead to max_comp_value + ASC 1.
    # The fifth element should test what happens with overflow and should
    # lead to intercept 3 - 50 * np.exp(1).
    index_array = np.array([1, 0, -1, 50, -50])

    # Create the array of expected results
    intercept_1 = 0
    intercept_3 = 0
    shape_1 = np.exp(self.fake_shapes[0])
    shape_3 = np.exp(self.fake_shapes[2])
    result_1 = (intercept_1 -
                np.log((1.0 + np.exp(-1 * index_array[0]))**shape_1 - 1.0))
    result_3 = (intercept_3 -
                np.log((1.0 + np.exp(-1 * index_array[2]))**shape_3 - 1.0))
    expected_results = np.array([result_1,
                                 0,
                                 result_3,
                                 scobit.max_comp_value + intercept_1,
                                 intercept_3 - 50 * np.exp(1)])[:, None]

    # Use the utility transformation function
    args = [index_array,
            self.fake_df[self.alt_id_col].values,
            self.fake_rows_to_alts,
            self.fake_shapes,
            None]
    kwargs = {"intercept_ref_pos": self.fake_intercept_ref_pos}
    func_results = scobit._scobit_utility_transform(*args, **kwargs)

    # Check the correctness of the results
    self.assertIsInstance(func_results, np.ndarray)
    self.assertEqual(len(func_results.shape), 2)
    self.assertEqual(func_results.shape[1], expected_results.shape[1])
    self.assertEqual(func_results.shape[0], expected_results.shape[0])
    npt.assert_allclose(expected_results, func_results)

    return None
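# The following is a minimal, hypothetical sketch of the transformation the
# test above verifies. It is NOT pylogit's implementation: the name
# `reference_scobit_transform` and the `MAX_COMP_VALUE` constant are
# assumptions standing in for `scobit._scobit_utility_transform` and
# `scobit.max_comp_value`, and the per-alternative intercept/reference
# handling is omitted. The sketch shows why index 50 is the "underflow" case:
# in float64, 1 + exp(-50) rounds to exactly 1.0, so the log argument becomes
# 0 and the raw transform would be +inf, which gets capped at a large finite
# constant plus the intercept.
import numpy as np

MAX_COMP_VALUE = 1e300  # assumed stand-in for scobit.max_comp_value


def reference_scobit_transform(index_value, log_shape, intercept=0.0):
    """Scalar sketch: intercept - log((1 + exp(-V))**exp(log_shape) - 1)."""
    natural_shape = np.exp(log_shape)
    log_argument = (1.0 + np.exp(-index_value))**natural_shape - 1.0
    if log_argument <= 0.0:
        # Underflow: 1 + exp(-V) rounded to exactly 1.0, so cap the result.
        return intercept + MAX_COMP_VALUE
    return intercept - np.log(log_argument)


# For example, reference_scobit_transform(50, 0.0) returns the capped value,
# while reference_scobit_transform(-50, 1.0) is approximately -50 * np.exp(1),
# mirroring the fourth and fifth expected results above (the fifth under the
# implied assumption that self.fake_shapes[2] equals 1).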