def monotonic_increasing_ints(draw: Any) -> NDArray:
    """Hypothesis helper: draw a monotonically non-decreasing int array with repeats.

    Example result: [0, 0, 5, 7, 7, 7]. May be empty when zero distinct
    values are drawn.
    """
    # Number of distinct values in the result (zero gives an empty array).
    count = draw(st.integers(min_value=0, max_value=5))
    # Per-value repetition counts; each distinct value appears at least once.
    repetitions = draw(
        st_arrays(dtype=int, shape=count, elements=st.integers(min_value=1, max_value=10))
    )
    # Non-negative step sizes; their cumulative sum is a non-decreasing sequence.
    increments = draw(
        st_arrays(dtype=int, shape=count, elements=st.integers(min_value=0, max_value=10))
    )
    return np.repeat(np.cumsum(increments), repetitions)
def init_fields(self, data, backend):
    """Draw random grid dimensions and populate the input/temporary/output storages.

    Draw order (nx, ny, nz, then the five fields) is deliberately fixed so
    Hypothesis example replay stays stable.
    """
    self.nx = data.draw(hyp_st.integers(min_value=7, max_value=32), label="nx")
    self.ny = data.draw(hyp_st.integers(min_value=7, max_value=32), label="ny")
    self.nz = data.draw(hyp_st.integers(min_value=1, max_value=32), label="nz")
    shape = (self.nx, self.ny, self.nz)

    def make_storage(origin):
        # Draw a random float field of the grid shape and wrap it in a
        # backend storage anchored at the given default origin.
        return gt_storage.from_array(
            data.draw(st_arrays(dtype=float, shape=shape)),
            backend=backend,
            default_origin=origin,
            dtype=float,
        )

    self.in_phi = make_storage((0, 0, 0))
    self.in_u = make_storage((0, 0, 0))
    self.in_v = make_storage((0, 0, 0))
    self.tmp_phi = make_storage((1, 1, 0))
    self.out_phi = make_storage((3, 3, 0))
    self.alpha = 1 / 32
def test_small(client, model_data, hypothesis_data):
    """Test Triton-served model on many small Hypothesis-generated examples"""
    # Accumulators across all TOTAL_SAMPLES single-row requests, so the same
    # data can later be replayed as one whole-batch request.
    all_model_inputs = defaultdict(list)
    total_output_sizes = {}
    all_triton_outputs = defaultdict(list)
    # Pre-generated fallback inputs, one row per sample per named input.
    default_arrays = {
        name: np.random.rand(TOTAL_SAMPLES, *shape).astype('float32')
        for name, shape in model_data.input_shapes.items()
    }

    for i in range(TOTAL_SAMPLES):
        # For each input, either reuse row i of the pre-generated defaults or
        # draw a fresh Hypothesis array of the same single-row shape.
        model_inputs = {
            name: hypothesis_data.draw(
                st.one_of(
                    st.just(default_arrays[name][i:i+1, :]),
                    st_arrays('float32', [1] + shape)
                )
            )
            for name, shape in model_data.input_shapes.items()
        }
        # These models cannot handle NaN inputs, so discard such examples.
        if model_data.name == 'sklearn' or model_data.name == 'xgboost_shap':
            for array in model_inputs.values():
                assume(not np.any(np.isnan(array)))
        # Per-sample copy of the declared output sizes (summed into
        # total_output_sizes below for the final whole-batch request).
        model_output_sizes = {
            name: size
            for name, size in model_data.output_sizes.items()
        }
        # Let Hypothesis pick any currently valid shared-memory mode.
        shared_mem = hypothesis_data.draw(st.one_of(
            st.just(mode) for mode in valid_shm_modes()
        ))
        result = client.predict(
            model_data.name, model_inputs, model_data.output_sizes,
            shared_mem=shared_mem
        )
        # Record this sample's inputs, output-size contributions, and outputs.
        for name, input_ in model_inputs.items():
            all_model_inputs[name].append(input_)
        for name, size in model_output_sizes.items():
            total_output_sizes[name] = total_output_sizes.get(name, 0) + size
        for name, output in result.items():
            all_triton_outputs[name].append(output)

    # Stack the per-sample rows into full batch arrays.
    all_model_inputs = {
        name: np.concatenate(arrays)
        for name, arrays in all_model_inputs.items()
    }
    all_triton_outputs = {
        name: np.concatenate(arrays)
        for name, arrays in all_triton_outputs.items()
    }

    # NOTE(review): any failure of the local ground-truth model silently
    # discards the whole example via assume(False) — confirm this broad
    # Exception catch is intentional and not hiding real model bugs.
    try:
        ground_truth = model_data.ground_truth_model.predict(all_model_inputs)
    except Exception:
        assume(False)

    for output_name in sorted(ground_truth.keys()):
        # NOTE(review): if predict_proba is a bound method (rather than a
        # bool/None attribute) this truthiness test is always True — verify
        # against the ground-truth model wrapper.
        if model_data.ground_truth_model.predict_proba:
            # Probability outputs: compare with relative + absolute tolerance.
            arrays_close(
                all_triton_outputs[output_name],
                ground_truth[output_name],
                rtol=1e-3,
                atol=1e-2,
                assert_close=True
            )
        else:
            # Hard predictions: allow a few absolute mismatches overall.
            arrays_close(
                all_triton_outputs[output_name],
                ground_truth[output_name],
                atol=0.1,
                total_atol=3,
                assert_close=True
            )

    # Test entire batch of Hypothesis-generated inputs at once
    shared_mem = hypothesis_data.draw(st.one_of(
        st.just(mode) for mode in valid_shm_modes()
    ))
    all_triton_outputs = client.predict(
        model_data.name, all_model_inputs, total_output_sizes,
        shared_mem=shared_mem
    )
    # Same comparison as above, now against the single whole-batch response.
    for output_name in sorted(ground_truth.keys()):
        if model_data.ground_truth_model.predict_proba:
            arrays_close(
                all_triton_outputs[output_name],
                ground_truth[output_name],
                rtol=1e-3,
                atol=1e-2,
                assert_close=True
            )
        else:
            arrays_close(
                all_triton_outputs[output_name],
                ground_truth[output_name],
                atol=0.1,
                total_atol=3,
                assert_close=True
            )