from typing import List

import pytest
from numpy.testing import assert_almost_equal

from thinc.api import (
    Model,
    NumpyOps,
    ParametricAttention,
    Relu,
    Softmax,
    chain,
    list2ragged,
    reduce_max,
    reduce_sum,
    registry,
    with_array,
    with_padded,
    with_ragged,
)
from thinc.types import Array2d, Floats2d, Ragged
from thinc.util import DataValidationError, data_validation


def util_batch_unbatch_array(
    model: Model[Floats2d, Array2d], in_data: Floats2d, out_data: Array2d
):
    unbatched = [model.ops.reshape2f(a, 1, -1) for a in in_data]
    with data_validation(True):
        model.initialize(in_data, out_data)
        Y_batched = model.predict(in_data).tolist()
        Y_not_batched = [model.predict(u)[0].tolist() for u in unbatched]
        assert_almost_equal(Y_batched, Y_not_batched, decimal=4)
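

# A minimal sketch of how the helper above might be driven; the test name,
# layer choice, and shapes are illustrative assumptions, not taken from the
# original suite. Relu infers its widths from the data at initialize().
def test_relu_batch_unbatch_sketch():
    model = Relu(5)
    in_data = model.ops.alloc2f(4, 3)
    out_data = model.ops.alloc2f(4, 5)
    util_batch_unbatch_array(model, in_data, out_data)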


def util_batch_unbatch_ragged(
    model: Model[Ragged, Array2d], in_data: Ragged, out_data: Array2d
):
    with data_validation(True):
        model.initialize(in_data, out_data)
        Y_batched = model.predict(in_data)
        Y_not_batched = [model.predict(in_data[i])[0] for i in range(len(in_data))]
        assert_almost_equal(Y_batched, Y_not_batched, decimal=4)
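

# A minimal sketch of driving the ragged helper, assuming reduce_sum()
# (Ragged -> Floats2d) as the model; the test name and shapes are
# illustrative. Three sequences of lengths 2, 3, and 1 share one data array.
def test_reduce_sum_batch_unbatch_sketch():
    model = reduce_sum()
    data = model.ops.alloc2f(6, 4)
    lengths = model.ops.asarray1i([2, 3, 1])
    util_batch_unbatch_ragged(model, Ragged(data, lengths), model.ops.alloc2f(3, 4))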


def util_batch_unbatch_list(
    model: Model[List[Array2d], List[Array2d]],
    in_data: List[Array2d],
    out_data: List[Array2d],
):
    with data_validation(True):
        model.initialize(in_data, out_data)
        Y_batched = model.predict(in_data)
        Y_not_batched = [model.predict([u])[0] for u in in_data]
        assert_almost_equal(Y_batched, Y_not_batched, decimal=4)
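

# A minimal sketch for the list helper, assuming with_array(Relu(...)) so the
# model maps List[Array2d] -> List[Array2d]; the name and shapes are
# illustrative assumptions, not part of the original suite.
def test_with_array_batch_unbatch_sketch():
    model = with_array(Relu(5))
    in_data = [model.ops.alloc2f(2, 4), model.ops.alloc2f(2, 4)]
    out_data = [model.ops.alloc2f(2, 5), model.ops.alloc2f(2, 5)]
    util_batch_unbatch_list(model, in_data, out_data)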


def test_validation():
    model = chain(Relu(10), Relu(10), with_ragged(reduce_max()), Softmax())
    with data_validation(True):
        with pytest.raises(DataValidationError):
            model.initialize(X=model.ops.alloc2f(1, 10), Y=model.ops.alloc2f(1, 10))
        with pytest.raises(DataValidationError):
            model.initialize(X=model.ops.alloc3f(1, 10, 1), Y=model.ops.alloc2f(1, 10))
        with pytest.raises(DataValidationError):
            model.initialize(X=[model.ops.alloc2f(1, 10)], Y=model.ops.alloc2f(1, 10))
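

# The config-driven test below presumably receives its cases from a pytest
# parametrize decorator that is not shown in this section. It also relies on
# two helpers this section never defines; the versions here are hedged
# sketches, not the originals. NoDropoutOps fails loudly whenever a dropout
# mask is requested with drop > 0, which lets the test assert that predict()
# never applies dropout; assert_data_match only checks container type and
# dimensionality.
class NoDropoutOps(NumpyOps):
    def get_dropout_mask(self, shape, drop):
        if drop is None or drop <= 0.0:
            return self.xp.ones(shape, dtype="f")
        raise ValueError("During prediction, dropout should not be applied")


def assert_data_match(Y, out_data):
    assert type(Y) == type(out_data)
    if isinstance(out_data, Ragged):
        assert Y.data.ndim == out_data.data.ndim
    elif isinstance(out_data, (list, tuple)):
        assert len(Y) == len(out_data)
    else:
        assert Y.ndim == out_data.ndim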


def test_layers_from_config(name, kwargs, in_data, out_data):
    cfg = {"@layers": name, **kwargs}
    model = registry.resolve({"config": cfg})["config"]
    if "LSTM" in name:
        model = with_padded(model)
    # Can't validate the fake docs used by the feature extractor:
    valid = "FeatureExtractor" not in name
    with data_validation(valid):
        model.initialize(in_data, out_data)
        Y, backprop = model(in_data, is_train=True)
        assert_data_match(Y, out_data)
        dX = backprop(Y)
        assert_data_match(dX, in_data)
        # Test that during prediction, no dropout is applied
        model._to_ops(NoDropoutOps())
        model.predict(in_data)
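

# A hedged example of the kind of entry the parametrized test above might
# receive (my assumption; the real case table is not shown in this section):
# a registered layer name, its constructor kwargs, and matching data, e.g.
#     ("Relu.v1", {}, numpy.zeros((2, 4), dtype="f"), numpy.zeros((2, 4), dtype="f"))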


def test_validation_complex():
    good_model = chain(list2ragged(), reduce_sum(), Relu(12, dropout=0.5), Relu(1))
    X = [good_model.ops.xp.zeros((4, 75), dtype="f")]
    Y = good_model.ops.xp.zeros((1,), dtype="f")
    good_model.initialize(X, Y)
    good_model.predict(X)

    bad_model = chain(
        list2ragged(),
        reduce_sum(),
        Relu(12, dropout=0.5),
        # ERROR: Why can't I attach a Relu to an attention layer?
        ParametricAttention(12),
        Relu(1),
    )
    with data_validation(True):
        with pytest.raises(DataValidationError):
            bad_model.initialize(X, Y)
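

# ParametricAttention operates on Ragged data, while Relu outputs a plain 2d
# array, which is why validation rejects bad_model above. A hedged sketch of
# a shape-compatible ordering (my assumption, not part of the original
# suite): apply the attention layer while the data is still Ragged.
def test_validation_complex_fixed_sketch():
    model = chain(list2ragged(), ParametricAttention(), reduce_sum(), Relu(12), Relu(1))
    X = [model.ops.xp.zeros((4, 75), dtype="f")]
    Y = model.ops.xp.zeros((1,), dtype="f")
    model.initialize(X, Y)
    model.predict(X)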