def test_upload_download_text():
    """Round-trip a small text file through signed upload/download URLs.

    Writes a local file, uploads it via a WRITE signed URL into the space's
    plugin-data bucket, downloads it back via a READ signed URL, and checks
    the contents survive unchanged.
    """
    tempbase = Path(tempfile.mkdtemp())
    upfile = tempbase / "up.txt"
    downfile = tempbase / "down.txt"
    with open(upfile, "w") as f:
        f.write("Hello")

    # Grab a Steamship client and generate an upload url
    client = get_steamship_client()
    space = Space.get(client=client).data
    upload_name = random_name()
    url_resp = space.create_signed_url(
        SignedUrl.Request(
            bucket=SignedUrl.Bucket.PLUGIN_DATA,
            filepath=upload_name,
            operation=SignedUrl.Operation.WRITE,
        )
    )
    assert url_resp is not None
    assert url_resp.data is not None
    assert url_resp.data.signed_url is not None

    # Upload the file to the URL
    upload_to_signed_url(url_resp.data.signed_url, filepath=upfile)

    # Now create a download signed URL
    download_resp = space.create_signed_url(
        SignedUrl.Request(
            bucket=SignedUrl.Bucket.PLUGIN_DATA,
            filepath=upload_name,
            operation=SignedUrl.Operation.READ,
        )
    )
    assert download_resp is not None
    assert download_resp.data is not None
    assert download_resp.data.signed_url is not None

    # TODO: Follow up after we get a firmer understanding of the failure semantics of
    # Localstack 404 errors with pre-signed URLs versus AWS actual errors with pre-signed
    # URLs. Localstack seems to reply with an HTTP 200 containing an XML string that
    # contains an error message, so the "bad URL raises" check stays disabled for now.
    # url = download_resp.data.signedUrl
    # parsed_url = urlparse(url)
    # bad_url = urlunparse(parsed_url._replace(path=f"{parsed_url.path}BOOGA"))
    # bad_download_path = tempbase / Path("bad.zip")
    # with pytest.raises(Exception):
    #     download_from_signed_url(download_resp.data.signedUrl, to_file=bad_download_path)

    # Download the file from the URL
    download_from_signed_url(download_resp.data.signed_url, to_file=downfile)

    # Verify the download arrived (fix: drop non-idiomatic `== True` comparison)
    assert downfile.exists()

    # Verify the round-tripped contents are identical
    with open(downfile, "r") as f1, open(upfile, "r") as f2:
        assert f1.read() == f2.read()
def test_configurable_instance_invoke():
    """Deploy a configurable app and verify each instance honors its own config."""
    first_greeting = "Hola"
    config_template = {"greeting": {"type": "string"}}
    instance_config = {"greeting": first_greeting}
    client = get_steamship_client()
    hello_world_path = APPS_PATH / "configurable_hello_world.py"
    with deploy_app(
        client,
        hello_world_path,
        version_config_template=config_template,
        instance_config=instance_config,
    ) as (app, version, instance):
        # Invoke the deployed instance.
        # Note: we're invoking the data at configurable_hello_world.py in the
        # tests/demo_apps folder.
        greeting_response = instance.post("greet").data
        assert greeting_response == f"{first_greeting}, Person"

        # A second instance configured differently must greet differently.
        second_greeting = "Hallo"
        second_config = {"greeting": second_greeting}
        instance2 = AppInstance.create(
            client, app_id=app.id, app_version_id=version.id, config=second_config
        )
        instance2.wait()
        assert instance2.error is None
        assert instance2.data is not None
        instance2 = instance2.data
        greeting_response2 = instance2.post("greet").data
        assert greeting_response2 == f"{second_greeting}, Person"
def test_get_training_parameters():
    """Any trainable plugin needs a Python+Lambda component that can report its
    trainable params. This tests that all the plumbing works for that to be returned.
    """
    client = get_steamship_client()
    tagger_path = PLUGINS_PATH / "taggers" / "plugin_trainable_tagger.py"
    # Now make a trainable tagger to train on those tags
    # (fix: snake_case local names instead of camelCase)
    with deploy_plugin(
        client,
        tagger_path,
        "tagger",
        training_platform=HostingType.LAMBDA,
    ) as (tagger, tagger_version, tagger_instance):
        training_request = TrainingParameterPluginInput(plugin_instance=tagger_instance.handle)
        res = tagger_instance.get_training_parameters(
            training_request
        )  # TODO (enias): How is this working?
        assert res.data is not None
        params = res.data
        assert params.training_epochs is not None
        assert params.training_epochs == TRAINING_PARAMETERS.training_epochs
        # Floats are compared with a tolerance rather than exact equality.
        assert math.isclose(
            params.testing_holdout_percent,
            TRAINING_PARAMETERS.testing_holdout_percent,
            abs_tol=0.0001,
        )
        assert params.training_params == TRAINING_PARAMETERS.training_params
def test_e2e_csv_blockifier_plugin():
    """Deploy the CSV blockifier and check it turns a CSV file into tagged blocks."""
    client = get_steamship_client()
    csv_blockifier_plugin_path = PLUGINS_PATH / "blockifiers" / "csv_blockifier.py"
    # TODO (enias): Derive this from Config
    version_config_template = {
        "text_column": {"type": "string"},
        "tag_columns": {"type": "string"},
        "tag_kind": {"type": "string"},
    }
    # Values have to match up with the columns of the uploaded CSV.
    instance_config = {
        "text_column": "Message",
        "tag_columns": "Category",
        "tag_kind": "Intent",
    }
    with deploy_plugin(
        client,
        csv_blockifier_plugin_path,
        "blockifier",
        version_config_template=version_config_template,
        instance_config=instance_config,
    ) as (plugin, version, instance):
        with upload_file(client, "utterances.csv") as file:
            assert len(file.refresh().data.blocks) == 0
            file.blockify(plugin_instance=instance.handle).wait()
            # One block per CSV row, each carrying at least one named, kinded tag.
            blocks = file.refresh().data.blocks
            assert len(blocks) == 5
            for block in blocks:
                assert block.tags is not None
                assert len(block.tags) > 0
                for tag in block.tags:
                    assert tag.name is not None
                    assert tag.kind is not None
            file.delete()
def test_deploy_in_space():
    """A plugin instance created in a non-default space is bound to that space."""
    # NOTE(review): a same-named test appears later in this view; if both live in one
    # module, pytest only collects the last definition — confirm they are separate files.
    client = get_steamship_client()
    space = Space.create(client, handle="test-non-default-space").data
    instance = PluginInstance.create(client, plugin_handle="test-tagger", space_id=space.id).data
    assert instance.space_id == space.id
def test_multiple_queries():
    """Multi-query searches return the single best item and record which query matched."""
    steamship = get_steamship_client()
    plugin_instance = PluginInstance.create(steamship, plugin_handle=_TEST_EMBEDDER).data
    with random_index(steamship, plugin_instance.handle) as index:
        # Test for suppressed re-indexing
        a1 = "Ted can eat an entire block of cheese."
        a2 = "Joe can drink an entire glass of water."
        _ = index.insert_many([a1, a2])
        index.embed().wait()

        qs1 = ["Who can eat the most cheese", "Who can run the fastest?"]
        hits = index.search(qs1)
        assert len(hits.data.items) == 1
        assert hits.data.items[0].value.value == a1
        assert hits.data.items[0].value.query == qs1[0]

        qs2 = ["Who can tie a shoe?", "Who can drink the most water?"]
        hits = index.search(qs2)
        assert len(hits.data.items) == 1
        assert hits.data.items[0].value.value == a2
        assert hits.data.items[0].value.query == qs2[1]

        qs3 = ["What can Ted do?", "What can Sam do?", "What can Jerry do?"]
        hits = index.search(qs3)
        assert len(hits.data.items) == 1
        assert hits.data.items[0].value.value == a1
        assert hits.data.items[0].value.query == qs3[0]

        # Same queries reordered: the winning query index shifts accordingly.
        qs3 = ["What can Sam do?", "What can Ted do?", "What can Jerry do?"]
        hits = index.search(qs3)
        assert len(hits.data.items) == 1
        assert hits.data.items[0].value.value == a1
        assert hits.data.items[0].value.query == qs3[1]

        index.create_snapshot().wait()

        # Insert more items after snapshotting and re-embed.
        a3 = "Susan can run very fast."
        a4 = "Brenda can fight alligators."
        _ = index.insert_many([a3, a4])
        index.embed().wait()

        qs4 = ["What can Brenda do?", "What can Ronaldo do?", "What can Jerry do?"]
        hits = index.search(qs4)
        assert len(hits.data.items) == 1
        assert hits.data.items[0].value.value == a4
        assert hits.data.items[0].value.query == qs4[0]

        # With k=2 the top two items come back, best match first.
        qs4 = [
            "What can Brenda do?",
            "Who should run a marathon?",
            "What can Jerry do?",
        ]
        hits = index.search(qs4, k=2)
        assert len(hits.data.items) == 2
        assert hits.data.items[0].value.value == a4
        assert hits.data.items[0].value.query == qs4[0]
        assert hits.data.items[1].value.value == a3
        assert hits.data.items[1].value.query == qs4[1]
def test_get_space():
    """Space.get by id returns the requested space, not the default one."""
    client = get_steamship_client()
    default = Space.get(client=client).data
    created = Space.create(client=client, handle="test").data
    fetched = Space.get(client=client, space_id=created.id).data
    assert fetched.id == created.id
    assert fetched.id != default.id
    assert fetched.handle == created.handle
def test_get_steamship_client():
    """The test-profile client is fully configured and can fetch the current user."""
    client = get_steamship_client()
    config = client.config
    assert config is not None
    assert config.profile == "test"
    assert config.api_key is not None
    user = User.current(client).data
    assert user.id is not None
    assert user.handle is not None
def test_deploy_in_space():
    """Deploying an app into a non-default space binds the instance to that space."""
    client = get_steamship_client()
    demo_app_path = APPS_PATH / "demo_app.py"
    space = Space.create(client, handle="test-non-default-space").data
    with deploy_app(client, demo_app_path, space_id=space.id) as (_, _, instance):
        assert instance.space_id == space.id
def test_app_create():
    """An app can be created and then deleted without errors.

    Fix: the original re-asserted ``app.error is None`` after deletion — a value
    already checked above — instead of inspecting the delete response itself.
    """
    client = get_steamship_client()
    app = App.create(client)
    assert app.error is None
    assert app.data is not None
    delete_response = app.data.delete()
    assert delete_response.error is None
def test_index_usage():
    """End-to-end embedding index flow: insert, embed, search, metadata retrieval."""
    steamship = get_steamship_client()
    plugin_instance = PluginInstance.create(steamship, plugin_handle=_TEST_EMBEDDER).data
    with random_index(steamship, plugin_instance.handle) as index:
        a1 = "Ted can eat an entire block of cheese."
        q1 = "Who can eat the most cheese"
        _ = index.insert(a1)
        _ = index.search(q1)

        # Embed the inserted content and wait for the task to succeed.
        task = index.embed()
        task.wait()
        task.refresh()
        assert task.task.state == TaskState.succeeded

        hits = index.search(q1)
        assert len(hits.data.items) == 1
        assert hits.data.items[0].value.value == a1

        # Insert an item carrying an external id/type and arbitrary metadata.
        a2 = "Armadillo shells are bulletproof."
        q2 = "What is something interesting about Armadillos?"
        a2id = "A2id"
        a2type = "A2type"
        a2metadata = {
            "id": a2id,
            "idid": f"{a2id}{a2id}",
            "boolVal": True,
            "intVal": 123,
            "floatVal": 1.2,
        }
        _ = index.insert(a2, external_id=a2id, external_type=a2type, metadata=a2metadata)

        # By default metadata is NOT returned with search hits.
        hits2 = index.search(q2)
        assert len(hits2.data.items) == 1
        assert hits2.data.items[0].value.value == a2
        assert hits2.data.items[0].value.external_id is None
        assert hits2.data.items[0].value.external_type is None
        assert hits2.data.items[0].value.metadata is None

        # Opting in returns the id, type, and metadata intact.
        hits3 = index.search(q2, include_metadata=True)
        assert len(hits3.data.items) == 1
        assert hits3.data.items[0].value.value == a2
        assert hits3.data.items[0].value.external_id == a2id
        assert hits3.data.items[0].value.external_type == a2type
        assert hits3.data.items[0].value.metadata == a2metadata
        # Spot-check individual keys in addition to the whole-dict comparison.
        assert hits3.data.items[0].value.metadata["id"] == a2id
        assert hits3.data.items[0].value.metadata["idid"] == f"{a2id}{a2id}"

        # A larger k returns both items, best match first.
        hits4 = index.search(q2, k=10)
        assert len(hits4.data.items) == 2
        assert hits4.data.items[0].value.value == a2
        assert hits4.data.items[1].value.value == a1
def test_duplicate_inserts():
    """Insert-then-search of the same content completes without error."""
    steamship = get_steamship_client()
    plugin_instance = PluginInstance.create(steamship, plugin_handle=_TEST_EMBEDDER).data
    with random_index(steamship, plugin_instance.handle) as index:
        # Test for suppressed re-indexing
        item = "Ted can eat an entire block of cheese."
        query = "Who can eat the most cheese"
        _ = index.insert(item)
        _ = index.search(query)
def test_response_post_update_fails_when_no_task_present():
    """post_update raises when the response has no task, or a task with no id."""
    client = get_steamship_client()
    # No status task at all.
    empty_response = Response()
    with pytest.raises(Exception):
        empty_response.post_update(client)
    # A status Task that lacks a task_id is equally invalid.
    taskless_response = Response(status=Task())
    with pytest.raises(Exception):
        taskless_response.post_update(client)
def test_trainable_tagger():
    """Exercise the three phases of a trainable tagger: parameters, training, running."""
    # NOTE(review): a same-named test appears later in this view; if both live in one
    # module, pytest only collects the last definition — confirm they are separate files.
    client = get_steamship_client()
    assert client is not None
    plugin = TestTrainableTaggerPlugin(client=client)
    assert plugin.client is not None

    # STEP 1. Training Parameters
    # The end-user may offer inputs, but ultimately the plugin itself decides upon the
    # final set of trainable parameters. Check both the direct call and the endpoint.
    params_resp = plugin.get_training_parameters(
        PluginRequest(data=TrainingParameterPluginInput(), task_id="000", plugin_instance_id="000")
    )
    assert params_resp.data.dict() == TRAINING_PARAMETERS.dict()
    params_resp2 = plugin.get_training_parameters_endpoint(
        **PluginRequest(
            data=TrainingParameterPluginInput(), task_id="000", plugin_instance_id="000"
        ).dict()
    )
    assert params_resp2.data.dict() == TRAINING_PARAMETERS.dict()
    assert params_resp2.data.training_epochs == TRAINING_PARAMETERS.training_epochs

    # STEP 2. Training
    # model_cls() presumably returns the model class; the trailing () instantiates it
    # — TODO confirm against TestTrainableTaggerPlugin.
    model = plugin.model_cls()()
    train_resp = plugin.train(
        PluginRequest(
            data=TrainPluginInput(
                plugin_instance="foo", training_params=TRAINING_PARAMETERS.training_params
            ),
            task_id="000",
            plugin_instance_id="000",
        ),
        model,
    )
    assert train_resp.data == TRAIN_RESPONSE.dict()
    train_resp2 = plugin.train_endpoint(
        **PluginRequest(
            data=TrainPluginInput(
                plugin_instance="foo", training_params=TRAINING_PARAMETERS.training_params
            ),
            task_id="000",
            plugin_instance_id="000",
        ).dict()
    )
    assert train_resp2.data == TRAIN_RESPONSE.dict()

    # STEP 3. Run
    run_resp = plugin.run(TEST_PLUGIN_REQ)
    _test_resp(run_resp)
    # The endpoint takes a kwargs block which is transformed into the appropriate JSON object.
    run_resp2 = plugin.run_endpoint(**TEST_PLUGIN_REQ_DICT)
    _test_resp(run_resp2)
def test_file_embed_lookup():
    """Index two tagged files and verify search plus per-file item listing."""
    steamship = get_steamship_client()
    content_a = "Ted likes to run."
    content_b = "Grace likes to bike."

    # Upload, blockify, and tag the first file.
    file_a = steamship.upload(content=content_a, mime_type=MimeTypes.MKD).data
    blockify_res = file_a.blockify(plugin_instance="markdown-blockifier-default-1.0")
    assert blockify_res.error is None
    blockify_res.wait()
    parser = PluginInstance.create(steamship, plugin_handle="test-tagger").data
    parse_res = file_a.tag(plugin_instance=parser.handle)
    assert parse_res.error is None
    parse_res.wait()

    # Same for the second file.
    file_b = steamship.upload(content=content_b, mime_type=MimeTypes.MKD).data
    blockify_res = file_b.blockify(plugin_instance="markdown-blockifier-default-1.0")
    assert blockify_res.error is None
    blockify_res.wait()
    parser = PluginInstance.create(steamship, plugin_handle="test-tagger").data
    parse_res = file_b.tag(plugin_instance=parser.handle)
    assert parse_res.error is None
    parse_res.wait()

    embedder = PluginInstance.create(steamship, plugin_handle="test-embedder").data
    # Now we add both files to a fresh index.
    with random_index(steamship, embedder.handle) as index:
        index.insert_file(file_a.id, block_type="sentence", reindex=True)
        index.insert_file(file_b.id, block_type="sentence", reindex=True)

        res = index.search("What does Ted like to do?").data
        assert len(res.items) == 1
        assert res.items[0].value.value == content_a

        res = index.search("What does Grace like to do?").data
        assert len(res.items) == 1
        assert res.items[0].value.value == content_b

        # Listing per file returns one embedded item each, with equal-length vectors.
        items_a = index.list_items(file_id=file_a.id).data
        assert len(items_a.items) == 1
        assert len(items_a.items[0].embedding) > 0
        assert items_a.items[0].value == content_a
        items_b = index.list_items(file_id=file_b.id).data
        assert len(items_b.items) == 1
        assert len(items_b.items[0].embedding) > 0
        assert len(items_b.items[0].embedding) == len(items_a.items[0].embedding)
        assert items_b.items[0].value == content_b
def test_plugin_instance_get():
    """PluginInstance.get by handle resolves to the same instance that was created."""
    steamship = get_steamship_client()
    handle = f"test_tagger_test_handle{uuid.uuid4()}"
    created = PluginInstance.create(steamship, plugin_handle="test-tagger", handle=handle).data
    assert created.id is not None
    fetched = PluginInstance.get(steamship, handle=handle).data
    assert created.id == fetched.id
def test_parsing():
    """Tagging a short string produces one block carrying five tags."""
    steamship = get_steamship_client()
    parser = PluginInstance.create(steamship, plugin_handle="test-tagger").data
    task = parser.tag("This is a test")
    task.wait()
    tagged = task.data
    assert len(tagged.file.blocks) == 1
    block = tagged.file.blocks[0]
    assert block.text == "This is a test"
    assert len(block.tags) == 5
def test_file_index():
    """Blockify and tag a markdown file, index it, and search whole-block results."""
    steamship = get_steamship_client()
    t = "A nice poem"
    p1_1 = "Roses are red."
    p1_2 = "Violets are blue."
    p2_1 = "Sugar is sweet."
    p2_2 = "I love you."
    t2 = "A flavorful story"
    p3_1 = "Cake is made of flour."
    p3_2 = "Cake tastes good with milk."
    p4_1 = "Cake comes in chocolate and vanilla flavors."
    p4_2 = "Cake can be cut into mAny pieces and shared."
    content1 = f"# {t}\n\n{p1_1} {p1_2}\n\n{p2_1} {p2_2}"
    content2 = f"# {t2}\n\n{p3_1} {p3_2}\n\n{p4_1} {p4_2}"
    content = f"{content1}\n\n{content2}"

    file = steamship.upload(content=content, mime_type=MimeTypes.MKD).data
    assert file.id is not None
    assert file.mime_type == MimeTypes.MKD

    blockify_resp = file.blockify(plugin_instance="markdown-blockifier-default-1.0")
    assert blockify_resp.error is None
    blockify_resp.wait()

    # Now we parse
    parser = PluginInstance.create(steamship, plugin_handle="test-tagger").data
    parse_resp = file.tag(plugin_instance=parser.handle)
    assert parse_resp.error is None
    parse_resp.wait()

    # Now the sentences should be parsed: two headers plus four paragraphs = six blocks.
    refreshed = file.refresh().data
    assert len(refreshed.blocks) == 6

    # Now we add the file to the index via the shortcut.
    embedder = PluginInstance.create(steamship, plugin_handle="test-embedder").data
    # noinspection PyUnresolvedReferences
    index = file.index(plugin_instance=embedder.handle)

    res = index.search("What color are roses?").data
    assert len(res.items) == 1
    # The simdex indexes entire blocks (not sentences), so hits are whole-block text.
    assert res.items[0].value.value == " ".join([p1_1, p1_2])

    res = index.search("What flavors does cake come in?").data
    assert len(res.items) == 1
    assert res.items[0].value.value == " ".join([p4_1, p4_2])

    index.delete()
    file.delete()
def test_plugin_public():
    """Public plugins are listable but cannot be deleted by this user."""
    steamship = get_steamship_client()
    listing = Plugin.list(steamship).data
    assert listing.plugins is not None
    plugins = listing.plugins
    assert len(plugins) > 0
    # Deleting a public plugin we don't own must fail.
    res = plugins[0].delete()
    assert res.error is not None
def test_e2e_blockifier_plugin():
    """Deploy the demo blockifier and verify it splits a file into four blocks."""
    client = get_steamship_client()
    blockifier_path = PLUGINS_PATH / "blockifiers" / "blockifier.py"
    with deploy_plugin(client, blockifier_path, "blockifier") as (
        plugin,
        version,
        instance,
    ):
        file = File.create(client=client, content="This is a test.").data
        # Fresh files carry no blocks until blockified.
        assert len(file.refresh().data.blocks) == 0
        file.blockify(plugin_instance=instance.handle).wait()
        assert len(file.refresh().data.blocks) == 4
        file.delete()
def test_model_checkpoint_save_load():
    """A ModelCheckpoint captures the entire state of a model at any given time.

    On disk, it is a folder.
    - Models are expected to take this folder as their initialization input
    - Models are expected to persist whatever they need to persist to this folder during training.

    On Steamship, it is a zip archive stored in the associated PluginInstance's "Space Bucket"
    - Each ModelCheckpoint uploaded has a handle (like V1)
    - Every ModelCheckpoint uploaded can also become the new default
    """
    client = get_steamship_client()
    # Created for its server-side effects; the original bound its task id to an
    # unused local (fix: dropped the dead assignment).
    create_dummy_training_task(client)

    checkpoint_1 = ModelCheckpoint(client=client, handle="epoch1", plugin_instance_id="0000")
    with open(checkpoint_1.folder_path_on_disk() / "params.json", "w") as f:
        f.write("HI THERE")
    checkpoint_1.upload_model_bundle()

    # Now let's download the checkpoint labeled "epoch1" and test that it is equal.
    checkpoint_downloaded = ModelCheckpoint(
        client=client, handle="epoch1", plugin_instance_id="0000"
    )
    checkpoint_downloaded.download_model_bundle()
    _test_folders_equal(
        checkpoint_1.folder_path_on_disk(), checkpoint_downloaded.folder_path_on_disk()
    )

    # We should also be able to download the "default" checkpoint by not providing a handle.
    checkpoint_default_1 = ModelCheckpoint(client=client, plugin_instance_id="0000")
    checkpoint_default_1.download_model_bundle()
    _test_folders_equal(
        checkpoint_1.folder_path_on_disk(), checkpoint_default_1.folder_path_on_disk()
    )

    # Let's create a new checkpoint with our trainer... epoch2.
    checkpoint_2 = ModelCheckpoint(client=client, handle="epoch2", plugin_instance_id="0000")
    with open(checkpoint_2.folder_path_on_disk() / "params.json", "w") as f:
        f.write("UPDATED PARAMS")
    checkpoint_2.upload_model_bundle()

    # If we download the new DEFAULT checkpoint, we will now receive the epoch2 files.
    checkpoint_default_2 = ModelCheckpoint(client=client, plugin_instance_id="0000")
    checkpoint_default_2.download_model_bundle()
    _test_folders_equal(
        checkpoint_2.folder_path_on_disk(), checkpoint_default_2.folder_path_on_disk()
    )
def test_embed_task():
    """index.embed() returns a populated task that starts waiting, then succeeds."""
    steamship = get_steamship_client()
    plugin_instance = PluginInstance.create(steamship, plugin_handle=_TEST_EMBEDDER).data
    with random_index(steamship, plugin_instance.handle) as index:
        _ = index.insert("test", reindex=False)
        res = index.embed()
        # The freshly created task is fully populated and initially waiting.
        assert res.task.task_id is not None
        assert res.task.state is not None
        assert res.task.task_created_on is not None
        assert res.task.task_last_modified_on is not None
        assert res.task.state == TaskState.waiting
        res.wait()
        assert res.task.state == TaskState.succeeded
def test_version_create():
    """An app version can be created from a zipped deployable, then removed cleanly."""
    client = get_steamship_client()
    demo_app_path = APPS_PATH / "demo_app.py"
    app = App.create(client).data
    zip_bytes = zip_deployable(demo_app_path)
    version = AppVersion.create(client, app_id=app.id, filebytes=zip_bytes)
    version.wait()
    # Tear down: version first, then the app itself.
    res = version.data.delete()
    assert res.error is None
    res = app.delete()
    assert res.error is None
def create_index(_: Client, plugin_instance: str):
    """Exercise index creation: a plugin is required, and upsert=False forbids duplicates."""
    steamship = get_steamship_client()
    # Should require plugin
    task = steamship.create_index()
    assert task.error is not None
    index = steamship.create_index(plugin_instance=plugin_instance).data
    assert index is not None
    # Duplicate creation should fail with upsert=False
    task = steamship.create_index(
        handle=index.handle, plugin_instance=plugin_instance, upsert=False
    )
    assert task.error is not None
    index.delete()
def test_e2e_tagger():
    """Deploy the parser plugin as a tagger; tag a short doc, then a whole file."""
    client = get_steamship_client()
    parser_path = PLUGINS_PATH / "taggers" / "plugin_parser.py"
    # TODO (enias): Use Enum for plugin type
    with deploy_plugin(client, parser_path, "tagger") as (plugin, version, instance):
        test_doc = "Hi there"
        res = instance.tag(doc=test_doc)
        res.wait()
        assert res.error is None
        assert res.data is not None
        assert len(res.data.file.blocks) == 1
        assert res.data.file.blocks[0].text == test_doc
        # Let's try it on a file. This is the same test we run on the Swift test parser.
        # Since the python test parser is implemented to behave the same, we can reuse it!
        tag_file(client, instance.handle)
def test_e2e_corpus_importer():
    """Deploy file- and corpus-importer plugins and import a two-file corpus."""
    client = get_steamship_client()
    corpus_importer_path = PLUGINS_PATH / "importers" / "plugin_corpus_importer.py"
    file_importer_path = PLUGINS_PATH / "importers" / "plugin_file_importer.py"
    with temporary_space(client) as space:
        with deploy_plugin(client, file_importer_path, "fileImporter", space_id=space.id) as (
            _,
            _,
            fi_instance,
        ):
            with deploy_plugin(
                client, corpus_importer_path, "corpusImporter", space_id=space.id
            ) as (plugin, version, instance):
                req = CorpusImportRequest(
                    type="file",
                    value="dummy-value",
                    plugin_instance=instance.handle,
                    file_importer_plugin_instance=fi_instance.handle,
                )
                res = client.post(
                    "plugin/instance/importCorpus",
                    req,
                    expect=CorpusImportResponse,
                    space_id=space.id,
                )
                res.wait()

                # We should now have two files, each containing TEST_DOC.
                files = File.list(client, space_id=space.id).data
                assert files.files is not None
                assert len(files.files) == 2
                for file in files.files:
                    raw = file.raw().data
                    assert raw.decode("utf-8") == TEST_DOC
                    file.delete()
def test_trainable_tagger():
    """A config-bearing trainable tagger receives its config during train and run.

    Fix: the original bound the endpoint results to unused locals (``tagger2``,
    ``res``); they are now discarded explicitly with ``_``.
    """
    # NOTE(review): a same-named test appears earlier in this view; if both live in one
    # module, pytest only collects the last definition — confirm they are separate files.
    client = get_steamship_client()
    assert client is not None
    plugin = TestTrainableTaggerConfigPlugin(
        client=client, config=dict(testValue1="foo", testValue2="bar")
    )
    assert plugin.client is not None

    # Make sure plugin model gets its config while 'training'.
    _ = plugin.train_endpoint(
        **PluginRequest(
            data=TrainPluginInput(plugin_instance="foo", training_params=None),
            task_id="000",
            plugin_instance_id="000",
        ).dict()
    )

    # Make sure plugin model gets its config while 'running'.
    _ = plugin.run(TEST_PLUGIN_REQ)
def test_file_upload_then_parse():
    """Uploads start with zero blocks; blockifying yields the expected block texts."""
    steamship = get_steamship_client()

    file_a = steamship.upload(content="This is a test.").data
    assert file_a.id is not None
    assert len(file_a.refresh().data.blocks) == 0

    # Blockify and watch the task go from waiting to succeeded.
    task = file_a.blockify(plugin_instance="markdown-blockifier-default-1.0")
    assert task.error is None
    assert task.task is not None
    assert task.task.state == TaskState.waiting
    task.wait()
    assert task.error is None
    assert task.task is not None
    assert task.task.state == TaskState.succeeded

    blocks = file_a.refresh().data.blocks
    assert len(blocks) == 1
    assert blocks[0].text == "This is a test."

    # A file with a markdown header splits into header + body blocks.
    file_b = steamship.upload(content="""# Header

This is a test.""").data
    assert file_b.id is not None
    assert len(file_b.refresh().data.blocks) == 0

    task = file_b.blockify(plugin_instance="markdown-blockifier-default-1.0")
    assert task.error is None
    assert task.task is not None
    task.wait()

    blocks = file_b.refresh().data.blocks
    assert len(blocks) == 2
    assert blocks[1].text == "This is a test."
    assert blocks[0].text == "Header"

    file_a.delete()
    file_b.delete()
def test_delete_index():
    """Default upsert returns the existing index; after deletion a new id is issued."""
    steamship = get_steamship_client()
    plugin_instance = PluginInstance.create(steamship, plugin_handle=_TEST_EMBEDDER).data
    index = steamship.create_index(plugin_instance=plugin_instance.handle).data
    assert index.id is not None

    # Creating with the same handle (default upsert) returns the same index.
    task = steamship.create_index(handle=index.handle, plugin_instance=plugin_instance.handle)
    assert task.error is None
    assert index.id == task.data.id

    index.delete()

    # After deletion, creating again yields a brand-new index.
    task = steamship.create_index(plugin_instance=plugin_instance.handle)
    assert task.error is None
    assert task.data is not None
    replacement = task.data
    assert index.id != replacement.id
    replacement.delete()
def test_response_post_update_can_update_task():
    """Response.post_update pushes local status changes to the server-side task."""
    client = get_steamship_client()
    task_result = create_dummy_training_task(client)
    task = task_result.task

    new_state = TaskState.failed
    new_message = "HI THERE"
    new_output = {"a": 3}
    assert task.state != new_state
    assert task.status_message != new_message
    assert task.output != new_output

    response = Response(status=task)
    response.status.state = new_state
    response.status.status_message = new_message
    response.status.output = new_output

    # Sanity check: calling refresh() resets the local task back to server state.
    task_result.refresh()
    assert task.state != new_state
    assert task.status_message != new_message
    assert task.output != new_output

    # Override again, this time routing the output through set_data.
    response.status.state = new_state
    response.status.status_message = new_message
    response.set_data(json=new_output)

    # post_update should persist the changes; a refresh now reflects them.
    response.post_update(client)
    task_result.refresh()
    assert task.state == new_state
    assert task.status_message == new_message
    assert task.output == json.dumps(new_output)