def test_unpublish_all_versions_by_handles_ok(self):
    """Unpublishing all versions, where solr provides the dataset handles,
    must send one unpublish-one-version message per handle (without a
    version number)."""

    # Test variables:
    dataset_handles = [PREFIX_NO_HDL + '/bla', PREFIX_NO_HDL + '/blub']

    # Preparations: coupler whose solr mock returns the handles,
    # with the rabbit module mocked so messages can be inspected.
    testcoupler = TESTHELPERS.get_coupler()
    TESTHELPERS.patch_solr_returns_list_of_datasets_and_versions(
        testcoupler, dataset_handles, None)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    args = TESTHELPERS.get_args_for_unpublish_all()
    assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
        coupler=testcoupler, **args)

    # Run code to be tested:
    assistant.unpublish_all_dataset_versions()

    # Check result: one message per handle, each without a version number.
    for i, handle in enumerate(dataset_handles):
        expected = TESTHELPERS.get_rabbit_message_unpub_one()
        expected["handle"] = handle
        del expected["version_number"]
        received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, i)
        self.assertTrue(utils.is_json_same(expected, received),
                        error_message(expected, received))
def test_normal_publication_with_pos_consis_check(self):
    """Publication with solr available: the consistency check passes
    (solr returns the same file list) and the normal dataset and file
    messages are sent."""

    # Preparations: solr mock returns the previous file list,
    # rabbit is mocked.
    file_args = TESTHELPERS.get_args_for_adding_file()
    testcoupler = TESTHELPERS.get_coupler()
    TESTHELPERS.patch_solr_returns_previous_files(testcoupler, [FILEHANDLE_HDL])
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    ds_args = TESTHELPERS.get_args_for_publication_assistant()
    assistant = DatasetPublicationAssistant(coupler=testcoupler, **ds_args)

    # Run code to be tested:
    assistant.add_file(**file_args)
    assistant.dataset_publication_finished()

    # Check result (dataset message at index 0, file message at index 1):
    expected_messages = [
        TESTHELPERS.get_rabbit_message_publication_dataset(),
        TESTHELPERS.get_rabbit_message_publication_file(),
    ]
    for i, expected in enumerate(expected_messages):
        received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, i)
        self.assertTrue(utils.is_json_same(expected, received),
                        error_message(expected, received))
def test_add_file_without_hdl_in_handle(self):
    """A file handle without the 'hdl:' prefix must still result in the
    normal dataset and file publication messages."""

    # Preparations (solr switched off, so no consistency check):
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    ds_args = TESTHELPERS.get_args_for_publication_assistant()
    assistant = DatasetPublicationAssistant(coupler=testcoupler, **ds_args)
    file_args = TESTHELPERS.get_args_for_adding_file()
    file_args['file_handle'] = FILEHANDLE_NO_HDL

    # Run code to be tested:
    assistant.add_file(**file_args)
    assistant.dataset_publication_finished()

    # Check result (dataset message at index 0, file message at index 1):
    expected_messages = [
        TESTHELPERS.get_rabbit_message_publication_dataset(),
        TESTHELPERS.get_rabbit_message_publication_file(),
    ]
    for i, expected in enumerate(expected_messages):
        received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, i)
        self.assertTrue(utils.is_json_same(expected, received),
                        error_message(expected, received))
def test_datacart_several_times_same_datasets(self):
    """Creating a data cart PID several times for the same set of dataset
    keys must yield the same PID each time, with one message per call."""

    # Test variables. Note: in all three cases the same datasets are
    # passed (same keys), so the same data cart PID has to be
    # created/updated each time.
    contents = [
        {'foo': 'foo', 'bar': 'bar'},
        {'foo': 'foo', 'bar': None},
        {'foo': 'foo', 'bar': 'hdl:bar'},
    ]

    # Preparations: assistant with a patched coupler.
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    assistant = esgfpid.assistant.datacart.DataCartAssistant(
        prefix=PREFIX_NO_HDL,
        coupler=testcoupler)

    # Run code to be tested:
    pids = [assistant.make_data_cart_pid(content) for content in contents]

    # Check result: every message carries the same handle,
    # but each carries its own cart content.
    expected_handle = PREFIX_WITH_HDL + "/b597a79e-1dc7-3d3f-b689-75ac5a78167f"
    for i, content in enumerate(contents):
        expected = {
            "handle": expected_handle,
            "operation": "shopping_cart",
            "message_timestamp": "anydate",
            "data_cart_content": content,
            "ROUTING_KEY": PREFIX_FOR_ROUTINGKEY + '.HASH.fresh.datacart'
        }
        received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, i)
        self.assertTrue(utils.is_json_same(expected, received),
                        error_message(expected, received))

    # Check if all the handles are the same:
    self.assertTrue(pids[0] == pids[1], 'Pids 1&2 are not the same.')
    self.assertTrue(pids[0] == pids[2], 'Pids 1&3 are not the same.')
def test_unpublish_all_versions_by_version_numbers_ok(self):
    """Unpublishing all versions, where solr provides version numbers
    (but no handles), must send one unpublish-one-version message per
    version number."""

    # Test variables:
    version_numbers = [DS_VERSION, DS_VERSION2]

    # Preparations: solr mock returns the version numbers,
    # rabbit is mocked.
    testcoupler = TESTHELPERS.get_coupler()
    TESTHELPERS.patch_solr_returns_list_of_datasets_and_versions(
        testcoupler, None, version_numbers)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    args = TESTHELPERS.get_args_for_unpublish_all()
    assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
        coupler=testcoupler, **args)

    # Run code to be tested:
    assistant.unpublish_all_dataset_versions()

    # Check result: one message per (handle, version) pair.
    handle_version_pairs = [
        (DATASETHANDLE_HDL, DS_VERSION),
        (DATASETHANDLE_HDL2, DS_VERSION2),
    ]
    for i, (handle, version) in enumerate(handle_version_pairs):
        expected = {
            "operation": "unpublish_one_version",
            "aggregation_level": "dataset",
            "message_timestamp": "anydate",
            "data_node": DATA_NODE,
            "handle": handle,
            "ROUTING_KEY": ROUTING_KEY_BASIS + 'unpublication.one',
            "drs_id": DRS_ID,
            "version_number": version
        }
        received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, i)
        self.assertTrue(utils.is_json_same(expected, received),
                        error_message(expected, received))
def test_normal_publication_sev_files_ok(self):
    """Publishing a dataset with two files must send one dataset message
    (listing both file handles) and one message per file."""

    # Test variables:
    first_handle = PREFIX_WITH_HDL + '/456'
    second_handle = PREFIX_WITH_HDL + '/789'

    # Preparations (solr switched off, so no consistency check):
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    ds_args = TESTHELPERS.get_args_for_publication_assistant()
    assistant = DatasetPublicationAssistant(coupler=testcoupler, **ds_args)
    file_args1 = TESTHELPERS.get_args_for_adding_file()
    file_args1['file_handle'] = first_handle
    file_args2 = TESTHELPERS.get_args_for_adding_file()
    file_args2['file_handle'] = second_handle

    # Run code to be tested:
    assistant.add_file(**file_args1)
    assistant.add_file(**file_args2)
    assistant.dataset_publication_finished()

    # Check result (dataset message references both file handles):
    expected = TESTHELPERS.get_rabbit_message_publication_dataset()
    expected['files'] = [second_handle, first_handle]
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, 0)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))

    # Check result (one file message per added file, in order):
    for i, handle in enumerate([first_handle, second_handle], start=1):
        expected = TESTHELPERS.get_rabbit_message_publication_file()
        expected['handle'] = handle
        received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, i)
        self.assertTrue(utils.is_json_same(expected, received),
                        error_message(expected, received))
def test_datacart_for_several_datasets_with_pids(self):
    """A data cart PID for several datasets (all with PIDs) must trigger
    exactly one shopping-cart message containing the full cart content."""

    # Test variables:
    cart_content = {'foo': 'foo', 'bar': 'bar'}

    # Preparations: assistant with a patched coupler.
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    assistant = esgfpid.assistant.datacart.DataCartAssistant(
        prefix=PREFIX_NO_HDL,
        coupler=testcoupler)

    # Run code to be tested:
    assistant.make_data_cart_pid(cart_content)

    # Check result:
    expected = {
        "handle": PREFIX_WITH_HDL + '/b597a79e-1dc7-3d3f-b689-75ac5a78167f',
        "operation": "shopping_cart",
        "message_timestamp": "anydate",
        "data_cart_content": {'foo': 'foo', 'bar': 'bar'},
        "ROUTING_KEY": PREFIX_FOR_ROUTINGKEY + '.HASH.fresh.datacart'
    }
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))
def test_add_errata_id_one_as_list(self):
    """Adding a single errata id, passed as a one-element list, must
    send an add_errata_ids message carrying that list."""

    # Preparations:
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    assistant = ErrataAssistant(prefix=PREFIX_NO_HDL, coupler=testcoupler)

    # Run code to be tested:
    assistant.add_errata_ids(
        drs_id=DRS_ID,
        version_number=DS_VERSION,
        errata_ids=[ERRATA])

    # Check result:
    expected = {
        "handle": DATASETHANDLE_HDL,
        "operation": "add_errata_ids",
        "message_timestamp": "anydate",
        "errata_ids": [ERRATA],
        "ROUTING_KEY": PREFIX_FOR_ROUTINGKEY + '.HASH.fresh.errata-add',
        "drs_id": DRS_ID,
        "version_number": DS_VERSION
    }
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))
def test_remove_errata_id_several(self):
    """Removing several errata ids must send a remove_errata_ids
    message carrying the whole list."""

    # Preparations:
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    assistant = ErrataAssistant(prefix=PREFIX_NO_HDL, coupler=testcoupler)

    # Run code to be tested:
    assistant.remove_errata_ids(
        drs_id=DRS_ID,
        version_number=DS_VERSION,
        errata_ids=ERRATA_SEVERAL)

    # Check result:
    expected = {
        "handle": DATASETHANDLE_HDL,
        "operation": "remove_errata_ids",
        "message_timestamp": "anydate",
        "errata_ids": ERRATA_SEVERAL,
        "ROUTING_KEY": ROUTING_KEY_BASIS + 'errata.remove',
        "drs_id": DRS_ID,
        "version_number": DS_VERSION
    }
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))
def test_make_message_unpublish_one_version_ok(self):
    """messages.unpublish_one_version must pass all values through,
    rename 'dataset_handle' to 'handle' and 'timestamp' to
    'message_timestamp', and add routing key, aggregation level and
    operation."""

    # Test variables:
    args_dict = dict(
        dataset_handle='abc/def',
        data_node='dkrz.de',
        timestamp='todayish',
        drs_id='mytest')

    # Run code to be tested:
    received_message = messages.unpublish_one_version(**args_dict)

    # Check result: input values plus routing info, with renamed keys.
    expected = copy.deepcopy(args_dict)
    expected['ROUTING_KEY'] = PREFIX_FOR_ROUTINGKEY + '.HASH.fresh.unpubli-onevers'
    expected['aggregation_level'] = 'dataset'
    expected['operation'] = 'unpublish_one_version'
    expected['handle'] = expected.pop('dataset_handle')
    expected['message_timestamp'] = expected.pop('timestamp')
    self.assertTrue(utils.is_json_same(expected, received_message),
                    error_message(expected, received_message))
def test_make_message_remove_errata_ids_ok(self):
    """messages.remove_errata_ids_message must pass all values through,
    rename 'dataset_handle' to 'handle' and 'timestamp' to
    'message_timestamp', and add routing key and operation."""

    # Test variables:
    args_dict = dict(
        dataset_handle='abc/def',
        errata_ids='dkrz.de',
        timestamp='todayish',
        drs_id='drs_foo',
        version_number='vers_foo')

    # Run code to be tested:
    received_message = messages.remove_errata_ids_message(**args_dict)

    # Check result: input values plus routing info, with renamed keys.
    # (drs_id and version_number are passed through unchanged.)
    expected = copy.deepcopy(args_dict)
    expected['ROUTING_KEY'] = PREFIX_FOR_ROUTINGKEY + '.HASH.fresh.errata-rem'
    expected['operation'] = 'remove_errata_ids'
    expected['handle'] = expected.pop('dataset_handle')
    expected['message_timestamp'] = expected.pop('timestamp')
    self.assertTrue(utils.is_json_same(expected, received_message),
                    error_message(expected, received_message))
def test_make_message_publication_dataset_replica_ok(self):
    """messages.publish_dataset with is_replica=True must use the
    replica routing key and perform the usual key renames."""

    # Test variables:
    args_dict = self.__get_args_dict_dataset()
    args_dict['is_replica'] = True

    # Run code to be tested:
    received_message = messages.publish_dataset(**args_dict)

    # Check result: input values plus routing info, with renamed keys.
    expected = copy.deepcopy(args_dict)
    expected['ROUTING_KEY'] = PREFIX_FOR_ROUTINGKEY + '.HASH.fresh.publi-ds-repli'
    expected['aggregation_level'] = 'dataset'
    expected['operation'] = 'publish'
    expected['handle'] = expected.pop('dataset_handle')
    expected['message_timestamp'] = expected.pop('timestamp')
    expected['files'] = expected.pop('list_of_files')
    self.assertTrue(utils.is_json_same(expected, received_message),
                    error_message(expected, received_message))
def test_make_data_cart_pid(self):
    """The connector must create the same data cart PID for contents
    with the same keys, sending one message per call."""

    # Test variables: same dataset keys, different (partial) PID values.
    content1 = {'foo': 'foo', 'bar': 'bar'}
    content2 = {'foo': 'foo', 'bar': None}

    # Preparations: patched connector, so no message is actually sent
    # and the created messages can be retrieved.
    testconnector = TESTHELPERS.get_connector()
    TESTHELPERS.patch_with_rabbit_mock(testconnector)

    # Run code to be tested: create two data cart PIDs.
    pid1 = testconnector.create_data_cart_pid(content1)
    pid2 = testconnector.create_data_cart_pid(content2)

    # Check result: both messages carry the same handle,
    # each with its own cart content.
    expected_handle = PREFIX_WITH_HDL + "/b597a79e-1dc7-3d3f-b689-75ac5a78167f"
    for i, content in enumerate([content1, content2]):
        expected = {
            "handle": expected_handle,
            "operation": "shopping_cart",
            "message_timestamp": "anydate",
            "data_cart_content": content,
            "ROUTING_KEY": PREFIX_FOR_ROUTINGKEY + '.HASH.fresh.datacart'
        }
        received = TESTHELPERS.get_received_message_from_rabbitmock(testconnector, i)
        self.assertTrue(utils.is_json_same(expected, received),
                        error_message(expected, received))

    # Check that both PIDs are identical:
    self.assertTrue(pid1 == pid2, 'Both pids are not the same.')
def test_normal_publication_replica_flag_string_ok(self):
    """Publication with is_replica=True must mark both the dataset and
    the file message as replica and use the replica routing keys."""

    # Preparations (solr switched off, no consistency check):
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    ds_args = TESTHELPERS.get_args_for_publication_assistant()
    ds_args['is_replica'] = True
    assistant = DatasetPublicationAssistant(coupler=testcoupler, **ds_args)
    file_args = TESTHELPERS.get_args_for_adding_file()

    # Run code to be tested:
    assistant.add_file(**file_args)
    assistant.dataset_publication_finished()

    # Check result (dataset message):
    expected = TESTHELPERS.get_rabbit_message_publication_dataset()
    expected['is_replica'] = True
    expected['ROUTING_KEY'] = ROUTING_KEY_BASIS + 'publication.dataset.replica'
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, 0)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))

    # Check result (file message):
    expected = TESTHELPERS.get_rabbit_message_publication_file()
    expected['is_replica'] = True
    expected['ROUTING_KEY'] = ROUTING_KEY_BASIS + 'publication.file.replica'
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, 1)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))
def test_normal_publication_without_solr_access(self):
    '''Solr is switched off and raises an error, but this is caught:
    the consistency check is not run, while the rest of the
    publication proceeds as usual.'''

    # Preparations: solr mock raises SolrSwitchedOff, rabbit is mocked.
    testcoupler = TESTHELPERS.get_coupler()
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    TESTHELPERS.patch_solr_raises_error(testcoupler, SolrSwitchedOff)
    ds_args = TESTHELPERS.get_args_for_publication_assistant()
    assistant = DatasetPublicationAssistant(coupler=testcoupler, **ds_args)
    file_args = TESTHELPERS.get_args_for_adding_file()

    # Run code to be tested:
    assistant.add_file(**file_args)
    assistant.dataset_publication_finished()

    # Check result (dataset message at index 0, file message at index 1):
    expected_messages = [
        TESTHELPERS.get_rabbit_message_publication_dataset(),
        TESTHELPERS.get_rabbit_message_publication_file(),
    ]
    for i, expected in enumerate(expected_messages):
        received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, i)
        self.assertTrue(utils.is_json_same(expected, received),
                        error_message(expected, received))
def test_unpublish_one_version_by_version_number(self):
    """Unpublishing one version, identified by its version number,
    must send the expected unpublish-one-version message."""

    # Preparations:
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    args = TESTHELPERS.get_args_for_unpublish_one()
    assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
        coupler=testcoupler, **args)

    # Run code to be tested:
    assistant.unpublish_one_dataset_version(version_number=DS_VERSION)

    # Check result:
    expected = TESTHELPERS.get_rabbit_message_unpub_one()
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))
def test_unpublish_all_versions_solr_off_consumer_must_find_versions_ok(self):
    """With solr unavailable, unpublishing all versions must delegate
    the version lookup to the consumer by including the consumer's
    solr URL in the message."""

    # Preparations: no solr at all, rabbit is mocked.
    testcoupler = TESTHELPERS.get_coupler(solr_url=None, solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    args = TESTHELPERS.get_args_for_unpublish_all()
    args['consumer_solr_url'] = SOLR_URL_CONSUMER
    assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
        coupler=testcoupler, **args)

    # Run code to be tested:
    assistant.unpublish_all_dataset_versions()

    # Check result: the message carries the consumer's solr URL.
    expected = TESTHELPERS.get_rabbit_message_unpub_all()
    expected['consumer_solr_url'] = SOLR_URL_CONSUMER
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))
def test_unpublish_one_version_by_handle(self):
    """Unpublishing one version, identified by its dataset handle
    (no drs_id given), must send the expected message."""

    # Preparations:
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    args = TESTHELPERS.get_args_for_unpublish_one()
    args['drs_id'] = None  # not passed
    assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
        coupler=testcoupler, **args)

    # Run code to be tested.
    # The handle is redundant, but will be checked.
    assistant.unpublish_one_dataset_version(dataset_handle=DATASETHANDLE_HDL)

    # Check result:
    expected = TESTHELPERS.get_rabbit_message_unpub_one_by_handle()
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))
def test_make_message_publication_file_ok(self):
    """messages.publish_file must pass all values through, rename
    'file_handle' to 'handle' and 'timestamp' to 'message_timestamp',
    and add routing key, aggregation level and operation."""

    # Test variables:
    args_dict = self.__get_args_dict_file()

    # Run code to be tested:
    received_message = messages.publish_file(**args_dict)

    # Check result: input values plus routing info, with renamed keys.
    expected = copy.deepcopy(args_dict)
    expected['ROUTING_KEY'] = ROUTING_KEY_BASIS + 'publication.file.orig'
    expected['aggregation_level'] = 'file'
    expected['operation'] = 'publish'
    expected['handle'] = expected.pop('file_handle')
    expected['message_timestamp'] = expected.pop('timestamp')
    self.assertTrue(utils.is_json_same(expected, received_message),
                    error_message(expected, received_message))
def test_add_file_fileversion_as_integer_filesize_as_string(self):
    """Passing file_version as an int and file_size as a string must
    still produce the normal file publication message."""

    # Preparations:
    testcoupler = TESTHELPERS.get_coupler(solr_switched_off=True)
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    ds_args = TESTHELPERS.get_args_for_publication_assistant()
    assistant = DatasetPublicationAssistant(coupler=testcoupler, **ds_args)
    file_args = TESTHELPERS.get_args_for_adding_file()
    file_args['file_version'] = int(file_args['file_version'])
    file_args['file_size'] = str(file_args['file_size'])

    # Run code to be tested:
    assistant.add_file(**file_args)
    assistant.dataset_publication_finished()

    # Check result (file message only, at index 1):
    expected = TESTHELPERS.get_rabbit_message_publication_file()
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, 1)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))
def test_make_message_unpublish_allversions_consumer_must_find_versions_ok(
        self):
    """messages.unpublish_allversions_consumer_must_find_versions must
    pass all values through, rename 'timestamp' to 'message_timestamp',
    and add routing key, aggregation level and operation."""

    # Test variables:
    args_dict = dict(
        drs_id='abc',
        data_node='dkrz.de',
        timestamp='todayish')

    # Run code to be tested:
    received_message = messages.unpublish_allversions_consumer_must_find_versions(
        **args_dict)

    # Check result: input values plus routing info, with renamed key.
    expected = copy.deepcopy(args_dict)
    expected['ROUTING_KEY'] = ROUTING_KEY_BASIS + 'unpublication.all'
    expected['aggregation_level'] = 'dataset'
    expected['operation'] = 'unpublish_all_versions'
    expected['message_timestamp'] = expected.pop('timestamp')
    self.assertTrue(utils.is_json_same(expected, received_message),
                    error_message(expected, received_message))
def test_normal_publication_solr_returns_none(self):
    """If the solr mock returns None for the previous file list (which
    should never happen in reality), publication must still complete
    and send the normal dataset message."""

    # Preparations:
    testcoupler = TESTHELPERS.get_coupler()
    TESTHELPERS.patch_with_rabbit_mock(testcoupler)
    # Make the mock return None - this should never happen in reality!
    TESTHELPERS.patch_solr_returns_previous_files(testcoupler, None)
    ds_args = TESTHELPERS.get_args_for_publication_assistant()
    assistant = DatasetPublicationAssistant(coupler=testcoupler, **ds_args)
    file_args = TESTHELPERS.get_args_for_adding_file()

    # Run code to be tested:
    assistant.add_file(**file_args)
    assistant.dataset_publication_finished()

    # Check result (dataset message):
    expected = TESTHELPERS.get_rabbit_message_publication_dataset()
    received = TESTHELPERS.get_received_message_from_rabbitmock(testcoupler, 0)
    self.assertTrue(utils.is_json_same(expected, received),
                    error_message(expected, received))