def test_14_process_enhancements_exception(self):
    # What happens when processing an enhancement fails
    sources = EnhancementFixtureFactory.request_per_day("2001-01", 9)
    dois = ["10.1234/first", "10.1234/second", "10.1234/third"]

    # we're going to construct a series of enhancements for each doi
    for i in range(len(sources)):
        s = sources[i]
        doi_idx = i % 3     # iterate over the dois 3 times
        doi = dois[doi_idx]
        s["record"]["dc:identifier"] = [{"type": "doi", "id": doi}]
        en = Enhancement(s)
        en.save()
    time.sleep(2)

    # set up the mock
    PublicApi.publish = publish_mock

    # now run the process job back to the first day
    with self.assertRaises(TestException):
        WorkflowApi.process_enhancements()
    time.sleep(2)

    # we know this died during the 6th update request being processed,
    # so just check that the workflow state reflects that
    wfs_dao = WorkflowState()
    wfs = wfs_dao.pull("enhancements")
    assert wfs.last_request == "2001-01-05T00:00:00Z"
    assert len(wfs.already_processed) == 1
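# The test above relies on TestException and publish_mock being defined at
# module level. A minimal sketch of what they could look like, assuming the
# mock is meant to let the first five publish calls through and raise on the
# sixth (which is what leaves last_request at day 5 with one processed id);
# the real fixtures in the suite may differ.
class TestException(Exception):
    pass

PUBLISH_CALLS = []

@classmethod
def publish_mock(cls, enhancement):
    # decorated so that assigning it to PublicApi.publish behaves like the
    # classmethod it replaces
    PUBLISH_CALLS.append(enhancement.id)
    if len(PUBLISH_CALLS) >= 6:
        raise TestException("simulated failure on the 6th enhancement")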
def test_18_enhancement(self):
    # Check we can create and work with Enhancement model objects

    # first make a blank one
    req = Enhancement()

    # now make one around the fixture
    source = EnhancementFixtureFactory.example()
    req = Enhancement(source)
    pub = req.make_public_apc()
    assert isinstance(pub, PublicAPC)
    assert pub.record == req.record

    # make one with a broken source
    broken = {"whatever": "broken"}
    with self.assertRaises(dataobj.DataStructureException):
        req = Enhancement(broken)

    # now make one bit by bit
    req = Enhancement()
    req.record = source.get("record")

    # now make it broken
    req = Enhancement()
    with self.assertRaises(dataobj.DataStructureException):
        req.record = {"random": "stuff"}
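# For reference, a plausible sketch of the make_public_apc behaviour that the
# assertions above pin down: the returned object is a PublicAPC carrying the
# same record as the Enhancement. This is an assumption about the
# implementation, not the actual model code.
def make_public_apc_sketch(enhancement):
    pub = PublicAPC()
    pub.record = enhancement.record
    return pub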
@classmethod
def process_enhancements(cls):
    """
    Go through any new Enhancements (since this method last ran) and process them.

    :return:
    """
    # first, pick up our current state from storage
    workflow_dao = WorkflowState()
    wfs = workflow_dao.pull("enhancements")

    # if we don't have a current state, make one
    if wfs is None:
        wfs = WorkflowState()
        wfs.id = "enhancements"

    # get the oldest page of enhancements and process them
    dao = Enhancement()
    enhancements = dao.list_all_since(wfs.last_request)     # produces a generator

    for e in enhancements:
        try:
            # if the request was created at the time of the last request processed, it is possible it arrived
            # before or after the cut-off.  As we don't have any more than second-level granularity in the timing,
            # we also need to check to see whether it was one of the ids processed during that second
            if e.created_date == wfs.last_request and wfs.is_processed(e.id):
                # if it was created at that time, and it was one of the ones processed, we can skip it
                continue

            # if the request is from a later time, or was not processed during the last run, then do the usual
            # processing, which in this case is just to publish the data, and let the merge handle it
            PublicApi.publish(e)

            # now, revisit the timing of this request.  If the time is the same as the last request date, this is a
            # request which came in during that same second, but was not processed at the time because it was at the
            # wrong end of the second.  In that case, we just need to add the id to the list of records from that
            # second which have now been processed
            if e.created_date == wfs.last_request:
                wfs.add_processed(e.id)
            else:
                # otherwise, this is a whole new second, and we can forget everything that went before and start afresh
                wfs.last_request = e.created_date
                wfs.already_processed = [e.id]
        except:
            # persist how far we got before re-raising, so the next run can resume
            wfs.save(blocking=True)
            raise

    wfs.save(blocking=True)
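# The loop above leans on three pieces of WorkflowState: last_request,
# already_processed, and the is_processed/add_processed helpers. A minimal
# sketch of that bookkeeping, assuming already_processed is a plain list of
# ids; the real model class wraps this in the dataobj machinery.
class WorkflowStateSketch(object):
    def __init__(self):
        self.last_request = None        # timestamp of the newest record processed
        self.already_processed = []     # ids handled during that same second

    def is_processed(self, record_id):
        return record_id in self.already_processed

    def add_processed(self, record_id):
        if record_id not in self.already_processed:
            self.already_processed.append(record_id)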
def test_20_enhancement_iterator(self):
    # Check we can successfully iterate over enhancements
    sources = EnhancementFixtureFactory.request_per_day("2001-01", 10)
    for s in sources:
        req = Enhancement(s)
        req.save()
    time.sleep(2)

    dao = Enhancement()
    # set the page size small, to ensure the iterator has to work
    gen = dao.list_all_since("2001-01-01T00:00:00Z", page_size=5)
    results = [x for x in gen]
    assert len(results) == 10

    dates = [r.created_date for r in results]
    comp = deepcopy(dates)
    comp.sort()     # this puts the dates in ascending order (i.e. oldest first)

    # the point of this comparison is to show that the results came out in the
    # right order; that is, oldest first
    assert dates == comp
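# A hedged sketch of the kind of paged iterator list_all_since returns: pull
# page_size records at a time in ascending created_date order and yield them
# until a short page signals the end. The query_page helper is hypothetical;
# the real DAO issues the equivalent index query.
def list_all_since_sketch(dao, since, page_size=1000):
    offset = 0
    while True:
        page = dao.query_page(since=since, size=page_size, frm=offset)  # hypothetical helper
        for record in page:
            yield record
        if len(page) < page_size:
            break
        offset += page_size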
def test_07_check_jobs(self):
    # Ensure that we can check existing jobs correctly
    acc = MonitorUKAccount()
    acc.email = "*****@*****.**"
    acc.lantern_email = "*****@*****.**"
    acc.lantern_api_key = "123456789"
    acc.save()

    lj1 = LanternJob()
    lj1.job_id = "111111111"
    lj1.account = acc.id
    lj1.status = "complete"
    lj1.save()

    lj2 = LanternJob()
    lj2.job_id = "222222222"
    lj2.account = acc.id
    lj2.status = "active"
    lj2.last_updated = dates.format(dates.before_now(5000))
    lj2.save(updated=False)

    lj3 = LanternJob()
    lj3.job_id = "333333333"
    lj3.account = acc.id
    lj3.status = "active"
    lj3.last_updated = dates.format(dates.before_now(5000))
    lj3.save(updated=False)

    lj4 = LanternJob()
    lj4.job_id = "444444444"
    lj4.account = acc.id
    lj4.status = "active"
    lj4.last_updated = dates.format(dates.before_now(5000))
    lj4.save(updated=False)

    lj5 = LanternJob()
    lj5.job_id = "555555555"
    lj5.account = acc.id
    lj5.status = "active"
    lj5.save(blocking=True)

    LanternApi.check_jobs()

    # check that the progress requests we expected were made
    assert len(PROGRESS_REQUESTS) == 3
    assert "222222222" in PROGRESS_REQUESTS
    assert "333333333" in PROGRESS_REQUESTS
    assert "444444444" in PROGRESS_REQUESTS

    # check that the job which received an error was just ignored
    dao = LanternJob()
    ignored = dao.pull(lj4.id)
    assert ignored.last_updated == lj4.last_updated
    assert ignored.status == "active"

    # check that the record which was not complete was touched
    touched = dao.pull(lj2.id)
    assert touched.last_updated != lj2.last_updated
    assert touched.status == "active"

    # check that results were requested only for one item
    assert len(RESULTS_REQUESTS) == 1
    assert "333333333" in RESULTS_REQUESTS

    # wait for a bit, so that enhancements have time to go in
    time.sleep(2)

    # check that an enhancement was registered
    edao = Enhancement()
    gen = edao.iterall()
    enhancements = [e for e in gen]
    assert len(enhancements) == 1

    result = LanternFixtureFactory.xwalk_result()
    assert enhancements[0].data["record"] == result["record"]
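# test_07 assumes the Lantern client calls have been monkey-patched to record
# the job ids they are asked about. A sketch of mocks consistent with the
# assertions above (jobs 2-4 polled, job 3 complete, job 4 erroring); the
# behaviours are inferred from the test, not taken from the suite itself, and
# raising here merely stands in for whatever error the real client surfaces.
PROGRESS_REQUESTS = []
RESULTS_REQUESTS = []

def get_progress_mock(job_id):
    PROGRESS_REQUESTS.append(job_id)
    if job_id == "444444444":
        raise RuntimeError("simulated upstream error")  # this job is ignored
    status = "complete" if job_id == "333333333" else "active"
    return {"status": status}

def get_results_mock(job_id):
    RESULTS_REQUESTS.append(job_id)
    # the real mock would return a Lantern results payload, which the
    # crosswalk turns into the enhancement asserted on above
    return LanternFixtureFactory.lantern_result()   # hypothetical fixture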
def test_13_process_enhancements_cycle(self):
    # Run through the process of processing an enhancement
    source = EnhancementFixtureFactory.example()
    if "id" in source:
        del source["id"]

    pub_dao = PublicAPC()
    wfs_dao = WorkflowState()

    # first make a public record for us to enhance
    first = PublicAPCFixtureFactory.example()
    del first["record"]["dc:title"]
    pub = PublicAPC(first)
    pub.save(blocking=True)

    # now create an enhancement on the record
    second = deepcopy(source)
    second["record"]["dc:title"] = "Update"
    second["created_date"] = "2002-01-01T00:00:00Z"
    en = Enhancement(second)
    en.public_id = pub.id
    en.save(blocking=True)

    # run the job
    WorkflowApi.process_enhancements()
    time.sleep(2)

    # check that the workflow state was created
    wfs = wfs_dao.pull("enhancements")
    assert wfs is not None
    assert wfs.last_request == en.created_date
    assert wfs.already_processed == [en.id]

    # check the public record was updated
    pubs = pub_dao.find_by_doi("10.1234/me")
    assert len(pubs) == 1
    assert pubs[0].record.get("dc:title") == "Update"

    # now run an update with the same date, to observe the difference in the workflow state
    third = deepcopy(source)
    third["record"]["dc:title"] = "Update 2"
    third["created_date"] = "2002-01-01T00:00:00Z"
    en2 = Enhancement(third)
    en2.public_id = pub.id
    en2.save(blocking=True)

    # run the job again
    WorkflowApi.process_enhancements()
    time.sleep(2)

    # check the public record was not updated, since the data was already present
    pubs = pub_dao.find_by_doi("10.1234/me")
    assert len(pubs) == 1
    assert pubs[0].record.get("dc:title") == "Update"

    # check that the workflow state was updated
    wfs = wfs_dao.pull("enhancements")
    assert wfs is not None
    assert wfs.last_request == en2.created_date
    assert wfs.already_processed == [en.id, en2.id]     # processed records should have been appended