def test_finally_execution():
    """A failing transfer must not prevent cleanup of its parallel peers."""
    seg_bad = FakeWalSegment('1' * 8 * 3)
    seg_ok = FakeWalSegment('2' * 8 * 3)

    class CleanupCheckingUploader(object):
        """Uploader that records whether its finally-block ran."""

        def __init__(self):
            self.cleaned_up = False

        def __call__(self, segment):
            # Branches are keyed on object identity, so order is irrelevant.
            if segment is seg_bad:
                raise Explosion('fail')
            elif segment is seg_ok:
                try:
                    # Block forever; only the group aborting this greenlet
                    # (on the other segment's failure) can unwind us.
                    while True:
                        gevent.sleep(0.1)
                finally:
                    self.cleaned_up = True
            else:
                assert False, 'Expect only two segments'

            segment._uploaded = True
            return segment

    uploader = CleanupCheckingUploader()
    group = worker.WalTransferGroup(uploader)
    group.start(seg_ok)
    group.start(seg_bad)

    with pytest.raises(Explosion):
        group.join()

    assert uploader.cleaned_up is True
def test_multi_pipeline_fail():
    """Model a failure of the pipelined segments under concurrency."""
    group = worker.WalTransferGroup(FakeWalUploader())
    segments = list(prepare_multi_upload_segments())

    boom = Explosion('fail')
    victim_index = 2
    segments[victim_index]._upload_explosive = boom

    for segment in segments:
        group.start(segment)

    with pytest.raises(Explosion) as e:
        group.join()
    assert e.value is boom

    assert failed(segments[victim_index])

    for idx, segment in enumerate(segments):
        if idx == victim_index:
            continue
        # Given race conditions in conjunction with exceptions --
        # which will abort waiting for other greenlets to finish --
        # one can't know very much about the final state of segment.
        assert indeterminate(segment)
def test_simple_upload():
    """Model a case where there is no concurrency while uploading."""
    group = worker.WalTransferGroup(FakeWalUploader())
    segment = FakeWalSegment('1' * 8 * 3, explicit=True)

    group.start(segment)
    group.join()

    assert success(segment)
def test_start_after_join():
    """Starting a transfer after .join violates a group invariant."""
    group = worker.WalTransferGroup(FakeWalUploader())
    group.join()

    # Any segment added past this point must be rejected.
    segment = FakeWalSegment('arbitrary')
    with pytest.raises(UserCritical):
        group.start(segment)
def test_mark_done_fault():
    """Exercise exception handling from .mark_done()"""
    group = worker.WalTransferGroup(FakeWalUploader())

    boom = Explosion('boom')
    segment = FakeWalSegment('arbitrary', mark_done_explosive=boom)
    group.start(segment)

    with pytest.raises(Explosion) as e:
        group.join()

    # The very exception planted in mark_done must surface from join.
    assert e.value is boom
def test_simple_fail():
    """Model a simple failure in the non-concurrent case."""
    group = worker.WalTransferGroup(FakeWalUploader())

    boom = Explosion('fail')
    segment = FakeWalSegment('1' * 8 * 3, explicit=True,
                             upload_explosive=boom)
    group.start(segment)

    with pytest.raises(Explosion) as e:
        group.join()

    assert e.value is boom
    assert failed(segment)
def test_multi_upload():
    """Model a case with upload concurrency."""
    group = worker.WalTransferGroup(FakeWalUploader())
    segments = list(prepare_multi_upload_segments())

    # "Start" fake uploads, then wait for all of them to complete.
    for segment in segments:
        group.start(segment)
    group.join()

    # Check invariants on the non-explicit segments.
    for segment in segments:
        assert success(segment)
def test_multi_explicit_fail():
    """Model a failure of the explicit segment under concurrency."""
    group = worker.WalTransferGroup(FakeWalUploader())
    segments = list(prepare_multi_upload_segments())

    boom = Explosion('fail')
    explicit, rest = segments[0], segments[1:]
    explicit._upload_explosive = boom

    for segment in segments:
        group.start(segment)

    with pytest.raises(Explosion) as e:
        group.join()
    assert e.value is boom

    # The explicit segment fails; the others still complete.
    assert failed(explicit)
    for segment in rest:
        assert success(segment)
def wal_s3_archive(self, wal_path, concurrency=1):
    """
    Uploads a WAL file to S3

    This code is intended to typically be called from Postgres's
    archive_command feature.

    :param wal_path: path of the WAL segment Postgres asked us to archive.
    :param concurrency: maximum number of segments to upload in parallel,
        including the explicitly requested one.  Defaults to 1
        (no opportunistic extra uploads).
    """
    # Upload the segment expressly indicated.  It's special
    # relative to other uploads when parallel wal-push is enabled,
    # in that it's not desirable to tweak its .ready/.done files
    # in archive_status.
    xlog_dir = path.dirname(wal_path)
    segment = worker.WalSegment(wal_path, explicit=True)
    uploader = s3_worker.WalUploader(self.aws_access_key_id,
                                     self.aws_secret_access_key,
                                     self.s3_prefix, self.gpg_key_id)
    group = worker.WalTransferGroup(uploader)
    group.start(segment)

    # Upload any additional wal segments up to the specified
    # concurrency by scanning the Postgres archive_status
    # directory.
    started = 1
    seg_stream = worker.WalSegment.from_ready_archive_status(xlog_dir)
    while started < concurrency:
        try:
            # Use the builtin next() rather than the Python-2-only
            # .next() method so this also runs on Python 3.
            other_segment = next(seg_stream)
        except StopIteration:
            break

        # Skip the explicit segment if the scan happens to yield it;
        # it is already being transferred.
        if other_segment.path != wal_path:
            group.start(other_segment)
            started += 1

    # Wait for uploads to finish.
    group.join()