async def run():
    """Abort a synchronous push-to-space job while an upload is in flight.

    The client upload is expected to be rejected (HTTP 400) and the job
    must end up in the ABORTED phase.
    """
    target = Node('/syncdatanode')
    push = PushToSpace(target, [HTTPPut()])
    transfer = await self.sync_transfer_node(push, 200)
    upload_url = transfer.protocols[0].endpoint.url
    # NOTE(review): job id is assumed to be the last path segment of the
    # PUT endpoint URL — confirm against the service's URL scheme.
    job_id = os.path.basename(upload_url)

    set_fuzz(True)

    async def abort_shortly(job_id):
        # Give the upload a head start, then abort the job and wait for
        # it to settle in the ABORTED phase.
        await asyncio.sleep(0.5)
        await self.change_job_state(job_id, state='PHASE=ABORT',
                                    expected_status=200)
        await self.poll_job(job_id,
                            poll_until=('ABORTED', 'ERROR'),
                            expected_status='ABORTED')

    # Race the (expected-to-fail) upload against the deferred abort.
    pending = [
        asyncio.ensure_future(
            self.push_to_space(upload_url, '/tmp/datafile.dat',
                               expected_status=400)),
        asyncio.ensure_future(abort_shortly(job_id)),
    ]
    await asyncio.gather(*pending)

    set_fuzz(False)

    # The job must still report ABORTED once fuzzing is off.
    await self.poll_job(job_id,
                        poll_until=('ABORTED', 'ERROR'),
                        expected_status='ABORTED')
async def run():
    """Upload concurrently to the same data node through two jobs.

    Two EXECUTING push jobs target the same node; exactly one upload is
    expected to succeed (200) and the other to be rejected (400).
    """
    # Build the container hierarchy and the target data node.
    node1 = ContainerNode('/datanode')
    await self.create_node(node1)
    node1 = ContainerNode('/datanode/datanode1')
    await self.create_node(node1)
    node1 = Node('/datanode/datanode1/datanode2.dat')
    await self.create_node(node1)

    node = Node('/datanode/datanode1/datanode2.dat')
    push = PushToSpace(node, [HTTPPut()])

    # First job: start it and fetch its upload endpoint.
    job = await self.transfer_node(push)
    await self.change_job_state(job.job_id)
    await self.poll_job(job.job_id,
                        poll_until=('EXECUTING', 'ERROR'),
                        expected_status='EXECUTING')
    transfer = await self.get_transfer_details(job.job_id,
                                               expected_status=200)
    end1 = transfer.protocols[0].endpoint.url

    # Second job: upload to the same node.
    job = await self.transfer_node(push)
    await self.change_job_state(job.job_id)
    await self.poll_job(job.job_id,
                        poll_until=('EXECUTING', 'ERROR'),
                        expected_status='EXECUTING')
    transfer = await self.get_transfer_details(job.job_id,
                                               expected_status=200)
    end2 = transfer.protocols[0].endpoint.url

    set_fuzz(True)
    # Concurrent uploads racing for the same node.
    tasks = [
        asyncio.ensure_future(
            self.push_to_space_defer_error(end1, '/tmp/datafile.dat')),
        asyncio.ensure_future(
            self.push_to_space_defer_error(end2, '/tmp/datafile.dat')),
    ]
    finished, unfinished = await asyncio.wait(tasks)
    # BUGFIX: set_fuzz(False) used to run before `await asyncio.wait`,
    # i.e. before the scheduled coroutines ever started executing, so the
    # uploads ran with fuzzing already disabled. Disable it after the
    # wait, matching the pattern used by the sibling tests.
    set_fuzz(False)

    self.assertEqual(len(finished), 2)
    result = []
    for i in finished:
        r = await i
        result.append(r[0])
    # One upload wins, the other is rejected.
    self.assertIn(200, result)
    self.assertIn(400, result)
async def run():
    """Race a synchronous push-to-space transfer against deletion of its
    target node (transfer itself is still expected to return 200)."""
    target = Node('/syncdatanode')
    push = PushToSpace(target, [HTTPPut()])

    set_fuzz(True)

    async def delete_shortly(victim):
        # Let the transfer get going before removing the node.
        await asyncio.sleep(0.5)
        await self.delete_node(victim)

    pending = [
        asyncio.ensure_future(self.sync_transfer_node(push, 200)),
        asyncio.ensure_future(delete_shortly(target)),
    ]
    await asyncio.gather(*pending)

    set_fuzz(False)
async def run():
    """Abort a synchronous pull-from-space job mid-download.

    Uploads a file first, then starts a pull and aborts the job while the
    client is receiving; the client read is expected to fail with IOError
    and the job must end in the ABORTED phase.
    """
    target = Node('/syncdatanode')

    # Seed the node with data via a successful push.
    push = PushToSpace(target, [HTTPPut()])
    transfer = await self.sync_transfer_node(push, 200)
    upload_url = transfer.protocols[0].endpoint.url
    await self.push_to_space(upload_url, '/tmp/datafile.dat',
                             expected_status=200)

    # Start the pull and grab its download endpoint.
    pull = PullFromSpace(target, [HTTPGet()])
    transfer = await self.sync_transfer_node(pull)
    download_url = transfer.protocols[0].endpoint.url
    # NOTE(review): job id is assumed to be the last path segment of the
    # GET endpoint URL — confirm against the service's URL scheme.
    job_id = os.path.basename(download_url)

    set_fuzz(True)

    async def abort_shortly(job_id):
        # Let the download begin, then abort and wait for ABORTED.
        await asyncio.sleep(0.5)
        await self.change_job_state(job_id, state='PHASE=ABORT',
                                    expected_status=200)
        await self.poll_job(job_id,
                            poll_until=('ABORTED', 'ERROR'),
                            expected_status='ABORTED')

    pending = [
        asyncio.ensure_future(
            self.pull_from_space_defer_error(download_url,
                                             '/tmp/download/')),
        asyncio.ensure_future(abort_shortly(job_id)),
    ]
    # The client receive is expected to fail once the job is aborted.
    with self.assertRaises(IOError):
        await asyncio.gather(*pending)

    set_fuzz(False)

    await self.poll_job(job_id,
                        poll_until=('ABORTED', 'ERROR'),
                        expected_status='ABORTED')