Example #1
    async def _process_standard_upload(self, queue: asyncio.LifoQueue, session: aiohttp.ClientSession,
                                       presigned_request: PresignedS3Upload, progress_update_fn: Callable) -> None:
        """Method to handle the standard single request upload workflow.

        If a presigned URL has not been generated, fetch it and requeue the request; if it has, PUT the file contents.

        Args:
            queue: The current work queue
            session: The current aiohttp session
            presigned_request: the current PresignedS3Upload object to process
            progress_update_fn: A callable with arg "completed_bytes" (int) indicating how many bytes have been
                                uploaded since the last call

        Returns:
            None
        """
        if not presigned_request.is_presigned:
            # Fetch the signed URL
            await presigned_request.get_presigned_s3_url(session)
            queue.put_nowait(presigned_request)
        else:
            # Process S3 Upload
            try:
                await presigned_request.put_object(session, progress_update_fn)
                self.successful_requests.append(presigned_request)
                presigned_request.remove_compressed_file()
            except Exception:
                presigned_request.remove_compressed_file()
                raise
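
Each request passes through the queue twice: the first pass fetches a presigned URL and requeues the item; the second performs the transfer. A minimal, self-contained sketch of that two-pass pattern (all names here are illustrative stand-ins, not the project's API):

import asyncio

class FakeRequest:
    def __init__(self, name: str) -> None:
        self.name = name
        self.is_presigned = False

async def consumer(queue: asyncio.LifoQueue) -> None:
    while True:
        request: FakeRequest = await queue.get()
        if not request.is_presigned:
            request.is_presigned = True    # stands in for get_presigned_s3_url()
            queue.put_nowait(request)      # requeue for the upload pass
        else:
            print(f"uploading {request.name}")  # stands in for put_object()
        queue.task_done()

async def main() -> None:
    queue: asyncio.LifoQueue = asyncio.LifoQueue()
    for name in ("a", "b", "c"):
        queue.put_nowait(FakeRequest(name))
    worker = asyncio.create_task(consumer(queue))
    await queue.join()   # each item is counted twice: once per pass
    worker.cancel()

asyncio.run(main())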
Example #2
    async def _pull_object_consumer(self, queue: asyncio.LifoQueue, session: aiohttp.ClientSession,
                                    progress_update_fn: Callable) -> None:
        """Async Queue consumer worker for downloading objects from the object service/s3

        Args:
            queue: The current work queue
            session: The current aiohttp session
            progress_update_fn: A callable with arg "completed_bytes" (int) indicating how many bytes have been
                                downloaded since the last call

        Returns:
            None
        """
        while True:
            presigned_request: PresignedS3Download = await queue.get()

            try:
                if not presigned_request.is_presigned:
                    # Fetch the signed URL
                    await presigned_request.get_presigned_s3_url(session)
                    queue.put_nowait(presigned_request)
                else:
                    # Process S3 Download
                    await presigned_request.get_object(session, progress_update_fn)
                    self.successful_requests.append(presigned_request)

            except Exception as err:
                logger.exception(err)
                self.failed_requests.append(presigned_request)

            # Notify the queue that the item has been processed
            queue.task_done()
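
One detail worth noting: the except branch records the failure and still falls through to queue.task_done(). Since queue.join() only returns once every put has been balanced by a task_done(), skipping it on error would hang the join. A self-contained sketch of that driving pattern (illustrative names, not the project's orchestration code):

import asyncio

async def safe_worker(queue: asyncio.LifoQueue) -> None:
    while True:
        item = await queue.get()
        try:
            if item < 0:
                raise ValueError(item)   # simulate a failed download
            print("done", item)
        except Exception as err:
            print("failed", err)         # record the failure, keep consuming
        queue.task_done()                # always runs, so join() cannot hang

async def main() -> None:
    queue: asyncio.LifoQueue = asyncio.LifoQueue()
    for i in (1, -2, 3):
        queue.put_nowait(i)
    worker = asyncio.create_task(safe_worker(queue))
    await queue.join()
    worker.cancel()

asyncio.run(main())
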
Example #3
class ScheduleRunner:
	"""
	This class is itself a queue: It will drop elements out of Schedule.queue as they are taken.
	"""
	def __init__(self, schedule: Schedule) -> None:
		"""Stimulus_list is in MILLISECONDS."""
		self.stimulus_list = schedule.stimulus_list
		self.queue = LifoQueue()
		self.time_range = range(0, len(self.stimulus_list))
		self.n_ms_total = schedule.total_ms
		# tuples sort lexicographically, so this sorts by index_ms first;
		# reversed order + LIFO queue means the earliest stimulus pops first
		for index_ms, stimulus in sorted(schedule.stimulus_list, key=lambda x: x[0], reverse=True):
			self.queue.put_nowait((index_ms / 1000, stimulus))

	def get_nowait(self) -> typing.Tuple[float, Stimulus]:
		return self.queue.get_nowait()

	def run(
			self,
			write_callback: Callable[[Stimulus], None],
			audio_callback: Callable[[Stimulus], None]
	) -> StimulusTimeLog:
		"""Runs the stimulus schedule immediately.
		This runs the scheduled stimuli and blocks. Does not sleep.
		:param write_callback: Example: board.write
		:param audio_callback: Example: global_audio.play
		"""

		logger.info("Battery will run for {}ms. Starting!".format(self.n_ms_total))
		stimulus_time_log = StimulusTimeLog()
		stimulus_time_log.start()  # This is totally fine: It happens at time 0 in the stimulus_list AND the full battery.

		t0 = monotonic()
		for _ in self.time_range:
			scheduled_seconds, stimulus = self.get_nowait()
			while monotonic() - t0 < scheduled_seconds: pass

			# write_callback is expected to call the board's raw digital/analog write directly (no validation checks, for performance)
			if stimulus.stim_type is StimulusType.MARKER:
				logger.info("Starting: {}".format(stimulus.name))
				continue
			elif stimulus.is_digital() or stimulus.is_analog():
				write_callback(stimulus)
			elif stimulus.is_audio():
				audio_callback(stimulus)  # volume is handled internally
			else:
				raise ValueError("Invalid stimulus type %s!" % stimulus.stim_type)

			stimulus_time_log.append(StimulusTimeRecord(stimulus, datetime.datetime.now()))

		# This is critical. Otherwise, the StimulusTimeLog will finish() at the time the last stimulus is applied, not the time the battery ends
		# TODO double-check
		#while monotonic() - t0 < self.n_ms_total / 1000: pass
		# offset = time remaining until the scheduled end of the battery (n_ms_total is in ms, monotonic() in s)
		offset = datetime.timedelta(milliseconds=self.n_ms_total - (monotonic() - t0) * 1000)
		if offset < datetime.timedelta(0):
			logger.warning("Stimuli finished too late: {} after the scheduled end".format(-offset))
			offset = datetime.timedelta(0)
		stimulus_time_log.finish_future(datetime.datetime.now() + offset)
		return stimulus_time_log  # for trimming camera frames
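
The constructor's reverse=True combined with a LIFO queue is what makes get_nowait() yield stimuli in chronological order: the earliest stimulus is pushed last, so it pops first. A tiny standalone illustration of that mechanic:

from queue import LifoQueue

q: LifoQueue = LifoQueue()
schedule = [(200, "b"), (100, "a"), (300, "c")]   # (index_ms, stimulus)
for index_ms, stimulus in sorted(schedule, key=lambda x: x[0], reverse=True):
    q.put_nowait((index_ms / 1000, stimulus))

print(q.get_nowait())  # (0.1, 'a') -- earliest first
print(q.get_nowait())  # (0.2, 'b')
print(q.get_nowait())  # (0.3, 'c')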
Example #5
async def main():
    lifo_queue = LifoQueue()

    work_items = [
        WorkItem(3, 1, 'Lowest priority first'),
        WorkItem(3, 2, 'Lowest priority second'),
        WorkItem(3, 3, 'Lowest priority third'),
        WorkItem(2, 4, 'Medium priority'),
        WorkItem(1, 5, 'High priority')
    ]

    worker_task = asyncio.create_task(worker(lifo_queue))

    for work in work_items:
        lifo_queue.put_nowait(work)

    await asyncio.gather(lifo_queue.join(), worker_task)
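
This snippet assumes WorkItem and worker are defined elsewhere. A minimal sketch that makes it runnable; the field names and the worker body are assumptions, not the original definitions:

from asyncio import LifoQueue
from dataclasses import dataclass, field

@dataclass(order=True)
class WorkItem:
    priority: int
    order: int
    data: str = field(compare=False)

async def worker(queue: LifoQueue) -> None:
    # The worker only starts executing after main() has synchronously
    # enqueued every item, so draining until empty is safe here.
    while not queue.empty():
        work_item: WorkItem = await queue.get()
        print(f'Processing: {work_item}')
        queue.task_done()

Because the queue is LIFO, the high-priority WorkItem enqueued last is processed first, even though the queue never compares items.
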
Example #6
    async def _push_object_consumer(self, queue: asyncio.LifoQueue,
                                    session: aiohttp.ClientSession,
                                    status_update_fn: Callable) -> None:
        """Async Queue consumer worker for pushing objects to the object service/s3

        Args:
            queue: The current work queue
            session: The current aiohttp session
            status_update_fn: the update function for providing feedback

        Returns:
            None
        """
        while True:
            presigned_request: PresignedS3Upload = await queue.get()

            try:
                if presigned_request.skip_object is False:
                    if not presigned_request.is_presigned:
                        # Fetch the signed URL
                        status_update_fn(
                            f'Preparing upload for {presigned_request.object_details.dataset_path}'
                        )
                        await presigned_request.get_presigned_s3_url(session)
                        queue.put_nowait(presigned_request)
                    else:
                        # Process S3 Upload
                        status_update_fn(
                            f'Uploading {presigned_request.object_details.dataset_path}'
                        )
                        await presigned_request.put_object(session)
                        self.successful_requests.append(presigned_request)
                else:
                    # Object skipped because it already exists in the backend (de-duplicating)
                    status_update_fn(
                        f'{presigned_request.object_details.dataset_path} already exists.'
                        f' Skipping upload to avoid duplicated storage.')
                    self.successful_requests.append(presigned_request)

            except Exception as err:
                logger.exception(err)
                self.failed_requests.append(presigned_request)

            # Notify the queue that the item has been processed
            queue.task_done()
Example #7
    async def _push_object_consumer(self, queue: asyncio.LifoQueue, session: aiohttp.ClientSession,
                                    progress_update_fn: Callable) -> None:
        """Async Queue consumer worker for pushing objects to the object service/s3

        Args:
            queue: The current work queue
            session: The current aiohttp session
            progress_update_fn: A callable with arg "completed_bytes" (int) indicating how many bytes have been
                                uploaded since the last call

        Returns:
            None
        """
        while True:
            presigned_request: PresignedS3Upload = await queue.get()

            try:
                if presigned_request.skip_object is False:
                    if presigned_request.is_multipart:
                        # Run multipart upload workflow
                        await self._process_multipart_upload(queue, session, presigned_request, progress_update_fn)
                    else:
                        # Run standard, single-request workflow
                        await self._process_standard_upload(queue, session, presigned_request, progress_update_fn)
                else:
                    # Object skipped because it already exists in the backend (object level de-duplicating)
                    logger.info(f"Skipping duplicate download {presigned_request.object_details.dataset_path}")
                    progress_update_fn(os.path.getsize(presigned_request.object_details.object_path))
                    self.successful_requests.append(presigned_request)

            except Exception as err:
                logger.exception(err)
                self.failed_requests.append(presigned_request)
                if presigned_request.is_multipart and presigned_request.multipart_upload_id is not None:
                    # Make best effort to abort a multipart upload if needed
                    try:
                        await presigned_request.abort_multipart_upload(session)
                    except Exception as err:
                        logger.error(f"An error occured while trying to abort multipart upload "
                                     f"{presigned_request.multipart_upload_id} for {presigned_request.object_id}")
                        logger.exception(err)

            # Notify the queue that the item has been processed
            queue.task_done()
Example #8
def _init():
    parallelism = config.get('parallelism', 2)
    if parallelism < 2:
        logger.warning(
            'Parallelism less than 2, custom judge will not be supported.')
    logger.info('Using parallelism: %d', parallelism)
    sandboxes_task = create_sandboxes(parallelism)
    global _lock, _queue
    _lock = Lock()
    _queue = LifoQueue()
    put_sandbox(*get_event_loop().run_until_complete(sandboxes_task))
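
put_sandbox is defined elsewhere in the module; judging from the context here, its role is to return sandboxes to the LIFO pool so that the most recently released (cache-warm) sandbox is reused first. A sketch of what it plausibly looks like (an assumption, not the original source):

def put_sandbox(*sandboxes) -> None:
    # Return each sandbox to the module-level LIFO pool created in _init().
    for sandbox in sandboxes:
        _queue.put_nowait(sandbox)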
Example #9
async def queue_for_profile(profile: str, maxsize: int):
    # Create a queue of duplicate S3 clients for the given AWS profile.
    # Client duplication is a precautionary measure against potential
    # overwriting at runtime.

    queue: LifoQueue = LifoQueue(maxsize=maxsize)

    while not queue.full():
        client = await S3ClientWrapper(profile=profile).__aenter__()
        queue.put_nowait(client)

    return queue
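
A sketch of how such a pool might be consumed (assumed usage, not from the original source): borrow a client with get() and always return it in a finally block, so the pool never shrinks on errors.

from asyncio import LifoQueue

async def use_pooled_client(queue: LifoQueue) -> None:
    client = await queue.get()
    try:
        ...  # perform S3 calls with the borrowed client
    finally:
        queue.put_nowait(client)  # return the client even if the call failed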
Example #10
    async def _pull_object_consumer(self, queue: asyncio.LifoQueue,
                                    session: aiohttp.ClientSession,
                                    status_update_fn: Callable) -> None:
        """Async Queue consumer worker for downloading objects from the object service/s3

        Args:
            queue: The current work queue
            session: The current aiohttp session
            status_update_fn: the update function for providing feedback

        Returns:
            None
        """
        while True:
            presigned_request: PresignedS3Download = await queue.get()

            try:
                if not presigned_request.is_presigned:
                    # Fetch the signed URL
                    status_update_fn(
                        f'Preparing download for {presigned_request.object_details.dataset_path}'
                    )
                    await presigned_request.get_presigned_s3_url(session)
                    queue.put_nowait(presigned_request)
                else:
                    # Process S3 Download
                    status_update_fn(
                        f'Downloading {presigned_request.object_details.dataset_path}'
                    )
                    await presigned_request.get_object(session)
                    self.successful_requests.append(presigned_request)

            except Exception as err:
                logger.exception(err)
                self.failed_requests.append(presigned_request)

            # Notify the queue that the item has been processed
            queue.task_done()
Example #11
    async def _process_multipart_upload(self, queue: asyncio.LifoQueue, session: aiohttp.ClientSession,
                                        presigned_request: PresignedS3Upload, progress_update_fn: Callable) -> None:
        """Method to handle the complex multipart upload workflow.

        1. Create a multipart upload and get the ID
        2. Upload all parts
        3. Complete the multipart upload and mark the PresignedS3Upload object as successful

        Args:
            queue: The current work queue
            session: The current aiohttp session
            presigned_request: the current PresignedS3Upload object to process
            progress_update_fn: A callable with arg "completed_bytes" (int) indicating how many bytes have been
                                uploaded since the last call

        Returns:
            None
        """
        if not presigned_request.multipart_upload_id:
            # Create a multipart upload and create parts
            await presigned_request.prepare_multipart_upload(session)
            # Requeue for more processing
            queue.put_nowait(presigned_request)
        else:
            try:
                if presigned_request.current_part:
                    if not presigned_request.is_presigned:
                        # Fetch the signed URL
                        await presigned_request.get_presigned_s3_url(session)
                        queue.put_nowait(presigned_request)
                    else:
                        # Process S3 Upload, mark the part as done, and requeue it
                        etag = await presigned_request.put_object(session, progress_update_fn)
                        presigned_request.mark_current_part_complete(etag)
                        queue.put_nowait(presigned_request)
                else:
                    # If you get here, you are done and should complete the upload
                    await presigned_request.complete_multipart_upload(session)
                    self.successful_requests.append(presigned_request)
            except Exception:
                presigned_request.remove_compressed_file()
                raise
Example #12
# Stdlib/third-party imports inferred from usage in this excerpt (the original listing begins mid-import-block):
from appdirs import user_config_dir
from asyncio import LifoQueue, get_event_loop
from os import mkfifo, path
from socket import AF_UNIX, SOCK_NONBLOCK, SOCK_STREAM, socket

from jd4.cgroup import wait_cgroup
from jd4.compile import Compiler, Interpreter
from jd4.config import config
from jd4.log import logger
from jd4.sandbox import create_sandboxes
from jd4.util import read_pipe

_CONFIG_DIR = user_config_dir('jd4')
_LANGS_FILE = path.join(_CONFIG_DIR, 'langs.yaml')
_MAX_OUTPUT = 8192
DEFAULT_TIME_MS = 20000
DEFAULT_MEM_KB = 262144
PROCESS_LIMIT = 32

_sandbox_pool = LifoQueue()
_langs = dict()


async def _compiler_build(compiler, code, time_limit_ns, memory_limit_bytes,
                          process_limit):
    loop = get_event_loop()
    sandbox = await _sandbox_pool.get()
    try:
        await compiler.prepare(sandbox, code.encode())
        output_file = path.join(sandbox.in_dir, 'output')
        mkfifo(output_file)
        cgroup_sock = socket(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK)
        cgroup_sock.bind(path.join(sandbox.in_dir, 'cgroup'))
        cgroup_sock.listen()
        build_task = loop.create_task(