def _poll(self, task, trap):
    """Handle a POLL trap: park ``task`` until ``trap.fd`` is ready."""
    ASSERT.is_(trap.kind, traps.Traps.POLL)
    # Only READ and WRITE poll events are supported.
    if trap.events is pollers.Polls.READ:
        blocker = self._read_blocker
    else:
        ASSERT.is_(trap.events, pollers.Polls.WRITE)
        blocker = self._write_blocker
    blocker.block(trap.fd, task)
def sendfile(self, file):
    """Select the sendfile mechanism and record ``file`` for sending.

    May be called at most once, and only while the response is open.
    """
    if self._body.is_closed():
        raise ResponseClosed('response is closed')
    # The send mechanism is a one-shot decision.
    ASSERT.is_(self._send_mechanism, _SendMechanisms.UNDECIDED)
    ASSERT.not_none(file)
    self._set_send_mechanism(_SendMechanisms.SENDFILE)
    self.file = file
def _sleep(self, task, trap):
    """Handle a SLEEP trap: park ``task`` until its wakeup time."""
    ASSERT.is_(trap.kind, traps.Traps.SLEEP)
    duration = trap.duration
    if duration is None:
        # Sleep "forever" (until explicitly woken or cancelled).
        self._forever_blocker.block(None, task)
        return
    if duration <= 0:
        # Non-positive sleep: resume on the next scheduler pass.
        self._ready_tasks.append(TaskReady(task, None, None))
        return
    self._sleep_blocker.block(time.monotonic() + duration, task)
def _join(self, task, trap):
    """Handle a JOIN trap: block ``task`` until ``trap.task`` completes."""
    ASSERT.is_(trap.kind, traps.Traps.JOIN)
    target = trap.task
    ASSERT.is_(target._kernel, self)
    ASSERT.is_not(target, task)  # A task cannot join itself.
    if target.is_completed():
        # Already completed: resume the joiner immediately.
        self._ready_tasks.append(TaskReady(task, None, None))
    else:
        self._task_completion_blocker.block(target, task)
def notify_success(self):
    """Record a successful request against the circuit breaker."""
    state = self._state
    if state is _States.GREEN:
        # Healthy: a success wipes any accumulated failure history.
        self._event_log.clear()
    elif state is _States.YELLOW:
        # Probing: count successes until we may re-close the circuit.
        self._event_log.add(time.monotonic())
        if self._event_log.count() >= self._success_threshold:
            self._change_state_green()
    else:
        # RED: successes are ignored (requests should not be flowing).
        ASSERT.is_(state, _States.RED)
async def sendfile(self, file):
    """Send ``file`` as the response body via the socket's sendfile.

    May be called only once, and only after the response has begun.
    Returns whatever the underlying socket ``sendfile`` returns.
    """
    ASSERT.true(self._has_begun)
    # The send mechanism is a one-shot decision.
    ASSERT.is_(self._send_mechanism, _SendMechanisms.UNDECIDED)
    ASSERT.not_none(file)
    self._send_mechanism = _SendMechanisms.SENDFILE
    # Headers must be on the wire before any body bytes follow.
    await self._headers_sent.wait()
    with timers.timeout_after(self._SENDFILE_TIMEOUT):
        return await self._sock.sendfile(file)
def build(parameters):
    """Prepare the builder pod: refresh the apt index, create drydock dir."""
    ASSERT.is_(parameters['inside-builder-pod'], True)
    ASSERT.all(parameters['roots'], _is_root_dir)
    drydock_path = parameters['drydock']
    with scripts.using_sudo():
        # Run `apt-get update` even when not upgrading the full system:
        # packages may have been dropped from the distro repo while our
        # local package index still lists them.
        scripts.apt_get_update()
        scripts.mkdir(drydock_path)
def timeout_after(self, task, duration):
    """Arm a timeout for ``task``; return a zero-argument canceller.

    A ``None`` duration means "no timeout", in which case the returned
    canceller is a no-op.
    """
    ASSERT.false(self._closed)
    self._assert_owner()
    ASSERT.is_(task._kernel, self)
    if duration is None:
        return lambda: None
    # Even for duration <= 0 the kernel raises ``Timeout`` at the next
    # blocking trap, for consistency; so never raise here.
    deadline = time.monotonic() + duration
    self._timeout_after_blocker.block(deadline, task)
    return functools.partial(self._timeout_after_blocker.cancel, task)
def cancel(self, task):
    """Cancel the task.

    This is a no-op if the task has already completed.
    """
    # The kernel must still be open and owned by the calling thread.
    ASSERT.false(self._closed)
    self._assert_owner()
    ASSERT.is_(task._kernel, self)
    if not task.is_completed():
        # Deliver a cancellation "disruption" into the task.
        self._disrupt(task, errors.TaskCancellation)
def output(self, output_file):
    """Render the collected rows to ``output_file`` in the configured format."""
    # Pair each column with its stringifier (default: ``str``).
    converters = [
        (name, self._stringifiers.get(name, str)) for name in self._columns
    ]
    stringified = [
        [convert(row[name]) for name, convert in converters]
        for row in self._rows
    ]
    if self._format is Formats.CSV:
        self._output_csv(stringified, output_file)
    else:
        ASSERT.is_(self._format, Formats.TEXT)
        self._output_text(stringified, output_file)
def notify_failure(self):
    """Record a failed request against the circuit breaker."""
    state = self._state
    if state is _States.GREEN:
        now = time.monotonic()
        self._event_log.add(now)
        # Trip the breaker once enough failures land within the
        # sliding failure period.
        recent = self._event_log.count(now - self._failure_period)
        if recent >= self._failure_threshold:
            self._change_state_red(now)
    elif state is _States.YELLOW:
        # Any failure while probing re-opens the circuit immediately.
        self._change_state_red(time.monotonic())
    else:
        ASSERT.is_(state, _States.RED)
def __enter__(self):
    """Install the signal wakeup fd; return ``self``."""
    # ``signal.set_wakeup_fd`` may only be called from the main thread.
    ASSERT.is_(threading.current_thread(), threading.main_thread())
    # Nested use is forbidden: ``SignalSource`` is a singleton and is
    # intended to be used as such.
    ASSERT.none(self._wakeup_fd)
    read_sock, write_sock = socket.socketpair()
    self._sock_r = adapters.SocketAdapter(read_sock)
    self._sock_w = write_sock
    # The wakeup fd must be non-blocking for signal delivery.
    write_sock.setblocking(False)
    self._wakeup_fd = signal.set_wakeup_fd(write_sock.fileno())
    return self
def rfc_7231_date(now=None):
    """Format ``now`` as an RFC 7231 (HTTP) date string.

    Args:
        now: An aware ``datetime`` in UTC; defaults to the current time.

    Fails via ASSERT when ``now`` carries a non-UTC time zone.
    """
    # Test the sentinel explicitly with ``is None``; truthiness is not a
    # reliable "was an argument given" signal for datetime objects.
    if now is None:
        now = datetimes.utcnow()
    # We can't handle non-UTC time zones at the moment.
    ASSERT.is_(now.tzinfo, datetime.timezone.utc)
    return RFC_7231_FORMAT.format(
        year=now.year,
        month=RFC_7231_MONTHS[now.month - 1],
        day_name=RFC_7231_DAY_NAMES[now.weekday()],
        day=now.day,
        hour=now.hour,
        minute=now.minute,
        second=now.second,
    )
async def __aenter__(self):
    """Admit one request through the circuit breaker, or raise Unavailable."""
    if self._state is _States.GREEN:
        self._num_concurrent_requests += 1
        return self
    if self._state is _States.RED:
        # Stay disconnected while failures are still within the timeout
        # window; otherwise start probing (YELLOW).
        cutoff = time.monotonic() - self._failure_timeout
        if self._event_log.count(cutoff) > 0:
            raise Unavailable('circuit breaker disconnected: %s' % self._key)
        self._change_state_yellow()
    ASSERT.is_(self._state, _States.YELLOW)
    # While probing, admit only one request at a time.
    if self._num_concurrent_requests > 0:
        raise Unavailable(
            'circuit breaker has not re-connected yet: %s' % self._key
        )
    self._num_concurrent_requests += 1
    return self
def _none_setter(builder, name, none):
    """Store Cap'n Proto VOID for a field whose Python value must be None."""
    ASSERT.is_(none, None)
    builder[name] = _capnp.VOID
def _block(self, task, trap):
    """Handle a BLOCK trap: park ``task`` on the generic blocker."""
    ASSERT.is_(trap.kind, traps.Traps.BLOCK)
    self._generic_blocker.block(trap.source, task)
    # Fire the callback only after the task is actually parked.
    callback = trap.post_block_callback
    if callback:
        callback()
def sendfile(self, file):
    """Record ``file`` to be sent via the sendfile mechanism.

    May be called at most once per response.
    """
    # The send mechanism is a one-shot decision.
    ASSERT.is_(self._send_mechanism, _SendMechanisms.UNDECIDED)
    validated = ASSERT.not_none(file)
    self._send_mechanism = _SendMechanisms.SENDFILE
    self.file = validated
def cmd_setup_base_rootfs(image_rootfs_path, prune_stash_path):
    """Set up base rootfs.

    ``image_rootfs_path`` is the root of the debootstrap'ed image tree;
    ``prune_stash_path``, when truthy, receives pruned directory content
    instead of it being deleted outright.

    Requires root privilege (checked below).

    Changes from 18.04 to 20.04.

    * /lib is now a symlink to /usr/lib.

    * system.slice has been removed:
      https://github.com/systemd/systemd/commit/d8e5a9338278d6602a0c552f01f298771a384798
    """
    ASSERT.predicate(image_rootfs_path, Path.is_dir)
    oses.assert_root_privilege()
    # Remove unneeded files (docs and apt/dpkg state).
    for dir_relpath in (
        'usr/share/doc',
        'usr/share/info',
        'usr/share/man',
        'var/cache',
        'var/lib/apt',
        'var/lib/dpkg',
    ):
        dir_path = image_rootfs_path / dir_relpath
        if dir_path.is_dir():
            if prune_stash_path:
                # Move content into the stash, mirroring the relative
                # path; the destination must not already exist.
                dst_path = ASSERT.not_predicate(
                    prune_stash_path / dir_relpath, g1.files.lexists
                )
                dst_path.mkdir(mode=0o755, parents=True, exist_ok=True)
                _move_dir_content(dir_path, dst_path)
            else:
                _clear_dir_content(dir_path)
    # Remove certain config files.
    for path in (
        # Remove this so that systemd-nspawn may set the hostname.
        image_rootfs_path / 'etc/hostname',
        # systemd-nspawn uses machine-id to link journal.
        image_rootfs_path / 'etc/machine-id',
        image_rootfs_path / 'var/lib/dbus/machine-id',
        # debootstrap seems to copy this file from the build machine,
        # which is not the host machine that runs this image; so let's
        # replace this with a generic stub.
        image_rootfs_path / 'etc/resolv.conf',
        image_rootfs_path / 'run/systemd/resolve/stub-resolv.conf',
    ):
        LOG.info('remove: %s', path)
        g1.files.remove(path)
    # Replace certain config files with our canned content.
    for path, content in (
        (image_rootfs_path / 'etc/default/locale', _LOCALE),
        (image_rootfs_path / 'etc/resolv.conf', _RESOLV_CONF),
        (image_rootfs_path / 'etc/systemd/journald.conf', _JOURNALD_CONF),
    ):
        LOG.info('replace: %s', path)
        path.write_text(content)
    # Remove unneeded unit files: keep exactly the units listed in
    # _BASE_UNITS, delete everything else.
    base_units = set(_BASE_UNITS)
    for unit_dir_path in (
        image_rootfs_path / 'etc/systemd/system',
        image_rootfs_path / 'usr/lib/systemd/system',
    ):
        if not unit_dir_path.exists():
            continue
        LOG.info('clean up unit files in: %s', unit_dir_path)
        for unit_path in unit_dir_path.iterdir():
            if unit_path.name in base_units:
                base_units.remove(unit_path.name)
                continue
            # There should be no duplicated units across the two unit
            # directories (a name already consumed from ``base_units``
            # would trip this assertion).
            ASSERT.not_in(unit_path.name, _BASE_UNITS)
            LOG.info('remove: %s', unit_path)
            g1.files.remove(unit_path)
    # Every expected base unit must have been seen.
    ASSERT.empty(base_units)
    # Create unit files (directories, regular files, and symlinks).
    for unit_dir_path, unit_files in (
        (image_rootfs_path / 'etc/systemd/system', _ETC_UNIT_FILES),
        (image_rootfs_path / 'usr/lib/systemd/system', _LIB_UNIT_FILES),
    ):
        for unit_file in unit_files:
            ASSERT.predicate(unit_dir_path, Path.is_dir)
            path = unit_dir_path / unit_file.relpath
            LOG.info('create: %s', path)
            if unit_file.kind is _UnitFile.Kinds.DIRECTORY:
                path.mkdir(mode=0o755)
            elif unit_file.kind is _UnitFile.Kinds.FILE:
                path.write_text(unit_file.content)
                path.chmod(0o644)
            else:
                # SYMLINK: ``content`` holds the link target.
                ASSERT.is_(unit_file.kind, _UnitFile.Kinds.SYMLINK)
                path.symlink_to(unit_file.content)
            bases.chown_root(path)
    # Create ``pod-exit`` script and exit status directory.
    pod_exit_path = image_rootfs_path / 'usr/sbin/pod-exit'
    LOG.info('create: %s', pod_exit_path)
    pod_exit_path.write_text(_POD_EXIT)
    bases.setup_file(pod_exit_path, 0o755, bases.chown_root)
    bases.make_dir(image_rootfs_path / 'var/lib/pod', 0o755, bases.chown_root)
    bases.make_dir(
        image_rootfs_path / 'var/lib/pod/exit-status', 0o755, bases.chown_root
    )
def cleanup(parameters):
    """Clean up the builder pod's apt caches after a build."""
    ASSERT.is_(parameters['inside-builder-pod'], True)
    ASSERT.all(parameters['roots'], _is_root_dir)
    # apt-get clean needs root inside the pod.
    with scripts.using_sudo():
        scripts.apt_get_clean()
def build(parameters):
    """Validate build preconditions on the host (outside the builder pod)."""
    ASSERT.is_(parameters['//bases:inside-builder-pod'], False)
    root_path = parameters['root']
    ASSERT.predicate(root_path, Path.is_dir)
def to_builder(self, dataobject, builder):
    """Serialize ``dataobject`` into the Cap'n Proto ``builder``."""
    # Both the Python type and the capnp schema must match this codec.
    ASSERT.isinstance(dataobject, self._dataclass)
    ASSERT.is_(builder.schema, self._schema)
    self._converter.to_builder(dataobject, builder)
def _none_getter(reader, name):  # pylint: disable=useless-return
    """Read a Cap'n Proto VOID field, which maps to Python ``None``."""
    ASSERT.is_(reader[name], _capnp.VOID)
    # Explicit for symmetry with the other getters.
    return None
async def recvfile(response, file): """Receive response body into a file. The caller must set ``stream`` to true when make the request. DANGER! This breaks the multiple levels of encapsulation, from requests.Response all the way down to http.client.HTTPResponse. As a result, the response object is most likely unusable after a recvfile call, and you should probably close it immediately. """ # requests sets _content to False initially. ASSERT.is_(response._content, False) ASSERT.false(response._content_consumed) urllib3_response = ASSERT.not_none(response.raw) chunked = urllib3_response.chunked httplib_response = ASSERT.isinstance( urllib3_response._fp, http.client.HTTPResponse ) ASSERT.false(httplib_response.closed) sock = ASSERT.isinstance(httplib_response.fp.raw._sock, socket.socket) output = DecoderChain(file) if chunked: chunk_decoder = ChunkDecoder() output.add(chunk_decoder) num_to_read = 0 eof = lambda: chunk_decoder.eof else: num_to_read = ASSERT.greater( ASSERT.not_none(httplib_response.length), 0 ) eof = lambda: num_to_read <= 0 # Use urllib3's decoder code. urllib3_response._init_decoder() if urllib3_response._decoder is not None: output.add(ContentDecoder(urllib3_response._decoder)) with contextlib.ExitStack() as stack: src = adapters.FileAdapter(httplib_response.fp) stack.callback(src.disown) sock.setblocking(False) stack.callback(sock.setblocking, True) buffer = memoryview(stack.enter_context(_BUFFER_POOL.using())) while not eof(): if chunked: # TODO: If server sends more data at the end, like # response of the next request, for now recvfile might # read them, and then err out. Maybe recvfile should # check this, and not read more than it should instead? num_read = await src.readinto1(buffer) else: num_read = await src.readinto1( buffer[:min(num_to_read, _CHUNK_SIZE)] ) if num_read == 0: break output.write(buffer[:num_read]) num_to_read -= num_read output.flush() # Sanity check. 
if not chunked: ASSERT.equal(num_to_read, 0) # Trick requests to release the connection back to the connection # pool, rather than closing/discarding it. response._content_consumed = True # http.client.HTTPConnection tracks the last response; so you have # to close it to make the connection object useable again. httplib_response.close() # Close the response for the caller since response is not useable # after recvfile. response.close() loggings.ONCE_PER( 1000, LOG.info, 'buffer pool stats: %r', _BUFFER_POOL.get_stats() )
def from_reader(self, reader):
    """Deserialize a dataobject from the Cap'n Proto ``reader``."""
    # The reader's schema must match this codec.
    ASSERT.is_(reader.schema, self._schema)
    return self._converter.from_reader(reader)