def __init__(self, message, schema, raw):
    ASSERT.isinstance(schema, self._schema_type)
    super().__init__(raw)
    # Keep a strong reference to the root message to ensure that it
    # is not garbage-collected before us.
    self._message = message
    self.schema = schema
def get_root(self, struct_schema):
    ASSERT.isinstance(struct_schema, schemas.StructSchema)
    return dynamics.DynamicStructBuilder(
        self,
        struct_schema,
        self._raw.getRoot(struct_schema._raw),
    )
def _struct_to_lower(value):
    if isinstance(value, DynamicStructReader):
        reader = value._raw
    else:
        ASSERT.isinstance(value, DynamicStructBuilder)
        reader = value._raw.asReader()
    return _capnp.DynamicValue.Reader.fromDynamicStruct(reader)
def install(self, bundle_dir, target_ops_dir_path):
    del target_ops_dir_path  # Unused.
    ASSERT.isinstance(bundle_dir, XarBundleDir)
    log_args = (bundle_dir.label, bundle_dir.version)
    # Make metadata first so that uninstall may roll back properly.
    LOG.info('xars install: metadata: %s %s', *log_args)
    jsons.dump_dataobject(
        models.XarMetadata(
            label=bundle_dir.label,
            version=bundle_dir.version,
            image=bundle_dir.deploy_instruction.image,
        ),
        self.metadata_path,
    )
    bases.set_file_attrs(self.metadata_path)
    # Sanity check of the just-written metadata file.
    ASSERT.equal(self.label, bundle_dir.label)
    ASSERT.equal(self.version, bundle_dir.version)
    if bundle_dir.deploy_instruction.is_zipapp():
        LOG.info('xars install: zipapp: %s %s', *log_args)
        bases.copy_exec(bundle_dir.zipapp_path, self.zipapp_target_path)
    else:
        LOG.info('xars install: xar: %s %s', *log_args)
        ctr_scripts.ctr_import_image(bundle_dir.image_path)
        ctr_scripts.ctr_install_xar(
            bundle_dir.deploy_instruction.name,
            bundle_dir.deploy_instruction.exec_relpath,
            bundle_dir.deploy_instruction.image,
        )
    return True
def update_definition(self, token_name, definition):
    models.validate_token_name(token_name)
    ASSERT.isinstance(definition, self.Definition)
    # Validate the new definition before updating.
    definition.validate_assigned_values(
        [a.value for a in self.assignments[token_name]])
    self.definitions[token_name] = definition
def _lower_error_or_none(self, error):
    if error is None:
        return None
    ASSERT.isinstance(error, Exception)
    error_name = ASSERT(
        self._match_error_type(error), 'unknown error type: {!r}', error)
    return self._wiredata.to_lower(
        self._response_type(error=self._response_type.Error(
            **{error_name: error})))
def do_iter(namespace):
    for name, value in namespace._entries.items():
        parts.append(name)
        if isinstance(value, Namespace):
            yield from do_iter(value)
        else:
            ASSERT.isinstance(value, ParameterBase)
            label = labels.Label(module_path, '.'.join(parts))
            yield label, value
        parts.pop()
def __init__(self, *, queue=None, **kwargs):
    # Only support the priority queue use case for now.  For other use
    # cases, the ordinary go-to session type should be sufficient.
    if queue is not None:
        ASSERT.isinstance(queue, queues.PriorityQueue)
    else:
        queue = queues.PriorityQueue()
    self._queue = queue
    self._sender = bases.Sender(self._send, **kwargs)
    self._base_session = None  # Set by ClusterSession.
def load(namespace, config_tree):
    for key, value in ASSERT.isinstance(config_tree, dict).items():
        entry = namespace
        for part in ASSERT.isinstance(key, str).split('.'):
            entry = getattr(entry, part)
        if isinstance(entry, Namespace):
            load(entry, value)
        else:
            ASSERT.isinstance(entry, ParameterBase)
            if entry.convert:
                value = entry.convert(value)
            entry.set(value)
def method_caller(obj, queue):
    """Actor that interprets messages as method calls of an object."""
    LOG.debug('start')
    while True:
        try:
            call = ASSERT.isinstance(queue.get(), MethodCall)
        except queues.Closed:
            break
        with call.future.catching_exception(reraise=False):
            method = getattr(obj, ASSERT.isinstance(call.method, str))
            call.future.set_result(method(*call.args, **call.kwargs))
        del call
    LOG.debug('exit')
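# Usage sketch (an assumption, not part of this module): the same actor
# pattern rebuilt with only the standard library, so it can run on its own.
# ``_Call`` is a hypothetical stand-in for ``MethodCall`` with the same four
# fields, and ``None`` stands in for the ``queues.Closed`` shutdown signal.
import concurrent.futures
import dataclasses
import queue
import threading


@dataclasses.dataclass
class _Call:
    method: str
    args: tuple
    kwargs: dict
    future: concurrent.futures.Future


def _demo_method_caller(obj, call_queue):
    # Same loop shape as ``method_caller``: pop a call, resolve the method
    # by name, and deliver the result (or the exception) via the future.
    while True:
        call = call_queue.get()
        if call is None:
            break
        try:
            call.future.set_result(
                getattr(obj, call.method)(*call.args, **call.kwargs))
        except BaseException as exc:
            call.future.set_exception(exc)


if __name__ == '__main__':
    call_queue = queue.Queue()
    worker = threading.Thread(
        target=_demo_method_caller, args=([1, 2, 3], call_queue))
    worker.start()
    future = concurrent.futures.Future()
    call_queue.put(_Call('count', (2,), {}, future))
    print(future.result())  # -> 1
    call_queue.put(None)
    worker.join()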
def __init__(self, protocol, *, raw=False):
    # In case ``__init__`` raises.
    self._handle = None
    ASSERT.isinstance(protocol, Protocols)
    opener = protocol.value[1] if raw else protocol.value[0]
    handle = _nng.nng_socket()
    errors.check(opener(ctypes.byref(handle)))
    self._handle = handle.value
    self.protocol = protocol
    self.dialers = {}
    self.listeners = {}
def __init__(self, status, message, headers=None, content=b''):
    super().__init__(message)
    self.status = ASSERT.in_range(_cast_status(status), (300, 600))
    self.headers = ASSERT.predicate(
        dict(headers) if headers is not None else {},
        lambda hdrs: all(
            isinstance(k, str) and isinstance(v, str)
            for k, v in hdrs.items()),
    )
    self.content = ASSERT.isinstance(content, bytes)
def __init__(self, data=b'', *, msg_p=None):
    # In case ``__init__`` raises.
    self._msg_p = None
    ASSERT.isinstance(data, bytes)
    if msg_p is None:
        msg_p = _nng.nng_msg_p()
        errors.check(_nng.F.nng_msg_alloc(ctypes.byref(msg_p), len(data)))
        if data:
            ctypes.memmove(_nng.F.nng_msg_body(msg_p), data, len(data))
    else:
        # We are taking ownership of ``msg_p`` and should not take
        # any initial data.
        ASSERT.false(data)
    self._msg_p = msg_p
    self.header = Header(self._get)
    self.body = Body(self._get)
def function_caller(queue):
    """Actor that interprets messages as function calls."""
    LOG.debug('start')
    while True:
        try:
            call = ASSERT.isinstance(queue.get(), MethodCall)
        except queues.Closed:
            break
        with call.future.catching_exception(reraise=False):
            ASSERT.predicate(call.method, callable)
            call.future.set_result(call.method(*call.args, **call.kwargs))
        del call
    LOG.debug('exit')
def __init__(
    self,
    columns,
    *,
    format=Formats.TEXT,  # pylint: disable=redefined-builtin
    header=True,
    stringifiers=None,
):
    self._format = ASSERT.isinstance(format, Formats)
    self._header = header
    self._columns = columns
    self._stringifiers = stringifiers or {}
    self._rows = []
def format_namespace(namespace, indent):
    for name, value in namespace._entries.items():
        write_indent(indent)
        output.write(name)
        output.write(':')
        if isinstance(value, Namespace):
            if value._doc:
                output.write(' ')
                output.write(value._doc)
            output.write('\n')
            format_namespace(value, indent + 1)
        else:
            ASSERT.isinstance(value, ParameterBase)
            if value.doc:
                output.write(' ')
                output.write(value.doc)
            if isinstance(value, Parameter):
                output.write(' (default: ')
                output.write((value.format or json.dumps)(value.default))
            elif isinstance(value, ConstParameter):
                output.write(' (value: ')
                output.write((value.format or json.dumps)(value.value))
            else:
                ASSERT.isinstance(value, RequiredParameter)
                output.write(' (type: ')
                if isinstance(value.type, type):
                    output.write(value.type.__name__)
                else:
                    output.write(', '.join(t.__name__ for t in value.type))
            if value.unit:
                if isinstance(value, RequiredParameter):
                    output.write(', unit: ')
                else:
                    output.write(' ')
                output.write(value.unit)
            output.write(')\n')
async def recvfile(response, file):
    """Receive the response body into a file.

    The caller must set ``stream`` to true when making the request.

    DANGER! This breaks the multiple levels of encapsulation, from
    requests.Response all the way down to http.client.HTTPResponse.
    As a result, the response object is most likely unusable after a
    recvfile call, and you should probably close it immediately.
    """
    # requests sets _content to False initially.
    ASSERT.is_(response._content, False)
    ASSERT.false(response._content_consumed)
    urllib3_response = ASSERT.not_none(response.raw)
    chunked = urllib3_response.chunked
    httplib_response = ASSERT.isinstance(
        urllib3_response._fp, http.client.HTTPResponse
    )
    ASSERT.false(httplib_response.closed)
    sock = ASSERT.isinstance(httplib_response.fp.raw._sock, socket.socket)
    output = DecoderChain(file)
    if chunked:
        chunk_decoder = ChunkDecoder()
        output.add(chunk_decoder)
        num_to_read = 0
        eof = lambda: chunk_decoder.eof
    else:
        num_to_read = ASSERT.greater(
            ASSERT.not_none(httplib_response.length), 0
        )
        eof = lambda: num_to_read <= 0
    # Use urllib3's decoder code.
    urllib3_response._init_decoder()
    if urllib3_response._decoder is not None:
        output.add(ContentDecoder(urllib3_response._decoder))
    with contextlib.ExitStack() as stack:
        src = adapters.FileAdapter(httplib_response.fp)
        stack.callback(src.disown)
        sock.setblocking(False)
        stack.callback(sock.setblocking, True)
        buffer = memoryview(stack.enter_context(_BUFFER_POOL.using()))
        while not eof():
            if chunked:
                # TODO: If the server sends more data at the end, like
                # the response of the next request, for now recvfile
                # might read it, and then err out.  Maybe recvfile
                # should check this, and not read more than it should?
                num_read = await src.readinto1(buffer)
            else:
                num_read = await src.readinto1(
                    buffer[:min(num_to_read, _CHUNK_SIZE)]
                )
            if num_read == 0:
                break
            output.write(buffer[:num_read])
            num_to_read -= num_read
        output.flush()
    # Sanity check.
    if not chunked:
        ASSERT.equal(num_to_read, 0)
    # Trick requests into releasing the connection back to the
    # connection pool, rather than closing/discarding it.
    response._content_consumed = True
    # http.client.HTTPConnection tracks the last response; so you have
    # to close it to make the connection object usable again.
    httplib_response.close()
    # Close the response for the caller since the response is not
    # usable after recvfile.
    response.close()
    loggings.ONCE_PER(
        1000, LOG.info, 'buffer pool stats: %r', _BUFFER_POOL.get_stats()
    )
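# For orientation only (an assumption, not part of this module): the plain
# ``requests`` way to stream a body into a file.  ``recvfile`` above replaces
# the read loop with its own non-blocking reads into a pooled buffer, but the
# caller-side contract is the same: pass ``stream=True`` when making the
# request and close the response when done.  The URL and output path below
# are placeholders.
import requests


def demo_download(url='http://example.com/large-file', path='/tmp/large-file'):
    response = requests.get(url, stream=True)
    try:
        with open(path, 'wb') as file:
            for chunk in response.iter_content(chunk_size=65536):
                file.write(chunk)
    finally:
        response.close()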
def block(self, source, task):
    ASSERT.isinstance(source, (int, float))
    ASSERT.not_in(task, self._tasks)
    heapq.heappush(self._queue, self.Item(source, task))
    self._tasks.add(task)
def set_root(self, struct):
    ASSERT.isinstance(struct, dynamics.DynamicStructReader)
    self._raw.setRoot(struct._raw)
def append(self, data):
    ASSERT.isinstance(data, bytes)
    errors.check(self._chunk_append(self._get(), data, len(data)))
def block(self, source, task):
    """Record that ``task`` is joining on ``source`` task."""
    ASSERT.isinstance(source, tasks.Task)
    ASSERT.is_not(source, task)  # A task can't join on itself.
    ASSERT.false(source.is_completed())
    return super().block(source, task)
def install(self, bundle_dir, target_ops_dir_path):
    ASSERT.isinstance(bundle_dir, PodBundleDir)
    log_args = (bundle_dir.label, bundle_dir.version)
    # Make metadata first so that uninstall may roll back properly.
    LOG.debug('pods install: metadata: %s %s', *log_args)
    metadata, groups = self._make_metadata(bundle_dir.deploy_instruction)
    jsons.dump_dataobject(metadata, self.metadata_path)
    bases.set_file_attrs(self.metadata_path)
    # Sanity check of the just-written metadata file.
    ASSERT.equal(self.label, bundle_dir.label)
    ASSERT.equal(self.version, bundle_dir.version)
    ASSERT.equal(self.metadata, metadata)
    LOG.debug(
        'pods install: pod ids: %s %s: %s', *log_args, ', '.join(groups))
    LOG.debug('pods install: volumes: %s %s', *log_args)
    bases.make_dir(self.volumes_dir_path)
    for volume, volume_path in bundle_dir.iter_volumes():
        volume_dir_path = self.volumes_dir_path / volume.name
        LOG.debug('pods: extract: %s -> %s', volume_path, volume_dir_path)
        bases.make_dir(ASSERT.not_predicate(volume_dir_path, Path.exists))
        scripts.tar_extract(
            volume_path,
            directory=volume_dir_path,
            extra_args=(
                '--same-owner',
                '--same-permissions',
            ),
        )
    LOG.debug('pods install: images: %s %s', *log_args)
    for _, image_path in bundle_dir.iter_images():
        ctr_scripts.ctr_import_image(image_path)
    LOG.debug('pods install: tokens: %s %s', *log_args)
    assignments = {}
    with tokens.make_tokens_database().writing() as active_tokens:
        for pod_id in groups:
            assignments[pod_id] = {
                alias: active_tokens.assign(token_name, pod_id, alias)
                for alias, token_name in
                bundle_dir.deploy_instruction.token_names.items()
            }
    envs = ops_envs.load()
    LOG.debug('pods install: prepare pods: %s %s', *log_args)
    bases.make_dir(self.refs_dir_path)
    for pod_id, group in groups.items():
        pod_config = self._make_pod_config(
            bundle_dir.deploy_instruction,
            target_ops_dir_path,
            systemds.make_envs(
                pod_id,
                self.metadata,
                group.envs,
                envs,
                assignments[pod_id],
            ),
        )
        with tempfile.NamedTemporaryFile() as config_tempfile:
            config_path = Path(config_tempfile.name)
            jsons.dump_dataobject(pod_config, config_path)
            ctr_scripts.ctr_prepare_pod(pod_id, config_path)
        ctr_scripts.ctr_add_ref_to_pod(pod_id, self.refs_dir_path / pod_id)
    LOG.debug('pods install: systemd units: %s %s', *log_args)
    units = {
        (pod_id, unit.name): unit
        for pod_id, group in groups.items()
        for unit in group.units
    }
    for config in self.metadata.systemd_unit_configs:
        systemds.install(
            config,
            self.metadata,
            groups[config.pod_id],
            units[config.pod_id, config.name],
            envs,
            assignments[config.pod_id],
        )
    systemds.daemon_reload()
    return True
def _decode_raw_value(self, value_type, raw_value):
    """Decode a raw value into a ``value_type``-typed value.

    This and ``_encode_value`` complement each other.
    """
    if typings.is_recursive_type(value_type):
        if value_type.__origin__ in (list, set, frozenset):
            element_type = value_type.__args__[0]
            return value_type.__origin__(
                self._decode_raw_value(element_type, raw_element)
                for raw_element in raw_value)
        elif value_type.__origin__ is tuple:
            ASSERT.equal(len(raw_value), len(value_type.__args__))
            return tuple(
                self._decode_raw_value(element_type, raw_element)
                for element_type, raw_element in zip(
                    value_type.__args__,
                    raw_value,
                ))
        elif typings.is_union_type(value_type):
            # Handle the ``None`` special case.
            if raw_value is None:
                ASSERT.in_(NoneType, value_type.__args__)
                return None
            # Handle the ``Optional[T]`` special case.
            type_ = typings.match_optional_type(value_type)
            if type_:
                return self._decode_raw_value(type_, raw_value)
            ASSERT.equal(len(raw_value), 1)
            type_name, raw_element = next(iter(raw_value.items()))
            for type_ in value_type.__args__:
                if typings.is_recursive_type(type_):
                    candidate = str(type_)
                else:
                    candidate = type_.__name__
                if type_name == candidate:
                    return self._decode_raw_value(type_, raw_element)
            return ASSERT.unreachable(
                'raw value is not any union element type: {!r} {!r}',
                value_type,
                raw_value,
            )
        else:
            return ASSERT.unreachable(
                'unsupported generic: {!r}', value_type)
    elif wiredata.is_message_type(value_type):
        return value_type(
            **{
                f.name: self._decode_raw_value(f.type, raw_value[f.name])
                for f in dataclasses.fields(value_type)
                if f.name in raw_value
            })
    elif not isinstance(value_type, type):
        # A non-``type`` instance cannot be passed to ``issubclass``.
        return ASSERT.unreachable(
            'unsupported value type: {!r}', value_type)
    elif issubclass(value_type, datetime.datetime):
        return value_type.fromisoformat(raw_value)
    elif issubclass(value_type, enum.Enum):
        return value_type[raw_value]
    elif issubclass(value_type, bytes):
        return base64.standard_b64decode(raw_value.encode('ascii'))
    elif issubclass(value_type, Exception):
        ASSERT.equal(len(raw_value), 1)
        return value_type(
            *(ASSERT.isinstance(raw_arg, _DIRECTLY_SERIALIZABLE_TYPES)
              for raw_arg in raw_value[value_type.__name__]))
    elif issubclass(value_type, _DIRECTLY_SERIALIZABLE_TYPES):
        if value_type in _DIRECTLY_SERIALIZABLE_TYPES:
            return ASSERT.isinstance(raw_value, value_type)
        else:
            # Support subtypes of int, etc.
            return value_type(raw_value)
    else:
        return ASSERT.unreachable(
            'unsupported value type: {!r}', value_type)
def _data_to_lower(value):
    ASSERT.isinstance(value, (bytes, memoryview))
    return _capnp.DynamicValue.Reader.fromData(value)
def _encode_value(self, value_type, value):
    """Encode a value into a raw value.

    This and ``_decode_raw_value`` complement each other.
    """
    if typings.is_recursive_type(value_type):
        if value_type.__origin__ in (list, set, frozenset):
            element_type = value_type.__args__[0]
            return [
                self._encode_value(element_type, element)
                for element in value
            ]
        elif value_type.__origin__ is tuple:
            ASSERT.equal(len(value), len(value_type.__args__))
            return tuple(
                self._encode_value(element_type, element)
                for element_type, element in zip(
                    value_type.__args__,
                    value,
                ))
        elif typings.is_union_type(value_type):
            # Make a special case for ``None``.
            if value is None:
                ASSERT.in_(NoneType, value_type.__args__)
                return None
            # Make a special case for ``Optional[T]``.
            type_ = typings.match_optional_type(value_type)
            if type_:
                return self._encode_value(type_, value)
            for type_ in value_type.__args__:
                if typings.is_recursive_type(type_):
                    if _match_recursive_type(type_, value):
                        return {str(type_): self._encode_value(type_, value)}
                elif isinstance(value, type_):
                    return {type_.__name__: self._encode_value(type_, value)}
            return ASSERT.unreachable(
                'value is not any union element type: {!r} {!r}',
                value_type,
                value,
            )
        else:
            return ASSERT.unreachable(
                'unsupported generic: {!r}', value_type)
    elif wiredata.is_message(value):
        ASSERT.predicate(value_type, wiredata.is_message_type)
        return {
            f.name: self._encode_value(f.type, getattr(value, f.name))
            for f in dataclasses.fields(value)
        }
    elif isinstance(value, datetime.datetime):
        ASSERT.issubclass(value_type, datetime.datetime)
        return value.isoformat()
    elif isinstance(value, enum.Enum):
        ASSERT.issubclass(value_type, enum.Enum)
        return value.name
    # JSON does not support a binary type; so it has to be encoded.
    elif isinstance(value, bytes):
        ASSERT.issubclass(value_type, bytes)
        return base64.standard_b64encode(value).decode('ascii')
    elif isinstance(value, Exception):
        ASSERT.issubclass(value_type, Exception)
        return {
            type(value).__name__: [
                ASSERT.isinstance(arg, _DIRECTLY_SERIALIZABLE_TYPES)
                for arg in value.args
            ]
        }
    elif isinstance(value, _DIRECTLY_SERIALIZABLE_TYPES):
        ASSERT.issubclass(value_type, _DIRECTLY_SERIALIZABLE_TYPES)
        return value
    else:
        return ASSERT.unreachable(
            'unsupported value type: {!r} {!r}', value_type, value)
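# Worked illustration (an assumption, not part of this module): a minimal
# standalone mimic of the union tagging used by ``_encode_value`` and
# ``_decode_raw_value`` above, restricted to unions of plain types.  A union
# member is wrapped in a one-entry dict keyed by the member type's name,
# which is what lets the decoder pick the right branch back out.
def _tag_union(value, member_types):
    for type_ in member_types:
        if isinstance(value, type_):
            return {type_.__name__: value}
    raise TypeError('value is not any union element type: %r' % (value, ))


def _untag_union(raw_value, member_types):
    ((type_name, raw_element), ) = raw_value.items()
    for type_ in member_types:
        if type_.__name__ == type_name:
            return raw_element
    raise TypeError('unknown union element type: %r' % (type_name, ))


assert _tag_union(42, (int, str)) == {'int': 42}
assert _tag_union('hi', (int, str)) == {'str': 'hi'}
assert _untag_union({'str': 'hi'}, (int, str)) == 'hi'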
def _text_to_lower(value):
    ASSERT.isinstance(value, str)
    return _capnp.DynamicValue.Reader.fromText(value)
def _enum_to_lower(schema, value):
    if isinstance(value, enum.Enum):
        value = value.value
    ASSERT.isinstance(value, int)
    return _capnp.DynamicValue.Reader.fromDynamicEnum(
        _capnp.DynamicEnum(schema._raw, value))
def _primitive_to_lower(type_, to_lower, value):
    ASSERT.isinstance(value, type_)
    return to_lower(value)
def __setitem__(self, header, value):
    ASSERT.true(self._is_uncommitted())
    ASSERT.isinstance(header, str)
    ASSERT.isinstance(value, str)
    self._headers[header] = value
def _validate(self, value):
    ASSERT.isinstance(value, self.type)
    if self.validate:
        ASSERT.predicate(value, self.validate)
    return value