def create_engine(
    db_url,
    *,
    check_same_thread=True,
    trace=False,
    pragmas=(),
):
    ASSERT(
        DB_URL_PATTERN.fullmatch(db_url),
        'expect sqlite URL, not {!r}',
        db_url,
    )
    engine = sqlalchemy.create_engine(
        db_url,
        connect_args={
            'check_same_thread': check_same_thread,
        },
    )
    # It would be better to call ``add_trace`` before ``config_db``.
    if trace:
        sqlalchemy.event.listen(
            engine,
            'connect',
            functools.partial(add_trace, db_url=db_url),
        )
    if pragmas:
        do_config_db = functools.partial(config_db, pragmas=pragmas)
    else:
        do_config_db = config_db
    sqlalchemy.event.listen(engine, 'connect', config_conn)
    sqlalchemy.event.listen(engine, 'connect', do_config_db, once=True)
    sqlalchemy.event.listen(engine, 'begin', do_begin)
    return engine

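# Usage sketch (hedged): a minimal caller for the factory above. The URL is
# the standard SQLAlchemy "sqlite:///..." form, which is assumed to satisfy
# DB_URL_PATTERN, and the (name, value) shape of ``pragmas`` is a guess from
# the signature; ``config_db`` (not shown) defines the real contract.
def _demo_create_sqlite_engine():
    engine = create_engine(
        'sqlite:////tmp/demo.db',
        trace=True,
        pragmas=(('journal_mode', 'WAL'),),  # hypothetical pragma format
    )
    with engine.connect() as conn:
        conn.execute(sqlalchemy.text('SELECT 1'))
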
def check_invariants(self, active_ops_dirs):
    for ops_dir in active_ops_dirs:
        ASSERT(
            ops_dir.metadata.name != self.metadata.name,
            'expect unique xar label name: {}, {}',
            ops_dir.label,
            self.label,
        )

def _lower_error_or_none(self, error):
    if error is None:
        return None
    ASSERT.isinstance(error, Exception)
    error_name = ASSERT(
        self._match_error_type(error),
        'unknown error type: {!r}',
        error,
    )
    return self._wiredata.to_lower(
        self._response_type(
            error=self._response_type.Error(**{error_name: error})
        )
    )

def _assert_unique_name_and_version(new_metadata):
    for image_dir_path, metadata in _iter_metadatas():
        ASSERT(
            new_metadata.name != metadata.name
            or new_metadata.version != metadata.version,
            'expect unique image name and version: {}, {}',
            image_dir_path,
            new_metadata,
        )

def _guess_label_from_rule(rule):
    """Guess pod, xar, or image label from build rule.

    For example, //pod/foo:bar/build becomes //foo:bar.
    """
    name_parts = rule.name.parts
    ASSERT(
        len(name_parts) == 2 and name_parts[1] == 'build',
        'expect pod, xar, or image build rule: {}',
        rule,
    )
    return foreman.Label.parse(
        '//%s:%s' % ('/'.join(rule.path.parts[1:]), name_parts[0])
    )

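# Usage sketch (hedged): the function only reads ``path.parts`` and
# ``name.parts``, so a parsed foreman.Label serves as a duck-typed stand-in
# for the ``rule`` argument here; whether real rule objects expose exactly
# these attributes is an assumption.
def _demo_guess_label():
    build_rule = foreman.Label.parse('//pod/foo:bar/build')
    label = _guess_label_from_rule(build_rule)
    assert str(label) == '//foo:bar'
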
def get_declared_error_types(response_type):
    # When there is only one error type, reqrep.make_annotations
    # would not generate Optional[T].
    fields = dataclasses.fields(response_type.Error)
    if len(fields) == 1:
        return {ASSERT.issubclass(fields[0].type, Exception): fields[0].name}
    else:
        # ``ASSERT`` returns its first argument, so each key below is the
        # ``T`` matched out of ``Optional[T]``.
        return {
            ASSERT(
                typings.is_recursive_type(field.type)
                and typings.is_union_type(field.type)
                and typings.match_optional_type(field.type),
                'expect typing.Optional[T]: {!r}',
                field,
            ): field.name
            for field in fields
        }

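# Shape sketch (hypothetical types): a response type whose nested Error
# dataclass declares more than one error field, each annotated as
# typing.Optional[SomeException] -- the shape the else-branch above expects
# from reqrep-generated annotations. Relies on the g1 ``typings`` helpers
# behaving as their names suggest.
def _demo_declared_error_types():
    class Response:

        @dataclasses.dataclass
        class Error:
            key_error: typing.Optional[KeyError] = None
            value_error: typing.Optional[ValueError] = None

    assert get_declared_error_types(Response) == {
        KeyError: 'key_error',
        ValueError: 'value_error',
    }
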
def __init__(
    self,
    cache_dir_path,
    capacity,
    *,
    post_eviction_size=None,
    executor=None,  # Use this to evict in the background.
):
    self._lock = threading.Lock()
    self._cache_dir_path = ASSERT.predicate(cache_dir_path, Path.is_dir)
    self._capacity = ASSERT.greater(capacity, 0)
    self._post_eviction_size = (
        post_eviction_size if post_eviction_size is not None else
        int(self._capacity * POST_EVICTION_SIZE_RATIO)
    )
    ASSERT(
        0 <= self._post_eviction_size <= self._capacity,
        'expect 0 <= post_eviction_size <= {}, not {}',
        self._capacity,
        self._post_eviction_size,
    )
    self._executor = executor
    # By the way, if cache cold start is an issue, we could store
    # and load this table from a file.
    self._access_log = collections.OrderedDict()
    # getting_path may "lease" paths to the user, and we should not
    # evict these paths.
    self._active_paths = g1_collections.Multiset()
    self._num_hits = 0
    self._num_misses = 0
    # It's safe to call these methods after this point.
    self._eviction_countdown = self._estimate_eviction_countdown()
    self._maybe_evict()

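# Usage sketch (hedged): ``Cache`` stands for whichever class owns the
# __init__ above; the class name and the entry-count semantics of
# ``capacity`` are inferred, not confirmed. The cache directory must
# already exist, since __init__ asserts Path.is_dir.
def _demo_cache():
    cache_dir = Path('/tmp/demo-cache')
    cache_dir.mkdir(parents=True, exist_ok=True)
    return Cache(  # hypothetical class name
        cache_dir,
        capacity=100,
        post_eviction_size=80,  # evict down to 80 entries when over capacity
    )
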
def _sanity_check(self):
    # Every task must be accounted for exactly once: ready, blocked in
    # one of the blocker queues, or currently running.
    expect_num_tasks = self._num_tasks
    actual_num_tasks = sum(
        map(
            len,
            (
                self._ready_tasks,
                self._task_completion_blocker,
                self._read_blocker,
                self._write_blocker,
                self._sleep_blocker,
                self._generic_blocker,
                self._forever_blocker,
            ),
        )
    )
    if self._current_task:
        actual_num_tasks += 1
    ASSERT(
        expect_num_tasks >= 0 and expect_num_tasks == actual_num_tasks,
        'sanity check fail: {!r}',
        self,
    )

def define_pod(
    *,
    name: str,
    apps: typing.List[App] = (),
    images: typing.List[str] = (),
    mounts: typing.List[Mount] = (),
    volumes: typing.List[Volume] = (),
    systemd_unit_groups: typing.List[SystemdUnitGroup] = (),
    token_names: typing.Mapping[str, str] = None,
):
    """Define a pod.

    This defines:
    * Parameter: name/version.
    * Rule: name/build.

    NOTE: This rule is generally run in the host system, not inside a
    builder pod.
    """
    ASSERT(
        len(images) <= 1,
        'expect at most one image per pod for now: {}',
        images,
    )
    # Let's require absolute release labels (because it is quite hard
    # to derive label path for images and volumes from pod label).
    ASSERT.all(images, lambda label: label.startswith('//'))
    ASSERT.all(volumes, lambda volume: volume.label.startswith('//'))
    ASSERT.unique(map(_get_label_name, images))
    ASSERT.unique(_get_label_name(volume.label) for volume in volumes)

    name_prefix = shipyard2.rules.canonicalize_name_prefix(name)
    parameter_version = name_prefix + 'version'
    rule_build = name_prefix + 'build'

    (foreman.define_parameter(parameter_version)
     .with_doc('pod version'))

    images = list(map(foreman.Label.parse, images))

    @foreman.rule(rule_build)
    @foreman.rule.depend('//pods/bases:build')
    @foreman.rule.depend('//releases:build')
    def build(parameters):
        version = ASSERT.not_none(parameters[parameter_version])
        pod_dir_path = releases.get_output_dir_path(parameters, name, version)
        if (
            pod_dir_path / shipyard2.POD_DIR_RELEASE_METADATA_FILENAME
        ).exists():
            LOG.info('skip: build pod: %s %s', name, version)
            return
        LOG.info('build pod: %s %s', name, version)
        try:
            scripts.mkdir(pod_dir_path)
            releases.generate_release_metadata(
                parameters,
                pod_dir_path / shipyard2.POD_DIR_RELEASE_METADATA_FILENAME,
            )
            _generate_deploy_instruction(
                parameters=parameters,
                pod_dir_path=pod_dir_path,
                name=name,
                version=version,
                apps=apps,
                images=images,
                mounts=mounts,
                volumes=volumes,
                systemd_unit_groups=systemd_unit_groups,
                token_names=token_names,
            )
            _link_images(parameters, pod_dir_path, images)
            _link_volumes(parameters, pod_dir_path, volumes)
        except Exception:
            # Roll back on error.
            scripts.rm(pod_dir_path, recursive=True)
            raise

    for label in images:
        build.depend(str(_images.derive_rule(label)))

    return PodRules(build=build)

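# Usage sketch (hedged): in a build file this call would sit at module
# level; the label below is hypothetical, and ``apps`` / ``volumes`` are
# omitted because the App and Volume constructors are defined elsewhere in
# this package and their fields are not shown here.
def _demo_define_pod():
    return define_pod(
        name='web-server',
        # Release labels must be absolute, per the asserts above.
        images=['//images/web-server:web-server'],
    )
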
def create_engine(db_url):
    ASSERT(
        DB_URL_PATTERN.match(db_url),
        'expect postgresql URL, not {!r}',
        db_url,
    )
    return sqlalchemy.create_engine(db_url)

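# Usage sketch (hedged): a standard SQLAlchemy PostgreSQL URL, assumed to
# satisfy DB_URL_PATTERN; host, credentials, and database name are
# placeholders.
def _demo_create_pg_engine():
    engine = create_engine('postgresql://user:password@localhost:5432/mydb')
    with engine.connect() as conn:
        conn.execute(sqlalchemy.text('SELECT 1'))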