Example #1
def get_duration(self, *, name, type_name):
    ASSERT.equal(type_name, 'ms')
    value = get_simple(self, name=name, type_name='ms')
    if value >= 0:
        return datetime.timedelta(milliseconds=value)
    else:
        # Negative values are nng sentinel durations (e.g., -1 means
        # "infinite"), so pass them through as _nng.Durations.
        return _nng.Durations(value)
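For reference, a self-contained sketch of the non-negative branch (the negative branch needs the _nng bindings and is omitted; ms_to_timedelta is a hypothetical name):

import datetime

def ms_to_timedelta(value):
    # Mirrors the non-negative branch of get_duration above.
    assert value >= 0
    return datetime.timedelta(milliseconds=value)

print(ms_to_timedelta(1500))  # -> 0:00:01.500000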
Example #2
def _extract_image(archive_path, dst_dir_path):
    # We assume the archive is always gzip-compressed for now.
    hasher = hashlib.sha256()
    # If we are running as root, we can and should preserve the
    # original owners and permissions.
    i_am_root = oses.has_root_privilege()
    # TODO: Should we use stdlib's tarfile rather than calling tar?
    with scripts.using_stdin(subprocess.PIPE), scripts.popen([
        'tar',
        '--extract',
        *('--file', '-'),
        *('--directory', dst_dir_path),
        *(('--same-owner', '--same-permissions') if i_am_root else ()),
    ]) as proc:
        try:
            with gzip.open(archive_path, 'rb') as archive:
                while True:
                    data = archive.read(4096)
                    if not data:
                        break
                    proc.stdin.write(data)
                    hasher.update(data)
        except:
            proc.kill()
            raise
        else:
            proc.stdin.close()
            proc.wait()
            ASSERT.equal(proc.poll(), 0)
    return hasher.hexdigest()
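On the TODO above, a stdlib-only sketch using tarfile instead of spawning tar; extract_and_hash is a hypothetical name. Like the original, the hash covers the decompressed tar stream, but this version reads the archive twice instead of streaming once:

import gzip
import hashlib
import tarfile

def extract_and_hash(archive_path, dst_dir_path):
    # First pass: hash the decompressed tar stream.
    hasher = hashlib.sha256()
    with gzip.open(archive_path, 'rb') as archive:
        for chunk in iter(lambda: archive.read(4096), b''):
            hasher.update(chunk)
    # Second pass: extract; tarfile preserves owners only when running
    # as root, roughly matching the --same-owner flag above.
    with tarfile.open(archive_path, 'r:gz') as archive:
        archive.extractall(dst_dir_path)
    return hasher.hexdigest()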
Example #3
def writer_release(self):
    with self._lock:
        ASSERT.equal(self._num_readers, 0)
        ASSERT.equal(self._num_writers, 1)
        self._num_writers = 0
        self._reader_cond.notify_all()
        self._writer_cond.notify()
Example #4
def cmd_remove(args):
    if args.type == 'pods':
        dir_object_type = repos.PodDir
        current_versions = _get_envs_dir(args).get_current_pod_versions()
    elif args.type == 'xars':
        dir_object_type = repos.XarDir
        current_versions = _get_envs_dir(args).get_current_xar_versions()
    elif args.type == 'builder-images':
        dir_object_type = repos.BuilderImageDir
        # Builder images are not referenced by pods and thus do not have
        # current versions.
        current_versions = {}
    elif args.type == 'images':
        dir_object_type = repos.ImageDir
        current_versions = repos.get_current_image_versions(args.release_repo)
    else:
        ASSERT.equal(args.type, 'volumes')
        dir_object_type = repos.VolumeDir
        current_versions = repos.PodDir.get_current_volume_versions(
            args.release_repo)
    # Skip versions that are still current (in use).
    if args.version in current_versions.get(args.label, ()):
        LOG.warning(
            'skip: remove: %s %s %s',
            args.type,
            args.label,
            args.version,
        )
        return 1
    dir_object = dir_object_type.from_relpath(
        args.release_repo,
        args.label.path / args.label.name / args.version,
    )
    LOG.info('remove: %s %s %s', args.type, args.label, args.version)
    dir_object.remove()
    return 0
Example #5
def set_sqlite_tmpdir(tmpdir_path):
    #
    # NOTE: Do NOT overwrite the SQLITE_TMPDIR environ entry because:
    #
    # * Prior to Python 3.9, posix.putenv, which was implemented with
    #   putenv, only kept a reference to the latest value; old values
    #   were garbage collected.  (Since 3.9 [1], posix.putenv is
    #   implemented with setenv and no longer has this problem.)
    #
    # * SQLite keeps a static reference to the SQLITE_TMPDIR value [2].
    #   Thus you must ensure that SQLITE_TMPDIR, once set and referenced
    #   by SQLite, is never overwritten (not even with the same value),
    #   so that the old value is not garbage collected; otherwise,
    #   SQLite would access a freed memory region.
    #
    # pylint: disable=line-too-long
    # [1] https://github.com/python/cpython/commit/b8d1262e8afe7b907b4a394a191739571092acdb
    # [2] https://github.com/sqlite/sqlite/blob/78043e891ab2fba7dbec1493a9d3e10ab2476745/src/os_unix.c#L5755
    # pylint: enable=line-too-long
    #
    tmpdir_path = str(tmpdir_path)
    sqlite_tmpdir = os.environ.get('SQLITE_TMPDIR')
    if sqlite_tmpdir is None:
        os.environ['SQLITE_TMPDIR'] = tmpdir_path
    else:
        ASSERT.equal(sqlite_tmpdir, tmpdir_path)
    LOG.info('SQLITE_TMPDIR = %r', os.environ['SQLITE_TMPDIR'])
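The same set-once-then-assert-equal pattern in a minimal, self-contained form (set_once and the path are hypothetical):

import os

def set_once(name, value):
    # setdefault writes the entry only if it is absent and returns the
    # value that is now in effect.
    current = os.environ.setdefault(name, value)
    assert current == value, (current, value)

set_once('SQLITE_TMPDIR', '/var/tmp/sqlite-tmp')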
Example #6
    async def _readline(self, limit=65536):
        """Read one line from the socket.

        It errs out when line length exceeds the limit.
        """
        if self._buffer:
            ASSERT.equal(len(self._buffer), 1)
            line = self._search_line(0)
            if line is not None:
                return line
        while not self._ended and self._size <= limit:
            data = await self._sock.recv(limit + 1)
            if not data:
                self._ended = True
                break
            self._buffer.append(data)
            self._size += len(data)
            line = self._search_line(-1)
            if line is not None:
                ASSERT.in_(len(self._buffer), (0, 1))
                return line
        if self._size > limit:
            raise _TooLong('request line length exceeds %d' % limit)
        if self._buffer:
            remaining = b''.join(self._buffer)
            self._buffer.clear()
            self._size = 0
            return remaining
        else:
            return b''
Example #7
    def install(self, bundle_dir, target_ops_dir_path):
        del target_ops_dir_path  # Unused.
        ASSERT.isinstance(bundle_dir, XarBundleDir)
        log_args = (bundle_dir.label, bundle_dir.version)

        # Make metadata first so that uninstall may roll back properly.
        LOG.info('xars install: metadata: %s %s', *log_args)
        jsons.dump_dataobject(
            models.XarMetadata(
                label=bundle_dir.label,
                version=bundle_dir.version,
                image=bundle_dir.deploy_instruction.image,
            ),
            self.metadata_path,
        )
        bases.set_file_attrs(self.metadata_path)
        # Sanity check of the just-written metadata file.
        ASSERT.equal(self.label, bundle_dir.label)
        ASSERT.equal(self.version, bundle_dir.version)

        if bundle_dir.deploy_instruction.is_zipapp():
            LOG.info('xars install: zipapp: %s %s', *log_args)
            bases.copy_exec(bundle_dir.zipapp_path, self.zipapp_target_path)
        else:
            LOG.info('xars install: xar: %s %s', *log_args)
            ctr_scripts.ctr_import_image(bundle_dir.image_path)
            ctr_scripts.ctr_install_xar(
                bundle_dir.deploy_instruction.name,
                bundle_dir.deploy_instruction.exec_relpath,
                bundle_dir.deploy_instruction.image,
            )

        return True
Example #8
def reader_release(self):
    with self._lock:
        ASSERT.greater(self._num_readers, 0)
        ASSERT.equal(self._num_writers, 0)
        self._num_readers -= 1
        if self._num_readers == 0:
            self._writer_cond.notify()
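Examples #3 and #8 are the release halves of a reader-writer lock. A minimal sketch of the acquire side that would pair with them, assuming threading primitives (only the attribute names are taken from the examples):

import threading

class ReadWriteLock:

    def __init__(self):
        self._lock = threading.Lock()
        self._reader_cond = threading.Condition(self._lock)
        self._writer_cond = threading.Condition(self._lock)
        self._num_readers = 0
        self._num_writers = 0

    def reader_acquire(self):
        with self._lock:
            # Readers wait while a writer holds the lock.
            while self._num_writers > 0:
                self._reader_cond.wait()
            self._num_readers += 1

    def writer_acquire(self):
        with self._lock:
            # A writer waits for exclusive access.
            while self._num_readers > 0 or self._num_writers > 0:
                self._writer_cond.wait()
            self._num_writers = 1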
Example #9
def expect(pattern):
    """Drop ``pattern`` prefix from ``data``."""
    nonlocal data
    n = min(len(pattern), len(data))
    ASSERT.equal(pattern[:n], data[:n])
    data = data[n:]
    return n
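``expect`` closes over a nonlocal ``data`` buffer in an enclosing parser. A self-contained sketch of that pattern (parse and its input are hypothetical):

def parse(data):

    def expect(pattern):
        nonlocal data
        n = min(len(pattern), len(data))
        assert pattern[:n] == data[:n]
        data = data[n:]
        return n

    # Consume the known prefix, then return the rest.
    expect(b'HTTP/')
    return data

print(parse(b'HTTP/1.1 200 OK'))  # -> b'1.1 200 OK'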
Example #10
def machine_id_to_pod_id(machine_id):
    ASSERT.equal(len(machine_id), 32)
    return '%s-%s-%s-%s-%s' % (
        machine_id[0:8],
        machine_id[8:12],
        machine_id[12:16],
        machine_id[16:20],
        machine_id[20:32],
    )
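The 8-4-4-4-12 split is the canonical UUID text layout. A quick usage sketch with a hypothetical machine id:

machine_id = '0123456789abcdef0123456789abcdef'
pod_id = '%s-%s-%s-%s-%s' % (
    machine_id[0:8],
    machine_id[8:12],
    machine_id[12:16],
    machine_id[16:20],
    machine_id[20:32],
)
print(pod_id)  # -> 01234567-89ab-cdef-0123-456789abcdef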
Example #11
def __post_init__(self):
    ASSERT.not_empty(self.sources)
    for i, source in enumerate(self.sources):
        # Empty source path means host's /var/tmp.
        if source:
            ASSERT.predicate(Path(source), Path.is_absolute)
        else:
            ASSERT.equal(i, len(self.sources) - 1)
    ASSERT.predicate(Path(self.target), Path.is_absolute)
Example #12
def cmd_release(args):
    LOG.info('release: %s %s to %s', args.label, args.version, args.env)
    if args.type == 'pods':
        release = _get_envs_dir(args).release_pod
    else:
        ASSERT.equal(args.type, 'xars')
        release = _get_envs_dir(args).release_xar
    release(args.env, args.label, args.version)
    return 0
Example #13
def __post_init__(self):
    ASSERT.in_(self.kind, ('range', 'values'))
    if self.kind == 'range':
        ASSERT.equal(len(self.args), 2)
        ASSERT.all(self.args, lambda arg: isinstance(arg, int))
        ASSERT.less_or_equal(self.args[0], self.args[1])
    else:
        ASSERT.equal(self.kind, 'values')
        ASSERT.all(self.args, lambda arg: isinstance(arg, str))
Example #14
def get_string(self, *, name, type_name):
    ASSERT.equal(type_name, 'string')
    getopt = _nng.F['nng_%s_get_string' % self._name]
    value = ctypes.c_char_p()
    errors.check(getopt(self._handle, name, ctypes.byref(value)))
    try:
        return value.value.decode('utf-8')  # pylint: disable=no-member
    finally:
        _nng.F.nng_strfree(value)
Example #15
async def _check_lease_expiration(self):
    ASSERT.equal(self._manager.tx_id, 0)
    async with self._manager.reading() as conn:
        expirations = databases.lease_scan_expirations(conn, self._tables)
    now = time.time()
    for expiration in expirations:
        self._timer_queue.spawn(
            _sleep(expiration - now, self._lease_expire)
        )
Example #16
def get_bytes(self, *, name, type_name):
    ASSERT.equal(type_name, 'bytes')
    getopt = _nng.F['nng_%s_get' % self._name]
    ptr = ctypes.c_void_p()
    size = ctypes.c_size_t()
    errors.check(
        getopt(self._handle, name, ctypes.byref(ptr), ctypes.byref(size)))
    try:
        return ctypes.string_at(ptr, size.value)
    finally:
        _nng.F.nng_free(ptr, size)
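The byref/string_at out-parameter pattern above, reduced to a self-contained ctypes snippet (no nng involved):

import ctypes

buf = ctypes.create_string_buffer(b'hello')
ptr = ctypes.cast(buf, ctypes.c_void_p)
size = ctypes.c_size_t(5)
# string_at copies ``size`` bytes out of the foreign buffer.
print(ctypes.string_at(ptr, size.value))  # -> b'hello'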
Example #17
def _link(sub_dir_name, parameters, pod_dir_path, label, version):
    if sub_dir_name == shipyard2.POD_DIR_IMAGES_DIR_NAME:
        derive = lambda ps, l, _: _images.derive_image_path(ps, l)
    else:
        ASSERT.equal(sub_dir_name, shipyard2.POD_DIR_VOLUMES_DIR_NAME)
        derive = _derive_volume_path
    target_path = ASSERT.predicate(
        derive(parameters, label, version),
        Path.is_file,
    )
    scripts.make_relative_symlink(
        target_path,
        pod_dir_path / sub_dir_name / label.name / target_path.name,
    )
Example #18
def __post_init__(self):
    validate_pod_label(self.label)
    ASSERT.equal(
        _get_label_name(_POD_LABEL_PATTERN, self.label),
        self.pod_config_template.name,
    )
    # Only allow specifying pod images by name for now.
    ASSERT.all(self.images, lambda image: image.name and image.version)
    # Due to the bundle directory layout, image names and volume names
    # are expected to be unique.  (This layout restriction should not
    # be too restrictive in practice.)
    ASSERT.unique(self.images, lambda image: image.name)
    ASSERT.unique(self.volumes, lambda volume: volume.name)
    # We use the same format for local aliases as for token names.
    ASSERT.all(self.token_names.keys(), validate_token_name)
    ASSERT.all(self.token_names.values(), validate_token_name)
Example #19
def get_all_tasks(self):
    """Return a list of all tasks (useful for debugging)."""
    self._assert_owner()
    all_tasks = []
    if self._current_task:
        all_tasks.append(self._current_task)
    all_tasks.extend(task_ready.task for task_ready in self._ready_tasks)
    for task_collection in (
            self._task_completion_blocker,
            self._read_blocker,
            self._write_blocker,
            self._sleep_blocker,
            self._generic_blocker,
            self._forever_blocker,
    ):
        all_tasks.extend(task_collection)
    ASSERT.equal(len(all_tasks), self._num_tasks)
    return all_tasks
Example #20
        def validate_assigned_values(self, assigned_values):
            """Validate assigned values.

            * No duplicated assignments.
            * Assigned values are a subset of defined values.
            """
            ASSERT.all(assigned_values, lambda value: isinstance(value, str))
            if self.kind == 'range':
                ASSERT.unique(assigned_values)
                ASSERT.all(
                    assigned_values,
                    lambda value: self.args[0] <= int(value) < self.args[1],
                )
            else:
                ASSERT.equal(self.kind, 'values')
                ASSERT.issubset(
                    g1_collections.Multiset(assigned_values),
                    g1_collections.Multiset(self.args),
                )
            return assigned_values
Example #21
def next_available(self, assigned_values):
    if self.kind == 'range':
        assigned_value_set = frozenset(map(int, assigned_values))
        if assigned_value_set:
            value = max(assigned_value_set) + 1
            if value < self.args[1]:
                # NOTE: We allow only str-typed token values.
                return str(value)
        for value in range(*self.args):
            if value not in assigned_value_set:
                # NOTE: We allow only str-typed token values.
                return str(value)
    else:
        ASSERT.equal(self.kind, 'values')
        candidates = g1_collections.Multiset(self.args)
        candidates -= g1_collections.Multiset(assigned_values)
        if candidates:
            return next(iter(candidates))
    return ASSERT.unreachable('no value available: {} {}', self,
                              assigned_values)
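For the 'values' branch, collections.Counter behaves like the multiset difference used above (an assumption; g1_collections.Multiset may differ in detail):

import collections

args = ('a', 'b', 'b', 'c')
assigned_values = ('b', 'c')
# Multiset difference: one 'b' and the 'c' are consumed.
candidates = collections.Counter(args) - collections.Counter(assigned_values)
print(sorted(candidates.elements()))  # -> ['a', 'b']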
Example #22
async def set(self, *, key, value, transaction=0):
    prior = await self._set(key=key, value=value, transaction=transaction)
    if prior is None or prior.value != value:
        if transaction != 0:
            revision = ASSERT.not_none(self._tx_revision) + 1
        else:
            ASSERT.equal(self._manager.tx_id, 0)
            async with self._manager.reading() as conn:
                revision = databases.get_revision(conn, self._tables)
        self._maybe_publish_events(
            transaction,
            [
                interfaces.DatabaseEvent(
                    previous=prior,
                    current=interfaces.KeyValue(
                        revision=revision, key=key, value=value
                    ),
                ),
            ],
        )
    return prior
Example #23
def cmd_list_definitions(args):
    columnar = columns.Columnar(
        **columns_argparses.make_columnar_kwargs(args),
        stringifiers=_DEFINITION_LIST_STRINGIFIERS,
    )
    for token_name, definition in (
            tokens.make_tokens_database().get().definitions.items()):
        if definition.kind == 'range':
            columnar.append({
                'token-name': token_name,
                'range': definition.args,
                'values': (),
            })
        else:
            ASSERT.equal(definition.kind, 'values')
            columnar.append({
                'token-name': token_name,
                'range': (),
                'values': definition.args,
            })
    columnar.sort(lambda row: row['token-name'])
    columnar.output(sys.stdout)
    return 0
Example #24
def build(parameters):
    src_path = _get_src_path(parameters)
    ASSERT.equal(src_path.name, 'v8')
    _fetch(parameters, src_path)
    _build(src_path)
Example #25
def _by_keys(self, keys):
    ASSERT.equal(len(keys), len(self.key_columns))
    return and_(*(c == k for c, k in zip(self.key_columns, keys)))
Example #26
def make_record(self, keys, values):
    ASSERT.equal(len(keys), len(self.key_columns))
    ASSERT.equal(len(values), len(self.value_columns))
    record = dict(zip(self.key_column_names, keys))
    record.update(zip(self.value_column_names, values))
    return record
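A usage sketch with hypothetical column names, showing how keys and values zip positionally into one record dict:

key_column_names = ('id',)
value_column_names = ('name', 'age')
record = dict(zip(key_column_names, (1,)))
record.update(zip(value_column_names, ('alice', 30)))
print(record)  # -> {'id': 1, 'name': 'alice', 'age': 30}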
Example #27
    def install(self, bundle_dir, target_ops_dir_path):
        ASSERT.isinstance(bundle_dir, PodBundleDir)
        log_args = (bundle_dir.label, bundle_dir.version)

        # Make metadata first so that uninstall may roll back properly.
        LOG.debug('pods install: metadata: %s %s', *log_args)
        metadata, groups = self._make_metadata(bundle_dir.deploy_instruction)
        jsons.dump_dataobject(metadata, self.metadata_path)
        bases.set_file_attrs(self.metadata_path)

        # Sanity check of the just-written metadata file.
        ASSERT.equal(self.label, bundle_dir.label)
        ASSERT.equal(self.version, bundle_dir.version)
        ASSERT.equal(self.metadata, metadata)
        LOG.debug('pods install: pod ids: %s %s: %s', *log_args,
                  ', '.join(groups))

        LOG.debug('pods install: volumes: %s %s', *log_args)
        bases.make_dir(self.volumes_dir_path)
        for volume, volume_path in bundle_dir.iter_volumes():
            volume_dir_path = self.volumes_dir_path / volume.name
            LOG.debug('pods: extract: %s -> %s', volume_path, volume_dir_path)
            bases.make_dir(ASSERT.not_predicate(volume_dir_path, Path.exists))
            scripts.tar_extract(
                volume_path,
                directory=volume_dir_path,
                extra_args=(
                    '--same-owner',
                    '--same-permissions',
                ),
            )

        LOG.debug('pods install: images: %s %s', *log_args)
        for _, image_path in bundle_dir.iter_images():
            ctr_scripts.ctr_import_image(image_path)

        LOG.debug('pods install: tokens: %s %s', *log_args)
        assignments = {}
        with tokens.make_tokens_database().writing() as active_tokens:
            for pod_id in groups:
                assignments[pod_id] = {
                    alias: active_tokens.assign(token_name, pod_id, alias)
                    for alias, token_name in
                    bundle_dir.deploy_instruction.token_names.items()
                }

        envs = ops_envs.load()

        LOG.debug('pods install: prepare pods: %s %s', *log_args)
        bases.make_dir(self.refs_dir_path)
        for pod_id, group in groups.items():
            pod_config = self._make_pod_config(
                bundle_dir.deploy_instruction,
                target_ops_dir_path,
                systemds.make_envs(
                    pod_id,
                    self.metadata,
                    group.envs,
                    envs,
                    assignments[pod_id],
                ),
            )
            with tempfile.NamedTemporaryFile() as config_tempfile:
                config_path = Path(config_tempfile.name)
                jsons.dump_dataobject(pod_config, config_path)
                ctr_scripts.ctr_prepare_pod(pod_id, config_path)
            ctr_scripts.ctr_add_ref_to_pod(pod_id, self.refs_dir_path / pod_id)

        LOG.debug('pods install: systemd units: %s %s', *log_args)
        units = {(pod_id, unit.name): unit
                 for pod_id, group in groups.items() for unit in group.units}
        for config in self.metadata.systemd_unit_configs:
            systemds.install(
                config,
                self.metadata,
                groups[config.pod_id],
                units[config.pod_id, config.name],
                envs,
                assignments[config.pod_id],
            )

        systemds.daemon_reload()
        return True
Example #28
def flush(self):
    ASSERT.true(self.eof)
    ASSERT.equal(self._chunk_remaining, -2)
    return []
Example #29
async def recvfile(response, file):
    """Receive response body into a file.

    The caller must set ``stream`` to true when make the request.

    DANGER! This breaks through multiple levels of encapsulation, from
    requests.Response all the way down to http.client.HTTPResponse.
    As a result, the response object is most likely unusable after a
    recvfile call, and you should probably close it immediately.
    """
    # requests sets _content to False initially.
    ASSERT.is_(response._content, False)
    ASSERT.false(response._content_consumed)

    urllib3_response = ASSERT.not_none(response.raw)
    chunked = urllib3_response.chunked

    httplib_response = ASSERT.isinstance(
        urllib3_response._fp, http.client.HTTPResponse
    )
    ASSERT.false(httplib_response.closed)
    sock = ASSERT.isinstance(httplib_response.fp.raw._sock, socket.socket)

    output = DecoderChain(file)

    if chunked:
        chunk_decoder = ChunkDecoder()
        output.add(chunk_decoder)
        num_to_read = 0
        eof = lambda: chunk_decoder.eof
    else:
        num_to_read = ASSERT.greater(
            ASSERT.not_none(httplib_response.length), 0
        )
        eof = lambda: num_to_read <= 0

    # Use urllib3's decoder code.
    urllib3_response._init_decoder()
    if urllib3_response._decoder is not None:
        output.add(ContentDecoder(urllib3_response._decoder))

    with contextlib.ExitStack() as stack:
        src = adapters.FileAdapter(httplib_response.fp)
        stack.callback(src.disown)

        sock.setblocking(False)
        stack.callback(sock.setblocking, True)

        buffer = memoryview(stack.enter_context(_BUFFER_POOL.using()))
        while not eof():
            if chunked:
                # TODO: If the server sends more data at the end, such
                # as the response to the next request, recvfile might
                # currently read it and then err out.  Maybe recvfile
                # should check for this and not read more than it
                # should?
                num_read = await src.readinto1(buffer)
            else:
                num_read = await src.readinto1(
                    buffer[:min(num_to_read, _CHUNK_SIZE)]
                )
            if num_read == 0:
                break
            output.write(buffer[:num_read])
            num_to_read -= num_read

        output.flush()

    # Sanity check.
    if not chunked:
        ASSERT.equal(num_to_read, 0)

    # Trick requests to release the connection back to the connection
    # pool, rather than closing/discarding it.
    response._content_consumed = True
    # http.client.HTTPConnection tracks the last response; so you have
    # to close it to make the connection object usable again.
    httplib_response.close()

    # Close the response for the caller since the response is not
    # usable after recvfile.
    response.close()

    loggings.ONCE_PER(
        1000, LOG.info, 'buffer pool stats: %r', _BUFFER_POOL.get_stats()
    )
Example #30
def set_duration(self, value, **kwargs):
    ASSERT.equal(kwargs.get('type_name'), 'ms')
    if isinstance(value, datetime.timedelta):
        # Convert to whole milliseconds, rounding half up.
        value = int(value / ONE_MILLISECOND + 0.5)
    set_simple(self, value, **kwargs)
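The ``+ 0.5`` rounds half up when truncating to whole milliseconds; a self-contained check:

import datetime

ONE_MILLISECOND = datetime.timedelta(milliseconds=1)

value = datetime.timedelta(seconds=1, microseconds=500)
# 1000.5 ms rounds up to 1001 ms.
print(int(value / ONE_MILLISECOND + 0.5))  # -> 1001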