Example #1
 def _logged_func(self):
     stack = ExitStack()
     self.exc = None
     self.timer = Timer()
     stack.callback(self.timer.stop)
     stack.callback(self.stop)
     try:
         if not self.console_logging:
             stack.enter_context(_logger.suppressed())
         _logger.debug("%s - starting", self)
         while True:
             self._result = self.func(*self.args, **self.kwargs)
             if not self.loop:
                 return
             if self.wait(self.sleep):
                 _logger.debug("%s - stopped", self)
                 return
     except ProcessExiting as exc:
         _logger.debug(exc)
         raise
     except KeyboardInterrupt as exc:
         _logger.silent_exception("KeyboardInterrupt in thread running %s:",
                                  self.func)
         self.exc = apply_timestamp(exc)
         if IS_GEVENT:
             raise  # in gevent we should let this exception propagate to the main greenlet
     except Exception as exc:
         _logger.silent_exception(
             "Exception in thread running %s: %s (traceback can be found in debug-level logs)",
             self.func, type(exc))
         self.exc = apply_timestamp(exc)
     finally:
         stack.close()
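Example #1 arms its cleanups up front (timer stop, thread stop) and relies on the `finally: stack.close()` to run them on every exit path. A minimal, self-contained sketch of that shape: every name below is illustrative, not from the original.

from contextlib import ExitStack

def run_with_cleanup(work, *cleanups):
    # Callbacks run in LIFO order when the stack closes, whether
    # work() returns normally or raises.
    stack = ExitStack()
    for cb in cleanups:
        stack.callback(cb)
    try:
        return work()
    finally:
        stack.close()

# Prints "work", then "second", then "first" (LIFO order).
run_with_cleanup(lambda: print("work"),
                 lambda: print("first"),
                 lambda: print("second"))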
Example #2
class TestZip(unittest.TestCase):
    root = 'importlib_metadata.tests.data'

    def setUp(self):
        # Find the path to the example-*.whl so we can add it to the front of
        # sys.path, where we'll then try to find the metadata thereof.
        self.resources = ExitStack()
        self.addCleanup(self.resources.close)
        wheel = self.resources.enter_context(
            path(self.root, 'example-21.12-py3-none-any.whl'))
        sys.path.insert(0, str(wheel))
        self.resources.callback(sys.path.pop, 0)

    def test_zip_version(self):
        self.assertEqual(version('example'), '21.12')

    def test_zip_entry_points(self):
        scripts = dict(entry_points()['console_scripts'])
        entry_point = scripts['example']
        self.assertEqual(entry_point.value, 'example:main')

    def test_missing_metadata(self):
        self.assertIsNone(distribution('example').read_text('does not exist'))

    def test_case_insensitive(self):
        self.assertEqual(version('Example'), '21.12')

    def test_files(self):
        for file in files('example'):
            path = str(file.dist.locate_file(file))
            assert '.whl/' in path, path
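The setUp above pairs each mutation with an immediately armed undo: `sys.path.insert(0, ...)` followed by `resources.callback(sys.path.pop, 0)`. A stripped-down sketch of that pairing (the path is hypothetical):

import sys
from contextlib import ExitStack

with ExitStack() as resources:
    sys.path.insert(0, "/tmp/example.whl")  # hypothetical entry
    resources.callback(sys.path.pop, 0)     # undone even if the body raises
    assert sys.path[0] == "/tmp/example.whl"
assert sys.path[0] != "/tmp/example.whl"    # restored after the with-block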
Example #3
    def setUp(self):
        resources = ExitStack()
        self.addCleanup(resources.close)
        self.var_dir = resources.enter_context(TemporaryDirectory())
        self._mlist = create_list('*****@*****.**')
        self._mlist.display_name = 'Test List'
        getUtility(ITemplateManager).set(
            'user:ack:welcome', self._mlist.list_id, 'mailman:///welcome.txt')
        config.push('template config', """\
        [paths.testing]
        template_dir: {}/templates
        """.format(self.var_dir))
        resources.callback(config.pop, 'template config')
        # Populate the template directories with a few fake templates.
        path = os.path.join(self.var_dir, 'templates', 'site', 'en')
        os.makedirs(path)
        full_path = os.path.join(path, 'list:user:notice:welcome.txt')
        with open(full_path, 'w', encoding='utf-8') as fp:
            print("""\
Welcome to the $list_name mailing list.

    Posting address: $fqdn_listname
    Help and other requests: $list_requests
    Your name: $user_name
    Your address: $user_address""", file=fp)
        # Write a list-specific welcome message.
        path = os.path.join(self.var_dir, 'templates', 'lists',
                            '*****@*****.**', 'xx')
        os.makedirs(path)
        full_path = os.path.join(path, 'list:user:notice:welcome.txt')
        with open(full_path, 'w', encoding='utf-8') as fp:
            print('You just joined the $list_name mailing list!', file=fp)
Example #4
class TestZip(unittest.TestCase):
    def setUp(self):
        # Find the path to the example.*.whl so we can add it to the front of
        # sys.path, where we'll then try to find the metadata thereof.
        self.resources = ExitStack()
        self.addCleanup(self.resources.close)
        wheel = self.resources.enter_context(
            path('importlib_metadata.tests.data',
                 'example-21.12-py3-none-any.whl'))
        sys.path.insert(0, str(wheel))
        self.resources.callback(sys.path.pop, 0)

    def test_zip_version(self):
        self.assertEqual(importlib_metadata.version('example'), '21.12')

    def test_zip_entry_points(self):
        parser = importlib_metadata.entry_points('example')
        entry_point = parser.get('console_scripts', 'example')
        self.assertEqual(entry_point, 'example:main')

    def test_missing_metadata(self):
        distribution = importlib_metadata.distribution('example')
        self.assertIsNone(distribution.read_text('does not exist'))

    def test_case_insensitive(self):
        self.assertEqual(importlib_metadata.version('Example'), '21.12')
Example #5
 def _logged_func(self):
     stack = ExitStack()
     self.exc = None
     self.timer = Timer()
     stack.callback(self.timer.stop)
     stack.callback(self.stop)
     try:
         if not self.console_logging:
             stack.enter_context(_logger.suppressed())
         _logger.debug("%s - starting", self)
         while True:
             self._result = self.func(*self.args, **self.kwargs)
             if not self.loop:
                 return
             if self.wait(self.sleep):
                 _logger.debug("%s - stopped", self)
                 return
     except ProcessExiting as exc:
         _logger.debug(exc)
         raise
     except (KeyboardInterrupt, Exception) as exc:
         _logger.silent_exception(
             "Exception in thread running %s: %s (traceback can be found in debug-level logs)",
             self.func, type(exc))
         self.exc = exc
         try:
             exc.timestamp = time.time()
         except Exception:
             pass
     finally:
         stack.close()
Example #6
def inner_context():
    stack = ExitStack()
    with stack:
        stack.enter_context(null_context())
        stack.push(exit_cb)
        stack.callback(other_cb, 10, "hi", answer=42)
        yield
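As written, this generator is presumably the body of a `@contextlib.contextmanager` function, with `null_context`, `exit_cb`, and `other_cb` defined elsewhere. A runnable reconstruction under those assumptions (the helper bodies are stand-ins):

from contextlib import ExitStack, contextmanager

@contextmanager
def null_context():
    yield

def exit_cb(exc_type, exc, tb):
    # push()-ed callables receive __exit__-style exception details.
    print("exit_cb:", exc_type)

def other_cb(a, b, answer):
    print("other_cb:", a, b, answer)

@contextmanager
def inner_context():
    stack = ExitStack()
    with stack:
        stack.enter_context(null_context())
        stack.push(exit_cb)
        stack.callback(other_cb, 10, "hi", answer=42)
        yield

with inner_context():
    pass
# Unwinds LIFO: other_cb(10, "hi", answer=42), then exit_cb(None, None, None),
# then null_context's own exit.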
Example #7
 async def get_points_pipe(estack : contextlib.ExitStack)\
                           -> Tuple[asyncio.StreamReader, int]:
     rfd, wfd = os.pipe()
     ro = open(rfd, 'rb', buffering=0)
     loop = asyncio.get_running_loop()
     reader = asyncio.StreamReader(loop=loop)
     protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
     transport, _ = await loop.connect_read_pipe(lambda: protocol, ro)
     estack.callback(lambda t: t.close(), transport)
     return reader, wfd
Example #8
def _load_fetch_result(
    wf_module: WfModule, basedir: Path, exit_stack: contextlib.ExitStack
) -> Optional[FetchResult]:
    """
    Download user-selected StoredObject to `basedir`, so render() can read it.

    Edge cases:

    Create no file (and return `None`) if the user did not select a
    StoredObject, or if the selected StoredObject does not point to a file
    on minio.

    The caller should ensure "leave `path` alone" means "return an empty
    FetchResult". The FetchResult may still have an error.
    """
    try:
        stored_object = wf_module.stored_objects.get(
            stored_at=wf_module.stored_data_version
        )
    except StoredObject.DoesNotExist:
        return None
    if not stored_object.bucket or not stored_object.key:
        return None

    with contextlib.ExitStack() as inner_stack:
        path = inner_stack.enter_context(
            tempfile_context(prefix="fetch-result-", dir=basedir)
        )

        try:
            minio.download(stored_object.bucket, stored_object.key, path)
            # Download succeeded, so we no longer want to delete `path`
            # right _now_ ("now" means, "in inner_stack.close()"). Instead,
            # transfer ownership of `path` to exit_stack.
            exit_stack.callback(inner_stack.pop_all().close)
        except FileNotFoundError:
            # A few StoredObjects -- very old ones with size=0 -- are
            # *intentionally* not in minio. It turns out modules from that era
            # treated empty-file and None as identical. The _modules_ must
            # preserve that logic for backwards compatibility; so it's safe to
            # return `None` here.
            #
            # Other than that, if the file doesn't exist it's a race: either
            # the fetch result is too _new_ (it's in the database but its file
            # hasn't been written yet) or the fetch result is half-deleted (its
            # file was deleted and it's still in the database). In either case,
            # pretend the fetch result does not exist in the database -- i.e.,
            # return `None`.
            return None

    if wf_module.fetch_error:
        errors = [RenderError(I18nMessage.TODO_i18n(wf_module.fetch_error))]
    else:
        errors = []
    return FetchResult(path, errors)
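The `pop_all()` call above is the ownership-transfer idiom: cleanup is armed on an inner stack, then handed to the caller's stack only once the risky step has succeeded. In miniature (names are illustrative):

import contextlib
import os
import tempfile

def make_file(outer: contextlib.ExitStack) -> str:
    with contextlib.ExitStack() as inner:
        fd, path = tempfile.mkstemp()
        os.close(fd)
        inner.callback(os.unlink, path)  # deleted if anything below raises
        # ... risky work that may raise ...
        # Success: move the unlink onto the caller's stack instead.
        outer.callback(inner.pop_all().close)
        return path

with contextlib.ExitStack() as stack:
    p = make_file(stack)
    assert os.path.exists(p)
assert not os.path.exists(p)  # unlinked when the outer stack closed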
Example #9
class TestBase(TestCase):
    def setUp(self):
        self.stack = ExitStack()
        self.addCleanup(self.stack.close)

        # Directory to be backed up
        self.backupdir = self.stack.enter_context(
            tempfile.TemporaryDirectory(), )
        # Directory to store the data files
        self.datadir = self.stack.enter_context(
            tempfile.TemporaryDirectory(), )

        # Create a repo object with a temporary database. We can't use sqlite
        # in-memory databases because the backup routine is multi-threaded
        # and all threads access the same database.
        tmpdb = tempfile.NamedTemporaryFile(delete=False)
        tmpdb.close()
        self.stack.callback(os.unlink, tmpdb.name)
        self.repo = Repository(tmpdb.name)

        self.repo.set_storage("local", {"base_dir": self.datadir})
        self.repo.set_compression(False)
        self.repo.set_encrypter(backathon.encryption.NullEncryption.init_new())
        self.repo.backup_inline_threshold = 0

        # Shortcut for a few managers to prevent lots of typing in the unit
        # tests
        self.db = self.repo.db
        self.fsentry = models.FSEntry.objects.using(self.db)
        self.object = models.Object.objects.using(self.db)
        self.snapshot = models.Snapshot.objects.using(self.db)
        self.obj_relation = models.ObjectRelation.objects.using(self.db)

        # Create the root of the backup set
        self.fsentry.create(path=self.backupdir)

    def tearDown(self):
        # You can't "close" an in-memory database in Django, so instead we
        # just delete it from the connection handler. The garbage collector
        # will hopefully free the resources, but the important thing is we get a
        # fresh database for each test
        import django.db
        del django.db.connections[self.repo.db]
        del django.db.connections.databases[self.repo.db]

    def path(self, *args):
        return os.path.join(self.backupdir, *args)

    def create_file(self, path, contents):
        assert not path.startswith("/")
        pathobj = pathlib.Path(self.path(path))
        if not pathobj.parent.exists():
            pathobj.parent.mkdir(parents=True)
        pathobj.write_text(contents, encoding="UTF-8")
        return pathobj
Example #10
class MultiPage:
    """
    Multi-page output, for formats that support it.

    Usage is similar to `matplotlib.backends.backend_pdf.PdfPages`::

        with MultiPage(path, metadata=...) as mp:
            mp.savefig(fig1)
            mp.savefig(fig2)

    Note that the only other method of `PdfPages` that is implemented is
    `close`, and that empty files are not created -- as if the *keep_empty*
    argument to `PdfPages` were always False.
    """

    def __init__(self, path_or_stream=None, format=None, *, metadata=None):
        self._stack = ExitStack()
        self._renderer = None

        def _make_renderer():
            stream = self._stack.enter_context(
                cbook.open_file_cm(path_or_stream, "wb"))
            fmt = (format
                   or Path(getattr(stream, "name", "")).suffix[1:]
                   or rcParams["savefig.format"]).lower()
            renderer_cls = {
                "pdf": GraphicsContextRendererCairo._for_pdf_output,
                "ps": GraphicsContextRendererCairo._for_ps_output,
            }[fmt]
            self._renderer = renderer_cls(stream, 1, 1, 1)
            self._stack.callback(self._renderer._finish)
            self._renderer._set_metadata(copy.copy(metadata))

        self._make_renderer = _make_renderer

    def savefig(self, figure, **kwargs):
        # FIXME[Upstream]: Not all kwargs are supported here -- but I plan to
        # deprecate them upstream.
        if self._renderer is None:
            self._make_renderer()
        figure.set_dpi(72)
        self._renderer._set_size(*figure.canvas.get_width_height(),
                                 kwargs.get("dpi", 72))
        with _LOCK:
            figure.draw(self._renderer)
        self._renderer._show_page()

    def close(self):
        return self._stack.__exit__(None, None, None)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        return self._stack.__exit__(*args)
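MultiPage delegates its whole context-manager protocol to the internal ExitStack, so `close()` and `__exit__` are interchangeable. The delegation pattern on its own, with illustrative names:

from contextlib import ExitStack

class Owner:
    def __init__(self):
        self._stack = ExitStack()
        self._stack.callback(print, "resources released")

    def close(self):
        # Same as exiting with no active exception.
        return self._stack.__exit__(None, None, None)

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        return self._stack.__exit__(*exc_info)

with Owner():
    print("working")  # "resources released" is printed on exit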
Example #11
    def _run_invocation(self, logger: logging.Logger, cleanup: ExitStack,
                        image: str) -> List[str]:
        """
        Formulate `podman run` command-line invocation
        """
        ans = ["podman"]
        if os.geteuid():
            ans = ["sudo", "-n"] + ans
            _sudo_canary()
        ans += [
            "run",
            "--rm",
            "--workdir",
            os.path.join(self.container_dir, "work"),
        ]

        cpu = self.runtime_values.get("cpu", 0)
        if cpu > 0:
            ans += ["--cpus", str(cpu)]
        memory_limit = self.runtime_values.get("memory_limit", 0)
        if memory_limit > 0:
            ans += ["--memory", str(memory_limit)]

        if self.cfg.get_bool("task_runtime", "as_user"):
            if os.geteuid() == 0:
                logger.warning(
                    "container command will run explicitly as root, since you are root and set --as-me"
                )
            ans += ["--user", f"{os.geteuid()}:{os.getegid()}"]

        if self.runtime_values.get("privileged", False) is True:
            logger.warning(
                "runtime.privileged enabled (security & portability warning)")
            ans.append("--privileged")

        mounts = self.prepare_mounts()
        logger.info(
            _(
                "podman invocation",
                args=" ".join(shlex.quote(s) for s in (ans + [image])),
                binds=len(mounts),
            ))
        for (container_path, host_path, writable) in mounts:
            if ":" in (container_path + host_path):
                raise InputError("Podman input filenames cannot contain ':'")
            ans.append("-v")
            bind_arg = f"{host_path}:{container_path}"
            if not writable:
                bind_arg += ":ro"
            ans.append(bind_arg)
        ans.append(image)

        cleanup.callback(lambda: self._chown(logger))
        return ans
Example #12
 def setUp(self):
     resources = ExitStack()
     self.addCleanup(resources.close)
     var_dir = resources.enter_context(TemporaryDirectory())
     config.push(
         'template config', """\
     [paths.testing]
     var_dir: {}
     """.format(var_dir))
     resources.callback(config.pop, 'template config')
     self._mlist = create_list('*****@*****.**')
     self._loader = getUtility(ITemplateLoader)
     self._manager = getUtility(ITemplateManager)
Example #13
class TestEgg(TestZip):
    def setUp(self):
        # Find the path to the example-*.egg so we can add it to the front of
        # sys.path, where we'll then try to find the metadata thereof.
        self.resources = ExitStack()
        self.addCleanup(self.resources.close)
        egg = self.resources.enter_context(
            path(self.root, "example-21.12-py3.6.egg"))
        sys.path.insert(0, str(egg))
        self.resources.callback(sys.path.pop, 0)

    def test_files(self):
        for file in files("example"):
            path = str(file.dist.locate_file(file))
            assert ".egg/" in path, path
Example #14
 def setUp(self):
     resources = ExitStack()
     self.addCleanup(resources.close)
     self.var_dir = resources.enter_context(TemporaryDirectory())
     config.push('template config', """\
     [paths.testing]
     var_dir: {}
     """.format(self.var_dir))
     resources.callback(config.pop, 'template config')
     # Put a demo template in the site directory.
     path = os.path.join(self.var_dir, 'templates', 'site', 'en')
     os.makedirs(path)
     with open(os.path.join(path, 'demo.txt'), 'w') as fp:
         print('Test content', end='', file=fp)
     self._mlist = create_list('*****@*****.**')
Example #15
    def build_context(self, environ):
        """
        Start a request context.

        :param environ: A WSGI environment.
        :return: A context manager for the request. When the context
            manager exits, the request context variables are destroyed and
            all cleanup hooks are run.

        .. note:: This method is intended for internal use; Findig will
            call this method internally on its own. It is *not* re-entrant
            with a single request.

        """
        self.__run_startup_hooks()

        ctx.app = self
        ctx.url_adapter = adapter = self.url_map.bind_to_environ(environ)
        ctx.request = self.request_class(environ) # ALWAYS set this after adapter

        rule, url_values = adapter.match(return_rule=True)
        dispatcher = self #self.get_dispatcher(rule)

        # Set up context variables
        ctx.url_values = url_values
        ctx.dispatcher = dispatcher
        ctx.resource = dispatcher.get_resource(rule)

        context = ExitStack()
        context.callback(self.__cleanup)
        # Add all the application's context managers to
        # the exit stack. If any of them return a value,
        # we'll add the value to the application context
        # with the function name.
        for hook in self.context_hooks:
            retval = context.enter_context(hook())
            if retval is not None:
                setattr(ctx, hook.__name__, retval)
        return context
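The loop over `self.context_hooks` composes an arbitrary list of context managers into one: each is entered onto the stack, and the returned stack later exits them in reverse order. A compact sketch of that composition (the hook names are made up):

from contextlib import ExitStack, contextmanager

@contextmanager
def hook_a():
    print("a: enter")
    yield "A"  # a value the caller can capture
    print("a: exit")

@contextmanager
def hook_b():
    print("b: enter")
    yield
    print("b: exit")

def build_context(hooks):
    stack = ExitStack()
    for hook in hooks:
        retval = stack.enter_context(hook())
        if retval is not None:
            print("captured", hook.__name__, "->", retval)
    return stack

with build_context([hook_a, hook_b]):
    print("handling request")
# Exits in reverse order: "b: exit", then "a: exit".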
Example #16
class TestZip(unittest.TestCase):
    root = "importlib_metadata.tests.data"

    def setUp(self):
        # Find the path to the example-*.whl so we can add it to the front of
        # sys.path, where we'll then try to find the metadata thereof.
        self.resources = ExitStack()
        self.addCleanup(self.resources.close)
        wheel = self.resources.enter_context(
            path(self.root, "example-21.12-py3-none-any.whl"))
        sys.path.insert(0, str(wheel))
        self.resources.callback(sys.path.pop, 0)

    def test_zip_version(self):
        self.assertEqual(version("example"), "21.12")

    def test_zip_version_does_not_match(self):
        with self.assertRaises(PackageNotFoundError):
            version("definitely-not-installed")

    def test_zip_entry_points(self):
        scripts = dict(entry_points()["console_scripts"])
        entry_point = scripts["example"]
        self.assertEqual(entry_point.value, "example:main")
        entry_point = scripts["Example"]
        self.assertEqual(entry_point.value, "example:main")

    def test_missing_metadata(self):
        self.assertIsNone(distribution("example").read_text("does not exist"))

    def test_case_insensitive(self):
        self.assertEqual(version("Example"), "21.12")

    def test_files(self):
        for file in files("example"):
            path = str(file.dist.locate_file(file))
            assert ".whl/" in path, path
Example #17
    def _run(self, stack: contextlib.ExitStack) -> int:
        """Run the operations on the local side."""
        events = EventQueue()

        # Set up stdout redirection where RPC token can be read from remote outrun
        out_reader, out_writer = os.pipe()
        token_thread = self._start_thread(self._run_token_skimmer, events,
                                          out_reader)
        stack.callback(token_thread.join, timeout=5.0)
        stack.callback(os.close, out_writer)

        # Start SSH session with redirected stdout
        # stderr is suppressed in TTY mode for undesired output like "connection closed"
        ssh_proc = self._start_ssh(out_writer, self._is_tty())
        ssh_thread = self._start_thread(self._watch_ssh, events, ssh_proc)
        stack.callback(ssh_thread.join, timeout=5.0)
        stack.callback(self._ignore_process_error(ssh_proc.terminate))

        # Wait for RPC token to be read
        try:
            token: str = events.expect(Event.TOKEN_READ)
        except UnexpectedEvent as e:
            if e.actual_event == Event.PROGRAM_EXIT:
                raise RuntimeError("remote outrun failed to start")
            else:
                raise e

        # Start services to expose local environment
        self._start_disposable_thread(self._run_environment_service, events,
                                      token)
        self._start_disposable_thread(self._run_filesystem_service, events,
                                      token)

        if self._args.cache:
            self._start_disposable_thread(self._run_cache_service, events,
                                          token)

        # Wait for program on remote to finish executing
        exit_code: int = events.expect(Event.PROGRAM_EXIT)

        return exit_code
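Note the registration order above: because callbacks run LIFO, each `terminate`/`close` is registered after the corresponding `join`, so the resource is shut down before its thread is joined. The same ordering trick in a self-contained sketch:

import threading
from contextlib import ExitStack

def worker(stop):
    stop.wait()

with ExitStack() as stack:
    stop = threading.Event()
    t = threading.Thread(target=worker, args=(stop,))
    t.start()
    stack.callback(t.join, timeout=5.0)  # registered first, runs last
    stack.callback(stop.set)             # runs first, unblocking the join
    # ... main work ...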
Example #18
class TestZip(unittest.TestCase):
    def setUp(self):
        # Find the path to the example.*.whl so we can add it to the front of
        # sys.path, where we'll then try to find the metadata thereof.
        self.resources = ExitStack()
        self.addCleanup(self.resources.close)
        wheel = self.resources.enter_context(
            path('importlib_metadata.tests.data',
                 'example-21.12-py3-none-any.whl'))
        sys.path.insert(0, str(wheel))
        self.resources.callback(sys.path.pop, 0)

    def test_zip_version(self):
        self.assertEqual(importlib_metadata.version('example'), '21.12')

    def test_zip_entry_points(self):
        parser = importlib_metadata.entry_points('example')
        entry_point = parser.get('console_scripts', 'example')
        self.assertEqual(entry_point, 'example:main')

    def test_not_a_zip(self):
        # For coverage purposes, this module is importable, but has neither a
        # location on the file system, nor a .archive attribute.
        sys.modules['bespoke'] = ModuleType('bespoke')
        self.resources.callback(sys.modules.pop, 'bespoke')
        self.assertRaises(ImportError, importlib_metadata.version, 'bespoke')

    def test_unversioned_dist_info(self):
        # For coverage purposes, give the module an unversioned .archive
        # attribute.
        bespoke = sys.modules['bespoke'] = ModuleType('bespoke')
        bespoke.__loader__ = BespokeLoader()
        self.resources.callback(sys.modules.pop, 'bespoke')
        self.assertRaises(ImportError, importlib_metadata.version, 'bespoke')

    def test_missing_metadata(self):
        distribution = importlib_metadata.distribution('example')
        self.assertIsNone(distribution.read_text('does not exist'))
Example #19
    class TestCase(unittest.TestCase):
        def setUp(self):
            super(TestCase, self).setUp()
            self.__all_cleanups = ExitStack()

        def tearDown(self):
            self.__all_cleanups.close()
            unittest.TestCase.tearDown(self)

        def addCleanup(self, function, *args, **kws):
            self.__all_cleanups.callback(function, *args, **kws)

        def assertIs(self, expr1, expr2, msg=None):
            if expr1 is not expr2:
                standardMsg = '%r is not %r' % (expr1, expr2)
                self.fail(self._formatMessage(msg, standardMsg))

        def assertIn(self, member, container, msg=None):
            if member not in container:
                standardMsg = '%r not found in %r' % (member, container)
                self.fail(self._formatMessage(msg, standardMsg))

        def assertNotIn(self, member, container, msg=None):
            if member in container:
                standardMsg = '%r unexpectedly found in %r'
                standardMsg = standardMsg % (member, container)
                self.fail(self._formatMessage(msg, standardMsg))

        def assertIsNone(self, value, msg=None):
            if value is not None:
                standardMsg = '%r is not None'
                standardMsg = standardMsg % (value)
                self.fail(self._formatMessage(msg, standardMsg))

        def assertIsInstance(self, obj, cls, msg=None):
            """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
            default message."""
            if not isinstance(obj, cls):
                standardMsg = '%s is not an instance of %r' % (repr(obj), cls)
                self.fail(self._formatMessage(msg, standardMsg))

        def assertDictContainsSubset(self, expected, actual, msg=None):
            missing = []
            mismatched = []
            for k, v in expected.items():
                if k not in actual:
                    missing.append(k)
                elif actual[k] != v:
                    mismatched.append('%r, expected: %r, actual: %r'
                                      % (k, v, actual[k]))

            if len(missing) == 0 and len(mismatched) == 0:
                return

            standardMsg = ''
            if missing:
                standardMsg = 'Missing: %r' % ','.join(m for m in missing)
            if mismatched:
                if standardMsg:
                    standardMsg += '; '
                standardMsg += 'Mismatched values: %s' % ','.join(mismatched)

            self.fail(self._formatMessage(msg, standardMsg))
Example #20
class KiwoomOpenApiPlusEventHandler(KiwoomOpenApiPlusEventHandlerFunctions):
    def __init__(self, control):
        self._control = control
        self._observer = QueueBasedIterableObserver()
        self._enter_count = 0
        self._should_exit = False
        self._stack = ExitStack()
        self._lock = RLock()

    @property
    def control(self):
        return self._control

    @property
    def observer(self):
        return self._observer

    @classmethod
    def names(cls):
        names = [
            name for name in dir(KiwoomOpenApiPlusEventHandlerFunctions)
            if name.startswith("On")
        ]
        return names

    def slots(self):
        names = self.names()
        slots = [getattr(self, name) for name in names]
        names_and_slots_implemented = [(name, slot)
                                       for name, slot in zip(names, slots)
                                       if isimplemented(slot)]
        return names_and_slots_implemented

    def connect(self):
        for name, slot in self.slots():
            getattr(self.control, name).connect(slot)

    def disconnect(self):
        for name, slot in self.slots():
            getattr(self.control, name).disconnect(slot)

    def on_enter(self):
        pass

    def on_exit(self, exc_type=None, exc_value=None, traceback=None):
        pass

    def add_callback(self, callback, *args, **kwargs):
        self._stack.callback(callback, *args, **kwargs)

    def enter(self):
        with self._lock:
            self.connect()
            self.on_enter()
            self._should_exit = True

    def exit(self, exc_type=None, exc_value=None, traceback=None):
        with self._lock:
            if self._should_exit:
                self.disconnect()
                self.on_exit(exc_type, exc_value, traceback)
                self._stack.__exit__(exc_type, exc_value, traceback)
                self._should_exit = False

    def stop(self):
        return self.observer.stop()

    def close(self):
        self.exit()
        self.stop()

    def __enter__(self):
        with self._lock:
            if self._enter_count == 0:
                self.enter()
            self._enter_count += 1
            return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not None:
            return self.exit(exc_type, exc_value, traceback)
        with self._lock:
            if self._enter_count > 0:
                self._enter_count -= 1
            if self._enter_count == 0:
                return self.exit(exc_type, exc_value, traceback)
        return

    def __iter__(self):
        with self:
            return iter(self.observer)
Example #21
class TestMain(TestCase):
    def setUp(self):
        super().setUp()
        self._resources = ExitStack()
        self.addCleanup(self._resources.close)
        # Capture builtin print() output.
        self._stdout = StringIO()
        self._stderr = StringIO()
        self._resources.enter_context(
            patch('argparse._sys.stdout', self._stdout))
        # Capture stderr since this is where argparse will spew to.
        self._resources.enter_context(
            patch('argparse._sys.stderr', self._stderr))

    def test_help(self):
        with self.assertRaises(SystemExit) as cm:
            main(('--help', ))
        self.assertEqual(cm.exception.code, 0)
        lines = self._stdout.getvalue().splitlines()
        self.assertTrue(lines[0].startswith('Usage'), lines[0])
        self.assertTrue(lines[1].startswith('  ubuntu-image'), lines[1])

    def test_debug(self):
        with ExitStack() as resources:
            mock = resources.enter_context(
                patch('ubuntu_image.__main__.logging.basicConfig'))
            resources.enter_context(
                patch('ubuntu_image.__main__.ModelAssertionBuilder',
                      EarlyExitModelAssertionBuilder))
            # Prevent actual main() from running.
            resources.enter_context(patch('ubuntu_image.__main__.main'))
            code = main(('--debug', 'model.assertion'))
        self.assertEqual(code, 0)
        mock.assert_called_once_with(level=logging.DEBUG)

    def test_no_debug(self):
        with ExitStack() as resources:
            mock = resources.enter_context(
                patch('ubuntu_image.__main__.logging.basicConfig'))
            resources.enter_context(
                patch('ubuntu_image.__main__.ModelAssertionBuilder',
                      EarlyExitModelAssertionBuilder))
            # Prevent actual main() from running.
            resources.enter_context(patch('ubuntu_image.__main__.main'))
            code = main(('model.assertion', ))
        self.assertEqual(code, 0)
        mock.assert_not_called()

    def test_state_machine_exception(self):
        with ExitStack() as resources:
            resources.enter_context(
                patch('ubuntu_image.__main__.ModelAssertionBuilder',
                      CrashingModelAssertionBuilder))
            mock = resources.enter_context(
                patch('ubuntu_image.__main__._logger.exception'))
            code = main(('model.assertion', ))
            self.assertEqual(code, 1)
            self.assertEqual(mock.call_args_list[-1],
                             call('Crash in state machine'))

    def test_state_machine_snap_command_fails(self):
        # The `snap prepare-image` command fails and main exits with non-zero.
        #
        # This test needs to run the actual snap() helper function, not
        # the testsuite-wide mock.  This is appropriate since we're
        # mocking it ourselves here.
        if NosePlugin.snap_mocker is not None:
            NosePlugin.snap_mocker.patcher.stop()
            self._resources.callback(NosePlugin.snap_mocker.patcher.start)
        self._resources.enter_context(
            patch('ubuntu_image.helpers.subprocess_run',
                  return_value=SimpleNamespace(
                      returncode=1,
                      stdout='command stdout',
                      stderr='command stderr',
                      check_returncode=check_returncode,
                  )))
        self._resources.enter_context(LogCapture())
        self._resources.enter_context(
            patch('ubuntu_image.__main__.ModelAssertionBuilder',
                  XXXModelAssertionBuilder))
        workdir = self._resources.enter_context(TemporaryDirectory())
        imgfile = os.path.join(workdir, 'my-disk.img')
        code = main(
            ('--until', 'prepare_filesystems', '--channel', 'edge',
             '--workdir', workdir, '--output', imgfile, 'model.assertion'))
        self.assertEqual(code, 1)

    def test_no_arguments(self):
        with self.assertRaises(SystemExit) as cm:
            main(())
        self.assertEqual(cm.exception.code, 2)
        lines = self._stderr.getvalue().splitlines()
        self.assertTrue(
            lines[0].startswith('Warning: for backwards compatibility'),
            lines[0])
        self.assertTrue(lines[1], 'Usage:')
        self.assertEqual(lines[2], '  ubuntu-image COMMAND [OPTIONS]...')

    def test_with_none(self):
        with self.assertRaises(SystemExit) as cm:
            main((None))  # code coverage __main__.py 308-309
        self.assertEqual(cm.exception.code, 2)

    def test_snap_subcommand_help(self):
        with self.assertRaises(SystemExit) as cm:
            main((
                'snap',
                '--help',
            ))
        self.assertEqual(cm.exception.code, 0)
        lines = self._stdout.getvalue().splitlines()
        self.assertTrue(lines[0].startswith('usage: ubuntu-image snap'),
                        lines[0])

    def test_classic_subcommand_help(self):
        with self.assertRaises(SystemExit) as cm:
            main((
                'classic',
                '--help',
            ))
        self.assertEqual(cm.exception.code, 0)
        lines = self._stdout.getvalue().splitlines()
        self.assertTrue(lines[0].startswith('usage: ubuntu-image classic'),
                        lines[0])
Example #22
class TestMain(TestCase):
    def setUp(self):
        super().setUp()
        self._resources = ExitStack()
        self.addCleanup(self._resources.close)
        # Capture builtin print() output.
        self._stdout = StringIO()
        self._stderr = StringIO()
        self._resources.enter_context(
            patch('argparse._sys.stdout', self._stdout))
        # Capture stderr since this is where argparse will spew to.
        self._resources.enter_context(
            patch('argparse._sys.stderr', self._stderr))

    def test_help(self):
        with self.assertRaises(SystemExit) as cm:
            main(('--help',))
        self.assertEqual(cm.exception.code, 0)
        lines = self._stdout.getvalue().splitlines()
        self.assertTrue(lines[0].startswith('Usage'),
                        lines[0])
        self.assertTrue(lines[1].startswith('  ubuntu-image'),
                        lines[1])

    def test_debug(self):
        with ExitStack() as resources:
            mock = resources.enter_context(
                patch('ubuntu_image.__main__.logging.basicConfig'))
            resources.enter_context(patch(
                'ubuntu_image.__main__.ModelAssertionBuilder',
                EarlyExitModelAssertionBuilder))
            # Prevent actual main() from running.
            resources.enter_context(patch('ubuntu_image.__main__.main'))
            code = main(('--debug', 'model.assertion'))
        self.assertEqual(code, 0)
        mock.assert_called_once_with(level=logging.DEBUG)

    def test_no_debug(self):
        with ExitStack() as resources:
            mock = resources.enter_context(
                patch('ubuntu_image.__main__.logging.basicConfig'))
            resources.enter_context(patch(
                'ubuntu_image.__main__.ModelAssertionBuilder',
                EarlyExitModelAssertionBuilder))
            # Prevent actual main() from running.
            resources.enter_context(patch('ubuntu_image.__main__.main'))
            code = main(('model.assertion',))
        self.assertEqual(code, 0)
        mock.assert_not_called()

    def test_state_machine_exception(self):
        with ExitStack() as resources:
            resources.enter_context(patch(
                'ubuntu_image.__main__.ModelAssertionBuilder',
                CrashingModelAssertionBuilder))
            mock = resources.enter_context(patch(
                'ubuntu_image.__main__._logger.exception'))
            code = main(('model.assertion',))
            self.assertEqual(code, 1)
            self.assertEqual(
                mock.call_args_list[-1], call('Crash in state machine'))

    def test_state_machine_snap_command_fails(self):
        # The `snap prepare-image` command fails and main exits with non-zero.
        #
        # This test needs to run the actual snap() helper function, not
        # the testsuite-wide mock.  This is appropriate since we're
        # mocking it ourselves here.
        if NosePlugin.snap_mocker is not None:
            NosePlugin.snap_mocker.patcher.stop()
            self._resources.callback(NosePlugin.snap_mocker.patcher.start)
        self._resources.enter_context(patch(
            'ubuntu_image.helpers.subprocess_run',
            return_value=SimpleNamespace(
                returncode=1,
                stdout='command stdout',
                stderr='command stderr',
                check_returncode=check_returncode,
                )))
        self._resources.enter_context(LogCapture())
        self._resources.enter_context(patch(
            'ubuntu_image.__main__.ModelAssertionBuilder',
            XXXModelAssertionBuilder))
        workdir = self._resources.enter_context(TemporaryDirectory())
        imgfile = os.path.join(workdir, 'my-disk.img')
        code = main(('--until', 'prepare_filesystems',
                     '--channel', 'edge',
                     '--workdir', workdir,
                     '--output', imgfile,
                     'model.assertion'))
        self.assertEqual(code, 1)

    def test_no_arguments(self):
        with self.assertRaises(SystemExit) as cm:
            main(())
        self.assertEqual(cm.exception.code, 2)
        lines = self._stderr.getvalue().splitlines()
        self.assertTrue(
                lines[0].startswith('Warning: for backwards compatibility'),
                lines[0])
        self.assertTrue(lines[1], 'Usage:')
        self.assertEqual(
                lines[2],
                '  ubuntu-image COMMAND [OPTIONS]...')

    def test_with_none(self):
        with self.assertRaises(SystemExit) as cm:
            main((None))    # code coverage __main__.py 308-309
        self.assertEqual(cm.exception.code, 2)

    def test_snap_subcommand_help(self):
        with self.assertRaises(SystemExit) as cm:
            main(('snap', '--help',))
        self.assertEqual(cm.exception.code, 0)
        lines = self._stdout.getvalue().splitlines()
        self.assertTrue(
              lines[0].startswith('usage: ubuntu-image snap'),
              lines[0])

    def test_classic_subcommand_help(self):
        with self.assertRaises(SystemExit) as cm:
            main(('classic', '--help',))
        self.assertEqual(cm.exception.code, 0)
        lines = self._stdout.getvalue().splitlines()
        self.assertTrue(
              lines[0].startswith('usage: ubuntu-image classic'),
              lines[0])
Example #23
class TestMain(unittest.TestCase):
    def setUp(self):
        old_log_level = log.getEffectiveLevel()
        self.addCleanup(log.setLevel, old_log_level)
        self.resources = ExitStack()
        # Create a new event loop, and arrange for that loop to end almost
        # immediately.  This will allow the calls to main() in these tests to
        # also exit almost immediately.  Otherwise, the foreground test
        # process will hang.
        #
        # I think this introduces a race condition.  It depends on whether the
        # call_later() can possibly run before the run_forever() does, or could
        # cause it to not complete all its tasks.  In that case, you'd likely
        # get an error or warning on stderr, which may or may not cause the
        # test to fail.  I've only seen this happen once and don't have enough
        # information to know for sure.
        default_loop = asyncio.get_event_loop()
        loop = asyncio.new_event_loop()
        loop.call_later(0.1, loop.stop)
        self.resources.callback(asyncio.set_event_loop, default_loop)
        asyncio.set_event_loop(loop)
        self.addCleanup(self.resources.close)

    @unittest.skipIf(pwd is None, 'No pwd module available')
    def test_setuid(self):
        with patch('os.setuid') as mock:
            main(args=())
            mock.assert_called_with(pwd.getpwnam('nobody').pw_uid)

    @unittest.skipIf(pwd is None, 'No pwd module available')
    def test_setuid_permission_error(self):
        mock = self.resources.enter_context(
            patch('os.setuid', side_effect=PermissionError))
        stderr = StringIO()
        self.resources.enter_context(patch('sys.stderr', stderr))
        with self.assertRaises(SystemExit) as cm:
            main(args=())
        self.assertEqual(cm.exception.code, 1)
        mock.assert_called_with(pwd.getpwnam('nobody').pw_uid)
        self.assertEqual(
            stderr.getvalue(),
            'Cannot setuid "nobody"; try running with -n option.\n')

    @unittest.skipIf(pwd is None, 'No pwd module available')
    def test_setuid_no_pwd_module(self):
        self.resources.enter_context(patch('aiosmtpd.main.pwd', None))
        stderr = StringIO()
        self.resources.enter_context(patch('sys.stderr', stderr))
        with self.assertRaises(SystemExit) as cm:
            main(args=())
        self.assertEqual(cm.exception.code, 1)
        self.assertEqual(
            stderr.getvalue(),
            'Cannot import module "pwd"; try running with -n option.\n')

    @unittest.skipUnless(has_setuid, 'setuid is unavailable')
    def test_n(self):
        self.resources.enter_context(patch('aiosmtpd.main.pwd', None))
        self.resources.enter_context(
            patch('os.setuid', side_effect=PermissionError))
        # Just to short-circuit the main() function.
        self.resources.enter_context(
            patch('aiosmtpd.main.partial', side_effect=RuntimeError))
        # Getting the RuntimeError means that a SystemExit was never
        # triggered in the setuid section.
        self.assertRaises(RuntimeError, main, ('-n',))

    @unittest.skipUnless(has_setuid, 'setuid is unavailable')
    def test_nosetuid(self):
        self.resources.enter_context(patch('aiosmtpd.main.pwd', None))
        self.resources.enter_context(
            patch('os.setuid', side_effect=PermissionError))
        # Just to short-circuit the main() function.
        self.resources.enter_context(
            patch('aiosmtpd.main.partial', side_effect=RuntimeError))
        # Getting the RuntimeError means that a SystemExit was never
        # triggered in the setuid section.
        self.assertRaises(RuntimeError, main, ('--nosetuid',))

    def test_debug_0(self):
        # For this test, the runner will have already set the log level so it
        # may not be logging.ERROR.
        log = logging.getLogger('mail.log')
        default_level = log.getEffectiveLevel()
        with patch.object(log, 'info'):
            main(('-n',))
            self.assertEqual(log.getEffectiveLevel(), default_level)

    def test_debug_1(self):
        # Mock the logger to eliminate console noise.
        with patch.object(logging.getLogger('mail.log'), 'info'):
            main(('-n', '-d'))
            self.assertEqual(log.getEffectiveLevel(), logging.INFO)

    def test_debug_2(self):
        # Mock the logger to eliminate console noise.
        with patch.object(logging.getLogger('mail.log'), 'info'):
            main(('-n', '-dd'))
            self.assertEqual(log.getEffectiveLevel(), logging.DEBUG)

    def test_debug_3(self):
        # Mock the logger to eliminate console noise.
        with patch.object(logging.getLogger('mail.log'), 'info'):
            main(('-n', '-ddd'))
            self.assertEqual(log.getEffectiveLevel(), logging.DEBUG)
            self.assertTrue(asyncio.get_event_loop().get_debug())
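The setUp above snapshots global state and arms its restoration in one move: `resources.callback(asyncio.set_event_loop, default_loop)` before installing the new loop. The same save-then-arm-restore shape against an environment variable (MODE is a made-up name):

import os
from contextlib import ExitStack

with ExitStack() as stack:
    old = os.environ.get("MODE")
    if old is None:
        stack.callback(os.environ.pop, "MODE", None)  # remove again on exit
    else:
        stack.callback(os.environ.__setitem__, "MODE", old)
    os.environ["MODE"] = "test"
    assert os.environ["MODE"] == "test"
assert os.environ.get("MODE") == old  # restored either way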
Example #24
class Controller:
    """Start and stop D-Bus service under test."""

    MODULE = 'systemimage.testing.service'

    def __init__(self, logfile=None, loglevel='info'):
        self.loglevel = loglevel
        # Non-public.
        self._stack = ExitStack()
        self._stoppers = []
        # Public.
        self.tmpdir = self._stack.enter_context(temporary_directory())
        self.config_path = os.path.join(self.tmpdir, 'dbus-system.conf')
        self.serverdir = self._stack.enter_context(temporary_directory())
        self.daemon_pid = None
        self.mode = 'live'
        self.udm_certs = ''
        self.curl_cert = ''
        self.patcher = None
        # Set up the dbus-daemon system configuration file.
        path = data_path('dbus-system.conf.in')
        with open(path, 'r', encoding='utf-8') as fp:
            template = fp.read()
        username = pwd.getpwuid(os.getuid()).pw_name
        config = template.format(tmpdir=self.tmpdir, user=username)
        with open(self.config_path, 'w', encoding='utf-8') as fp:
            fp.write(config)
        # We need a client.ini file for the subprocess.
        self.ini_tmpdir = self._stack.enter_context(temporary_directory())
        self.ini_vardir = self._stack.enter_context(temporary_directory())
        self.ini_logfile = (os.path.join(self.ini_tmpdir, 'client.log')
                            if logfile is None
                            else logfile)
        self.ini_path = os.path.join(self.tmpdir, 'config.d')
        makedirs(self.ini_path)
        self._reset_configs()

    def _reset_configs(self):
        for filename in os.listdir(self.ini_path):
            if filename.endswith('.ini'):
                os.remove(os.path.join(self.ini_path, filename))
        template = resource_bytes(
            'systemimage.tests.data', '01.ini').decode('utf-8')
        defaults = os.path.join(self.ini_path, '00_defaults.ini')
        with open(defaults, 'w', encoding='utf-8') as fp:
            print(template.format(tmpdir=self.ini_tmpdir,
                                  vardir=self.ini_vardir,
                                  logfile=self.ini_logfile,
                                  loglevel=self.loglevel),
                  file=fp)

    def _configure_services(self):
        self.stop_children()
        # Now we have to set up the .service files.  We use the Python
        # executable used to run the tests, executing the entry point as would
        # happen in a deployed script or virtualenv.
        for service, command_template, starter, stopper in SERVICES:
            command = command_template.format(python=sys.executable, self=self)
            service_file = service + '.service'
            path = data_path(service_file + '.in')
            with open(path, 'r', encoding='utf-8') as fp:
                template = fp.read()
            config = template.format(command=command)
            service_path = os.path.join(self.tmpdir, service_file)
            with open(service_path, 'w', encoding='utf-8') as fp:
                fp.write(config)
            self._stoppers.append(stopper)
        # If the dbus-daemon is running, reload its configuration files.
        if self.daemon_pid is not None:
            wait_for_service()

    def _set_udm_certs(self, cert_pem, certificate_path):
        self.udm_certs = (
            '' if cert_pem is None
            else '-self-signed-certs ' + certificate_path)

    def _set_curl_certs(self, cert_pem, certificate_path):
        # We have to set up the PyCURL downloader's self-signed certificate for
        # the test in two ways.  First, because we might be spawning the D-Bus
        # service, we have to pass the path to the cert to that service...
        self.curl_cert = (
            '' if cert_pem is None
            else '--self-signed-cert ' + certificate_path)
        # ...but the controller is also used to set the mode for foreground
        # tests, such as test_download.py.  Here we don't spawn any D-Bus
        # processes, but we still have to mock make_testable() in curl.py so
        # that the PyCURL object accepts the self-signed cert.
        if self.patcher is not None:
            self.patcher.stop()
            self.patcher = None
        if cert_pem is not None:
            def self_sign(c):
                c.setopt(pycurl.CAINFO, certificate_path)
            self.patcher = patch('systemimage.curl.make_testable', self_sign)
            self.patcher.start()

    def set_mode(self, *, cert_pem=None, service_mode=''):
        self.mode = service_mode
        certificate_path = data_path(cert_pem)
        if USING_PYCURL:
            self._set_curl_certs(cert_pem, certificate_path)
        else:
            self._set_udm_certs(cert_pem, certificate_path)
        self._reset_configs()
        self._configure_services()

    def _start(self):
        """Start the SystemImage service in a subprocess.

        Use the output from dbus-daemon to gather the address and pid of the
        service in the subprocess.  We'll use those in the foreground process
        to talk to our test instance of the service (rather than any similar
        service running normally on the development desktop).
        """
        daemon_exe = find_executable('dbus-daemon')
        if daemon_exe is None:
            print('Cannot find the `dbus-daemon` executable', file=sys.stderr)
            return
        os.environ['DBUS_VERBOSE'] = '1'
        dbus_args = [
            daemon_exe,
            #'/usr/lib/x86_64-linux-gnu/dbus-1.0/debug-build/bin/dbus-daemon',
            '--fork',
            '--config-file=' + str(self.config_path),
            # Return the address and pid on stdout.
            '--print-address=1',
            '--print-pid=1',
            ]
        stdout = subprocess.check_output(dbus_args, bufsize=4096,
                                         universal_newlines=True)
        lines = stdout.splitlines()
        dbus_address = lines[0].strip()
        self.daemon_pid = int(lines[1].strip())
        #print('DBUS_LAUNCH PID:', self.daemon_pid)
        self._stack.callback(self._kill, self.daemon_pid)
        #print("DBUS_SYSTEM_BUS_ADDRESS='{}'".format(dbus_address))
        # Set the service's address into the environment for rendezvous.
        self._stack.enter_context(reset_envar('DBUS_SYSTEM_BUS_ADDRESS'))
        os.environ['DBUS_SYSTEM_BUS_ADDRESS'] = dbus_address
        # Try to start the DBus services.
        for service, command_template, starter, stopper in SERVICES:
            starter(self)

    def start(self):
        if self.daemon_pid is not None:
            # Already started.
            return
        try:
            self._configure_services()
            self._start()
        except:
            self._stack.close()
            raise

    def stop_children(self):
        # If the dbus-daemon is already running, kill all the children.
        if self.daemon_pid is not None:
            for stopper in self._stoppers:
                stopper(self)
        del self._stoppers[:]

    def _kill(self, pid):
        self.stop_children()
        process = psutil.Process(pid)
        process.terminate()
        process.wait(60)
        self.daemon_pid = None

    def stop(self):
        self._stack.close()
Example #25
    def validate(self, data):
        Schema = self.__class__
        s = self._schema
        e = self._error
        i = self._ignore_extra_keys
        flavor = _priority(s)
        if flavor == ITERABLE:
            data = Schema(type(s), error=e).validate(data)
            o = Or(*s, error=e, schema=Schema, ignore_extra_keys=i)
            return type(data)(o.validate(d) for d in data)
        if flavor == DICT:
            exitstack = ExitStack()
            data = Schema(dict, error=e).validate(data)
            new = type(data)()  # `new` is a dict of the validated values
            coverage = set()  # matched schema keys
            # for each key and value find a schema entry matching them, if any
            sorted_skeys = sorted(s, key=self._dict_key_priority)
            for skey in sorted_skeys:
                if hasattr(skey, "reset"):
                    exitstack.callback(skey.reset)

            with exitstack:
                # Evaluate dictionaries last
                data_items = sorted(data.items(), key=lambda value: isinstance(value[1], dict))
                for key, value in data_items:
                    for skey in sorted_skeys:
                        svalue = s[skey]
                        try:
                            nkey = Schema(skey, error=e).validate(key)
                        except SchemaError:
                            pass
                        else:
                            if isinstance(skey, Hook):
                                # As the content of the value makes little sense for
                                # keys with a hook, we reverse its meaning:
                                # we will only call the handler if the value does match
                                # In the case of the forbidden key hook,
                                # we will raise the SchemaErrorForbiddenKey exception
                                # on match, allowing for excluding a key only if its
                                # value has a certain type, and allowing Forbidden to
                                # work well in combination with Optional.
                                try:
                                    nvalue = Schema(svalue, error=e).validate(value)
                                except SchemaError:
                                    continue
                                skey.handler(nkey, data, e)
                            else:
                                try:
                                    nvalue = Schema(svalue, error=e, ignore_extra_keys=i).validate(value)
                                except SchemaError as x:
                                    k = "Key '%s' error:" % nkey
                                    raise SchemaError([k] + x.autos, [e] + x.errors)
                                else:
                                    new[nkey] = nvalue
                                    coverage.add(skey)
                                    break
            required = set(k for k in s if not self._is_optional_type(k))
            if not required.issubset(coverage):
                missing_keys = required - coverage
                s_missing_keys = ", ".join(repr(k) for k in sorted(missing_keys, key=repr))
                raise SchemaMissingKeyError("Missing key%s: %s" % (_plural_s(missing_keys), s_missing_keys), e)
            if not self._ignore_extra_keys and (len(new) != len(data)):
                wrong_keys = set(data.keys()) - set(new.keys())
                s_wrong_keys = ", ".join(repr(k) for k in sorted(wrong_keys, key=repr))
                raise SchemaWrongKeyError(
                    "Wrong key%s %s in %r" % (_plural_s(wrong_keys), s_wrong_keys, data), e.format(data) if e else None
                )

            # Apply default-having optionals that haven't been used:
            defaults = set(k for k in s if type(k) is Optional and hasattr(k, "default")) - coverage
            for default in defaults:
                new[default.key] = default.default() if callable(default.default) else default.default

            return new
        if flavor == TYPE:
            if isinstance(data, s) and not (isinstance(data, bool) and s == int):
                return data
            else:
                raise SchemaUnexpectedTypeError(
                    "%r should be instance of %r" % (data, s.__name__), e.format(data) if e else None
                )
        if flavor == VALIDATOR:
            try:
                return s.validate(data)
            except SchemaError as x:
                raise SchemaError([None] + x.autos, [e] + x.errors)
            except BaseException as x:
                raise SchemaError(
                    "%r.validate(%r) raised %r" % (s, data, x), self._error.format(data) if self._error else None
                )
        if flavor == CALLABLE:
            f = _callable_str(s)
            try:
                if s(data):
                    return data
            except SchemaError as x:
                raise SchemaError([None] + x.autos, [e] + x.errors)
            except BaseException as x:
                raise SchemaError("%s(%r) raised %r" % (f, data, x), self._error.format(data) if self._error else None)
            raise SchemaError("%s(%r) should evaluate to True" % (f, data), e)
        if s == data:
            return data
        else:
            raise SchemaError("%r does not match %r" % (s, data), e.format(data) if e else None)
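The Hook branch above inverts the usual rule: the handler fires only when both the key and the value match, which is what lets Forbidden exclude a key for certain value types while Optional still admits others. A minimal sketch of that combination, assuming the pip-installable schema package and its Forbidden/Optional/SchemaForbiddenKeyError names:

from schema import Forbidden, Optional, Schema, SchemaForbiddenKeyError

# Forbid 'debug' only when its value is a bool; a string value is still accepted.
s = Schema({Forbidden('debug'): bool, Optional('debug'): str, 'name': str})

print(s.validate({'name': 'svc', 'debug': 'verbose'}))  # passes

try:
    s.validate({'name': 'svc', 'debug': True})  # bool value triggers the hook
except SchemaForbiddenKeyError as exc:
    print(exc)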
Exemple #26
0
class Command(command):
    """
	Add pallets to this machine's pallet directory. This command copies all
	files from ISOs or paths that stacki recognizes as pallets to the local
	machine. The default location is a directory under /export/stack/pallets.
	See also the 'probepal' utility to ascertain how, if at all, stacki will
	recognize a given ISO or path.

	<arg optional='1' type='string' name='pallet' repeat='1'>
	A list of pallets to add to the local machine. If no list is supplied
	stacki will check if a pallet is mounted on /mnt/cdrom, and if so copy it
	to the local machine. If the pallet is hosted on the internet, it will
	be downloaded to a temporary directory before being added.  All temporary
	files and mounts will be cleaned up, with the exception of /mnt/cdrom.
	</arg>

	<param type='string' name='username'>
	A username that will be used for authenticating to any remote pallet locations
	</param>

	<param type='string' name='password'>
	A password that will be used for authenticating to any remote pallet locations
	</param>
		
	<param type='bool' name='clean'>
	If set, then remove all files from any existing pallets of the same
	name, version, and architecture before copying the contents of the
	pallets onto the local disk.
	</param>

	<param type='string' name='dir'>
	The base directory to copy the pallet to.
	The default is: /export/stack/pallets.
	</param>

	<param type='string' name='updatedb'>
	Add the pallet info to the cluster database.
	The default is: true.
	</param>
	
	<example cmd='add pallet clean=true kernel*iso'>
	Adds the Kernel pallet to the local pallet directory.  Before the pallet
	is added, the old Kernel pallet packages are removed from the pallet
	directory.
	</example>
	
	<example cmd='add pallet kernel*iso https://10.0.1.3/pallets/'>
	Adds the Kernel pallet, along with any pallets found at the remote server,
	to the local pallet directory.
	</example>

	<related>remove pallet</related>
	<related>enable pallet</related>
	<related>disable pallet</related>
	<related>list pallet</related>
	<related>create pallet</related>
	<related>create new pallet</related>
	"""
    def write_pallet_xml(self, stacki_pallet_root, pallet_info):
        '''
		Create a roll-*.xml file compatible with the rest of stacki's tooling
		note: if we copied an existing roll-*.xml, don't overwrite it here as it may have
		more metadata
		'''
        destdir = pathlib.Path(stacki_pallet_root).joinpath(
            *info_getter(pallet_info))
        name, version, release, distro_family, arch = info_getter(pallet_info)

        if destdir.joinpath(f'roll-{name}.xml').exists():
            return

        with open(f'{destdir}/roll-{name}.xml', 'w') as xml:
            xml.write(
                dedent(f'''\
			<roll name="{name}" interface="6.0.2">
			<info version="{version}" release="{release}" arch="{arch}" os="{distro_family}"/>
			<iso maxsize="0" addcomps="0" bootable="0"/>
			<rpm rolls="0" bin="1" src="0"/>
			</roll>
			'''))

    def copy(self, stacki_pallet_root, pallet_info, clean):
        '''
		Copy a pallet to the local filesystem

		Specifically, rsync from `pallet_info.pallet_root` to
		`stacki_pallet_root`/name/version/release/os/arch/
		'''
        pallet_dir = pallet_info.pallet_root
        destdir = pathlib.Path(stacki_pallet_root).joinpath(
            *info_getter(pallet_info))

        if destdir.exists() and clean:
            print(
                f'Cleaning {"-".join(info_getter(pallet_info))} from pallets directory'
            )
            shutil.rmtree(destdir)

        print(f'Copying {"-".join(info_getter(pallet_info))} ...')

        if not destdir.exists():
            destdir.mkdir(parents=True, exist_ok=True)

        # use rsync to perform the copy
        # archive implies
        # --recursive,
        # --links - copy symlinks as symlinks
        # --perms - preserve permissions
        # --times - preserve mtimes
        # --group - preserve group
        # --owner - preserve owner
        # --devices - preserve device files
        # --specials - preserve special files
        # we then overwrite the permissions to make apache happy.
        cmd = f'rsync --archive --chmod=D755 --chmod=F644 --exclude "TRANS.TBL" {pallet_dir}/ {destdir}/'
        result = self._exec(cmd, shlexsplit=True)
        if result.returncode != 0:
            raise CommandError(self,
                               f'Unable to copy pallet:\n{result.stderr}')

        return destdir

    def update_db(self, pallet_info, URL):
        """
		Insert the pallet information into the database if not already present.
		"""

        if self.db.count(
                '(ID) from rolls where name=%s and version=%s and rel=%s and os=%s and arch=%s',
                info_getter(pallet_info)) == 0:
            self.db.execute(
                """
				insert into rolls(name, version, rel, os, arch, URL)
				values (%s, %s, %s, %s, %s, %s)
				""", (*info_getter(pallet_info), URL))

    def mount(self, iso_name, mount_point):
        '''
		mount `iso_name` to `mount_point`
		we automatically register an unmount callback
		'''

        # mount readonly explicitly to get around a weird behavior
        # in sles12 that prevents re-mounting an already mounted iso
        proc = self._exec(
            f'mount --read-only -o loop {iso_name} {mount_point}',
            shlexsplit=True)
        if proc.returncode != 0:
            msg = f'Pallet could not be added - unable to mount {iso_name}.'
            msg += f'\nTried: {" ".join(str(arg) for arg in proc.args)}'
            raise CommandError(self, f'{msg}\n{proc.stdout}\n{proc.stderr}')
        self.deferred.callback(self.umount, iso_name, mount_point)

    def umount(self, iso_name, mount_point):
        '''
		un-mount `mount_point`, first checking to see if it is actually mounted
		'''

        proc = self._exec(f'mount | grep {mount_point}', shell=True)
        if proc.returncode == 1 and proc.stdout.strip() == '':
            return
        proc = self._exec(f'umount {mount_point}', shlexsplit=True)
        if proc.returncode != 0:
            msg = f'Pallet could not be unmounted from {mount_point} ({iso_name}).'
            msg += f'\nTried: {" ".join(str(arg) for arg in proc.args)}'
            raise CommandError(self, f'{msg}\n{proc.stdout}\n{proc.stderr}')

    def patch_pallet(self, pallet_info):
        '''
		Run any available pallet patches
		'''

        pallet_patch_dir = '-'.join(info_getter(pallet_info))
        patch_dir = pathlib.Path(
            f'/opt/stack/pallet-patches/{pallet_patch_dir}')
        print(f'checking for patches in {patch_dir}')
        if not patch_dir.is_dir():
            return

        patches = sorted(list(patch_dir.glob('*.sh')) +
                         list(patch_dir.glob('*.py')),
                         key=lambda p: p.name)
        for patch in patches:
            print(f'applying patch: {patch}')
            try:
                self._exec(str(patch), cwd=patch_dir, check=True)
            except PermissionError as e:
                raise CommandError(
                    self, f'Unable to apply patch: {str(patch)}\n{e}')
            except subprocess.CalledProcessError as e:
                print(e)

    def run(self, params, args):
        clean, stacki_pallet_dir, updatedb, self.username, self.password = self.fillParams(
            [
                ('clean', False),
                ('dir', '/export/stack/pallets'),
                ('updatedb', True),
                ('username', None),
                ('password', None),
            ])

        # need to provide either both or none
        if (self.username or self.password) and not all(
                (self.username, self.password)):
            raise UsageError(self,
                             'must supply a password along with the username')

        clean = self.str2bool(clean)
        updatedb = self.str2bool(updatedb)

        # create a contextmanager that we can append cleanup jobs to
        # add its closing to run atexit, so we know it will run
        self.deferred = ExitStack()
        atexit.register(self.deferred.close)

        # special case: no args were specified - check if a pallet is mounted at /mnt/cdrom
        if not args:
            mount_point = '/mnt/cdrom'
            result = self._exec(f'mount | grep {mount_point}', shell=True)
            if result.returncode != 0:
                raise CommandError(
                    self, 'no pallets specified and /mnt/cdrom is unmounted')
            args.append(mount_point)

        # resolve args and check for existence
        bad_args = []
        for i, arg in enumerate(list(args)):
            # TODO: is this a problem?
            if arg.startswith(('https://', 'http://', 'ftp://')):
                args[i] = arg
                continue

            p = pathlib.Path(arg)
            if not p.exists():
                bad_args.append(arg)
            else:
                args[i] = str(p.resolve())

        if bad_args:
            msg = 'The following arguments appear to be local paths that do not exist: '
            raise CommandError(self, msg + ', '.join(bad_args))

        # most plugins will need a temporary directory, so allocate them here so we do cleanup
        # 'canonical_arg' is the arg provided by the user, but cleaned to be explicit (relative
        # paths resolved, etc)
        # 'exploded_path' is the directory where we will start searching for pallets
        # 'matched_pallets' is a list of pallet_info objects found at that path.
        pallet_args = {}
        for arg in args:
            tmpdir = tempfile.mkdtemp()
            self.deferred.callback(shutil.rmtree, tmpdir)
            pallet_args[arg] = {
                'canonical_arg': arg,
                'exploded_path': tmpdir,
                'matched_pallets': [],
            }

        self.runPlugins(pallet_args)

        prober = probepal.Prober()
        pallet_infos = prober.find_pallets(
            *[pallet_args[path]['exploded_path'] for path in pallet_args])

        # pallet_infos returns a dict {path: [pallet1, ...]}
        # note the list - an exploded_path can point to a jumbo pallet

        for path, pals in pallet_infos.items():
            for arg in pallet_args:
                if pallet_args[arg]['exploded_path'] == path:
                    pallet_args[arg]['matched_pallets'] = pals

        # TODO what to do if we match something twice.
        bad_args = [
            arg for arg, info in pallet_args.items()
            if not info['matched_pallets']
        ]
        if bad_args:
            msg = 'The following arguments do not appear to be pallets: '
            raise CommandError(self, msg + ', '.join(bad_args))

        # work off of a copy of pallet args, as we modify it as we go
        for arg, data in pallet_args.copy().items():
            if len(data['matched_pallets']) == 1:
                pallet_args[arg]['exploded_path'] = data['matched_pallets'][
                    0].pallet_root
                continue

            # delete the arg pointing to a jumbo and replace it with N new 'dummy' args
            del pallet_args[arg]
            for pal in data['matched_pallets']:
                fake_arg_name = '-'.join(info_getter(pal))
                pallet_args[fake_arg_name] = data.copy()
                pallet_args[fake_arg_name]['exploded_path'] = pal.pallet_root
                pallet_args[fake_arg_name]['matched_pallets'] = [pal]

        # we want to be able to go tempdir to arg
        # this is because we want `canonical_arg` to be what goes in as the `URL` field in the db
        paths_to_args = {
            data['exploded_path']: data['canonical_arg']
            for data in pallet_args.values()
        }

        # we have everything we need, copy the pallet to the fs, add it to the db, and maybe patch it
        for pallet in flatten(pallet_infos.values()):
            self.copy(stacki_pallet_dir, pallet, clean)
            self.write_pallet_xml(stacki_pallet_dir, pallet)
            if updatedb:
                self.update_db(pallet, paths_to_args[pallet.pallet_root])
            if stacki_pallet_dir == '/export/stack/pallets':
                self.patch_pallet(pallet)

        # Clear the old packages
        self._exec('systemctl start ludicrous-cleaner'.split())
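One idiom in run() above is worth isolating: an ExitStack used as an append-only cleanup list, with its close() registered via atexit so the callbacks fire even if the command bails out early. A stripped-down sketch of the same pattern (the temporary directories stand in for stacki's exploded pallets; nothing here is stacki API):

import atexit
import shutil
import tempfile
from contextlib import ExitStack

deferred = ExitStack()
atexit.register(deferred.close)  # cleanups run even on early exits

for _ in range(3):
    tmpdir = tempfile.mkdtemp()
    deferred.callback(shutil.rmtree, tmpdir)  # LIFO: last tmpdir removed first
    # ... explode an ISO into tmpdir and probe it for pallets ...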
Exemple #27
0
class TestMain(unittest.TestCase):
    def setUp(self):
        old_log_level = log.getEffectiveLevel()
        self.addCleanup(log.setLevel, old_log_level)
        self.resources = ExitStack()
        # Create a new event loop, and arrange for that loop to end almost
        # immediately.  This will allow the calls to main() in these tests to
        # also exit almost immediately.  Otherwise, the foreground test
        # process will hang.
        #
        # I think this introduces a race condition.  It depends on whether the
        # call_later() can possibly run before the run_forever() does, or could
        # cause it to not complete all its tasks.  In that case, you'd likely
        # get an error or warning on stderr, which may or may not cause the
        # test to fail.  I've only seen this happen once and don't have enough
        # information to know for sure.
        default_loop = asyncio.get_event_loop()
        loop = asyncio.new_event_loop()
        loop.call_later(0.1, loop.stop)
        self.resources.callback(asyncio.set_event_loop, default_loop)
        asyncio.set_event_loop(loop)
        self.addCleanup(self.resources.close)

    @unittest.skipIf(pwd is None, 'No pwd module available')
    def test_setuid(self):
        with patch('os.setuid') as mock:
            main(args=())
            mock.assert_called_with(pwd.getpwnam('nobody').pw_uid)

    @unittest.skipIf(pwd is None, 'No pwd module available')
    def test_setuid_permission_error(self):
        mock = self.resources.enter_context(
            patch('os.setuid', side_effect=PermissionError))
        stderr = StringIO()
        self.resources.enter_context(patch('sys.stderr', stderr))
        with self.assertRaises(SystemExit) as cm:
            main(args=())
        self.assertEqual(cm.exception.code, 1)
        mock.assert_called_with(pwd.getpwnam('nobody').pw_uid)
        self.assertEqual(
            stderr.getvalue(),
            'Cannot setuid "nobody"; try running with -n option.\n')

    @unittest.skipIf(pwd is None, 'No pwd module available')
    def test_setuid_no_pwd_module(self):
        self.resources.enter_context(patch('aiosmtpd.main.pwd', None))
        stderr = StringIO()
        self.resources.enter_context(patch('sys.stderr', stderr))
        with self.assertRaises(SystemExit) as cm:
            main(args=())
        self.assertEqual(cm.exception.code, 1)
        self.assertEqual(
            stderr.getvalue(),
            'Cannot import module "pwd"; try running with -n option.\n')

    @unittest.skipUnless(has_setuid, 'setuid is unavailable')
    def test_n(self):
        self.resources.enter_context(patch('aiosmtpd.main.pwd', None))
        self.resources.enter_context(
            patch('os.setuid', side_effect=PermissionError))
        # Just to short-circuit the main() function.
        self.resources.enter_context(
            patch('aiosmtpd.main.partial', side_effect=RuntimeError))
        # Getting the RuntimeError means that a SystemExit was never
        # triggered in the setuid section.
        self.assertRaises(RuntimeError, main, ('-n', ))

    @unittest.skipUnless(has_setuid, 'setuid is unavailable')
    def test_nosetuid(self):
        self.resources.enter_context(patch('aiosmtpd.main.pwd', None))
        self.resources.enter_context(
            patch('os.setuid', side_effect=PermissionError))
        # Just to short-circuit the main() function.
        self.resources.enter_context(
            patch('aiosmtpd.main.partial', side_effect=RuntimeError))
        # Getting the RuntimeError means that a SystemExit was never
        # triggered in the setuid section.
        self.assertRaises(RuntimeError, main, ('--nosetuid', ))

    def test_debug_0(self):
        # For this test, the runner will have already set the log level so it
        # may not be logging.ERROR.
        log = logging.getLogger('mail.log')
        default_level = log.getEffectiveLevel()
        with patch.object(log, 'info'):
            main(('-n', ))
            self.assertEqual(log.getEffectiveLevel(), default_level)

    def test_debug_1(self):
        # Mock the logger to eliminate console noise.
        with patch.object(logging.getLogger('mail.log'), 'info'):
            main(('-n', '-d'))
            self.assertEqual(log.getEffectiveLevel(), logging.INFO)

    def test_debug_2(self):
        # Mock the logger to eliminate console noise.
        with patch.object(logging.getLogger('mail.log'), 'info'):
            main(('-n', '-dd'))
            self.assertEqual(log.getEffectiveLevel(), logging.DEBUG)

    def test_debug_3(self):
        # Mock the logger to eliminate console noise.
        with patch.object(logging.getLogger('mail.log'), 'info'):
            main(('-n', '-ddd'))
            self.assertEqual(log.getEffectiveLevel(), logging.DEBUG)
            self.assertTrue(asyncio.get_event_loop().get_debug())
Exemple #28
0
class TestBase(TestCase):
    def setUp(self):
        self.stack = ExitStack()
        self.addCleanup(self.stack.close)

        # Directory to be backed up
        self.backupdir = self.stack.enter_context(
            tempfile.TemporaryDirectory(), )
        # Directory to store the data files
        self.datadir = self.stack.enter_context(
            tempfile.TemporaryDirectory(), )

        # Create a repo object with a temporary database. We can't use sqlite
        # in-memory databases because the backup routine is multi-threaded
        # and all threads access the same database.
        tmpdb = tempfile.NamedTemporaryFile(delete=False)
        tmpdb.close()
        self.stack.callback(os.unlink, tmpdb.name)

        self.backathon = Backathon(tmpdb.name)
        self.repo = self.backathon.repository

        self.repo.set_storage("local", {"base_dir": self.datadir})
        self.repo.set_compression(False)
        self.repo.set_encrypter(backathon.encryption.NullEncryption.init_new())
        self.repo.backup_inline_threshold = 0

        # Shortcut for a few managers to prevent lots of typing in the unit
        # tests
        self.db = self.repo.db
        self.fsentry = models.FSEntry.objects.using(self.db)
        self.object = models.Object.objects.using(self.db)
        self.snapshot = models.Snapshot.objects.using(self.db)
        self.obj_relation = models.ObjectRelation.objects.using(self.db)

        # Create the root of the backup set
        self.fsentry.create(path=self.backupdir)

    def tearDown(self):
        # You can't "close" an in-memory database in Django, so instead we
        # just delete it from the connection handler. The garbage collector
        # will hopefully free the resources, but the important thing is we get a
        # fresh database for each test
        import django.db
        del django.db.connections[self.repo.db]
        del django.db.connections.databases[self.repo.db]

    def path(self, *args):
        return os.path.join(self.backupdir, *args)

    def create_file(self, path, contents):
        assert not path.startswith("/")
        pathobj = pathlib.Path(self.path(path))
        if not pathobj.parent.exists():
            pathobj.parent.mkdir(parents=True)
        pathobj.write_text(contents, encoding="UTF-8")
        return pathobj

    def _insert_objects(self, *objects):
        """Insert a set of objects into the Object table

        Each object is a tuple of (objid, [children])

        Callers must be careful to avoid reference loops in the object
        hierarchy, as that is not a valid object tree.
        """
        # Since SQLite has deferrable foreign key constraints, we can insert
        # references to rows that don't exist yet as long as they exist when
        # the transaction is committed.
        with atomic(using=self.db):
            for objid, children in objects:
                if isinstance(objid, str):
                    objid = objid.encode("ASCII")
                obj = self.object.create(objid=objid, )
                self.obj_relation.bulk_create([
                    models.ObjectRelation(parent=obj,
                                          child_id=c.encode("ASCII")
                                          if isinstance(c, str) else c)
                    for c in children
                ])

    def assert_objects(self, objs, roots=None, no_extras=True):
        """Asserts that the given hierarchy exists in the database and that
        no other objects exist in the database

        """
        if roots is None:
            roots = self.object.filter(parents__isnull=True, )

        rootmap = {r.objid: r for r in roots}
        for name, children in objs.items():
            obj = rootmap.pop(
                name.encode("ASCII") if isinstance(name, str) else name)
            self.assert_objects(
                children,
                obj.children.all(),
            )

        if no_extras:
            self.assertDictEqual(rootmap, {}, "Unexpected object found")
Exemple #29
0
    def validate(self, data):
        Schema = self.__class__
        s = self._schema
        e = self._error
        i = self._ignore_extra_keys

        if isinstance(s, Literal):
            s = s.schema

        flavor = _priority(s)
        if flavor == ITERABLE:
            data = Schema(type(s), error=e).validate(data)
            o = Or(*s, error=e, schema=Schema, ignore_extra_keys=i)
            return type(data)(o.validate(d) for d in data)
        if flavor == DICT:
            exitstack = ExitStack()
            data = Schema(dict, error=e).validate(data)
            new = type(data)()  # new - is a dict of the validated values
            coverage = set()  # matched schema keys
            # for each key and value find a schema entry matching them, if any
            sorted_skeys = sorted(s, key=self._dict_key_priority)
            for skey in sorted_skeys:
                if hasattr(skey, "reset"):
                    exitstack.callback(skey.reset)

            with exitstack:
                # Evaluate dictionaries last
                data_items = sorted(data.items(), key=lambda value: isinstance(value[1], dict))
                for key, value in data_items:
                    for skey in sorted_skeys:
                        svalue = s[skey]
                        try:
                            nkey = Schema(skey, error=e).validate(key)
                        except SchemaError:
                            pass
                        else:
                            if isinstance(skey, Hook):
                                # As the content of the value makes little sense for
                                # keys with a hook, we reverse its meaning:
                                # we will only call the handler if the value does match
                                # In the case of the forbidden key hook,
                                # we will raise the SchemaForbiddenKeyError exception
                                # on match, allowing for excluding a key only if its
                                # value has a certain type, and allowing Forbidden to
                                # work well in combination with Optional.
                                try:
                                    nvalue = Schema(svalue, error=e).validate(value)
                                except SchemaError:
                                    continue
                                skey.handler(nkey, data, e)
                            else:
                                try:
                                    nvalue = Schema(svalue, error=e, ignore_extra_keys=i).validate(value)
                                except SchemaError as x:
                                    k = "Key '%s' error:" % nkey
                                    message = self._prepend_schema_name(k)
                                    raise SchemaError([message] + x.autos, [e] + x.errors)
                                else:
                                    new[nkey] = nvalue
                                    coverage.add(skey)
                                    break
            required = set(k for k in s if not self._is_optional_type(k))
            if not required.issubset(coverage):
                missing_keys = required - coverage
                s_missing_keys = ", ".join(repr(k) for k in sorted(missing_keys, key=repr))
                message = "Missing key%s: %s" % (_plural_s(missing_keys), s_missing_keys)
                message = self._prepend_schema_name(message)
                raise SchemaMissingKeyError(message, e)
            if not self._ignore_extra_keys and (len(new) != len(data)):
                wrong_keys = set(data.keys()) - set(new.keys())
                s_wrong_keys = ", ".join(repr(k) for k in sorted(wrong_keys, key=repr))
                message = "Wrong key%s %s in %r" % (_plural_s(wrong_keys), s_wrong_keys, data)
                message = self._prepend_schema_name(message)
                raise SchemaWrongKeyError(message, e.format(data) if e else None)

            # Apply default-having optionals that haven't been used:
            defaults = set(k for k in s if type(k) is Optional and hasattr(k, "default")) - coverage
            for default in defaults:
                new[default.key] = default.default() if callable(default.default) else default.default

            return new
        if flavor == TYPE:
            if isinstance(data, s) and not (isinstance(data, bool) and s == int):
                return data
            else:
                message = "%r should be instance of %r" % (data, s.__name__)
                message = self._prepend_schema_name(message)
                raise SchemaUnexpectedTypeError(message, e.format(data) if e else None)
        if flavor == VALIDATOR:
            try:
                return s.validate(data)
            except SchemaError as x:
                raise SchemaError([None] + x.autos, [e] + x.errors)
            except BaseException as x:
                message = "%r.validate(%r) raised %r" % (s, data, x)
                message = self._prepend_schema_name(message)
                raise SchemaError(message, self._error.format(data) if self._error else None)
        if flavor == CALLABLE:
            f = _callable_str(s)
            try:
                if s(data):
                    return data
            except SchemaError as x:
                raise SchemaError([None] + x.autos, [e] + x.errors)
            except BaseException as x:
                message = "%s(%r) raised %r" % (f, data, x)
                message = self._prepend_schema_name(message)
                raise SchemaError(message, self._error.format(data) if self._error else None)
            message = "%s(%r) should evaluate to True" % (f, data)
            message = self._prepend_schema_name(message)
            raise SchemaError(message, e)
        if s == data:
            return data
        else:
            message = "%r does not match %r" % (s, data)
            message = self._prepend_schema_name(message)
            raise SchemaError(message, e.format(data) if e else None)
Exemple #30
0
class Worker:
    def __init__(self, argv):
        self.__cached_copies = {}
        self.__command_wrapper_enabled = False
        self.__dpkg_architecture = None
        self.call_argv = None
        self.capabilities = set()
        self.command_wrapper = None
        self.argv = argv
        self.stack = ExitStack()
        self.user = '******'
        self.virt_process = None

    def __enter__(self):
        argv = list(map(os.path.expanduser, self.argv))

        for prefix in ('autopkgtest-virt-', 'adt-virt-', ''):
            if shutil.which(prefix + argv[0]):
                argv[0] = prefix + argv[0]
                break
        else:
            raise WorkerError('virtualization provider %r not found' % argv[0])

        logger.info('Starting worker: %r', argv)
        self.virt_process = subprocess.Popen(argv,
                                             stdin=subprocess.PIPE,
                                             stdout=subprocess.PIPE,
                                             universal_newlines=True)
        self.stack.enter_context(self.virt_process)
        self.stack.callback(self.virt_process.terminate)
        # FIXME: timed wait for a response?
        self.stack.callback(self.virt_process.stdin.flush)
        self.stack.callback(self.virt_process.stdin.write, 'quit\n')

        line = self.virt_process.stdout.readline()

        if line != 'ok\n':
            raise WorkerError('Virtual machine {!r} failed to start: '
                              '{}'.format(argv, line.strip()))

        self.virt_process.stdin.write('capabilities\n')
        self.virt_process.stdin.flush()
        line = self.virt_process.stdout.readline()

        if not line.startswith('ok '):
            raise WorkerError('Virtual machine {!r} failed to report '
                              'capabilities: {}'.format(argv, line.strip()))

        for word in line.split()[1:]:
            self.capabilities.add(word)
            if word.startswith('suggested-normal-user='):
                self.user = word[len('suggested-normal-user='):]

        if 'root-on-testbed' not in self.capabilities:
            raise WorkerError('Virtual machine {!r} does not have '
                              'root-on-testbed capability: {}'.format(
                                  argv, line.strip()))

        if ('isolation-machine' not in self.capabilities
                and 'isolation-container' not in self.capabilities):
            raise WorkerError('Virtual machine {!r} does not have '
                              'sufficient isolation: {}'.format(
                                  argv, line.strip()))

        self.virt_process.stdin.write('open\n')
        self.virt_process.stdin.flush()
        line = self.virt_process.stdout.readline()
        if not line.startswith('ok '):
            raise WorkerError('Failed to open virtual machine session {!r}: '
                              '{}'.format(argv, line))
        self.scratch = line[3:].rstrip('\n')

        self.virt_process.stdin.write('print-execute-command\n')
        self.virt_process.stdin.flush()
        line = self.virt_process.stdout.readline()
        if not line.startswith('ok '):
            raise WorkerError('Failed to get virtual machine {!r} command '
                              'wrapper: {}'.format(argv, line.strip()))

        wrapper_argv = line.rstrip('\n').split(None, 1)[1].split(',')
        self.call_argv = list(map(urllib.parse.unquote, wrapper_argv))
        if not self.call_argv:
            raise WorkerError('Virtual machine {!r} command wrapper did not '
                              'provide any arguments: {}'.format(
                                  argv, line.strip()))

        wrapper = '{}/vectis-command-wrapper'.format(self.scratch)
        self.copy_to_guest(_WRAPPER, wrapper)
        self.check_call(['chmod', '+x', wrapper])
        self.command_wrapper = wrapper

        return self

    def call(self, argv, **kwargs):
        logger.info('%r', argv)
        return subprocess.call(self.call_argv + list(argv), **kwargs)

    def check_call(self, argv, **kwargs):
        logger.info('%r', argv)
        subprocess.check_call(self.call_argv + list(argv), **kwargs)

    def check_output(self, argv, **kwargs):
        logger.info('%r', argv)
        return subprocess.check_output(self.call_argv + list(argv), **kwargs)

    def copy_to_guest(self, host_path, guest_path, *, cache=False):
        assert host_path is not None
        assert guest_path is not None

        if cache and self.__cached_copies.get(host_path) == guest_path:
            return

        if not os.path.exists(host_path):
            raise WorkerError('Cannot copy host:{!r} to guest: it does '
                              'not exist'.format(host_path))

        self.virt_process.stdin.write('copydown {} {}\n'.format(
            urllib.parse.quote(host_path),
            urllib.parse.quote(guest_path),
        ))
        self.virt_process.stdin.flush()
        line = self.virt_process.stdout.readline()

        if line != 'ok\n':
            raise WorkerError('Failed to copy host:{!r} to guest:{!r}: '
                              '{}'.format(host_path, guest_path, line.strip()))

        if cache:
            self.__cached_copies[host_path] = guest_path

    def copy_to_host(self, guest_path, host_path):
        if self.call(['test', '-e', guest_path]) != 0:
            raise WorkerError('Cannot copy guest:{!r} to host: it does '
                              'not exist'.format(guest_path))

        self.virt_process.stdin.write('copyup {} {}\n'.format(
            urllib.parse.quote(guest_path),
            urllib.parse.quote(host_path),
        ))
        self.virt_process.stdin.flush()
        line = self.virt_process.stdout.readline()
        if line != 'ok\n':
            raise WorkerError('Failed to copy guest:{!r} to host:{!r}: '
                              '{}'.format(guest_path, host_path, line.strip()))

    def open_shell(self):
        self.virt_process.stdin.write('shell\n')
        self.virt_process.stdin.flush()
        line = self.virt_process.stdout.readline()
        if line != 'ok\n':
            logger.warning('Unable to open a shell in guest: %s', line.strip())

    @property
    def dpkg_architecture(self):
        if self.__dpkg_architecture is None:
            self.__dpkg_architecture = self.check_output(
                ['dpkg', '--print-architecture'],
                universal_newlines=True).strip()

        return self.__dpkg_architecture

    def __exit__(self, et, ev, tb):
        return self.stack.__exit__(et, ev, tb)

    def set_up_apt(self, suite, components=()):
        with TemporaryDirectory() as tmp:
            with AtomicWriter(os.path.join(tmp, 'sources.list')) as writer:
                for ancestor in suite.hierarchy:
                    if components:
                        filtered_components = (set(components)
                                               & set(ancestor.all_components))
                    else:
                        filtered_components = ancestor.components

                    writer.write(
                        textwrap.dedent('''
                    deb {mirror} {suite} {components}
                    deb-src {mirror} {suite} {components}
                    ''').format(
                            components=' '.join(filtered_components),
                            mirror=ancestor.mirror,
                            suite=ancestor.apt_suite,
                        ))

                    if ancestor.apt_key is not None:
                        self.copy_to_guest(
                            ancestor.apt_key, '/etc/apt/trusted.gpg.d/' +
                            os.path.basename(ancestor.apt_key))

            self.copy_to_guest(os.path.join(tmp, 'sources.list'),
                               '/etc/apt/sources.list')
            self.check_call([
                'env',
                'DEBIAN_FRONTEND=noninteractive',
                'apt-get',
                '-y',
                'update',
            ])
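Because ExitStack callbacks run last-in-first-out, __enter__ above effectively queues a graceful shutdown (write 'quit\n', then flush) ahead of the forceful one (terminate(), then the Popen context's wait). A minimal sketch of that ordering against a throwaway process (the 'quit' protocol only means something to autopkgtest-virt backends, so cat here simply ignores it):

import subprocess
from contextlib import ExitStack

stack = ExitStack()
proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE, universal_newlines=True)
stack.enter_context(proc)                   # runs last: close pipes and wait
stack.callback(proc.terminate)              # runs third: force-stop
stack.callback(proc.stdin.flush)            # runs second
stack.callback(proc.stdin.write, 'quit\n')  # runs first: polite request
# ... talk to the process here ...
stack.close()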
Exemple #31
0
class TestMain(unittest.TestCase):
    def setUp(self):
        old_log_level = log.getEffectiveLevel()
        self.addCleanup(log.setLevel, old_log_level)
        self.resources = ExitStack()
        # Create a new event loop, and arrange for that loop to end almost
        # immediately.  This will allow the calls to main() in these tests to
        # also exit almost immediately.  Otherwise, the foreground test
        # process will hang.
        #
        # I think this introduces a race condition.  It depends on whether the
        # call_later() can possibly run before the run_forever() does, or could
        # cause it to not complete all its tasks.  In that case, you'd likely
        # get an error or warning on stderr, which may or may not cause the
        # test to fail.  I've only seen this happen once and don't have enough
        # information to know for sure.
        default_loop = asyncio.get_event_loop()
        loop = asyncio.new_event_loop()
        # The original value of 0.1 is too small; on underpowered test benches
        # (like my laptop) the initialization of the whole asyncio 'system'
        # (i.e., create_server + run_until_complete + run_forever) *sometimes*
        # takes more than 0.1 seconds, causing tests to fail intermittently
        # with “Event loop stopped before Future completed.” error.
        #
        # Because the error is intermittent and happens infrequently (maybe
        # in only about 5-10% of test runs), I figure the actual time
        # needed would be 0.1 +/- 20%; so raising this value by 900%
        # *should* be enough. We can revisit this in the future if it needs
        # to be longer.
        loop.call_later(1.0, loop.stop)
        self.resources.callback(asyncio.set_event_loop, default_loop)
        asyncio.set_event_loop(loop)
        self.addCleanup(self.resources.close)

    @unittest.skipIf(pwd is None, 'No pwd module available')
    def test_setuid(self):
        with patch('os.setuid') as mock:
            main(args=())
            mock.assert_called_with(pwd.getpwnam('nobody').pw_uid)

    @unittest.skipIf(pwd is None, 'No pwd module available')
    def test_setuid_permission_error(self):
        mock = self.resources.enter_context(
            patch('os.setuid', side_effect=PermissionError))
        stderr = StringIO()
        self.resources.enter_context(patch('sys.stderr', stderr))
        with self.assertRaises(SystemExit) as cm:
            main(args=())
        self.assertEqual(cm.exception.code, 1)
        mock.assert_called_with(pwd.getpwnam('nobody').pw_uid)
        self.assertEqual(
            stderr.getvalue(),
            'Cannot setuid "nobody"; try running with -n option.\n')

    @unittest.skipIf(pwd is None, 'No pwd module available')
    def test_setuid_no_pwd_module(self):
        self.resources.enter_context(patch('aiosmtpd.main.pwd', None))
        stderr = StringIO()
        self.resources.enter_context(patch('sys.stderr', stderr))
        with self.assertRaises(SystemExit) as cm:
            main(args=())
        self.assertEqual(cm.exception.code, 1)
        # On Python 3.8 on Linux, a bunch of "RuntimeWarning: coroutine
        # 'AsyncMockMixin._execute_mock_call' was never awaited" messages
        # gets mixed up into stderr causing test fail.
        # Therefore, we use assertIn instead of assertEqual here, because
        # the string DOES appear in stderr, just buried.
        self.assertIn(
            'Cannot import module "pwd"; try running with -n option.\n',
            stderr.getvalue(),
        )

    @unittest.skipUnless(has_setuid, 'setuid is unavailable')
    def test_n(self):
        self.resources.enter_context(patch('aiosmtpd.main.pwd', None))
        self.resources.enter_context(
            patch('os.setuid', side_effect=PermissionError))
        # Just to short-circuit the main() function.
        self.resources.enter_context(
            patch('aiosmtpd.main.partial', side_effect=RuntimeError))
        # Getting the RuntimeError means that a SystemExit was never
        # triggered in the setuid section.
        self.assertRaises(RuntimeError, main, ('-n', ))

    @unittest.skipUnless(has_setuid, 'setuid is unavailable')
    def test_nosetuid(self):
        self.resources.enter_context(patch('aiosmtpd.main.pwd', None))
        self.resources.enter_context(
            patch('os.setuid', side_effect=PermissionError))
        # Just to short-circuit the main() function.
        self.resources.enter_context(
            patch('aiosmtpd.main.partial', side_effect=RuntimeError))
        # Getting the RuntimeError means that a SystemExit was never
        # triggered in the setuid section.
        self.assertRaises(RuntimeError, main, ('--nosetuid', ))

    def test_debug_0(self):
        # For this test, the runner will have already set the log level so it
        # may not be logging.ERROR.
        _log = logging.getLogger('mail.log')
        default_level = _log.getEffectiveLevel()
        with patch.object(_log, 'info'):
            main(('-n', ))
            self.assertEqual(_log.getEffectiveLevel(), default_level)

    def test_debug_1(self):
        # Mock the logger to eliminate console noise.
        with patch.object(logging.getLogger('mail.log'), 'info'):
            main(('-n', '-d'))
            self.assertEqual(log.getEffectiveLevel(), logging.INFO)

    def test_debug_2(self):
        # Mock the logger to eliminate console noise.
        with patch.object(logging.getLogger('mail.log'), 'info'):
            main(('-n', '-dd'))
            self.assertEqual(log.getEffectiveLevel(), logging.DEBUG)

    def test_debug_3(self):
        # Mock the logger to eliminate console noise.
        with patch.object(logging.getLogger('mail.log'), 'info'):
            main(('-n', '-ddd'))
            self.assertEqual(log.getEffectiveLevel(), logging.DEBUG)
            self.assertTrue(asyncio.get_event_loop().get_debug())
Exemple #32
0
class Rollback:
    @attr.attrs
    class Handler:
        callback = attr.attrib()
        enabled = attr.attrib(default=True)
        ignore_errors = attr.attrib(default=False)

        def __call__(self):
            if self.enabled:
                try:
                    self.callback()
                except:  # pylint: disable=bare-except
                    if not self.ignore_errors:
                        raise

    def __init__(self):
        self._handlers = {}
        self._stack = ExitStack()
        self.enabled = True

    def add(self,
            callback,
            *args,
            name=None,
            enabled=True,
            ignore_errors=False,
            fwd_kwargs=None,
            **kwargs):
        if args or kwargs or fwd_kwargs:
            if fwd_kwargs:
                kwargs.update(fwd_kwargs)
            callback = partial(callback, *args, **kwargs)
        name = name or hash(callback)
        assert name not in self._handlers
        handler = self.Handler(callback,
                               enabled=enabled,
                               ignore_errors=ignore_errors)
        self._handlers[name] = handler
        self._stack.callback(handler)
        return name

    do = add  # readability alias

    def enable(self, name=None):
        if name:
            self._handlers[name].enabled = True
        else:
            self.enabled = True

    def disable(self, name=None):
        if name:
            self._handlers[name].enabled = False
        else:
            self.enabled = False

    def clean(self):
        self.__exit__(None, None, None)

    def __enter__(self):
        return self

    # pylint: disable=redefined-builtin
    def __exit__(self, type=None, value=None, traceback=None):
        if type is None:
            return
        if not self.enabled:
            return
        self._stack.__exit__(type, value, traceback)
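A brief usage sketch, assuming the Rollback class above (the resource names are illustrative): handlers run in LIFO order, and only when the with block exits with an exception, since __exit__ returns early when type is None; a handler registered with ignore_errors=True swallows its own failures.

try:
    with Rollback() as rollback:
        print('create resource A')
        rollback.add(print, 'undo A', name='undo-a')

        print('create resource B')
        rollback.add(print, 'undo B', ignore_errors=True)

        raise RuntimeError('step C failed')  # unwinds LIFO: 'undo B', then 'undo A'
except RuntimeError:
    pass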
Exemple #33
0
class HLinkDB:
    def __init__(self, filename):
        self.closed = False
        self._cleanup = ExitStack()
        self._filename = filename
        self._pending_save = None
        # Map a "dev:ino" node to a list of paths associated with that node.
        self._node_paths = pickle_load(filename) or {}
        # Map a path to a "dev:ino" node (a reverse hard link index).
        self._path_node = {}
        for node, paths in self._node_paths.items():
            for path in paths:
                self._path_node[path] = node

    def prepare_save(self):
        """ Commit all of the relevant data to disk.  Do as much work
        as possible without actually making the changes visible."""
        if self._pending_save:
            raise Error('save of %r already in progress' % self._filename)
        with self._cleanup:
            if self._node_paths:
                dir, name = os.path.split(self._filename)
                self._pending_save = atomically_replaced_file(self._filename,
                                                              mode='wb',
                                                              buffering=65536)
                with self._cleanup.enter_context(self._pending_save) as f:
                    pickle.dump(self._node_paths, f, 2)
            else:  # No data
                self._cleanup.callback(lambda: unlink(self._filename))
            self._cleanup = self._cleanup.pop_all()

    def commit_save(self):
        self.closed = True
        if self._node_paths and not self._pending_save:
            raise Error('cannot commit save of %r; no save prepared' %
                        self._filename)
        self._cleanup.close()
        self._pending_save = None

    def abort_save(self):
        self.closed = True
        with self._cleanup:
            if self._pending_save:
                self._pending_save.cancel()
        self._pending_save = None

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.abort_save()

    def __del__(self):
        assert self.closed

    def add_path(self, path, dev, ino):
        # Assume path is new.
        node = b'%d:%d' % (dev, ino)
        self._path_node[path] = node
        link_paths = self._node_paths.get(node)
        if link_paths and path not in link_paths:
            link_paths.append(path)
        else:
            self._node_paths[node] = [path]

    def _del_node_path(self, node, path):
        link_paths = self._node_paths[node]
        link_paths.remove(path)
        if not link_paths:
            del self._node_paths[node]

    def change_path(self, path, new_dev, new_ino):
        prev_node = self._path_node.get(path)
        if prev_node:
            self._del_node_path(prev_node, path)
        self.add_path(path, new_dev, new_ino)

    def del_path(self, path):
        # Path may not be in db (if updating a pre-hardlink support index).
        node = self._path_node.get(path)
        if node:
            self._del_node_path(node, path)
            del self._path_node[path]

    def node_paths(self, dev, ino):
        node = b'%d:%d' % (dev, ino)
        return self._node_paths[node]
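The prepare/commit/abort split above hinges on ExitStack.pop_all(), which moves the registered callbacks onto a fresh stack so they survive the with block and fire only when the caller later closes them. A minimal sketch of the idiom, independent of bup:

from contextlib import ExitStack

def prepare():
    with ExitStack() as stack:
        stack.callback(print, 'cleanup runs')
        # ... failure-prone preparation: if it raises, the callback
        # fires right here and nothing is left half-done ...
        return stack.pop_all()  # success: hand the cleanup to the caller

pending = prepare()   # nothing printed yet
pending.close()       # commit/abort point: 'cleanup runs' prints now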
Exemple #34
0
    def validate(self, data):
        """
        Validates a dict.
        Check each key and value, call the hooks if needed, check for required
        keys and default values
        """
        # check that this is a dict
        if not isinstance(data, dict):
            message = "%r should be instance of dict" % (data)
            self._raise_error(message, data, SchemaUnexpectedTypeError)
        # check the length
        if not self._min_length <= len(data) <= self._max_length:
            message = "%r should have a length between %s and %s (is %s)" % (data, self._min_length, self._max_length, len(data))
            self._raise_error(message, data, SchemaWrongLengthError)

        e = self._error
        exitstack = ExitStack()
        new = type(data)() # the data to return
        coverage = set() # which keys have been seen
        wrong_keys = [] # which keys are extra
        # call reset of all keys once finished
        for skey in self._reset:
            exitstack.callback(skey.reset)

        with exitstack:
            # treat the simple values first
            data_items = sorted(data.items(),
                key=lambda value: isinstance(value[1],
                    (dict, list, tuple, set, frozenset)))
            for key, value in data_items:
                # look for the best list of schemas
                sitems = self._comparable_keys.get(key, None)
                if sitems is None:
                    for t in type(key).__mro__:
                        sitems = self._type_keys.get(t, None)
                        if sitems is not None: break
                    else: sitems = self._global_keys

                for skey, svalue in sitems:
                    # check if the key schema matches the key
                    try:
                        nkey = skey.validate(key)
                    except SchemaError:
                        continue
                    # check if the value schema matches the value
                    try:
                        nvalue = svalue.validate(value)
                    # it doesn't match, try to call catch, else continue
                    except SchemaError as x:
                        if hasattr(skey, 'catch'):
                            action = skey.catch(nkey, x, new, data)
                        else: action = True
                        if action is True:
                            message = "Key '%s' error:" % nkey
                            message = self._prepend_schema_name(message)
                            x.prepend(message, e)
                            raise x
                        elif action is False: break
                    # it matches, try to call handle, else save the key/value
                    else:
                        coverage.add(skey)
                        if hasattr(skey, 'handle'):
                            action = skey.handle(nkey, nvalue, new, data)
                        else: action = True
                        if action is True:
                            new[nkey] = nvalue
                            break
                        elif action is False: break
                # no key has matched
                else: wrong_keys.append(key)
        # check that all required keys have been seen
        if not self._required <= coverage:
            missing_keys = self._required - coverage
            s_missing_keys = ", ".join(repr(self._key_names.get(k, k)) \
                for k in sorted(missing_keys, key=repr))
            message = "Missing key%s: %s" % (_plural_s(missing_keys), s_missing_keys)
            self._raise_error(message, data, SchemaMissingKeyError)
        # check if extra keys are authorized
        if not self._ignore_extra_keys and wrong_keys:
            s_wrong_keys = ", ".join(repr(k) for k in sorted(wrong_keys, key=repr))
            message = "Wrong key%s %s in %r" % \
                (_plural_s(wrong_keys), s_wrong_keys, data)
            self._raise_error(message, data, SchemaWrongKeyError)
        # get the default value of all unseen keys
        for skey in self._default - coverage:
            default = skey.default
            if callable(default):
                new[skey._schema] = default()
            else: new[skey._schema] = default

        return new
Exemple #35
0
class TestService(unittest.TestCase):
    def setUp(self):
        self.es = ExitStack()

        self.service = Service(
            # rabbitmq config
            connection_parameters=pika.ConnectionParameters(
                host=RABBITMQ_HOST,
                port=RABBITMQ_PORT,
                credentials=pika.PlainCredentials(
                    username="******",
                    password="******",
                ),
                client_properties={"name": "wes-data-sharing-service"},
                connection_attempts=3,
                retry_delay=10,
            ),
            src_queue="to-validator",
            dst_exchange_beehive="to-beehive",
            dst_exchange_node="data.topic",

            # metrics config
            metrics_host="0.0.0.0",
            metrics_port=8080,
            upload_publish_name="upload",

            # app meta cache config
            app_meta_cache=AppMetaCache(APP_META_CACHE_HOST,
                                        APP_META_CACHE_PORT),
            system_meta={
                "node": "0000000000000001",
                "vsn": "W001",
            },
            system_users=["service"],
        )

        # turn off info logging for unit tests
        self.service.logger.setLevel(logging.ERROR)

        self.clearAppMetaCache()

        # setup rabbitmq connection to purge queues for testing
        self.connection = self.es.enter_context(
            pika.BlockingConnection(
                pika.ConnectionParameters(host=RABBITMQ_HOST,
                                          port=RABBITMQ_PORT,
                                          credentials=pika.PlainCredentials(
                                              username="******",
                                              password="******",
                                          ))))
        self.channel = self.es.enter_context(self.connection.channel())
        self.channel.queue_purge(self.service.src_queue)
        self.channel.queue_purge(self.service.dst_exchange_beehive)

        # setup upload dir
        # NOTE(sean) pywaggle uses /run/waggle as WAGGLE_PLUGIN_UPLOAD_PATH default. we hack this for now so we can run these unit tests.
        self.upload_dir = self.es.enter_context(TemporaryDirectory())
        os.environ["WAGGLE_PLUGIN_UPLOAD_PATH"] = str(
            Path(self.upload_dir).absolute())

        # run new background instance of service for testing
        threading.Thread(target=self.service.run, daemon=True).start()
        self.es.callback(self.service.shutdown)

    def tearDown(self):
        self.es.close()

    def clearAppMetaCache(self):
        with Redis(APP_META_CACHE_HOST) as redis:
            redis.flushall()

    def updateAppMetaCache(self, app_uid, meta):
        with Redis(APP_META_CACHE_HOST) as redis:
            redis.set(f"app-meta.{app_uid}", json.dumps(meta))

    def getSubscriber(self, topics):
        subscriber = self.es.enter_context(get_plugin(""))
        subscriber.subscribe(topics)
        time.sleep(0.1)
        return subscriber

    def assertMessages(self, queue, messages, timeout=1.0):
        results = []

        def on_message_callback(ch, method, properties, body):
            self.assertEqual(properties.delivery_mode,
                             pika.DeliveryMode.Persistent.value)
            results.append(wagglemsg.load(body))
            if len(results) >= len(messages):
                ch.stop_consuming()

        self.connection.call_later(timeout, self.channel.stop_consuming)
        self.channel.basic_consume(queue, on_message_callback)
        self.channel.start_consuming()

        self.assertEqual(results, messages)

    def assertSubscriberMessages(self, subscriber, messages):
        for msg in messages:
            self.assertEqual(msg, subscriber.get(timeout=1.0))

    def assertMetrics(self, want_metrics):
        metrics = get_metrics()
        for k, v in want_metrics.items():
            self.assertAlmostEqual(metrics[k], v)

    def getCommonTestMessages(self):
        messages = [
            wagglemsg.Message(
                name="test",
                value=1234,
                timestamp=time.time_ns(),
                meta={},
            ),
            wagglemsg.Message(
                name="e",
                value=2.71828,
                timestamp=time.time_ns(),
                meta={"user": "******"},
            ),
            wagglemsg.Message(
                name="replace.app.meta.with.sys.meta",
                value="should replace meta with app and sys meta",
                timestamp=time.time_ns(),
                meta={
                    "vsn": "Z123",
                    "job": "sure",
                    "task": "ok",
                },
            ),
        ]
        shuffle(messages)
        return messages

    def getPublishTestCases(self):
        # TODO(sean) should we fuzz test this to try lots of different arguments
        messages = self.getCommonTestMessages()

        app_uid = str(uuid4())
        app_meta = {
            "job": f"sage-{randint(1, 1000000)}",
            "task": f"testing-{randint(1, 1000000)}",
            "host": f"{randint(1, 1000000)}.ws-nxcore",
            "plugin": f"plugin-test:{randtag()}",
            "vsn": "should be replaced",
        }
        self.updateAppMetaCache(app_uid, app_meta)

        # we expect the same messages, but with the app and sys meta tagged
        want_messages = [
            wagglemsg.Message(
                name=msg.name,
                value=msg.value,
                timestamp=msg.timestamp,
                # NOTE(sean) the order of meta is important. we should expect:
                # 1. sys meta overrides msg meta and app meta
                # 2. app meta overrides msg meta
                # (see the merge precedence sketch after this method)
                meta={
                    **msg.meta,
                    **app_meta,
                    **self.service.system_meta
                }) for msg in messages
        ]

        return app_uid, messages, want_messages
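
    # Hedged sketch (not part of the original suite): the dict merge semantics
    # the NOTE above relies on. In a dict literal, later ** unpackings override
    # earlier keys, so sys meta beats app meta, which beats the message meta.
    # All names and values here are illustrative only.
    def exampleMetaMergePrecedence(self):
        msg_meta = {"vsn": "from-msg", "user": "someone"}
        app_meta = {"vsn": "from-app", "task": "demo"}
        sys_meta = {"vsn": "from-sys"}
        merged = {**msg_meta, **app_meta, **sys_meta}
        assert merged == {"vsn": "from-sys", "user": "someone", "task": "demo"}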

    def getSystemPublishTestCases(self):
        messages = self.getCommonTestMessages()

        # we expect the same messages, but for system publishers only the sys meta should be tagged
        want_messages = [
            wagglemsg.Message(
                name=msg.name,
                value=msg.value,
                timestamp=msg.timestamp,
                # NOTE(sean) the order of meta is important. we should expect:
                # 1. sys meta overrides msg meta
                meta={
                    **msg.meta,
                    **self.service.system_meta
                }) for msg in messages
        ]

        return messages, want_messages

    def publishMessages(self, app_uid, messages, scope):
        with get_plugin(app_uid) as plugin:
            for msg in messages:
                plugin.publish(msg.name,
                               msg.value,
                               timestamp=msg.timestamp,
                               meta=msg.meta,
                               scope=scope)

    def publishSystemMessages(self, messages, scope, username):
        with ExitStack() as es:
            # TODO(sean) try to consolidate this with existing testing scaffolding
            conn = es.enter_context(
                pika.BlockingConnection(
                    pika.ConnectionParameters(
                        host=RABBITMQ_HOST,
                        port=RABBITMQ_PORT,
                        credentials=pika.PlainCredentials(
                            username=username,
                            # we're assuming password = username for test purposes
                            password=username,
                        ))))
            ch = es.enter_context(conn.channel())

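            # NOTE: RabbitMQ validates a message's user_id property against the
            # authenticated connection user, which is presumably how the service
            # identifies trusted system publishers (see
            # testSystemServicePublishBadUser below).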
            for msg in messages:
                properties = pika.BasicProperties(user_id=username)
                ch.basic_publish("to-validator",
                                 scope,
                                 wagglemsg.dump(msg),
                                 properties=properties)

    def testPublishBeehive(self):
        app_uid, messages, want_messages = self.getPublishTestCases()
        self.publishMessages(app_uid, messages, scope="beehive")
        self.assertMessages("to-beehive", want_messages)
        self.assertMetrics({
            "wes_data_service_messages_total": len(want_messages),
            "wes_data_service_messages_rejected_total": 0,
            "wes_data_service_messages_published_node_total": 0,
            "wes_data_service_messages_published_beehive_total": len(want_messages),
        })

    def testPublishNode(self):
        app_uid, messages, want_messages = self.getPublishTestCases()
        subscriber = self.getSubscriber("#")
        self.publishMessages(app_uid, messages, scope="node")
        self.assertSubscriberMessages(subscriber, want_messages)
        self.assertMetrics({
            "wes_data_service_messages_total": len(want_messages),
            "wes_data_service_messages_rejected_total": 0,
            "wes_data_service_messages_published_node_total": len(want_messages),
            "wes_data_service_messages_published_beehive_total": 0,
        })

    def testPublishAll(self):
        app_uid, messages, want_messages = self.getPublishTestCases()
        subscriber = self.getSubscriber("#")
        self.publishMessages(app_uid, messages, scope="all")
        self.assertSubscriberMessages(subscriber, want_messages)
        self.assertMessages("to-beehive", want_messages)
        self.assertMetrics({
            "wes_data_service_messages_total": len(want_messages),
            "wes_data_service_messages_rejected_total": 0,
            "wes_data_service_messages_published_node_total": len(want_messages),
            "wes_data_service_messages_published_beehive_total": len(want_messages),
        })

    def testSubscribeTopic(self):
        app_uid, messages, want_messages = self.getPublishTestCases()

        subscriber1 = self.getSubscriber("test")
        subscriber2 = self.getSubscriber("e")

        self.publishMessages(app_uid, messages, scope="all")

        self.assertSubscriberMessages(
            subscriber1, [msg for msg in want_messages if msg.name == "test"])
        self.assertSubscriberMessages(
            subscriber2, [msg for msg in want_messages if msg.name == "e"])

        self.assertMetrics({
            "wes_data_service_messages_total": len(want_messages),
            "wes_data_service_messages_rejected_total": 0,
            "wes_data_service_messages_published_node_total": len(want_messages),
            "wes_data_service_messages_published_beehive_total": len(want_messages),
        })

    def testBadMessageBody(self):
        app_uid = str(uuid4())
        self.channel.basic_publish(
            "to-validator",
            "all",
            b"{bad data",
            properties=pika.BasicProperties(app_id=app_uid))

        time.sleep(0.1)

        self.assertMetrics({
            "wes_data_service_messages_total": 1,
            "wes_data_service_messages_rejected_total": 1,
            "wes_data_service_messages_published_node_total": 0,
            "wes_data_service_messages_published_beehive_total": 0,
        })

    def testNoAppUID(self):
        with get_plugin("") as plugin:
            plugin.publish("test", 123)

        time.sleep(0.1)

        self.assertMetrics({
            "wes_data_service_messages_total": 1,
            "wes_data_service_messages_rejected_total": 1,
            "wes_data_service_messages_published_node_total": 0,
            "wes_data_service_messages_published_beehive_total": 0,
        })

    def testNoAppMeta(self):
        app_uid = str(uuid4())

        with get_plugin(app_uid) as plugin:
            plugin.publish("test", 123)

        time.sleep(0.1)

        self.assertMetrics({
            "wes_data_service_messages_total": 1,
            "wes_data_service_messages_rejected_total": 1,
            "wes_data_service_messages_published_node_total": 0,
            "wes_data_service_messages_published_beehive_total": 0,
        })

    def testSystemServicePublish(self):
        messages, want_messages = self.getSystemPublishTestCases()
        subscriber = self.getSubscriber("#")
        self.publishSystemMessages(messages, scope="all", username="******")
        self.assertSubscriberMessages(subscriber, want_messages)
        self.assertMessages("to-beehive", want_messages)
        self.assertMetrics({
            "wes_data_service_messages_total": len(want_messages),
            "wes_data_service_messages_rejected_total": 0,
            "wes_data_service_messages_published_node_total": len(want_messages),
            "wes_data_service_messages_published_beehive_total": len(want_messages),
        })

    def testSystemServicePublishBadUser(self):
        messages, _ = self.getSystemPublishTestCases()
        self.publishSystemMessages(messages, "all", username="******")
        time.sleep(0.1)
        self.assertMetrics({
            "wes_data_service_messages_total": len(messages),
            "wes_data_service_messages_rejected_total": len(messages),
            "wes_data_service_messages_published_node_total": 0,
            "wes_data_service_messages_published_beehive_total": 0,
        })

    def testPublishUpload(self):
        # TODO(sean) clean up! added as a regression test for now.
        app_uid = str(uuid4())

        tag = randtag()

        app_meta = {
            "job": f"sage-{randint(1, 1000000)}",
            "task": f"testing-{randint(1, 1000000)}",
            "host": f"{randint(1, 1000000)}.ws-nxcore",
            "plugin": f"plugin-test:{tag}",
            "vsn": "should be replaced",
        }
        self.updateAppMetaCache(app_uid, app_meta)

        timestamp = time.time_ns()
        filename = "hello.txt"

        with TemporaryDirectory() as tmpdir:
            file = Path(tmpdir, filename)
            file.write_text("hello")
            with get_plugin(app_uid) as plugin:
                plugin.upload_file(file,
                                   meta={"user": "******"},
                                   timestamp=timestamp)

        job = app_meta["job"]
        task = app_meta["task"]
        node = self.service.system_meta["node"]

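        # the upload message value is expected to point at the file's location
        # in the storage API, following the layout used below:
        #   https://storage.sagecontinuum.org/api/v1/data/<job>/sage-<task>-<tag>/<node>/<timestamp>-<filename>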
        self.assertMessages("to-beehive", [
            wagglemsg.Message(
                name="upload",
                value=f"https://storage.sagecontinuum.org/api/v1/data/{job}/sage-{task}-{tag}/{node}/{timestamp}-{filename}",
                timestamp=timestamp,
                meta={
                    "user": "******",
                    "filename": "hello.txt",
                    **app_meta,
                    **self.service.system_meta,
                })
        ])

        self.assertMetrics({
            "wes_data_service_messages_total": 1,
            "wes_data_service_messages_rejected_total": 0,
            "wes_data_service_messages_published_node_total": 1,
            "wes_data_service_messages_published_beehive_total": 1,
        })
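

# Hedged sketch (assumed, not shown in this excerpt): the standard unittest
# entry point so the suite above can be run directly with the interpreter,
# assuming the module imports unittest at the top of the file.
if __name__ == "__main__":
    unittest.main()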