Example #1
def send_email(send_target, send_port, send_sender, send_recipient, send_body,
               send_tls):
    logging.info("Sending email")
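    # smtplib's debug trace (set_debuglevel below) goes to stderr (fd 2), so
    # stash the real stderr on a spare descriptor and point fd 2 at a
    # temporary file; the trace is read back and logged after the send.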
    t = TemporaryFile()
    available_fd = t.fileno()
    t.close()
    os.dup2(2, available_fd)
    t = TemporaryFile()
    os.dup2(t.fileno(), 2)
    try:
        server = SMTP(send_target, send_port)
        server.set_debuglevel(100)
        server.ehlo_or_helo_if_needed()
        if send_tls:
            try_tls(server)
        server.sendmail(send_sender, send_recipient, send_body)
        server.quit()
        sys.stderr.flush()
        t.flush()
        t.seek(0)
        stderr_output = t.read()
        t.close()
        os.dup2(available_fd, 2)
        os.close(available_fd)
        count = 0
        for line in stderr_output.decode('utf-8').split("\n"):
            count += 1
            logging.debug(line)
            print(line)
    except Exception as exc:
        logging.critical("Email failed: %s\r\nExiting." %
                         str(exc))  # log error message
        sys.exit("Email failed: %s\r\nExiting." %
                 str(exc))  # give an error message
Example #2
class MockSys:
    def __init__(self):
        self.stdin = TemporaryFile("w")
        self.stdout = TemporaryFile("r")
        self.stderr = TemporaryFile("r")
        self.__stderr__ = self.stderr
        self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(), self.stderr.fileno()]
Example #3
class MockSys():

    def __init__(self):
        self.stdin = TemporaryFile('w')
        self.stdout = TemporaryFile('r')
        self.stderr = TemporaryFile('r')
        self.__stderr__ = self.stderr
        self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
                          self.stderr.fileno()]
Example #4
    def apply(self, req, proj):
        """Run this prototype on a new project.
        NOTE: If you pass in a project that isn't new, this could explode. Don't do that.
        """
        from api import TracForgeAdminSystem

        steps = TracForgeAdminSystem(self.env).get_project_setup_participants()

        db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute("DELETE FROM tracforge_project_log WHERE project=%s", (proj.name,))
        db.commit()

        for step in self:
            action = args = None
            if isinstance(step, dict):
                action = step["action"]
                args = step["args"]
            else:
                action, args = step

            pid = os.fork()
            if not pid:
                # o_fd, o_file = mkstemp('tracforge-step', text=True)
                # e_fd, e_file = mkstemp('tracforge-step', text=True)

                o_file = TemporaryFile(prefix="tracforge-step", bufsize=0)
                e_file = TemporaryFile(prefix="tracforge-step", bufsize=0)

                sys.stdout = o_file
                sys.stderr = e_file

                os.dup2(o_file.fileno(), 1)
                os.dup2(e_file.fileno(), 2)

                rv = steps[action]["provider"].execute_setup_action(req, proj, action, args)
                self.env.log.debug("TracForge: %s() => %r", action, rv)

                o_file.seek(0, 0)
                o_data = o_file.read()
                o_file.close()
                e_file.seek(0, 0)
                e_data = e_file.read()
                e_file.close()

                db = self.env.get_db_cnx()
                cursor = db.cursor()
                cursor.execute(
                    "INSERT INTO tracforge_project_log (project, action, args, return, stdout, stderr) VALUES (%s, %s, %s, %s, %s, %s)",
                    (proj.name, action, args, int(rv), o_data, e_data),
                )
                db.commit()
                db.close()

                os._exit(0)
        os.waitpid(pid, 0)
Example #5
    def apply(self, req, proj):
        """Run this prototype on a new project.
        NOTE: If you pass in a project that isn't new, this could explode. Don't do that.
        """
        from api import TracForgeAdminSystem
        steps = TracForgeAdminSystem(self.env).get_project_setup_participants()

        db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute('DELETE FROM tracforge_project_log WHERE project=%s',
                       (proj.name, ))
        db.commit()

        for step in self:
            action = args = None
            if isinstance(step, dict):
                action = step['action']
                args = step['args']
            else:
                action, args = step

            pid = os.fork()
            if not pid:
                #o_fd, o_file = mkstemp('tracforge-step', text=True)
                #e_fd, e_file = mkstemp('tracforge-step', text=True)

                o_file = TemporaryFile(prefix='tracforge-step', bufsize=0)
                e_file = TemporaryFile(prefix='tracforge-step', bufsize=0)

                sys.stdout = o_file
                sys.stderr = e_file

                os.dup2(o_file.fileno(), 1)
                os.dup2(e_file.fileno(), 2)

                rv = steps[action]['provider'].execute_setup_action(
                    req, proj, action, args)
                self.env.log.debug('TracForge: %s() => %r', action, rv)

                o_file.seek(0, 0)
                o_data = o_file.read()
                o_file.close()
                e_file.seek(0, 0)
                e_data = e_file.read()
                e_file.close()

                db = self.env.get_db_cnx()
                cursor = db.cursor()
                cursor.execute(
                    'INSERT INTO tracforge_project_log (project, action, args, return, stdout, stderr) VALUES (%s, %s, %s, %s, %s, %s)',
                    (proj.name, action, args, int(rv), o_data, e_data))
                db.commit()
                db.close()

                os._exit(0)
        os.waitpid(pid, 0)
Example #6
class T(threading.Thread):
    _shutdown_msg = "shutdown"

    def __init__(self):
        threading.Thread.__init__(self)
        self._fd = TemporaryFile()
        self._comm_fd = TemporaryFile()
        self._run = False

    def get_file_handle(self):
        return self._fd

    def run(self):
        self._run = True
        while self._run:
            t1 = time.time()
            r, _, _ = select.select([self._fd.fileno(), self._comm_fd.fileno()], [], [])
            print "select time:", time.time()-t1
            for elem in r:
                if elem == self._fd.fileno():
                    s = self._fd.tell()
                    self._fd.seek(0, os.SEEK_END)  # to the end
                    e = self._fd.tell()
                    if s == e:  # nothing new
                        continue
                    self._fd.seek(-(e-s), os.SEEK_END)
                    diff = self._fd.read(e-s)
                    if True:
                        sys.stdout.write(diff)
                        sys.stdout.flush()

                # exit
                elif elem == self._comm_fd.fileno():
                    self._comm_fd.seek(0, os.SEEK_END)
                    if self._comm_fd.tell() == len(T._shutdown_msg):
                        self._run = False
        self._comm_fd.write(T._shutdown_msg)
        self._comm_fd.flush()

    def stop(self):
        self._comm_fd.seek(0, os.SEEK_END)
        if self._comm_fd.tell() != 0:
            return
        self._comm_fd.write(T._shutdown_msg)
        self._comm_fd.flush()
        while self._comm_fd.tell() != 2*len(T._shutdown_msg):
            self._comm_fd.seek(0, os.SEEK_END)

    def __del__(self):
        self._fd.close()
Example #7
    def backup(self):
        if self.dry_run:
            return
        if not os.path.exists(self.config['tar']['directory']) \
         or not os.path.isdir(self.config['tar']['directory']):
            raise BackupError('{0} is not a directory!'.format(self.config['tar']['directory']))
        out_name = "{0}.tar".format(
            self.config['tar']['directory'].lstrip('/').replace('/', '_'))
        outfile = os.path.join(self.target_directory, out_name)
        args = ['tar', 'c', self.config['tar']['directory']]
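        # Collect tar's stderr in an anonymous temporary file so it can be
        # replayed into the error log once the process exits.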
        errlog = TemporaryFile()
        stream = self._open_stream(outfile, 'w')
        LOG.info("Executing: %s", list2cmdline(args))
        pid = Popen(
            args,
            stdout=stream.fileno(),
            stderr=errlog.fileno(),
            close_fds=True)
        status = pid.wait()
        try:
            errlog.flush()
            errlog.seek(0)
            for line in errlog:
                LOG.error("%s[%d]: %s", list2cmdline(args), pid.pid, line.rstrip())
        finally:
            errlog.close()

        if status != 0:
            raise BackupError('tar failed (status={0})'.format(status))
Example #8
def build_compose_file(repo: mzbuild.Repository, command: str,
                       config_file: str) -> IO[bytes]:
    """Substitute known keys with mzbuild-provided values

    * Replace `mzimage` with fingerprinted image names
    """
    images = []
    default = os.getenv(f"MZBUILD_DOCKER_TAG", None)
    with open(config_file) as f:
        compose = yaml.safe_load(f)
        # strip mzconduct top-level key, if it exists
        compose.pop("mzconduct", None)
        for config in compose["services"].values():
            if "mzbuild" in config:
                image_name = config["mzbuild"]

                if image_name not in repo.images:
                    raise errors.BadSpec(
                        f"mzcompose: unknown image {image_name}")

                image = repo.images[image_name]
                override_tag = os.getenv(f"MZBUILD_{image.env_var_name()}_TAG",
                                         default)
                if override_tag is not None:
                    config["image"] = image.docker_name(override_tag)
                    print(
                        f"mzcompose: warning: overriding {image_name} image to tag {override_tag}",
                        file=sys.stderr,
                    )
                    del config["mzbuild"]
                else:
                    images.append(image)

            if "propagate-uid-gid" in config:
                config["user"] = f"{os.getuid()}:{os.getgid()}"
                del config["propagate-uid-gid"]

    deps = repo.resolve_dependencies(images)
    for d in deps:
        say(d.spec())

    for config in compose["services"].values():
        if "mzbuild" in config:
            config["image"] = deps[config["mzbuild"]].spec()
            del config["mzbuild"]

    # Check if the command is going to create or start containers, and if so
    # build the dependencies. This can be slow, so we don't want to do it if we
    # can help it (e.g., for `down` or `ps`).
    if command in ["create", "run", "start", "up"]:
        deps.acquire()

    # Construct a configuration that will point Docker Compose at the correct
    # images.
    tempfile = TemporaryFile()
    os.set_inheritable(tempfile.fileno(), True)
    yaml.dump(compose, tempfile, encoding="utf-8")  # type: ignore
    tempfile.flush()
    tempfile.seek(0)
    return tempfile
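The returned file is made inheritable so that a child process can read it. A minimal sketch of how it might be consumed, assuming a docker-compose invocation along the lines of Examples #31 and #33 below:

import subprocess

f = build_compose_file(repo, "up", "mzcompose.yml")
subprocess.run(
    ["docker-compose", "-f", f"/dev/fd/{f.fileno()}", "up", "-d"],
    close_fds=False,  # keep the inherited descriptor open in the child
    check=True,
)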
Example #9
    def backup(self):
        """
        Create backup
        """
        if self.dry_run:
            return
        if not os.path.exists(self.config["tar"]["directory"]) or not os.path.isdir(
            self.config["tar"]["directory"]
        ):
            raise BackupError("{0} is not a directory!".format(self.config["tar"]["directory"]))
        out_name = "{0}.tar".format(self.config["tar"]["directory"].lstrip("/").replace("/", "_"))
        outfile = os.path.join(self.target_directory, out_name)
        args = ["tar", "c", self.config["tar"]["directory"]]
        errlog = TemporaryFile()
        stream = open_stream(outfile, "w", **self.config["compression"])
        LOG.info("Executing: %s", list2cmdline(args))
        pid = Popen(args, stdout=stream.fileno(), stderr=errlog.fileno(), close_fds=True)
        status = pid.wait()
        try:
            errlog.flush()
            errlog.seek(0)
            for line in errlog:
                LOG.error("%s[%d]: %s", list2cmdline(args), pid.pid, line.rstrip())
        finally:
            errlog.close()

        if status != 0:
            raise BackupError("tar failed (status={0})".format(status))
Example #11
def convert_hwp5file_into_odtpkg(hwp5file):
    from tempfile import TemporaryFile
    tmpfile = TemporaryFile()
    import os
    tmpfile2 = os.fdopen(os.dup(tmpfile.fileno()), 'r')

    from zipfile import ZipFile
    zf = ZipFile(tmpfile, 'w')
    from hwp5.hwp5odt import ODTPackage
    odtpkg = ODTPackage(zf)
    try:
        from hwp5.hwp5odt import Converter
        import hwp5.plat

        if haveXSLTTransformer():
            xslt = xslt_with_libreoffice
        else:
            # we use default xslt
            xslt = hwp5.plat.get_xslt()

        # convert without RelaxNG validation
        convert = Converter(xslt)

        # Embed images: see #32 - https://github.com/mete0r/pyhwp/issues/32
        convert(hwp5file, odtpkg, embedimage=True)
    finally:
        odtpkg.close()

    tmpfile2.seek(0)
    odtpkg_stream = InputStreamFromFileLike(tmpfile2)
    odtpkg_storage = StorageFromInputStream(odtpkg_stream)
    return odtpkg_storage
Example #13
    def __init__(self, repo: mzbuild.Repository, name: str):
        self.name = name
        self.repo = repo
        self.images: List[mzbuild.Image] = []

        default_tag = os.getenv(f"MZBUILD_TAG", None)

        if name in self.repo.compositions:
            self.path = self.repo.compositions[name]
        else:
            raise errors.UnknownComposition

        with open(self.path) as f:
            compose = yaml.safe_load(f)

        # Stash away sub workflows so that we can load them with the correct environment variables
        self.workflows = compose.pop("mzworkflows", None)

        # Resolve all services that reference an `mzbuild` image to a specific
        # `image` reference.
        for config in compose["services"].values():
            if "mzbuild" in config:
                image_name = config["mzbuild"]

                if image_name not in self.repo.images:
                    raise errors.BadSpec(f"mzcompose: unknown image {image_name}")

                image = self.repo.images[image_name]
                override_tag = os.getenv(
                    f"MZBUILD_{image.env_var_name()}_TAG", default_tag
                )
                if override_tag is not None:
                    config["image"] = image.docker_name(override_tag)
                    print(
                        f"mzcompose: warning: overriding {image_name} image to tag {override_tag}",
                        file=sys.stderr,
                    )
                    del config["mzbuild"]
                else:
                    self.images.append(image)

                if "propagate-uid-gid" in config:
                    config["user"] = f"{os.getuid()}:{os.getgid()}"
                    del config["propagate-uid-gid"]

        deps = self.repo.resolve_dependencies(self.images)
        for config in compose["services"].values():
            if "mzbuild" in config:
                config["image"] = deps[config["mzbuild"]].spec()
                del config["mzbuild"]

        # Emit the munged configuration to a temporary file so that we can later
        # pass it to Docker Compose.
        tempfile = TemporaryFile()
        os.set_inheritable(tempfile.fileno(), True)
        yaml.dump(compose, tempfile, encoding="utf-8")  # type: ignore
        tempfile.flush()
        self.file = tempfile
Example #14
def test_dup_temp_file(selenium):
    # See https://github.com/emscripten-core/emscripten/issues/15012
    import os
    from tempfile import TemporaryFile

    tf = TemporaryFile(buffering=0)
    fd1 = os.dup(tf.fileno())
    os.dup2(tf.fileno(), 50)
    s = b"hello there!"
    tf.write(s)
    tf2 = open(fd1, "w+")
    assert tf2.tell() == len(s)
    # This next assertion actually demonstrates a bug in dup: the correct value
    # to return should be b"".
    assert os.read(fd1, 50) == b""
    tf2.seek(1)
    assert tf.tell() == 1
    assert tf.read(100) == b"ello there!"
Example #15
class LeptonicaErrorTrap(object):
    """Context manager to trap errors reported by Leptonica.

    Leptonica's error return codes are unreliable to the point of being
    almost useless.  It does, however, write errors to stderr provided that is
    not disabled at its compile time.  Fortunately this is done using error
    macros so it is very self-consistent.

    This context manager redirects stderr to a temporary file which is then
    read and parsed for error messages.  As a side benefit, debug messages
    from Leptonica are also suppressed.

    """
    def __enter__(self):
        from io import UnsupportedOperation
        self.tmpfile = TemporaryFile()

        # Save the old stderr, and redirect stderr to temporary file
        sys.stderr.flush()
        try:
            self.copy_of_stderr = os.dup(sys.stderr.fileno())
            os.dup2(self.tmpfile.fileno(),
                    sys.stderr.fileno(),
                    inheritable=False)
        except UnsupportedOperation:
            self.copy_of_stderr = None
        return

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore old stderr
        sys.stderr.flush()
        if self.copy_of_stderr is not None:
            os.dup2(self.copy_of_stderr, sys.stderr.fileno())
            os.close(self.copy_of_stderr)

        # Get data from tmpfile (in with block to ensure it is closed)
        with self.tmpfile as tmpfile:
            tmpfile.seek(0)  # Cursor will be at end, so move back to beginning
            leptonica_output = tmpfile.read().decode(errors='replace')

        assert self.tmpfile.closed
        assert not sys.stderr.closed

        # If there are Python errors, let them bubble up
        if exc_type:
            logger.warning(leptonica_output)
            return False

        # If there are Leptonica errors, wrap them in Python exceptions
        if 'Error' in leptonica_output:
            if 'image file not found' in leptonica_output:
                raise FileNotFoundError()
            if 'pixWrite: stream not opened' in leptonica_output:
                raise LeptonicaIOError()
            raise LeptonicaError(leptonica_output)

        return False
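The redirection pattern used above also works on its own; a minimal sketch with plain os.dup/os.dup2 (no Leptonica involved):

import os
import sys
from tempfile import TemporaryFile

tmp = TemporaryFile()
sys.stderr.flush()
saved = os.dup(sys.stderr.fileno())          # keep the real stderr alive
os.dup2(tmp.fileno(), sys.stderr.fileno())   # fd 2 now writes to the temp file
try:
    os.write(2, b"C-level error text\n")     # what a C library would emit
finally:
    sys.stderr.flush()
    os.dup2(saved, sys.stderr.fileno())      # restore stderr
    os.close(saved)
tmp.seek(0)
assert tmp.read() == b"C-level error text\n"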
Example #17
class LeptonicaErrorTrap(object):

    """Context manager to trap errors reported by Leptonica.

    Leptonica's error return codes are unreliable to the point of being
    almost useless.  It does, however, write errors to stderr provided that is
    not disabled at its compile time.  Fortunately this is done using error
    macros so it is very self-consistent.

    This context manager redirects stderr to a temporary file which is then
    read and parsed for error messages.  As a side benefit, debug messages
    from Leptonica are also suppressed.

    """
    def __init__(self, verbose=False):
        self.verbose = verbose

    def __enter__(self):
        self.tmpfile = TemporaryFile()

        # Save the old stderr, and redirect stderr to temporary file
        self.old_stderr_fileno = os.dup(sys.stderr.fileno())
        os.dup2(self.tmpfile.fileno(), sys.stderr.fileno())
        return

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore old stderr
        os.dup2(self.old_stderr_fileno, sys.stderr.fileno())
        os.close(self.old_stderr_fileno)

        # Get data from tmpfile (in with block to ensure it is closed)
        with self.tmpfile as tmpfile:
            tmpfile.seek(0)  # Cursor will be at end, so move back to beginning
            leptonica_output = tmpfile.read().decode(errors='replace')

        # If there are Python errors, let them bubble up
        if exc_type:
            stderr(leptonica_output)
            return False

        if self.verbose and leptonica_output.strip() != '':
            stderr(leptonica_output)

        # If there are Leptonica errors, wrap them in Python exceptions
        if 'Error' in leptonica_output:
            if 'image file not found' in leptonica_output:
                raise LeptonicaIOError()
            if 'pixWrite: stream not opened' in leptonica_output:
                raise LeptonicaIOError()
            if 'not enough conf to get orientation' in leptonica_output:
                pass
            else:
                raise LeptonicaError(leptonica_output)

        return False
Example #18
    def run(self, databases, stream, additional_options=None):
        """Run mysqldump with the options configured on this instance"""
        if self.mock_env:
            subprocess.Popen = self.mock_env.mocked_popen

        if not hasattr(stream, 'fileno'):
            raise MySQLDumpError("Invalid output stream")

        if not databases:
            raise MySQLDumpError("No databases specified to backup")

        args = [
            self.cmd_path,
        ]

        if self.defaults_file:
            if self.extra_defaults:
                args.append('--defaults-extra-file=%s' % self.defaults_file)
            else:
                args.append('--defaults-file=%s' % self.defaults_file)

        args.extend([str(opt) for opt in self.options])

        if additional_options:
            args.extend(additional_options)

        if databases is ALL_DATABASES:
            args.append('--all-databases')
        else:
            if len(databases) > 1:
                args.append('--databases')
            args.extend(databases)

        if self.mock_env:
            LOG.info("Dry Run: %s", subprocess.list2cmdline(args))
        else:
            LOG.info("Executing: %s", subprocess.list2cmdline(args))
        errlog = TemporaryFile()
        pid = subprocess.Popen(args,
                               stdout=stream.fileno(),
                               stderr=errlog.fileno(),
                               close_fds=True)
        status = pid.wait()
        try:
            errlog.flush()
            errlog.seek(0)
            for line in errlog:
                LOG.error("%s[%d]: %s", self.cmd_path, pid.pid, line.rstrip())
        finally:
            errlog.close()
        if status != 0:
            raise MySQLDumpError("mysqldump exited with non-zero status %d" %
                                 pid.returncode)
Example #19
class CaptureOutput(object):
    def __init__(self):
        self._file = TemporaryFile()

    def fileno(self):
        return self._file.fileno()

    def __contains__(self, substr):
        return substr in str(self)

    def __str__(self):
        self._file.seek(0, 0)
        # TemporaryFile() is opened in binary mode, so decode for __str__
        return self._file.read().decode()
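Popen only needs an object with a working fileno(), so an instance can stand in for a real file; a minimal usage sketch:

from subprocess import Popen

out = CaptureOutput()
Popen(["echo", "hello"], stdout=out).wait()
assert "hello" in out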
Example #20
def test_dup_stdout(selenium):
    # Test redirecting stdout using low level os.dup operations.
    # This sort of redirection is used in pytest.
    import os
    import sys
    from tempfile import TemporaryFile

    tf = TemporaryFile(buffering=0)
    save_stdout = os.dup(sys.stdout.fileno())
    os.dup2(tf.fileno(), sys.stdout.fileno())
    print("hi!!")
    print("there...")
    assert tf.tell() == len("hi!!\nthere...\n")
    os.dup2(save_stdout, sys.stdout.fileno())
    print("not captured")
    os.dup2(tf.fileno(), sys.stdout.fileno())
    print("captured")
    assert tf.tell() == len("hi!!\nthere...\ncaptured\n")
    os.dup2(save_stdout, sys.stdout.fileno())
    os.close(save_stdout)
    tf.seek(0)
    assert tf.read(1000).decode() == "hi!!\nthere...\ncaptured\n"
Example #21
class StackTraceBuffer(object):

    def __init__(self):
        self.f = None

    def fileno(self):
        if not self.f:
            self.f = TemporaryFile()
        return self.f.fileno()

    def getvalue(self):
        if self.f:
            self.f.seek(0)
            return self.f.read()
        return ''
Example #22
class PassphraseFile(object):
    def __init__(self, passphrase):
        self.passphrase = passphrase.encode(
            'utf-8') if type(passphrase) != bytes else passphrase
        self.file = TemporaryFile()

    def __enter__(self):
        self.file.write(self.passphrase)
        self.file.flush()
        return self.name()

    def __exit__(self, type, value, traceback):
        self.file.close()

    def name(self):
        return '/proc/%d/fd/%d' % (getpid(), self.file.fileno())
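On Linux, /proc/<pid>/fd/<fd> re-exposes the unlinked temporary file by path, so the passphrase never touches a named file on disk; a minimal usage sketch (Linux-only):

import subprocess

with PassphraseFile('s3cret') as path:
    # any tool that accepts a passphrase *file* can open the secret via /proc
    out = subprocess.run(['cat', path], capture_output=True, check=True)
assert out.stdout == b's3cret'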
Example #23
    def run(self, databases, stream, additional_options=None):
        """Run mysqldump with the options configured on this instance"""
        if not hasattr(stream, "fileno"):
            raise MySQLDumpError("Invalid output stream")

        if not databases:
            raise MySQLDumpError("No databases specified to backup")

        args = [self.cmd_path]

        if self.defaults_file:
            if self.extra_defaults:
                args.append("--defaults-extra-file=%s" % self.defaults_file)
            else:
                args.append("--defaults-file=%s" % self.defaults_file)

        args.extend([str(opt) for opt in self.options])

        if additional_options:
            args.extend(additional_options)

        if databases is ALL_DATABASES:
            args.append("--all-databases")
        else:
            if len(databases) > 1:
                args.append("--databases")
            args.extend(databases)

        if self.mock_env:
            LOG.info("Dry Run: %s", subprocess.list2cmdline(args))
            popen = self.mock_env.mocked_popen
        else:
            LOG.info("Executing: %s", subprocess.list2cmdline(args))
            popen = subprocess.Popen
        errlog = TemporaryFile()
        pid = popen(args, stdout=stream.fileno(), stderr=errlog.fileno(), close_fds=True)
        status = pid.wait()
        try:
            errlog.flush()
            errlog.seek(0)
            for line in errlog:
                LOG.error("%s[%d]: %s", self.cmd_path, pid.pid, line.rstrip())
        finally:
            errlog.close()
        if status != 0:
            raise MySQLDumpError("mysqldump exited with non-zero status %d" % pid.returncode)
Example #24
def run_command(conn, command):
    global TIMEOUT
    temp_out = TemporaryFile(mode='w+')
    fileno = temp_out.fileno()
    p = Popen(command, shell=True, stdout=fileno, stderr=fileno)
    start_time = time()
    while p.poll() is None:
        if time() > start_time + TIMEOUT:
            temp_out.seek(0)
            result = temp_out.read()
            temp_out.close()
            break
    if 'closed' not in str(temp_out):
        temp_out.seek(0)
        result = temp_out.read()
        temp_out.close()
    conn.send(str(len(result)).zfill(16) + result)
Example #25
class _stdout_redirected_win:

    def __enter__(self):
        self._org = sys.stdout
        sys.stdout = sys.__stdout__
        fdout = sys.stdout.fileno()
        self._file = TemporaryFile()
        self._dup = None
        if fdout >= 0:
            self._dup = os.dup(fdout)
            os.dup2(self._file.fileno(), fdout)

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.flush()
        if self._dup is not None:
            os.dup2(self._dup, sys.stdout.fileno())
            os.close(self._dup)
        sys.stdout = self._org
        self._file.seek(0)
        self._file.close()
Example #26
    def __init__(self, stream, length, _shared=None):
        """
        :param stream:  THE STREAM WE WILL GET THE BYTES FROM
        :param length:  THE MAX NUMBER OF BYTES WE ARE EXPECTING
        :param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
        :return:
        """
        self.position = 0
        file_ = TemporaryFile()
        if not _shared:
            self.shared = Data(length=length,
                               locker=Lock(),
                               stream=stream,
                               done_read=0,
                               file=file_,
                               buffer=mmap(file_.fileno(), length))
        else:
            self.shared = _shared

        self.shared.ref_count += 1
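Note that mmap() requires the underlying file to already span `length` bytes, so a fresh TemporaryFile must be pre-sized before mapping (the surrounding class presumably guarantees this elsewhere); a minimal sketch of the idea:

from mmap import mmap
from tempfile import TemporaryFile

length = 4096
scratch = TemporaryFile()
scratch.truncate(length)             # extend the empty file to the mapped size
buf = mmap(scratch.fileno(), length)
buf[:5] = b"hello"
assert buf[:5] == b"hello"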
Example #27
class APRFile(object):
    """Wrap a Python file-like object as an APR File"""

    def __init__(self, pyfile):
        self.pyfile = pyfile
        self.pool = Pool()
        self._as_parameter_ = POINTER(apr_file_t)()
        self.tempfile = None
        if hasattr(pyfile, "fileno"):
            # Looks like this is a real file. We can just write
            # directly to said file
            osfile = apr_os_file_t(get_osfhandle(pyfile.fileno()))
        else:
            # Looks like this is a StringIO buffer or a fake file.
            # Write to a temporary file and copy the output to the
            # buffer when we are closed or flushed
            self.tempfile = TemporaryFile()
            osfile = apr_os_file_t(get_osfhandle(self.tempfile.fileno()))
        apr_os_file_put(byref(self._as_parameter_), byref(osfile),
                        APR_CREATE | APR_WRITE | APR_BINARY, self.pool)

    def flush(self):
        """Flush output to the underlying Python object"""
        if self.tempfile:
            self.tempfile.seek(0)
            copyfileobj(self.tempfile, self.pyfile)
            self.tempfile.truncate(0)

    def close(self):
        """Close the APR file wrapper, leaving the underlying Python object
           untouched"""
        self.flush()
        if self.tempfile:
            self.tempfile.close()
            self.tempfile = None
        self.pool.destroy()
        self.pool = None

    def __del__(self):
        if self.pool:
            self.close()
Example #29
    def _open_stream(self, path, mode, method=None):
        """Open a stream through the holland compression api, relative to
        this instance's target directory
        """
        compression_method = method or self.config['compression']['method']
        compression_level = self.config['compression']['level']
        compression_options = self.config['compression']['options']
        stream = open_stream(path,
                             mode,
                             compression_method,
                             compression_level,
                             extra_args=compression_options)
        return stream

    def backup(self):
        if self.dry_run:
            return
        if not os.path.exists(self.config['tar']['directory']) \
                or not os.path.isdir(self.config['tar']['directory']):
            raise BackupError('{0} is not a directory!'.format(self.config['tar']['directory']))
        out_name = "{0}.tar".format(
            self.config['tar']['directory'].lstrip('/').replace('/', '_'))
        outfile = os.path.join(self.target_directory, out_name)
        args = ['tar', 'c', self.config['tar']['directory']]
        errlog = TemporaryFile()
        stream = self._open_stream(outfile, 'w')
        LOG.info("Executing: %s", list2cmdline(args))
        pid = Popen(
            args,
            stdout=stream.fileno(),
            stderr=errlog.fileno(),
            close_fds=True)
        status = pid.wait()
        try:
            errlog.flush()
            errlog.seek(0)
            for line in errlog:
                LOG.error("%s[%d]: %s", list2cmdline(args), pid.pid, line.rstrip())
        finally:
            errlog.close()
Example #30
    def __init__(self, stream, length, _shared=None):
        """
        :param stream:  THE STREAM WE WILL GET THE BYTES FROM
        :param length:  THE MAX NUMBER OF BYTES WE ARE EXPECTING
        :param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
        :return:
        """
        self.position = 0
        file_ = TemporaryFile()
        if not _shared:
            self.shared = Dict(
                length=length,
                locker=Lock(),
                stream=stream,
                done_read=0,
                file=file_,
                buffer=mmap(file_.fileno(), length)
            )
        else:
            self.shared = _shared

        self.shared.ref_count += 1
Example #31
def main(argv: List[str]) -> int:
    # Lightly parse the arguments so we know what to do.
    args, unknown_args = ArgumentParser().parse_known_args(argv)
    if not args.file:
        config_file = "mzcompose.yml"
    elif len(args.file) > 1:
        print("mzcompose: multiple -f/--file options are not yet supported",
              file=sys.stderr)
        return 1
    else:
        config_file = args.file[0]

    def say(s: str) -> None:
        if not args.mz_quiet:
            print(s)

    root = Path(os.environ["MZ_ROOT"])
    repo = mzbuild.Repository(root)

    if args.command == "gen-shortcuts":
        return gen_shortcuts(repo)

    # Determine what images this particular compose file depends upon.
    say("==> Collecting mzbuild dependencies")
    images = []
    with open(config_file) as f:
        compose = yaml.safe_load(f)
        for config in compose["services"].values():
            if "mzbuild" in config:
                image_name = config["mzbuild"]
                del config["mzbuild"]

                if image_name not in repo.images:
                    print("mzcompose: unknown image {}".format(image_name),
                          file=sys.stderr)
                    return 1

                image = repo.images[image_name]
                override_tag = os.environ.get(
                    "MZBUILD_{}_TAG".format(image.env_var_name()), None)
                if override_tag is not None:
                    config["image"] = image.docker_name(override_tag)
                    print("mzcompose: warning: overriding {} image to tag {}".
                          format(image_name, override_tag, file=sys.stderr))
                else:
                    config["image"] = image.spec()
                    images.append(image)

            if "propagate-uid-gid" in config:
                config["user"] = "******".format(os.getuid(), os.getgid())
                del config["propagate-uid-gid"]

    deps = repo.resolve_dependencies(images)
    for d in deps:
        say(d.spec())

    # Check if the command is going to create or start containers, and if so
    # build the dependencies. This can be slow, so we don't want to do it if we
    # can help it (e.g., for `down` or `ps`).
    if args.command in ["create", "run", "start", "up"]:
        deps.acquire()

    # Construct a configuration that will point Docker Compose at the correct
    # images.
    tempfile = TemporaryFile()
    os.set_inheritable(tempfile.fileno(), True)
    yaml.dump(compose, tempfile, encoding="utf-8")  # type: ignore
    tempfile.flush()
    tempfile.seek(0)

    # Hand over control to Docker Compose.
    say("==> Delegating to Docker Compose")
    dc_args = [
        "docker-compose",
        "-f",
        "/dev/fd/{}".format(tempfile.fileno()),
        "--project-directory",
        args.project_directory or str(Path(config_file).parent),
        *unknown_args,
        *([args.command] if args.command is not None else []),
        *args.extra,
    ]
    os.execvp("docker-compose", dc_args)
Example #32
class _LeptonicaErrorTrap:
    """
    Context manager to trap errors reported by Leptonica.

    Leptonica's error return codes don't provide much information about what
    went wrong. Leptonica does, however, write more detailed errors to stderr
    (provided this is not disabled at compile time). The Leptonica source
    code is very consistent in its use of macros to generate errors.

    This context manager redirects stderr to a temporary file which is then
    read and parsed for error messages.  As a side benefit, debug messages
    from Leptonica are also suppressed.

    """
    def __init__(self):
        self.tmpfile = None
        self.copy_of_stderr = -1
        self.no_stderr = False

    def __enter__(self):
        from io import UnsupportedOperation

        self.tmpfile = TemporaryFile()

        # Save the old stderr, and redirect stderr to temporary file
        with suppress(AttributeError):
            sys.stderr.flush()
        try:
            self.copy_of_stderr = os.dup(sys.stderr.fileno())
            os.dup2(self.tmpfile.fileno(),
                    sys.stderr.fileno(),
                    inheritable=False)
        except AttributeError:
            # We are in some unusual context where our Python process does not
            # have a sys.stderr. Leptonica still expects to write to file
            # descriptor 2, so we are going to ensure it is redirected.
            self.copy_of_stderr = None
            self.no_stderr = True
            os.dup2(self.tmpfile.fileno(), 2, inheritable=False)
        except UnsupportedOperation:
            self.copy_of_stderr = None
        return

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore old stderr
        with suppress(AttributeError):
            sys.stderr.flush()

        if self.copy_of_stderr is not None:
            os.dup2(self.copy_of_stderr, sys.stderr.fileno())
            os.close(self.copy_of_stderr)
        if self.no_stderr:
            os.close(2)

        # Get data from tmpfile
        self.tmpfile.seek(
            0)  # Cursor will be at end, so move back to beginning
        leptonica_output = self.tmpfile.read().decode(errors='replace')
        self.tmpfile.close()
        # If there are Python errors, record them
        if exc_type:
            logger.warning(leptonica_output)

        # If there are Leptonica errors, wrap them in Python exceptions
        if 'Error' in leptonica_output:
            if 'image file not found' in leptonica_output:
                raise FileNotFoundError()
            if 'pixWrite: stream not opened' in leptonica_output:
                raise LeptonicaIOError()
            if 'index not valid' in leptonica_output:
                raise IndexError()
            raise LeptonicaError(leptonica_output)

        return False
Example #33
class Composition:
    """A parsed mzcompose.yml with a loaded mzcompose.py file."""

    @dataclass
    class TestResult:
        duration: float
        error: Optional[str]

    def __init__(
        self,
        repo: mzbuild.Repository,
        name: str,
        preserve_ports: bool = False,
        silent: bool = False,
        munge_services: bool = True,
    ):
        self.name = name
        self.description = None
        self.repo = repo
        self.preserve_ports = preserve_ports
        self.silent = silent
        self.workflows: Dict[str, Callable[..., None]] = {}
        self.test_results: OrderedDict[str, Composition.TestResult] = OrderedDict()

        if name in self.repo.compositions:
            self.path = self.repo.compositions[name]
        else:
            raise UnknownCompositionError(name)

        # load the mzcompose.yml file, if one exists
        mzcompose_yml = self.path / "mzcompose.yml"
        if mzcompose_yml.exists():
            with open(mzcompose_yml) as f:
                compose = yaml.safe_load(f) or {}
        else:
            compose = {}

        self.compose = compose

        if "version" not in compose:
            compose["version"] = "3.7"

        if "services" not in compose:
            compose["services"] = {}

        # Load the mzcompose.py file, if one exists
        mzcompose_py = self.path / "mzcompose.py"
        if mzcompose_py.exists():
            spec = importlib.util.spec_from_file_location("mzcompose", mzcompose_py)
            assert spec
            module = importlib.util.module_from_spec(spec)
            assert isinstance(spec.loader, importlib.abc.Loader)
            spec.loader.exec_module(module)
            self.description = inspect.getdoc(module)
            for name, fn in getmembers(module, isfunction):
                if name.startswith("workflow_"):
                    # The name of the workflow is the name of the function
                    # with the "workflow_" prefix stripped and any underscores
                    # replaced with dashes.
                    name = name[len("workflow_") :].replace("_", "-")
                    self.workflows[name] = fn

            for python_service in getattr(module, "SERVICES", []):
                name = python_service.name
                if name in compose["services"]:
                    raise UIError(f"service {name!r} specified more than once")
                compose["services"][name] = python_service.config

        # Add default volumes
        compose.setdefault("volumes", {}).update(
            {
                "mzdata": None,
                "pgdata": None,
                "mydata": None,
                "tmp": None,
                "secrets": None,
            }
        )

        # The CLI driver will handle acquiring these dependencies.
        if munge_services:
            self.dependencies = self._munge_services(compose["services"].items())

        # Emit the munged configuration to a temporary file so that we can later
        # pass it to Docker Compose.
        self.file = TemporaryFile(mode="w")
        os.set_inheritable(self.file.fileno(), True)
        self._write_compose()

    def _munge_services(
        self, services: List[Tuple[str, dict]]
    ) -> mzbuild.DependencySet:
        images = []

        for name, config in services:
            # Remember any mzbuild references.
            if "mzbuild" in config:
                image_name = config["mzbuild"]
                if image_name not in self.repo.images:
                    raise UIError(f"mzcompose: unknown image {image_name}")
                image = self.repo.images[image_name]
                images.append(image)

            if "propagate_uid_gid" in config:
                if config["propagate_uid_gid"]:
                    config["user"] = f"{os.getuid()}:{os.getgid()}"
                del config["propagate_uid_gid"]

            ports = config.setdefault("ports", [])
            for i, port in enumerate(ports):
                if self.preserve_ports and not ":" in str(port):
                    # If preserving ports, bind the container port to the same
                    # host port, assuming the host port is available.
                    ports[i] = f"{port}:{port}"
                elif ":" in str(port) and not config.get("allow_host_ports", False):
                    # Raise an error for host-bound ports, unless
                    # `allow_host_ports` is `True`
                    raise UIError(
                        "programming error: disallowed host port in service {name!r}",
                        hint=f'Add `"allow_host_ports": True` to the service config to disable this check.',
                    )

            if "allow_host_ports" in config:
                config.pop("allow_host_ports")

            if self.repo.rd.coverage:
                # Emit coverage information to a file in a directory that is
                # bind-mounted to the "coverage" directory on the host. We
                # inject the configuration to all services for simplicity, but
                # this only has an effect if the service runs instrumented Rust
                # binaries.
                config.setdefault("environment", []).append(
                    f"LLVM_PROFILE_FILE=/coverage/{name}-%m.profraw"
                )
                config.setdefault("volumes", []).append("./coverage:/coverage")

        # Determine mzbuild specs and inject them into services accordingly.
        deps = self.repo.resolve_dependencies(images)
        for _name, config in services:
            if "mzbuild" in config:
                config["image"] = deps[config["mzbuild"]].spec()
                del config["mzbuild"]

        return deps

    def _write_compose(self) -> None:
        self.file.seek(0)
        self.file.truncate()
        yaml.dump(self.compose, self.file)
        self.file.flush()

    @classmethod
    def lint(cls, repo: mzbuild.Repository, name: str) -> List[LintError]:
        """Checks a composition for common errors."""
        if not name in repo.compositions:
            raise UnknownCompositionError(name)

        errs: List[LintError] = []

        path = repo.compositions[name] / "mzcompose.yml"

        if path.exists():
            with open(path) as f:
                composition = yaml.safe_load(f) or {}

            _lint_composition(path, composition, errs)
        return errs

    def invoke(
        self, *args: str, capture: bool = False, stdin: Optional[str] = None
    ) -> subprocess.CompletedProcess:
        """Invoke `docker-compose` on the rendered composition.

        Args:
            args: The arguments to pass to `docker-compose`.
            capture: Whether to capture the child's stdout stream.
            input: A string to provide as stdin for the command.
        """

        if not self.silent:
            print(f"$ docker-compose {' '.join(args)}", file=sys.stderr)

        self.file.seek(0)

        stdout = None
        if capture:
            stdout = subprocess.PIPE

        try:
            return subprocess.run(
                [
                    "docker-compose",
                    *(["--log-level=ERROR"] if self.silent else []),
                    f"-f/dev/fd/{self.file.fileno()}",
                    "--project-directory",
                    self.path,
                    *args,
                ],
                close_fds=False,
                check=True,
                stdout=stdout,
                input=stdin,
                text=True,
            )
        except subprocess.CalledProcessError as e:
            if e.stdout:
                print(e.stdout)
            raise UIError(f"running docker-compose failed (exit status {e.returncode})")

    def port(self, service: str, private_port: Union[int, str]) -> int:
        """Get the public port for a service's private port.

        Delegates to `docker-compose port`. See that command's help for details.

        Args:
            service: The name of a service in the composition.
            private_port: A private port exposed by the service.
        """
        proc = self.invoke("port", service, str(private_port), capture=True)
        if not proc.stdout.strip():
            raise UIError(
                f"service f{service!r} is not exposing port {private_port!r}",
                hint="is the service running?",
            )
        return int(proc.stdout.split(":")[1])

    def default_port(self, service: str) -> int:
        """Get the default public port for a service.

        Args:
            service: The name of a service in the composition.
        """
        ports = self.compose["services"][service]["ports"]
        if not ports:
            raise UIError(f"service f{service!r} does not expose any ports")
        private_port = str(ports[0]).split(":")[0]
        return self.port(service, private_port)

    def workflow(self, name: str, *args: str) -> None:
        """Run a workflow in the composition.

        Raises a `KeyError` if the workflow does not exist.

        Args:
            name: The name of the workflow to run.
            args: The arguments to pass to the workflow function.
        """
        ui.header(f"Running workflow {name}")
        func = self.workflows[name]
        parser = WorkflowArgumentParser(name, inspect.getdoc(func), list(args))
        if len(inspect.signature(func).parameters) > 1:
            func(self, parser)
        else:
            # If the workflow doesn't have an `args` parameter, parse them here
            # with an empty parser to reject bogus arguments and to handle the
            # trivial help message.
            parser.parse_args()
            func(self)

    @contextmanager
    def override(self, *services: "Service") -> Iterator[None]:
        """Temporarily update the composition with the specified services.

        The services must already exist in the composition. They are restored to
        their old definitions when the `with` block ends. Note that the service
        definition is written in its entirety; i.e., the configuration is not
        deep merged but replaced wholesale.

        Lest you are tempted to change this function to allow dynamically
        injecting new services: do not do this! These services will not be
        visible to other commands, like `mzcompose run`, `mzcompose logs`, or
        `mzcompose down`, which makes debugging or inspecting the composition
        challenging.
        """
        # Remember the old composition.
        old_compose = copy.deepcopy(self.compose)

        # Update the composition with the new service definitions.
        deps = self._munge_services([(s.name, cast(dict, s.config)) for s in services])
        for service in services:
            self.compose["services"][service.name] = service.config

        # Re-acquire dependencies, as the override may have swapped an `image`
        # config for an `mzbuild` config.
        deps.acquire()

        self._write_compose()

        # Ensure image freshness
        self.pull_if_variable([service.name for service in services])

        try:
            # Run the next composition.
            yield
        finally:
            # Restore the old composition.
            self.compose = old_compose
            self._write_compose()

    @contextmanager
    def test_case(self, name: str) -> Iterator[None]:
        """Execute a test case.

        This context manager provides a very lightweight testing framework. If
        the body of the context manager raises an exception, the test case is
        considered to have failed; otherwise it is considered to have succeeded.
        In either case the execution time and status of the test are recorded in
        `test_results`.

        Example:
            A simple workflow that executes a table-driven test:

            ```
            @dataclass
            class TestCase:
                name: str
                files: list[str]

            test_cases = [
                TestCase(name="short", files=["quicktests.td"]),
                TestCase(name="long", files=["longtest1.td", "longtest2.td"]),
            ]

            def workflow_default(c: Composition):
                for tc in test_cases:
                    with c.test_case(tc.name):
                        c.run("testdrive", *tc.files)
            ```

        Args:
            name: The name of the test case. Must be unique across the lifetime
                of a composition.
        """
        if name in self.test_results:
            raise UIError(f"test case {name} executed twice")
        ui.header(f"Running test case {name}")
        error = None
        start_time = time.time()
        try:
            yield
            ui.header(f"mzcompose: test case {name} succeeded")
        except Exception as e:
            error = str(e)
            if isinstance(e, UIError):
                print(f"mzcompose: test case {name} failed: {e}", file=sys.stderr)
            else:
                print(f"mzcompose: test case {name} failed:", file=sys.stderr)
                traceback.print_exc()
        elapsed = time.time() - start_time
        self.test_results[name] = Composition.TestResult(elapsed, error)

    def sql_cursor(self) -> Cursor:
        """Get a cursor to run SQL queries against the materialized service."""
        port = self.default_port("materialized")
        conn = pg8000.connect(host="localhost", user="******", port=port)
        conn.autocommit = True
        return conn.cursor()

    def sql(self, sql: str) -> None:
        """Run a batch of SQL statements against the materialized service."""
        with self.sql_cursor() as cursor:
            for statement in sqlparse.split(sql):
                print(f"> {statement}")
                cursor.execute(statement)

    def sql_query(self, sql: str) -> Any:
        """Execute and return results of a SQL query."""
        with self.sql_cursor() as cursor:
            cursor.execute(sql)
            return cursor.fetchall()
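
    # A usage sketch for the two helpers above; the table name and value are
    # hypothetical:
    #
    #     c.sql("CREATE TABLE t (a int); INSERT INTO t VALUES (1)")
    #     rows = c.sql_query("SELECT a FROM t")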

    def create_cluster(
        self,
        cluster: List,
        cluster_name: str = "cluster1",
        replica_name: str = "replica1",
    ) -> None:
        """Construct and run a CREATE CLUSTER statement based a list of Computed instances

        Args:
            cluster: a List of Computed instances that will form the cluster
            cluster_name: The cluster name to use
            replica_name: The replica name to use
        """
        self.sql(
            f"CREATE CLUSTER {cluster_name} REPLICAS ( {replica_name} ( REMOTE ["
            + ", ".join(f'"{p.name}:2100"' for p in cluster)
            + "]))"
        )
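
    # For illustration: with two Computed instances named (hypothetically)
    # "computed_1" and "computed_2", the statement above comes out as:
    #
    #   CREATE CLUSTER cluster1 REPLICAS ( replica1 ( REMOTE [
    #       "computed_1:2100", "computed_2:2100"]))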

    def start_and_wait_for_tcp(self, services: List[str]) -> None:
        """Sequentially start the named services, waiting for eaach to become
        available via TCP before moving on to the next."""
        for service in services:
            self.up(service)
            for port in self.compose["services"][service].get("ports", []):
                self.wait_for_tcp(host=service, port=port)

    def run(
        self,
        service: str,
        *args: str,
        detach: bool = False,
        rm: bool = False,
        env_extra: Dict[str, str] = {},
        capture: bool = False,
        stdin: Optional[str] = None,
    ) -> subprocess.CompletedProcess:
        """Run a one-off command in a service.

        Delegates to `docker-compose run`. See that command's help for details.
        Note that unlike `docker compose run`, any services whose definitions
        have changed are rebuilt (like `docker-compose up` would do) before the
        command is executed.

        Args:
            service: The name of a service in the composition.
            args: Arguments to pass to the service's entrypoint.
            detach: Run the container in the background.
            stdin: Read stdin from a string.
            env_extra: Additional environment variables to set in the container.
            rm: Remove container after run.
            capture: Capture the stdout of the `docker-compose` invocation.
        """
        # Restart any dependencies whose definitions have changed. The trick,
        # taken from Buildkite's Docker Compose plugin, is to run an `up`
        # command that requests zero instances of the requested service.
        self.invoke("up", "--detach", "--scale", f"{service}=0", service)
        return self.invoke(
            "run",
            *(f"-e{k}={v}" for k, v in env_extra.items()),
            *(["--detach"] if detach else []),
            *(["--rm"] if rm else []),
            service,
            *args,
            capture=capture,
            stdin=stdin,
        )

    def exec(
        self,
        service: str,
        *args: str,
        detach: bool = False,
        capture: bool = False,
        stdin: Optional[str] = None,
    ) -> subprocess.CompletedProcess:
        """Execute a one-off command in a service's running container

        Delegates to `docker-compose exec`.

        Args:
            service: The service whose container will be used.
            args: Arguments to pass to the service's entrypoint.
            detach: Run the container in the background.
            capture: Capture the stdout of the `docker-compose` invocation.
            stdin: Read stdin from a string.
        """

        return self.invoke(
            "exec",
            *(["--detach"] if detach else []),
            "-T",
            service,
            *(
                self.compose["services"][service]["entrypoint"]
                if "entrypoint" in self.compose["services"][service]
                else []
            ),
            *args,
            capture=capture,
            stdin=stdin,
        )

    def pull_if_variable(self, services: List[str]) -> None:
        """Pull fresh service images in case the tag indicates thee underlying image may change over time.

        Args:
            services: List of service names
        """

        for service in services:
            if "image" in self.compose["services"][service] and any(
                self.compose["services"][service]["image"].endswith(tag)
                for tag in [":latest", ":unstable", ":rolling"]
            ):
                self.invoke("pull", service)

    def up(self, *services: str, detach: bool = True, persistent: bool = False) -> None:
        """Build, (re)create, and start the named services.

        Delegates to `docker-compose up`. See that command's help for details.

        Args:
            services: The names of services in the composition.
            detach: Run containers in the background.
            persistent: Replace the container's entrypoint and command with
                `sleep infinity` so that additional commands can be scheduled
                on the container with `Composition.exec`.
        """
        if persistent:
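            # Note that the entrypoint/command override below applies to every
            # service in the composition for the duration of this `up` call,
            # not just the named services.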
            old_compose = copy.deepcopy(self.compose)
            for service in self.compose["services"].values():
                service["entrypoint"] = ["sleep", "infinity"]
                service["command"] = []
            self._write_compose()

        self.invoke("up", *(["--detach"] if detach else []), *services)

        if persistent:
            self.compose = old_compose
            self._write_compose()

    def down(self, destroy_volumes: bool = True, remove_orphans: bool = True) -> None:
        """Stop and remove resources.

        Delegates to `docker-compose down`. See that command's help for details.

        Args:
            destroy_volumes: Remove named volumes and anonymous volumes attached
                to containers.
        """
        self.invoke(
            "down",
            *(["--volumes"] if destroy_volumes else []),
            *(["--remove-orphans"] if remove_orphans else []),
        )

    def stop(self, *services: str) -> None:
        """Stop the docker containers for the named services.

        Delegates to `docker-compose stop`. See that command's help for details.

        Args:
            services: The names of services in the composition.
        """
        self.invoke("stop", *services)

    def kill(self, *services: str, signal: str = "SIGKILL") -> None:
        """Force stop service containers.

        Delegates to `docker-compose kill`. See that command's help for details.

        Args:
            services: The names of services in the composition.
            signal: The signal to deliver.
        """
        self.invoke("kill", f"-s{signal}", *services)

    def pause(self, *services: str) -> None:
        """Pause service containers.

        Delegates to `docker-compose pause`. See that command's help for details.

        Args:
            services: The names of services in the composition.
        """
        self.invoke("pause", *services)

    def unpause(self, *services: str) -> None:
        """Unpause service containers

        Delegates to `docker-compose unpause`. See that command's help for details.

        Args:
            services: The names of services in the composition.
        """
        self.invoke("unpause", *services)

    def rm(
        self, *services: str, stop: bool = True, destroy_volumes: bool = True
    ) -> None:
        """Remove stopped service containers.

        Delegates to `docker-compose rm`. See that command's help for details.

        Args:
            services: The names of services in the composition.
            stop: Stop the containers if necessary.
            destroy_volumes: Destroy any anonymous volumes associated with the
                service. Note that this does not destroy any named volumes
                attached to the service.
        """
        self.invoke(
            "rm",
            "--force",
            *(["--stop"] if stop else []),
            *(["-v"] if destroy_volumes else []),
            *services,
        )

    def rm_volumes(self, *volumes: str, force: bool = False) -> None:
        """Remove the named volumes.

        Args:
            volumes: The names of volumes in the composition.
            force: Whether to force the removal (i.e., don't error if the
                volume does not exist).
        """
        volumes = (f"{self.name}_{v}" for v in volumes)
        spawn.runv(
            ["docker", "volume", "rm", *(["--force"] if force else []), *volumes]
        )

    def sleep(self, duration: float) -> None:
        """Sleep for the specified duration in seconds."""
        print(f"Sleeping for {duration} seconds...")
        time.sleep(duration)

    # TODO(benesch): replace with Docker health checks.
    def wait_for_tcp(
        self,
        *,
        host: str = "localhost",
        port: Union[int, str],
        timeout_secs: int = 240,
    ) -> None:
        if isinstance(port, str):
            port = int(port.split(":")[0])
        ui.progress(f"waiting for {host}:{port}", "C")
        cmd = f"docker run --rm -t --network {self.name}_default ubuntu:focal-20210723".split()
        try:
            _check_tcp(cmd[:], host, port, timeout_secs)
        except subprocess.CalledProcessError:
            ui.progress(" error!", finish=True)
            raise UIError(f"unable to connect to {host}:{port}")
        else:
            ui.progress(" success!", finish=True)

    # TODO(benesch): replace with Docker health checks.
    def wait_for_postgres(
        self,
        *,
        dbname: str = "postgres",
        port: Optional[int] = None,
        host: str = "localhost",
        timeout_secs: int = 120,
        query: str = "SELECT 1",
        user: str = "postgres",
        password: str = "postgres",
        expected: Union[Iterable[Any], Literal["any"]] = [[1]],
        print_result: bool = False,
        service: str = "postgres",
    ) -> None:
        """Wait for a PostgreSQL service to start.

        Args:
            dbname: the name of the database to wait for
            host: the host postgres is listening on
            port: the port postgres is listening on
            timeout_secs: How long to wait for postgres to be up before failing (Default: 120)
            query: The query to execute to ensure that it is running (Default: "SELECT 1")
            user: The chosen user (this is only relevant for postgres)
            service: The service that postgres is running as (Default: postgres)
        """
        _wait_for_pg(
            dbname=dbname,
            host=host,
            port=self.port(service, port) if port else self.default_port(service),
            timeout_secs=timeout_secs,
            query=query,
            user=user,
            password=password,
            expected=expected,
            print_result=print_result,
        )

    # TODO(benesch): replace with Docker health checks.
    def wait_for_materialized(
        self,
        service: str = "materialized",
        *,
        user: str = "materialize",
        dbname: str = "materialize",
        host: str = "localhost",
        port: Optional[int] = None,
        timeout_secs: int = 60,
        query: str = "SELECT 1",
        expected: Union[Iterable[Any], Literal["any"]] = [[1]],
        print_result: bool = False,
    ) -> None:
        """Like `Workflow.wait_for_postgres`, but with Materialize defaults."""
        self.wait_for_postgres(
            user=user,
            dbname=dbname,
            host=host,
            port=port,
            timeout_secs=timeout_secs,
            query=query,
            expected=expected,
            print_result=print_result,
            service=service,
        )

    def testdrive(
        self,
        input: str,
        service: str = "testdrive",
        persistent: bool = True,
        args: List[str] = [],
    ) -> None:
        """Run a string as a testdrive script.

        Args:
            args: Additional arguments to pass to testdrive
            service: Optional name of the testdrive service to use.
            input: The string to execute.
            persistent: Whether a persistent testdrive container will be used.
        """

        if persistent:
            self.exec(service, *args, stdin=input)
        else:
            self.run(service, *args, stdin=input)
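
A minimal workflow sketch tying the methods above together; the service and
file names ("materialized", "testdrive", "smoke.td") are hypothetical:

def workflow_smoke(c: Composition) -> None:
    c.up("materialized")
    c.wait_for_materialized()
    with c.test_case("smoke"):
        c.run("testdrive", "smoke.td")
    c.down(destroy_volumes=True)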
Example #34
0
    def run(self, params):
        if self.datastore.get_one('users', ('username', '=', params.get('user'))) is None:
            raise TaskException(
                errno.ENOENT, 'User {0} does not exist'.format(params.get('user'))
            )

        self.message = 'Starting Rsync Task'
        self.set_progress(0)
        with open(os.path.join(params['path'], '.lock'), 'wb+') as lockfile:
            # Let's try to get a lock on this path for the rsync task
            # but do not freak out if you do not get it
            try:
                flock(lockfile, LOCK_EX | LOCK_NB)
            except IOError:
                logger.warning('Rsync Task could not get a lock on {0}'.format(params['path']))

            # Execute Rsync Task here
            line = '/usr/local/bin/rsync --info=progress2 -h'
            rsync_properties = params.get('rsync_properties')
            if rsync_properties:
                if rsync_properties.get('recursive'):
                    line += ' -r'
                if rsync_properties.get('times'):
                    line += ' -t'
                if rsync_properties.get('compress'):
                    line += ' -z'
                if rsync_properties.get('archive'):
                    line += ' -a'
                if rsync_properties.get('preserve_permissions'):
                    line += ' -p'
                if rsync_properties.get('preserve_attributes'):
                    line += ' -X'
                if rsync_properties.get('delete'):
                    line += ' --delete-delay'
                if rsync_properties.get('delay_updates'):
                    line += ' --delay-updates'
                if rsync_properties.get('extra'):
                    line += ' {0}'.format(rsync_properties.get('extra'))

            remote_host = params.get('remote_host')
            remote_address = ''
            if '@' in remote_host:
                remote_address = remote_host
            else:
                remote_user = params.get('remote_user', params.get('user'))
                remote_address = '"{0}"@{1}'.format(remote_user, remote_host)

            if params.get('rsync_mode') == 'MODULE':
                if params.get('rsync_direction') == 'PUSH':
                    line += ' "{0}" {1}::"{2}"'.format(
                        params.get('path'),
                        remote_address,
                        params.get('remote_module'),
                    )
                else:
                    line += ' {0}::"{1}" "{2}"'.format(
                        remote_address,
                        params.get('remote_module'),
                        params.get('rsync_path'),
                    )
            else:
                line += ' -e "ssh -p {0} -o BatchMode=yes -o StrictHostKeyChecking=yes"'.format(
                    params.get('remote_ssh_port', 22)
                )
                if params.get('rsync_direction') == 'PUSH':
                    line += ' "{0}" {1}:\\""{2}"\\"'.format(
                        params.get('path'),
                        remote_address,
                        params.get('remote_path'),
                    )
                else:
                    line += ' {0}:\\""{1}"\\" "{2}"'.format(
                        remote_address,
                        params.get('remote_path'),
                        params.get('path'),
                    )

            if params.get('quiet'):
                line += ' > /dev/null 2>&1'

            # Starting rsync subprocess
            logger.debug('Rsync Copy Task Command: {0}'.format(line))
            # It would be nice to get the progress but not at the cost of
            # killing this task!

            # Note this TemporaryFile hack for the subprocess stdout is needed
            # because setting Popen's `stdout=subprocess.PIPE` does not allow
            # that stdout to be seeked on. subprocess.PIPE only allows for
            # readline() and such read methods. stdout.readline() does not
            # allow for us to catch rsync's in-place progress updates which
            # are done with the '\r' character. It is also auto garbage collected.
            proc_stdout = TemporaryFile(mode='w+b', buffering=0)
            try:
                rsync_proc = subprocess.Popen(
                    line,
                    stdout=proc_stdout.fileno(),
                    stderr=subprocess.PIPE,
                    shell=True,
                    bufsize=0,
                    preexec_fn=demote(params.get('user'))
                )
                self.message = 'Executing Rsync Command'
                seek = 0
                old_seek = 0
                while rsync_proc.poll() is None:
                    proc_output = ''
                    proc_stdout.seek(seek)
                    try:
                        while True:
                            op_byte = proc_stdout.read(1).decode('utf8')
                            if op_byte == '':
                                # In this case break before incrementing `seek`
                                break
                            seek += 1
                            if op_byte == '\r':
                                break
                            proc_output += op_byte
                        if old_seek != seek:
                            old_seek = seek
                            self.message = proc_output.strip()
                            proc_output = proc_output.split(' ')
                            progress = [x for x in proc_output if '%' in x]
                            if len(progress):
                                self.set_progress(int(progress[0][:-1]))
                    except Exception as e:
                        # Catch IOError errno 9 (EBADF), which usually arises
                        # because an already-closed file object is used here,
                        # raising a Bad File Descriptor error. In that case
                        # break; the outer while loop will check whether
                        # rsync_proc.poll() is None and do the right thing.
                        if getattr(e, 'errno', None) == 9:
                            break
                        logger.debug("Parsing error in rsync task: {0}".format(str(e)))
            except Exception as e:
                flock(lockfile, LOCK_UN)
                self.message = 'Rsync Task Failed'
                raise TaskException(
                    errno.EIO,
                    'Rsync Task failed because of Error: {0}'.format(str(e))
                )
            if rsync_proc.returncode != 0:
                self.message = 'Rsync Task Failed'
                raise TaskException(
                    errno.EIO,
                    'Rsync Task returned with non-zero returncode. Error: {0}'.format(
                        rsync_proc.stderr.read().decode('utf8', 'replace'))
                )
            # Finally lets unlock that lockfile, it does not fail
            # even if did not acquire the lock in the first place
            flock(lockfile, LOCK_UN)
            self.message = 'Rsync Task Successfully Completed'
            self.set_progress(100)
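
The TemporaryFile-backed stdout above generalizes to any subprocess that
reports progress with carriage returns. A self-contained sketch (POSIX shell
assumed; printf stands in for rsync):

import subprocess
from tempfile import TemporaryFile

with TemporaryFile(mode='w+b', buffering=0) as out:
    # A pipe would only support readline(); a real file can be seeked and
    # re-read, so '\r'-terminated updates remain visible as they arrive.
    proc = subprocess.Popen(
        "printf '10%%\\r50%%\\r100%%\\n'",
        shell=True, stdout=out.fileno(), bufsize=0,
    )
    proc.wait()
    out.seek(0)
    for update in out.read().decode('utf8').split('\r'):
        print(update.strip())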
Example #35
0
class Composition:
    """A parsed mzcompose.yml with a loaded mzcompose.py file."""

    def __init__(
        self, repo: mzbuild.Repository, name: str, preserve_ports: bool = False
    ):
        self.name = name
        self.repo = repo
        self.images: List[mzbuild.Image] = []
        self.workflows: Dict[str, Callable[..., None]] = {}

        self.default_tag = os.getenv("MZBUILD_TAG")

        if name in self.repo.compositions:
            self.path = self.repo.compositions[name]
        else:
            raise UnknownCompositionError(name)

        # Load the mzcompose.yml file, if one exists
        mzcompose_yml = self.path / "mzcompose.yml"
        if mzcompose_yml.exists():
            with open(mzcompose_yml) as f:
                compose = yaml.safe_load(f) or {}
        else:
            compose = {}

        self.compose = compose

        if "version" not in compose:
            compose["version"] = "3.7"

        if "services" not in compose:
            compose["services"] = {}

        # Load the mzcompose.py file, if one exists
        mzcompose_py = self.path / "mzcompose.py"
        if mzcompose_py.exists():
            spec = importlib.util.spec_from_file_location("mzcompose", mzcompose_py)
            assert spec
            module = importlib.util.module_from_spec(spec)
            assert isinstance(spec.loader, importlib.abc.Loader)
            spec.loader.exec_module(module)
            for name, fn in getmembers(module, isfunction):
                if name.startswith("workflow_"):
                    # The name of the workflow is the name of the function
                    # with the "workflow_" prefix stripped and any underscores
                    # replaced with dashes.
                    name = name[len("workflow_") :].replace("_", "-")
                    self.workflows[name] = fn

            for python_service in getattr(module, "SERVICES", []):
                compose["services"][python_service.name] = python_service.config

        for name, config in compose["services"].items():
            if "propagate_uid_gid" in config:
                if config["propagate_uid_gid"]:
                    config["user"] = f"{os.getuid()}:{os.getgid()}"
                del config["propagate_uid_gid"]

            ports = config.setdefault("ports", [])
            for i, port in enumerate(ports):
                if ":" in str(port):
                    raise UIError(
                        f"programming error: disallowed host port in service {name!r}"
                    )
                if preserve_ports:
                    # If preserving ports, bind the container port to the same
                    # host port.
                    ports[i] = f"{port}:{port}"

            if self.repo.rd.coverage:
                # Emit coverage information to a file in a directory that is
                # bind-mounted to the "coverage" directory on the host. We
                # inject the configuration to all services for simplicity, but
                # this only has an effect if the service runs instrumented Rust
                # binaries.
                config.setdefault("environment", []).append(
                    f"LLVM_PROFILE_FILE=/coverage/{name}-%m.profraw"
                )
                config.setdefault("volumes", []).append("./coverage:/coverage")

        # Add default volumes
        compose.setdefault("volumes", {}).update(
            {
                "mzdata": None,
                "tmp": None,
                "secrets": None,
            }
        )

        self._resolve_mzbuild_references()

        # Emit the munged configuration to a temporary file so that we can later
        # pass it to Docker Compose.
        self.file = TemporaryFile()
        os.set_inheritable(self.file.fileno(), True)
        self._write_compose()

    def _resolve_mzbuild_references(self) -> None:
        # Resolve all services that reference an `mzbuild` image to a specific
        # `image` reference.
        for name, config in self.compose["services"].items():
            if "mzbuild" in config:
                image_name = config["mzbuild"]

                if image_name not in self.repo.images:
                    raise UIError(f"mzcompose: unknown image {image_name}")

                image = self.repo.images[image_name]
                override_tag = os.getenv(
                    f"MZBUILD_{image.env_var_name()}_TAG", self.default_tag
                )
                if override_tag is not None:
                    config["image"] = image.docker_name(override_tag)
                    print(
                        f"mzcompose: warning: overriding {image_name} image to tag {override_tag}",
                        file=sys.stderr,
                    )
                    del config["mzbuild"]
                else:
                    self.images.append(image)

        deps = self.repo.resolve_dependencies(self.images)
        for config in self.compose["services"].values():
            if "mzbuild" in config:
                config["image"] = deps[config["mzbuild"]].spec()
                del config["mzbuild"]

    def _write_compose(self) -> None:
        self.file.seek(0)
        self.file.truncate()
        yaml.dump(self.compose, self.file, encoding="utf-8")  # type: ignore
        self.file.flush()

    @classmethod
    def lint(cls, repo: mzbuild.Repository, name: str) -> List[LintError]:
        """Checks a composition for common errors."""
        if name not in repo.compositions:
            raise UnknownCompositionError(name)

        errs: List[LintError] = []

        path = repo.compositions[name] / "mzcompose.yml"

        if path.exists():
            with open(path) as f:
                composition = yaml.safe_load(f) or {}

            _lint_composition(path, composition, errs)
        return errs

    def invoke(self, *args: str, capture: bool = False) -> subprocess.CompletedProcess:
        """Invoke `docker-compose` on the rendered composition.

        Args:
            args: The arguments to pass to `docker-compose`.
            capture: Whether to capture the child's stdout stream.
        """
        print(f"$ docker-compose {' '.join(args)}", file=sys.stderr)

        self.file.seek(0)

        stdout = None
        if capture:
            stdout = subprocess.PIPE

        try:
            return subprocess.run(
                [
                    "docker-compose",
                    f"-f/dev/fd/{self.file.fileno()}",
                    "--project-directory",
                    self.path,
                    *args,
                ],
                close_fds=False,
                check=True,
                stdout=stdout,
                text=True,
            )
        except subprocess.CalledProcessError as e:
            if e.stdout:
                print(e.stdout)
            raise UIError(f"running docker-compose failed (exit status {e.returncode})")

    def port(self, service: str, private_port: Union[int, str]) -> int:
        """Get the public port for a service's private port.

        Delegates to `docker-compose port`. See that command's help for details.

        Args:
            service: The name of a service in the composition.
            private_port: A private port exposed by the service.
        """
        proc = self.invoke("port", service, str(private_port), capture=True)
        if not proc.stdout.strip():
            raise UIError(
                f"service f{service!r} is not exposing port {private_port!r}",
                hint="is the service running?",
            )
        return int(proc.stdout.split(":")[1])

    def default_port(self, service: str) -> int:
        """Get the default public port for a service.

        Args:
            service: The name of a service in the composition.
        """
        ports = self.compose["services"][service]["ports"]
        if not ports:
            raise UIError(f"service f{service!r} does not expose any ports")
        private_port = str(ports[0]).split(":")[0]
        return self.port(service, private_port)

    def workflow(self, name: str, *args: str) -> None:
        """Run a workflow in the composition.

        Raises a `KeyError` if the workflow does not exist.

        Args:
            name: The name of the workflow to run.
            args: The arguments to pass to the workflow function.
        """
        ui.header(f"Running workflow {name}")
        func = self.workflows[name]
        parser = WorkflowArgumentParser(name, inspect.getdoc(func), list(args))
        if len(inspect.signature(func).parameters) > 1:
            func(self, parser)
        else:
            # If the workflow doesn't have an `args` parameter, parse them here
            # with an empty parser to reject bogus arguments and to handle the
            # trivial help message.
            parser.parse_args()
            func(self)

    @contextmanager
    def override(self, *services: "Service") -> Iterator[None]:
        """Temporarily update the composition with the specified services.

        The services must already exist in the composition. They are restored to
        their old definitions when the `with` block ends. Note that the service
        definition is written in its entirety; i.e., the configuration is not
        deep merged but replaced wholesale.

        Lest you are tempted to change this function to allow dynamically
        injecting new services: do not do this! These services will not be
        visible to other commands, like `mzcompose run`, `mzcompose logs`, or
        `mzcompose down`, which makes debugging or inspecting the composition
        challenging.
        """
        # Remember the old composition.
        old_compose = copy.deepcopy(self.compose)

        # Update the composition with the new service definitions.
        for service in services:
            if service.name not in self.compose["services"]:
                raise RuntimeError(
                    "programming error in call to Workflow.with_services: "
                    f"{service.name!r} does not exist"
                )
            self.compose["services"][service.name] = service.config
            self._resolve_mzbuild_references()
        self._write_compose()

        try:
            # Run the next composition.
            yield
        finally:
            # Restore the old composition.
            self.compose = old_compose
            self._write_compose()

    def sql(self, sql: str) -> None:
        """Run a batch of SQL statements against the materialized service."""
        port = self.default_port("materialized")
        conn = pg8000.connect(host="localhost", user="******", port=port)
        conn.autocommit = True
        cursor = conn.cursor()
        for statement in sqlparse.split(sql):
            cursor.execute(statement)

    def start_and_wait_for_tcp(self, services: List[str]) -> None:
        """Sequentially start the named services, waiting for eaach to become
        available via TCP before moving on to the next."""
        for service in services:
            self.up(service)
            for port in self.compose["services"][service].get("ports", []):
                self.wait_for_tcp(host=service, port=port)

    def run(
        self,
        service: str,
        *args: str,
        detach: bool = False,
        rm: bool = False,
        env: Dict[str, str] = {},
        capture: bool = False,
    ) -> subprocess.CompletedProcess:
        """Run a one-off command in a service.

        Delegates to `docker-compose run`. See that command's help for details.
        Note that unlike `docker compose run`, any services whose definitions
        have changed are rebuilt (like `docker-compose up` would do) before the
        command is executed.

        Args:
            service: The name of a service in the composition.
            args: Arguments to pass to the service's entrypoint.
            detach: Run the container in the background.
            env: Additional environment variables to set in the container.
            rm: Remove container after run.
            capture: Capture the stdout of the `docker-compose` invocation.
        """
        # Restart any dependencies whose definitions have changed. The trick,
        # taken from Buildkite's Docker Compose plugin, is to run an `up`
        # command that requests zero instances of the requested service.
        self.invoke("up", "--detach", "--scale", f"{service}=0", service)
        return self.invoke(
            "run",
            *(f"-e{k}={v}" for k, v in env.items()),
            *(["--detach"] if detach else []),
            *(["--rm"] if rm else []),
            service,
            *args,
            capture=capture,
        )

    def up(self, *services: str, detach: bool = True) -> None:
        """Build, (re)create, and start the named services.

        Delegates to `docker-compose up`. See that command's help for details.

        Args:
            services: The names of services in the composition.
            detach: Run containers in the background.
        """
        self.invoke("up", *(["--detach"] if detach else []), *services)

    def kill(self, *services: str, signal: str = "SIGKILL") -> None:
        """Force stop service containers.

        Delegates to `docker-compose kill`. See that command's help for details.

        Args:
            services: The names of services in the composition.
            signal: The signal to deliver.
        """
        self.invoke("kill", f"-s{signal}", *services)

    def rm(
        self, *services: str, stop: bool = True, destroy_volumes: bool = True
    ) -> None:
        """Remove stopped service containers.

        Delegates to `docker-compose rm`. See that command's help for details.

        Args:
            services: The names of services in the composition.
            stop: Stop the containers if necessary.
            destroy_volumes: Destroy any anonymous volumes associated with the
                service. Note that this does not destroy any named volumes
                attached to the service.
        """
        self.invoke(
            "rm",
            "--force",
            *(["--stop"] if stop else []),
            *(["-v"] if destroy_volumes else []),
            *services,
        )

    def rm_volumes(self, *volumes: str, force: bool = False) -> None:
        """Remove the named volumes.

        Args:
            volumes: The names of volumes in the composition.
            force: Whether to force the removal (i.e., don't error if the
                volume does not exist).
        """
        volumes = (f"{self.name}_{v}" for v in volumes)
        spawn.runv(
            ["docker", "volume", "rm", *(["--force"] if force else []), *volumes]
        )

    def sleep(self, duration: float) -> None:
        """Sleep for the specified duration in seconds."""
        print(f"Sleeping for {duration} seconds...")
        time.sleep(duration)

    # TODO(benesch): replace with Docker health checks.
    def wait_for_tcp(
        self,
        *,
        host: str = "localhost",
        port: int,
        timeout_secs: int = 240,
    ) -> None:
        ui.progress(f"waiting for {host}:{port}", "C")
        for remaining in ui.timeout_loop(timeout_secs):
            cmd = f"docker run --rm -t --network {self.name}_default ubuntu:focal-20210723".split()

            try:
                _check_tcp(cmd[:], host, port, timeout_secs)
            except subprocess.CalledProcessError:
                ui.progress(" {}".format(int(remaining)))
            else:
                ui.progress(" success!", finish=True)
                return

        ui.progress(" error!", finish=True)
        raise UIError(f"unable to connect to {host}:{port}")

    # TODO(benesch): replace with Docker health checks.
    def wait_for_postgres(
        self,
        *,
        dbname: str = "postgres",
        port: Optional[int] = None,
        host: str = "localhost",
        timeout_secs: int = 120,
        query: str = "SELECT 1",
        user: str = "postgres",
        password: str = "postgres",
        expected: Union[Iterable[Any], Literal["any"]] = [[1]],
        print_result: bool = False,
        service: str = "postgres",
    ) -> None:
        """Wait for a PostgreSQL service to start.

        Args:
            dbname: the name of the database to wait for
            host: the host postgres is listening on
            port: the port postgres is listening on
            timeout_secs: How long to wait for postgres to be up before failing (Default: 120)
            query: The query to execute to ensure that it is running (Default: "SELECT 1")
            user: The chosen user (this is only relevant for postgres)
            service: The service that postgres is running as (Default: postgres)
        """
        _wait_for_pg(
            dbname=dbname,
            host=host,
            port=port or self.default_port(service),
            timeout_secs=timeout_secs,
            query=query,
            user=user,
            password=password,
            expected=expected,
            print_result=print_result,
        )

    # TODO(benesch): replace with Docker health checks.
    def wait_for_materialized(
        self,
        service: str = "materialized",
        *,
        user: str = "materialize",
        dbname: str = "materialize",
        host: str = "localhost",
        port: Optional[int] = None,
        timeout_secs: int = 60,
        query: str = "SELECT 1",
        expected: Union[Iterable[Any], Literal["any"]] = [[1]],
        print_result: bool = False,
    ) -> None:
        """Like `Workflow.wait_for_postgres`, but with Materialize defaults."""
        self.wait_for_postgres(
            user=user,
            dbname=dbname,
            host=host,
            port=port,
            timeout_secs=timeout_secs,
            query=query,
            expected=expected,
            print_result=print_result,
            service=service,
        )
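
The Composition above hands its rendered YAML to `docker-compose` through the
/dev/fd filesystem instead of a named file. A stripped-down sketch of that
trick (Linux/macOS; `cat` stands in for docker-compose):

import os
import subprocess
from tempfile import TemporaryFile

f = TemporaryFile()
f.write(b"version: '3.7'\nservices: {}\n")
f.flush()
f.seek(0)
# The descriptor must survive exec in the child: mark it inheritable and
# disable Python's default close_fds behavior.
os.set_inheritable(f.fileno(), True)
subprocess.run(["cat", f"/dev/fd/{f.fileno()}"], close_fds=False, check=True)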
Example #36
0
class BackupFile(object):

    def __init__(self, Name, write):
        restricted_characters = '/\\?%*:|"<>'
        Name = ''.join(filter(lambda x: x not in restricted_characters, Name))

        self.__write = write
        self.__dropbox_client = None
        if os.path.exists('.dropbox_access_token'):
            access_file = open('.dropbox_access_token', 'r')
            access_token = access_file.read()
            self.__dropbox_client = dropbox.client.DropboxClient(access_token)

        if write:
            if self.__dropbox_client:
                self.__tmp_file = TemporaryFile()
                self.__file_name = '/%s.tar.xz' % (Name,)

                self.__file = tarfile.open(mode='w:xz', fileobj=self.__tmp_file, encoding='utf-8')
            else:
                self.__file = tarfile.open(os.path.join('.', 'backups', Name + '.tar.xz'), 'w:xz', encoding='utf-8')
        else:
            if self.__dropbox_client:
                remote_file = self.__dropbox_client.get_file('/%s.tar.xz' % (Name,))
                self.__tmp_file = TemporaryFile()
                self.__tmp_file.write(remote_file.read())
                self.__tmp_file.seek(0)

                self.__file = tarfile.open(mode='r:xz', fileobj=self.__tmp_file, encoding='utf-8')
            else:
                self.__file = tarfile.open(os.path.join('.', 'backups', Name + '.tar.xz'), 'r:xz', encoding='utf-8')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.__file.close()

        self.__tmp_file.seek(0)
        if self.__write and self.__dropbox_client:
            sys.stdout.write(' uploading to Dropbox... (0%)')
            sys.stdout.flush()

            size = os.fstat(self.__tmp_file.fileno()).st_size
            uploader = ChunkedProgressUploader(self.__dropbox_client, self.__tmp_file, size)

            for offset in uploader.upload_chunked():
                sys.stdout.write('\r uploading to Dropbox... ({0:f}%)'.format(offset / size * 100))
                sys.stdout.flush()

            uploader.finish(self.__file_name, overwrite=True)

            print()

    def __add_file(self, archive_path, full_path, depth=0):
        for it in os.listdir(full_path):
            new_archive_path = '/'.join([archive_path, it])
            new_full_path = os.path.join(full_path, it)

            if os.path.isdir(os.path.join(full_path, it)):
                print(' ' * depth, ' {}/'.format(it))
                self.__add_file(new_archive_path, new_full_path, depth + 1)
            else:
                print(' ' * depth, ' {}'.format(it))
                self.__file.add(new_full_path, new_archive_path)

    def add_files(self, id, path, files):
        if isinstance(files, str):
            files = [files]

        for it in files:
            if os.path.exists(os.path.join(path, it)):
                print(' backing up file {}'.format(it))
                archive_name = "data/{}/{}".format(id, it)
                self.__file.add(os.path.join(path, it), archive_name)

    def add_folder(self, id, path, folder):
        full_path = os.path.join(path, folder)
        if os.path.exists(full_path):
            print(' backing up folder {}'.format(full_path))
            archive_name = "data/{}".format(id)
            self.__add_file(archive_name, full_path)

    def restore_files(self, id, path, files):
        if not os.path.exists(path):
            os.makedirs(path)
        if isinstance(files, str):
            files = [files]

        for it in files:
            archive_name = "data/{}/{}".format(id, it)
            try:
                current_file = self.__file.getmember(archive_name)
                print(' restoring file {}'.format(it))
                current_file.name = current_file.name[(6 + len(id)):]
                self.__file.extract(current_file, path)
            except KeyError:
                pass

    def restore_folder(self, id, path, folder):
        full_path = os.path.join(path, folder)
        print(' restoring folder {}'.format(full_path))

        if not os.path.exists(full_path):
            os.makedirs(full_path)

        archive_name = "data/{}".format(id)

        subdir_and_files = []
        for it in self.__file.getmembers():
            if it.name.startswith(archive_name):
                it.name = it.name[(6 + len(id)):]
                subdir_and_files.append(it)

        self.__file.extractall(full_path, subdir_and_files)
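
BackupFile's Dropbox path never names a file on disk: the tar.xz archive is
built in an unnamed TemporaryFile and rewound for upload. A minimal sketch of
that pattern (archiving this script itself, as an arbitrary example):

import tarfile
from tempfile import TemporaryFile

buf = TemporaryFile()
with tarfile.open(mode='w:xz', fileobj=buf) as tar:
    tar.add(__file__, arcname='data/example/self.py')
buf.seek(0)
archive_bytes = buf.read()  # the bytes a chunked uploader would stream out
print(len(archive_bytes), 'bytes ready to upload')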
Example #37
0
class S3File(io.IOBase):
    """File like proxy for s3 files, manages upload and download of locally managed temporary file
    """

    def __init__(self, bucket, key, mode='w+b', *args, **kwargs):
        super(S3File, self).__init__(*args, **kwargs)
        self.bucket = bucket
        self.key = key
        self.mode = mode
        self.path = self.bucket + '/' + self.key

        # converts mode to readable/writable to enable the temporary file to have S3 data
        # read or written to it even if the S3File is read/write/append
        # i.e. "r" => "r+", "ab" => "a+b"
        updatable_mode = re.sub(r'^([rwa]+)(b?)$', r'\1+\2', mode)
        self._tempfile = TemporaryFile(updatable_mode)

        try:
            with s3errors(self.path):
                if 'a' in mode:
                    # File is in an appending mode, start with the content in file
                    s3.Object(bucket, key).download_fileobj(self._tempfile)
                    self.seek(0, os.SEEK_END)
                elif 'a' not in mode and 'w' not in mode and 'x' not in mode:
                    # file is not in a create mode, so it is in read mode
                    # start with the content in the file, and seek to the beginning
                    s3.Object(bucket, key).download_fileobj(self._tempfile)
                    self.seek(0, os.SEEK_SET)
        except Exception:
            self.close()
            raise

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        try:
            if self.writable():
                self.seek(0)
                with s3errors(self.path):
                    s3.Object(self.bucket, self.key).upload_fileobj(self._tempfile)
        finally:
            self._tempfile.close()

    @property
    def closed(self):
        return self._tempfile.closed

    def fileno(self):
        return self._tempfile.fileno()

    def flush(self):
        return self._tempfile.flush()

    def isatty(self):
        return self._tempfile.isatty()

    def readable(self):
        return 'r' in self.mode or '+' in self.mode

    def read(self, n=-1):
        if not self.readable():
            raise IOError('not open for reading')
        return self._tempfile.read(n)

    def readinto(self, b):
        return self._tempfile.readinto(b)

    def readline(self, limit=-1):
        if not self.readable():
            raise IOError('not open for reading')
        return self._tempfile.readline(limit)

    def readlines(self, hint=-1):
        if not self.readable():
            raise IOError('not open for reading')
        return self._tempfile.readlines(hint)

    def seek(self, offset, whence=os.SEEK_SET):
        self._tempfile.seek(offset, whence)
        return self.tell()

    def seekable(self):
        return True

    def tell(self):
        return self._tempfile.tell()

    def writable(self):
        return 'w' in self.mode or 'a' in self.mode or '+' in self.mode or 'x' in self.mode

    def write(self, b):
        if not self.writable():
            raise IOError('not open for writing')
        self._tempfile.write(b)
        return len(b)

    def writelines(self, lines):
        if not self.writable():
            raise IOError('not open for writing')
        return self._tempfile.writelines(lines)

    def truncate(self, size=None):
        if not self.writable():
            raise IOError('not open for writing')

        if size is None:
            size = self.tell()

        self._tempfile.truncate(size)
        return size
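
Hypothetical usage of S3File, assuming the module-level `s3` resource and
`s3errors` context manager are configured and the object already exists:

import os

with S3File('my-bucket', 'logs/app.log', mode='r+b') as f:
    head = f.read(64)
    f.seek(0, os.SEEK_END)
    f.write(b'\nprocessed')  # uploaded back to S3 when the file closes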
Example #38
0
    def __init__(self, repo: mzbuild.Repository, name: str):
        self.name = name
        self.repo = repo
        self.images: List[mzbuild.Image] = []
        self.workflows: Dict[str, Workflow] = {}

        default_tag = os.getenv("MZBUILD_TAG")

        if name in self.repo.compositions:
            self.path = self.repo.compositions[name]
        else:
            raise errors.UnknownComposition

        with open(self.path) as f:
            compose = yaml.safe_load(f)

        workflows = compose.pop("mzworkflows", None)
        if workflows is not None:
            # TODO: move this into the workflow so that it can use env vars that are
            # manually defined.
            workflows = _substitute_env_vars(workflows)
            for workflow_name, raw_w in workflows.items():
                built_steps = []
                for raw_step in raw_w["steps"]:
                    step_name = raw_step.pop("step")
                    step_ty = Steps.named(step_name)
                    munged = {
                        k.replace("-", "_"): v
                        for k, v in raw_step.items()
                    }
                    try:
                        step = step_ty(**munged)
                    except TypeError as e:
                        a = " ".join([f"{k}={v}" for k, v in munged.items()])
                        raise errors.BadSpec(
                            f"Unable to construct {step_name} with args {a}: {e}"
                        )
                    built_steps.append(step)
                env = raw_w.get("env")
                if not isinstance(env, dict) and env is not None:
                    raise errors.BadSpec(
                        f"Workflow {workflow_name} has wrong type for env: "
                        f"expected mapping, got {type(env).__name__}: {env}", )
                # ensure that integers (e.g. ports) are treated as env vars
                if isinstance(env, dict):
                    env = {k: str(v) for k, v in env.items()}
                self.workflows[workflow_name] = Workflow(workflow_name,
                                                         built_steps,
                                                         env=env,
                                                         composition=self)

        # Resolve all services that reference an `mzbuild` image to a specific
        # `image` reference.

        for config in compose["services"].values():
            if "mzbuild" in config:
                image_name = config["mzbuild"]

                if image_name not in self.repo.images:
                    raise errors.BadSpec(
                        f"mzcompose: unknown image {image_name}")

                image = self.repo.images[image_name]
                override_tag = os.getenv(f"MZBUILD_{image.env_var_name()}_TAG",
                                         default_tag)
                if override_tag is not None:
                    config["image"] = image.docker_name(override_tag)
                    print(
                        f"mzcompose: warning: overriding {image_name} image to tag {override_tag}",
                        file=sys.stderr,
                    )
                    del config["mzbuild"]
                else:
                    self.images.append(image)

                if "propagate-uid-gid" in config:
                    config["user"] = f"{os.getuid()}:{os.getgid()}"
                    del config["propagate-uid-gid"]

        deps = self.repo.resolve_dependencies(self.images)
        for config in compose["services"].values():
            if "mzbuild" in config:
                config["image"] = deps[config["mzbuild"]].spec()
                del config["mzbuild"]

        # Emit the munged configuration to a temporary file so that we can later
        # pass it to Docker Compose.
        tempfile = TemporaryFile()
        os.set_inheritable(tempfile.fileno(), True)
        yaml.dump(compose, tempfile, encoding="utf-8")  # type: ignore
        tempfile.flush()
        self.file = tempfile
Example #39
0
    def export_to_temp_file(self) -> IO[bytes]:
        t = TemporaryFile()
        seccomp_export_bpf(self._seccomp_ruleset_ptr, t.fileno())
        t.seek(0)
        return t
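
A follow-on sketch, assuming `rules` is an instance of the surrounding class:
the returned file is positioned at offset 0, so the compiled BPF program can
be read back directly:

bpf_file = rules.export_to_temp_file()
bpf_program = bpf_file.read()  # the raw BPF filter bytes
bpf_file.close()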
Example #40
0
def remote_print_stack(pid, output=1):
    """
    Tell a target process to print a stack trace.

    This currently only handles the main thread.
    TODO: handle multiple threads.

    @param pid:
      PID of target process.
    @type output:
      C{int}, C{file}, or C{str}
    @param output:
      Output file descriptor.
    """
    # Interpret C{output} argument as a file-like object, file descriptor, or
    # filename.
    if hasattr(output, 'write'):  # file-like object
        output_fh = output
        try:
            output.flush()
        except Exception:
            pass
        try:
            output_fd = output.fileno()
        except Exception:
            output_fd = None
        try:
            output_fn = Filename(output.name)
        except Exception:
            output_fn = None
    elif isinstance(output, int):
        output_fh = None
        output_fn = None
        output_fd = output
    elif isinstance(output, (str, Filename)):
        output_fh = None
        output_fn = Filename(output)
        output_fd = None
    else:
        raise TypeError(
            "remote_print_stack_trace(): expected file/str/int; got %s" %
            (type(output).__name__, ))
    temp_file = None
    remote_fn = output_fn
    if remote_fn is None and output_fd is not None:
        remote_fn = Filename("/proc/%d/fd/%d" % (os.getpid(), output_fd))
    # Figure out whether the target process will be able to open output_fn for
    # writing.  Since the target process would need to be running as the same
    # user as this process for us to be able to attach a debugger, we can
    # simply check whether we ourselves can open the file.  Typically output
    # will be fd 1 and we will have access to write to it.  However, if we're
    # sudoed, we won't be able to re-open it via the proc symlink, even though
    # we already currently have it open.  Another case is C{output} is a
    # file-like object that isn't a real file, e.g. a StringO.  In each case
    # we don't have a usable filename for the remote process yet.  To
    # address these situations, we create a temporary file for the remote
    # process to write to.
    if remote_fn is None or not remote_fn.iswritable:
        if not (output_fh or output_fd):
            assert remote_fn is not None
            raise OSError(errno.EACCES, "Can't write to %s" % output_fn)
        # We can still use the /proc/$pid/fd approach with an unnamed temp
        # file.  If it turns out there are situations where that doesn't work,
        # we can switch to using a NamedTemporaryFile.
        from tempfile import TemporaryFile
        temp_file = TemporaryFile()
        remote_fn = Filename("/proc/%d/fd/%d" %
                             (os.getpid(), temp_file.fileno()))
        assert remote_fn.iswritable
    # *** Do the code injection ***
    _remote_print_stack_to_file(pid, remote_fn)
    # Copy from temp file to the requested output.
    if temp_file is not None:
        data = temp_file.read()
        temp_file.close()
        if output_fh is not None:
            output_fh.write(data)
            output_fh.flush()
        elif output_fd is not None:
            with os.fdopen(output_fd, 'wb') as f:
                f.write(data)
        else:
            raise AssertionError("unreachable")
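
The temp-file fallback above relies on /proc/<pid>/fd/<n> naming any open
file, even an unnamed one. A Linux-only sketch of that trick, with a shell
child standing in for the debugged process:

import os
import subprocess
from tempfile import TemporaryFile

t = TemporaryFile()
path = "/proc/%d/fd/%d" % (os.getpid(), t.fileno())
subprocess.run(["sh", "-c", "echo written via procfs > " + path], check=True)
t.seek(0)
print(t.read().decode())  # -> written via procfs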
Example #41
0
    def run(self, params):
        if self.datastore.get_one(
                'users', ('username', '=', params.get('user'))) is None:
            raise TaskException(
                errno.ENOENT,
                'User {0} does not exist'.format(params.get('user')))

        self.message = 'Starting Rsync Task'
        self.set_progress(0)
        with open(os.path.join(params['path'], '.lock'), 'wb+') as lockfile:
            # Let's try to get a lock on this path for the rsync task
            # but do not freak out if you do not get it
            try:
                flock(lockfile, LOCK_EX | LOCK_NB)
            except IOError:
                logger.warning('Rsync Task could not get a lock on {0}'.format(
                    params['path']))

            # Execute Rsync Task here
            line = '/usr/local/bin/rsync --info=progress2 -h'
            rsync_properties = params.get('rsync_properties')
            if rsync_properties:
                if rsync_properties.get('recursive'):
                    line += ' -r'
                if rsync_properties.get('times'):
                    line += ' -t'
                if rsync_properties.get('compress'):
                    line += ' -z'
                if rsync_properties.get('archive'):
                    line += ' -a'
                if rsync_properties.get('preserve_permissions'):
                    line += ' -p'
                if rsync_properties.get('preserve_attributes'):
                    line += ' -X'
                if rsync_properties.get('delete'):
                    line += ' --delete-delay'
                if rsync_properties.get('delay_updates'):
                    line += ' --delay-updates'
                if rsync_properties.get('extra'):
                    line += ' {0}'.format(rsync_properties.get('extra'))

            remote_host = params.get('remote_host')
            remote_address = ''
            if '@' in remote_host:
                remote_address = remote_host
            else:
                remote_user = params.get('remote_user', params.get('user'))
                remote_address = '"{0}"@{1}'.format(remote_user, remote_host)

            if params.get('rsync_mode') == 'MODULE':
                if params.get('rsync_direction') == 'PUSH':
                    line += ' "{0}" {1}::"{2}"'.format(
                        params.get('path'),
                        remote_address,
                        params.get('remote_module'),
                    )
                else:
                    line += ' {0}::"{1}" "{2}"'.format(
                        remote_address,
                        params.get('remote_module'),
                        params.get('rsync_path'),
                    )
            else:
                line += ' -e "ssh -p {0} -o BatchMode=yes -o StrictHostKeyChecking=yes"'.format(
                    params.get('remote_ssh_port', 22))
                if params.get('rsync_direction') == 'PUSH':
                    line += ' "{0}" {1}:\\""{2}"\\"'.format(
                        params.get('path'),
                        remote_address,
                        params.get('remote_path'),
                    )
                else:
                    line += ' {0}:\\""{1}"\\" "{2}"'.format(
                        remote_address,
                        params.get('remote_path'),
                        params.get('path'),
                    )

            if params.get('quiet'):
                line += ' > /dev/null 2>&1'

            # Starting rsync subprocess
            logger.debug('Rsync Copy Task Command: {0}'.format(line))
            # It would be nice to get the progress but not at the cost of
            # killing this task!

            # Note: this TemporaryFile hack for the subprocess stdout is
            # needed because Popen's `stdout=subprocess.PIPE` yields a pipe
            # that cannot be seeked; it only supports readline() and similar
            # read methods, and stdout.readline() would miss rsync's in-place
            # progress updates, which are terminated with '\r' rather than
            # '\n'. The TemporaryFile is also cleaned up automatically.
            # (A standalone sketch of this technique follows this example.)
            proc_stdout = TemporaryFile(mode='w+b', buffering=0)
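            # Note: `demote` below is a project-specific preexec_fn helper
            # (not shown in this snippet); presumably it drops the child
            # process to the given user's uid/gid before exec.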
            try:
                rsync_proc = subprocess.Popen(line,
                                              stdout=proc_stdout.fileno(),
                                              stderr=subprocess.PIPE,
                                              shell=True,
                                              bufsize=0,
                                              preexec_fn=demote(
                                                  params.get('user')))
                self.message = 'Executing Rsync Command'
                seek = 0
                old_seek = 0
                while rsync_proc.poll() is None:
                    proc_output = ''
                    proc_stdout.seek(seek)
                    try:
                        while True:
                            op_byte = proc_stdout.read(1).decode('utf8')
                            if op_byte == '':
                                # Nothing new was read, so break before
                                # incrementing `seek`.
                                break
                            # `seek` tracks how many bytes have been consumed
                            # so far; advance it exactly once per byte read.
                            seek += 1
                            if op_byte == '\r':
                                break
                            proc_output += op_byte
                        if old_seek != seek:
                            old_seek = seek
                            self.message = proc_output.strip()
                            proc_output = proc_output.split(' ')
                            progress = [x for x in proc_output if '%' in x]
                            if len(progress):
                                self.set_progress(int(progress[0][:-1]))
                    except Exception as e:
                        # Catch IOError errno 9 (EBADF), which usually arises
                        # because an already-closed file object is used here,
                        # raising a Bad File Descriptor error. In that case
                        # break; the outer while loop will check whether
                        # rsync_proc.poll() is None and do the right thing.
                        if getattr(e, 'errno', None) == errno.EBADF:
                            break
                        logger.debug("Parsing error in rsync task: {0}".format(
                            str(e)))
            except Exception as e:
                flock(lockfile, LOCK_UN)
                self.message = 'Rsync Task Failed'
                raise TaskException(
                    errno.EIO,
                    'Rsync Task failed because of Error: {0}'.format(str(e)))
            if rsync_proc.returncode != 0:
                self.message = 'Rsync Task Failed'
                raise TaskException(
                    errno.EIO,
                    'Rsync Task returned with non-zero returncode. Error: {0}'.format(
                        rsync_proc.stderr.read().decode('utf8', errors='replace')))
            # Finally, unlock the lockfile; LOCK_UN does not fail even if
            # we never acquired the lock in the first place.
            flock(lockfile, LOCK_UN)
            self.message = 'Rsync Task Successfully Completed'
            self.set_progress(100)
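
Below is a minimal standalone sketch of the seekable-stdout technique described in the comment above, assuming only a POSIX shell where printf is available; the command and all names are illustrative. For brevity it reads after wait() instead of polling incrementally like the task above.

import subprocess
from tempfile import TemporaryFile

# Hypothetical command that rewrites a status line in place with '\r',
# the way rsync's --info=progress2 output does.
cmd = "printf 'copied  10%%\\rcopied  55%%\\rcopied 100%%\\n'"

with TemporaryFile(mode='w+b', buffering=0) as proc_stdout:
    proc = subprocess.Popen(cmd, stdout=proc_stdout.fileno(), shell=True)
    proc.wait()
    # Unlike subprocess.PIPE, the temp file can be seeked and re-read, so
    # the '\r'-terminated in-place updates are all recoverable.
    proc_stdout.seek(0)
    for update in proc_stdout.read().decode('utf8').split('\r'):
        print(update.strip())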