Example #1
class gather(Stream):
    def __init__(self, child, limit=10, client=None):
        self.client = client or default_client()
        self.queue = Queue(maxsize=limit)
        self.condition = Condition()

        Stream.__init__(self, child)

        self.client.loop.add_callback(self.cb)

    def update(self, x, who=None):
        # Enqueue the element; the returned future provides backpressure
        # when the queue is full
        return self.queue.put(x)

    @gen.coroutine
    def cb(self):
        while True:
            x = yield self.queue.get()
            L = [x]
            while not self.queue.empty():
                L.append(self.queue.get_nowait())
            results = yield self.client._gather(L)
            for x in results:
                yield self.emit(x)
            if self.queue.empty():
                self.condition.notify_all()

    @gen.coroutine
    def flush(self):
        while not self.queue.empty():
            yield self.condition.wait()
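A minimal, self-contained sketch of the pattern gather relies on: a background coroutine drains a queue and notifies a Condition once it is empty, which is what flush() waits for. The Stream/Dask client machinery above is assumed and omitted here.

from tornado import gen, ioloop
from tornado.locks import Condition
from tornado.queues import Queue

queue = Queue(maxsize=10)
condition = Condition()

@gen.coroutine
def drain():
    while True:
        item = yield queue.get()
        # ... process item (gather would resolve futures here) ...
        if queue.empty():
            condition.notify_all()  # wake any flush() waiters

@gen.coroutine
def main():
    ioloop.IOLoop.current().add_callback(drain)
    for i in range(3):
        yield queue.put(i)
    while not queue.empty():   # the flush() idiom from the example
        yield condition.wait()

ioloop.IOLoop.current().run_sync(main)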
Example #2
class StreamingMJPEGOutput(StreamingOutput):

    name = "mjpeg"

    def __init__(self):
        self.frame = None
        self.buffer = BytesIO()
        self.condition = Condition()

    def write(self, buf):
        app_log.debug(f"Received {len(buf)} bytes of data")
        if buf.startswith(b'\xff\xd8'):
            # New frame, copy the existing buffer's content and notify all
            # clients it's available
            self.buffer.truncate()
            with self.condition:
                self.frame = self.buffer.getvalue()
                self.condition.notify_all()
            self.buffer.seek(0)
        return self.buffer.write(buf)

    def flush(self):
        app_log.debug("Received flush call")
        self.buffer.truncate()

        with self.condition:
            self.frame = self.buffer.getvalue()
            self.condition.notify_all()

        self.buffer.seek(0)
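Unlike the Tornado locks used elsewhere on this page, the Condition here is evidently threading.Condition (it is entered as a context manager), since write() is called from a camera encoder thread. A hypothetical consumer loop, assuming a handler object with a write() method:

def stream_frames(output, handler):
    # Serve an endless multipart/x-mixed-replace MJPEG stream
    while True:
        with output.condition:
            output.condition.wait()      # lock released while blocked
            frame = output.frame         # snapshot taken under the lock
        handler.write(b'--FRAME\r\n')
        handler.write(b'Content-Type: image/jpeg\r\n\r\n')
        handler.write(frame + b'\r\n')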
Example #3
class FlowControlWindow(object):

    __slots__ = ['condition', 'value']

    def __init__(self, initial_value=DEFAULT_WINDOW_SIZE):
        self.condition = Condition()
        self.value = initial_value

    @gen.coroutine
    def available(self, timeout=None):
        if self.value > 0:
            raise gen.Return(self.value)

        yield self.condition.wait(timeout=timeout)
        raise gen.Return(self.value)

    def consume(self, n):
        """Tries to consume n from value"""
        consumed = min(self.value, n)
        self.value -= consumed
        return consumed

    def produce(self, n):
        self.value += n
        self.condition.notify_all()
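A sender built on this window might loop as follows. This is a sketch; produce() is assumed to be called elsewhere, e.g. when the peer acknowledges data.

from tornado import gen

@gen.coroutine
def send(window, payload):
    sent = 0
    while sent < len(payload):
        yield window.available()                 # parks while the window is 0
        n = window.consume(len(payload) - sent)
        # ... write payload[sent:sent + n] to the wire ...
        sent += n
    raise gen.Return(sent)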
Example #4
class ImageManager():
    def __init__(self):
        # Image data
        self._frame = None
        # Flow control
        self._condition = Condition()

    def timestamp(self, img):
        now = datetime.datetime.now()
        stamp.stamp(img, (10, 10), str(now), size=20)

        return img

    def update_frame(self, frame):
        self._frame = BytesIO(frame)
        self.ready = True

    @property
    def frame(self):
        return self._frame

    @property
    def ready(self):
        # Expose the Condition so callers can wait on it directly
        return self._condition

    @ready.setter
    def ready(self, cond):
        if cond is True:
            self._condition.notify_all()
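The ready property does double duty: the getter exposes the Condition so callers can wait on it, while the setter fires notify_all(). A consumer coroutine would presumably look like this:

from tornado import gen

@gen.coroutine
def next_frame(manager):
    yield manager.ready.wait()        # resumes after update_frame()
    raise gen.Return(manager.frame)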
Example #5
class zip(Stream):
    """ Combine streams together into a stream of tuples

    We emit a new tuple once all streams have produced a new element.

    See also
    --------
    combine_latest
    zip_latest
    """
    _graphviz_orientation = 270
    _graphviz_shape = 'triangle'

    def __init__(self, *upstreams, **kwargs):
        self.maxsize = kwargs.pop('maxsize', 10)
        self.condition = Condition()
        self.literals = [(i, val) for i, val in enumerate(upstreams)
                         if not isinstance(val, Stream)]

        self.buffers = {
            upstream: deque()
            for upstream in upstreams if isinstance(upstream, Stream)
        }

        upstreams2 = [
            upstream for upstream in upstreams if isinstance(upstream, Stream)
        ]

        Stream.__init__(self, upstreams=upstreams2, **kwargs)

    def pack_literals(self, tup):
        """ Fill buffers for literals whenever we empty them """
        inp = list(tup)[::-1]
        out = []
        for i, val in self.literals:
            while len(out) < i:
                out.append(inp.pop())
            out.append(val)

        while inp:
            out.append(inp.pop())

        return tuple(out)

    def update(self, x, who=None):
        L = self.buffers[who]  # get buffer for stream
        L.append(x)
        if len(L) == 1 and all(self.buffers.values()):
            tup = tuple(self.buffers[up][0] for up in self.upstreams)
            for buf in self.buffers.values():
                buf.popleft()
            self.condition.notify_all()
            if self.literals:
                tup = self.pack_literals(tup)
            return self._emit(tup)
        elif len(L) > self.maxsize:
            return self.condition.wait()
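Returning condition.wait() from update() is the backpressure hook: the upstream yields whatever update() returns, so a producer that gets more than maxsize elements ahead is suspended until a tuple is emitted and notify_all() runs. Usage, assuming the full streamz Stream API (emit, sink):

source_a = Stream()
source_b = Stream()
zipped = zip(source_a, source_b, 99)   # 99 is a literal, injected into each tuple
zipped.sink(print)

source_a.emit(1)
source_b.emit(2)                       # prints (1, 2, 99)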
Example #6
class Window(object):
    def __init__(self, parent, stream_id, initial_window_size):
        self.parent = parent
        self.stream_id = stream_id
        self.cond = Condition()
        self.closed = False
        self.size = initial_window_size

    def close(self):
        self.closed = True
        self.cond.notify_all()

    def _raise_error(self, code, message):
        if self.parent is None:
            raise ConnectionError(code, message)
        else:
            raise StreamError(self.stream_id, code)

    def adjust(self, amount):
        self.size += amount
        if self.size > constants.MAX_WINDOW_SIZE:
            self._raise_error(constants.ErrorCode.FLOW_CONTROL_ERROR,
                              "flow control window too large")
        self.cond.notify_all()

    def apply_window_update(self, frame):
        try:
            window_update, = struct.unpack('>I', frame.data)
        except struct.error:
            raise ConnectionError(constants.ErrorCode.FRAME_SIZE_ERROR,
                                  "WINDOW_UPDATE incorrect size")
        # strip reserved bit
        window_update = window_update & 0x7fffffff
        if window_update == 0:
            self._raise_error(constants.ErrorCode.PROTOCOL_ERROR,
                              "window update must not be zero")
        self.adjust(window_update)

    @gen.coroutine
    def consume(self, amount):
        while not self.closed and self.size <= 0:
            yield self.cond.wait()
        if self.closed:
            raise StreamClosedError()
        if self.size < amount:
            amount = self.size
        if self.parent is not None:
            amount = yield self.parent.consume(amount)
        self.size -= amount
        raise gen.Return(amount)
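A writer built on this window chunks its payload through consume(), which parks while the window is exhausted and recursively charges the connection-level window via the parent. A sketch; framing and the actual socket write are elided:

from tornado import gen

@gen.coroutine
def write_data(window, data):
    offset = 0
    while offset < len(data):
        allowed = yield window.consume(len(data) - offset)
        chunk = data[offset:offset + allowed]
        # ... emit `chunk` as a DATA frame ...
        offset += allowed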
Example #7
class EventSource(object):

    def __init__(self):
        self.lock = Condition()
        self.events = None

    @tornado.gen.coroutine
    def publish(self, events):
        self.events = events
        self.lock.notify_all()

    @tornado.gen.coroutine
    def wait(self):
        yield self.lock.wait()
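Condition.wait() has no memory: a publish() that fires before a subscriber reaches wait() is simply missed, so subscribers must already be parked. A runnable sketch:

from tornado import gen, ioloop

source = EventSource()

@gen.coroutine
def subscriber(name):
    yield source.wait()
    print(name, "saw", source.events)

@gen.coroutine
def main():
    loop = ioloop.IOLoop.current()
    loop.add_callback(subscriber, "a")
    loop.add_callback(subscriber, "b")
    yield gen.moment                  # let both subscribers reach wait()
    yield source.publish(["event-1"])
    yield gen.moment                  # let the wakeups run before exiting

ioloop.IOLoop.current().run_sync(main)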
Example #8
class zip(Stream):
    """ Combine streams together into a stream of tuples

    We emit a new tuple once all streams have produced a new element.

    See also
    --------
    combine_latest
    zip_latest
    """
    _graphviz_orientation = 270
    _graphviz_shape = 'triangle'

    def __init__(self, *upstreams, **kwargs):
        self.maxsize = kwargs.pop('maxsize', 10)
        self.buffers = [deque() for _ in upstreams]
        self.condition = Condition()
        self.literals = [(i, val) for i, val in enumerate(upstreams)
                         if not isinstance(val, Stream)]
        self.pack_literals()

        self.buffers_by_stream = {
            upstream: buffer
            for upstream, buffer in builtins.zip(upstreams, self.buffers)
            if isinstance(upstream, Stream)
        }

        upstreams2 = [
            upstream for upstream in upstreams if isinstance(upstream, Stream)
        ]

        Stream.__init__(self, upstreams=upstreams2, **kwargs)

    def pack_literals(self):
        """ Fill buffers for literals whenver we empty them """
        for i, val in self.literals:
            self.buffers[i].append(val)

    def update(self, x, who=None):
        L = self.buffers_by_stream[who]  # get buffer for stream
        L.append(x)
        if len(L) == 1 and all(self.buffers):
            tup = tuple(buf.popleft() for buf in self.buffers)
            self.condition.notify_all()
            if self.literals:
                self.pack_literals()
            return self._emit(tup)
        elif len(L) > self.maxsize:
            return self.condition.wait()
Example #9
class CounterCondition(object):
    def __init__(self):
        self.condition = Condition()
        self.counter = 0

    def increment(self, value=1):
        self.counter += value
        self.condition.notify_all()

    @gen.coroutine
    def wait_until(self, value):
        while True:
            yield self.condition.wait()
            if self.counter >= value:
                self.counter -= value
                return
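wait_until() re-checks the counter after every wakeup, which is the standard guard against early or missed notifications. For example:

from tornado import gen, ioloop

counter = CounterCondition()

@gen.coroutine
def worker():
    for _ in range(5):
        yield gen.moment
        counter.increment()           # notifies after each unit of work

@gen.coroutine
def main():
    ioloop.IOLoop.current().add_callback(worker)
    yield counter.wait_until(5)       # resumes once five increments landed
    print("all five increments observed")

ioloop.IOLoop.current().run_sync(main)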
Example #10
class zip(Stream):
    def __init__(self, *children, **kwargs):
        self.maxsize = kwargs.pop('maxsize', 10)
        self.buffers = [deque() for _ in children]
        self.condition = Condition()
        Stream.__init__(self, children=children)

    def update(self, x, who=None):
        L = self.buffers[self.children.index(who)]
        L.append(x)
        if len(L) == 1 and all(self.buffers):
            tup = tuple(buf.popleft() for buf in self.buffers)
            self.condition.notify_all()
            return self.emit(tup)
        elif len(L) > self.maxsize:
            return self.condition.wait()
Example #11
class MessageBuffer(object):
    def __init__(self):
        self.cond = Condition()
        self.cache = []  # all messages added here
        self.cache_size = 200  # holds 200 newest msgs

    # return all messages since message with id=lastID
    def get_messages_since(self, lastID):
        results = []
        for msg in reversed(self.cache):
            if msg['id'] == lastID:
                break  # stop once you get here
            results.append(msg)
        results.reverse()
        return results

    # add msg to cache
    def add_message(self, message):
        self.cache.append(message)  # add msg
        if len(self.cache) > self.cache_size:  # change out old msgs
            self.cache = self.cache[-self.cache_size:]
        self.cond.notify_all()  # notify all coroutines aka all connected users
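A long-polling handler pairs get_messages_since() with the condition: if nothing is newer than the client's last id, it parks until add_message() calls notify_all(). A sketch:

from tornado import gen

@gen.coroutine
def poll(buffer, last_id):
    messages = buffer.get_messages_since(last_id)
    while not messages:
        yield buffer.cond.wait()
        messages = buffer.get_messages_since(last_id)
    raise gen.Return(messages)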
Example #12
class WebUpdater:
    def __init__(self, umgr, config):
        self.umgr = umgr
        self.server = umgr.server
        self.notify_update_response = umgr.notify_update_response
        self.repo = config.get('repo').strip().strip("/")
        self.name = self.repo.split("/")[-1]
        if hasattr(config, "get_name"):
            self.name = config.get_name().split()[-1]
        self.path = os.path.realpath(os.path.expanduser(config.get("path")))
        self.version = self.remote_version = self.dl_url = "?"
        self.etag = None
        self.init_evt = Event()
        self.refresh_condition = None
        self._get_local_version()
        logging.info(f"\nInitializing Client Updater: '{self.name}',"
                     f"\nversion: {self.version}"
                     f"\npath: {self.path}")

    def _get_local_version(self):
        version_path = os.path.join(self.path, ".version")
        if os.path.isfile(version_path):
            with open(version_path, "r") as f:
                v = f.read()
            self.version = v.strip()

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            self._get_local_version()
            await self._get_remote_version()
        except Exception:
            logging.exception("Error Refreshing Client")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def _get_remote_version(self):
        # Remote state
        url = f"https://api.github.com/repos/{self.repo}/releases/latest"
        try:
            result = await self.umgr.github_api_request(url, etag=self.etag)
        except Exception:
            logging.exception(f"Client {self.repo}: Github Request Error")
            result = {}
        if result is None:
            # No change, update not necessary
            return
        self.etag = result.get('etag', None)
        self.remote_version = result.get('name', "?")
        release_assets = result.get('assets', [{}])[0]
        self.dl_url = release_assets.get('browser_download_url', "?")
        logging.info(f"Github client Info Received:\nRepo: {self.name}\n"
                     f"Local Version: {self.version}\n"
                     f"Remote Version: {self.remote_version}\n"
                     f"url: {self.dl_url}")

    async def update(self, *args):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            # wait for refresh if in progress
            await self.refresh_condition.wait()
        if self.remote_version == "?":
            await self.refresh()
            if self.remote_version == "?":
                raise self.server.error(
                    f"Client {self.repo}: Unable to locate update")
        if self.dl_url == "?":
            raise self.server.error(
                f"Client {self.repo}: Invalid download url")
        if self.version == self.remote_version:
            # Already up to date
            return
        if os.path.isdir(self.path):
            shutil.rmtree(self.path)
        os.mkdir(self.path)
        self.notify_update_response(f"Downloading Client: {self.name}")
        archive = await self.umgr.http_download_request(self.dl_url)
        with zipfile.ZipFile(io.BytesIO(archive)) as zf:
            zf.extractall(self.path)
        self.version = self.remote_version
        version_path = os.path.join(self.path, ".version")
        if not os.path.exists(version_path):
            with open(version_path, "w") as f:
                f.write(self.version)
        self.notify_update_response(f"Client Update Finished: {self.name}",
                                    is_complete=True)

    def get_update_status(self):
        return {
            'name': self.name,
            'version': self.version,
            'remote_version': self.remote_version
        }
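The refresh_condition dance above (create on first entry, wait on re-entry, notify and clear on exit) is a single-flight guard: concurrent callers piggyback on the refresh already in progress instead of starting another one. Distilled into a standalone helper; this is a sketch, not part of the classes on this page:

from tornado.locks import Condition

async def single_flight(holder, attr, work):
    # Run `work()` once; callers that arrive mid-flight wait for it
    cond = getattr(holder, attr)
    if cond is not None:
        await cond.wait()
        return
    setattr(holder, attr, Condition())
    try:
        await work()
    finally:
        getattr(holder, attr).notify_all()
        setattr(holder, attr, None)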
Example #13
class PackageUpdater:
    def __init__(self, umgr):
        self.server = umgr.server
        self.execute_cmd = umgr.execute_cmd
        self.execute_cmd_with_response = umgr.execute_cmd_with_response
        self.notify_update_response = umgr.notify_update_response
        self.available_packages = []
        self.init_evt = Event()
        self.refresh_condition = None

    async def refresh(self, fetch_packages=True):
        # TODO: Use python-apt python lib rather than command line for updates
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            if fetch_packages:
                await self.execute_cmd(f"{APT_CMD} update",
                                       timeout=300.,
                                       retries=3)
            res = await self.execute_cmd_with_response("apt list --upgradable",
                                                       timeout=60.)
            pkg_list = [p.strip() for p in res.split("\n") if p.strip()]
            if pkg_list:
                pkg_list = pkg_list[2:]
                self.available_packages = [
                    p.split("/", maxsplit=1)[0] for p in pkg_list
                ]
            pkg_list = "\n".join(self.available_packages)
            logging.info(
                f"Detected {len(self.available_packages)} package updates:"
                f"\n{pkg_list}")
        except Exception:
            logging.exception("Error Refreshing System Packages")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def update(self, *args):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            await self.refresh_condition.wait()
        self.notify_update_response("Updating packages...")
        try:
            await self.execute_cmd(f"{APT_CMD} update",
                                   timeout=300.,
                                   notify=True)
            await self.execute_cmd(f"{APT_CMD} upgrade --yes",
                                   timeout=3600.,
                                   notify=True)
        except Exception:
            raise self.server.error("Error updating system packages")
        self.available_packages = []
        self.notify_update_response("Package update finished...",
                                    is_complete=True)

    def get_update_status(self):
        return {
            'package_count': len(self.available_packages),
            'package_list': self.available_packages
        }
Example #14
class GitUpdater:
    def __init__(self, umgr, config, path=None, env=None):
        self.server = umgr.server
        self.execute_cmd = umgr.execute_cmd
        self.execute_cmd_with_response = umgr.execute_cmd_with_response
        self.notify_update_response = umgr.notify_update_response
        self.name = config.get_name().split()[-1]
        self.repo_path = path
        if path is None:
            self.repo_path = config.get('path')
        self.env = config.get("env", env)
        dist_packages = None
        if self.env is not None:
            self.env = os.path.expanduser(self.env)
            dist_packages = config.get('python_dist_packages', None)
            self.python_reqs = os.path.join(self.repo_path,
                                            config.get("requirements"))
        self.origin = config.get("origin").lower()
        self.install_script = config.get('install_script', None)
        if self.install_script is not None:
            self.install_script = os.path.abspath(
                os.path.join(self.repo_path, self.install_script))
        self.venv_args = config.get('venv_args', None)
        self.python_dist_packages = None
        self.python_dist_path = None
        self.env_package_path = None
        if dist_packages is not None:
            self.python_dist_packages = [
                p.strip() for p in dist_packages.split('\n') if p.strip()
            ]
            self.python_dist_path = os.path.abspath(
                config.get('python_dist_path'))
            if not os.path.exists(self.python_dist_path):
                raise config.error(
                    "Invalid path for option 'python_dist_path'")
            self.env_package_path = os.path.abspath(
                os.path.join(os.path.dirname(self.env), "..",
                             config.get('env_package_path')))
        for opt in [
                "repo_path", "env", "python_reqs", "install_script",
                "python_dist_path", "env_package_path"
        ]:
            val = getattr(self, opt, None)
            if val is None:
                continue
            if not os.path.exists(val):
                raise config.error("Invalid path for option '%s': %s" %
                                   (opt, val))

        self.version = self.cur_hash = "?"
        self.remote_version = self.remote_hash = "?"
        self.init_evt = Event()
        self.refresh_condition = None
        self.debug = umgr.repo_debug
        self.remote = "origin"
        self.branch = "master"
        self.is_valid = self.is_dirty = self.detached = False

    def _get_version_info(self):
        ver_path = os.path.join(self.repo_path, "scripts/version.txt")
        vinfo = {}
        if os.path.isfile(ver_path):
            data = ""
            with open(ver_path, 'r') as f:
                data = f.read()
            try:
                entries = [e.strip() for e in data.split('\n') if e.strip()]
                vinfo = dict([i.split('=') for i in entries])
                vinfo = {
                    k: tuple(re.findall(r"\d+", v))
                    for k, v in vinfo.items()
                }
            except Exception:
                pass
            else:
                self._log_info(f"Version Info Found: {vinfo}")
        vinfo['version'] = tuple(re.findall(r"\d+", self.version))
        return vinfo

    def _log_exc(self, msg, traceback=True):
        log_msg = f"Repo {self.name}: {msg}"
        if traceback:
            logging.exception(log_msg)
        else:
            logging.info(log_msg)
        return self.server.error(msg)

    def _log_info(self, msg):
        log_msg = f"Repo {self.name}: {msg}"
        logging.info(log_msg)

    def _notify_status(self, msg, is_complete=False):
        log_msg = f"Repo {self.name}: {msg}"
        logging.debug(log_msg)
        self.notify_update_response(log_msg, is_complete)

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            await self._check_version()
        except Exception:
            logging.exception("Error Refreshing git state")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def _check_version(self, need_fetch=True):
        self.is_valid = self.detached = False
        self.cur_hash = self.branch = self.remote = "?"
        self.version = self.remote_version = "?"
        try:
            blist = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} branch --list")
            if blist.startswith("fatal:"):
                self._log_info(f"Invalid git repo at path '{self.repo_path}'")
                return
            branch = None
            for b in blist.split("\n"):
                b = b.strip()
                if b[0] == "*":
                    branch = b[2:]
                    break
            if branch is None:
                self._log_info(
                    "Unable to retreive current branch from branch list\n"
                    f"{blist}")
                return
            if "HEAD detached" in branch:
                bparts = branch.split()[-1].strip("()")
                self.remote, self.branch = bparts.split("/")
                self.detached = True
            else:
                self.branch = branch.strip()
                self.remote = await self.execute_cmd_with_response(
                    f"git -C {self.repo_path} config --get"
                    f" branch.{self.branch}.remote")
            if need_fetch:
                await self.execute_cmd(
                    f"git -C {self.repo_path} fetch {self.remote} --prune -q",
                    retries=3)
            remote_url = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} remote get-url {self.remote}")
            cur_hash = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} rev-parse HEAD")
            remote_hash = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} rev-parse "
                f"{self.remote}/{self.branch}")
            repo_version = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} describe --always "
                "--tags --long --dirty")
            remote_version = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} describe {self.remote}/{self.branch}"
                " --always --tags --long")
        except Exception:
            self._log_exc("Error retreiving git info")
            return

        self.is_dirty = repo_version.endswith("dirty")
        versions = []
        for ver in [repo_version, remote_version]:
            tag_version = "?"
            ver_match = re.match(r"v\d+\.\d+\.\d+-\d+", ver)
            if ver_match:
                tag_version = ver_match.group()
            versions.append(tag_version)
        self.version, self.remote_version = versions
        self.cur_hash = cur_hash.strip()
        self.remote_hash = remote_hash.strip()
        self._log_info(
            f"Repo Detected:\nPath: {self.repo_path}\nRemote: {self.remote}\n"
            f"Branch: {self.branch}\nRemote URL: {remote_url}\n"
            f"Current SHA: {self.cur_hash}\n"
            f"Remote SHA: {self.remote_hash}\nVersion: {self.version}\n"
            f"Remote Version: {self.remote_version}\n"
            f"Is Dirty: {self.is_dirty}\nIs Detached: {self.detached}")
        if self.debug:
            self.is_valid = True
            self._log_info("Debug enabled, bypassing official repo check")
        elif self.branch == "master" and self.remote == "origin":
            if self.detached:
                self._log_info("Detached HEAD detected, repo invalid")
                return
            remote_url = remote_url.lower()
            if remote_url[-4:] != ".git":
                remote_url += ".git"
            if remote_url == self.origin:
                self.is_valid = True
                self._log_info("Validity check for git repo passed")
            else:
                self._log_info(f"Invalid git origin url '{remote_url}'")
        else:
            self._log_info("Git repo not on offical remote/branch: "
                           f"{self.remote}/{self.branch}")

    async def update(self, update_deps=False):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            await self.refresh_condition.wait()
        if not self.is_valid:
            raise self._log_exc("Update aborted, repo is not valid", False)
        if self.is_dirty:
            raise self._log_exc("Update aborted, repo is has been modified",
                                False)
        if self.remote_hash == self.cur_hash:
            # No need to update
            return
        self._notify_status("Updating Repo...")
        try:
            if self.detached:
                await self.execute_cmd(
                    f"git -C {self.repo_path} fetch {self.remote} -q",
                    retries=3)
                await self.execute_cmd(f"git -C {self.repo_path} checkout"
                                       f" {self.remote}/{self.branch} -q")
            else:
                await self.execute_cmd(f"git -C {self.repo_path} pull -q",
                                       retries=3)
        except Exception:
            raise self._log_exc("Error running 'git pull'")
        # Check Semantic Versions
        vinfo = self._get_version_info()
        cur_version = vinfo.get('version', ())
        update_deps |= cur_version < vinfo.get('deps_version', ())
        need_env_rebuild = cur_version < vinfo.get('env_version', ())
        if update_deps:
            await self._install_packages()
            await self._update_virtualenv(need_env_rebuild)
        elif need_env_rebuild:
            await self._update_virtualenv(True)
        # Refresh local repo state
        await self._check_version(need_fetch=False)
        if self.name == "moonraker":
            # Launch restart async so the request can return
            # before the server restarts
            self._notify_status("Update Finished...", is_complete=True)
            IOLoop.current().call_later(.1, self.restart_service)
        else:
            await self.restart_service()
            self._notify_status("Update Finished...", is_complete=True)

    async def _install_packages(self):
        if self.install_script is None:
            return
        # Open the install script and read it
        inst_path = self.install_script
        if not os.path.isfile(inst_path):
            self._log_info(f"Unable to open install script: {inst_path}")
            return
        with open(inst_path, 'r') as f:
            data = f.read()
        packages = re.findall(r'PKGLIST="(.*)"', data)
        packages = [p.lstrip("${PKGLIST}").strip() for p in packages]
        if not packages:
            self._log_info(f"No packages found in script: {inst_path}")
            return
        # TODO: Log and notify that packages will be installed
        pkgs = " ".join(packages)
        logging.debug(f"Repo {self.name}: Detected Packages: {pkgs}")
        self._notify_status("Installing system dependencies...")
        # Install packages with apt-get
        try:
            await self.execute_cmd(f"{APT_CMD} update",
                                   timeout=300.,
                                   notify=True)
            await self.execute_cmd(f"{APT_CMD} install --yes {pkgs}",
                                   timeout=3600.,
                                   notify=True)
        except Exception:
            self._log_exc("Error updating packages via apt-get")
            return

    async def _update_virtualenv(self, rebuild_env=False):
        if self.env is None:
            return
        # Update python dependencies
        bin_dir = os.path.dirname(self.env)
        env_path = os.path.normpath(os.path.join(bin_dir, ".."))
        if rebuild_env:
            self._notify_status(f"Creating virtualenv at: {env_path}...")
            if os.path.exists(env_path):
                shutil.rmtree(env_path)
            try:
                await self.execute_cmd(
                    f"virtualenv {self.venv_args} {env_path}", timeout=300.)
            except Exception:
                self._log_exc(f"Error creating virtualenv")
                return
            if not os.path.exists(self.env):
                raise self._log_exc("Failed to create new virtualenv", False)
        reqs = self.python_reqs
        if not os.path.isfile(reqs):
            self._log_exc(f"Invalid path to requirements_file '{reqs}'")
            return
        pip = os.path.join(bin_dir, "pip")
        self._notify_status("Updating python packages...")
        try:
            await self.execute_cmd(f"{pip} install -r {reqs}",
                                   timeout=1200.,
                                   notify=True,
                                   retries=3)
        except Exception:
            self._log_exc("Error updating python requirements")
        self._install_python_dist_requirements()

    def _install_python_dist_requirements(self):
        dist_reqs = self.python_dist_packages
        if dist_reqs is None:
            return
        dist_path = self.python_dist_path
        site_path = self.env_package_path
        for pkg in dist_reqs:
            for f in os.listdir(dist_path):
                if f.startswith(pkg):
                    src = os.path.join(dist_path, f)
                    dest = os.path.join(site_path, f)
                    self._notify_status(f"Linking to dist package: {pkg}")
                    if os.path.islink(dest):
                        os.remove(dest)
                    elif os.path.exists(dest):
                        self._notify_status(
                            f"Error symlinking dist package: {pkg}, "
                            f"file already exists: {dest}")
                        continue
                    os.symlink(src, dest)
                    break

    async def restart_service(self):
        self._notify_status("Restarting Service...")
        try:
            await self.execute_cmd(f"sudo systemctl restart {self.name}")
        except Exception:
            raise self._log_exc("Error restarting service")

    def get_update_status(self):
        return {
            'remote_alias': self.remote,
            'branch': self.branch,
            'version': self.version,
            'remote_version': self.remote_version,
            'current_hash': self.cur_hash,
            'remote_hash': self.remote_hash,
            'is_dirty': self.is_dirty,
            'is_valid': self.is_valid,
            'detached': self.detached,
            'debug_enabled': self.debug
        }
Example #15
class GitRepo:
    def __init__(self, cmd_helper, git_path, alias):
        self.server = cmd_helper.get_server()
        self.cmd_helper = cmd_helper
        self.alias = alias
        self.git_path = git_path
        self.git_cmd = f"git -C {git_path}"
        self.valid_git_repo = False
        self.git_owner = "?"
        self.git_remote = "?"
        self.git_branch = "?"
        self.current_version = "?"
        self.upstream_version = "?"
        self.current_commit = "?"
        self.upstream_commit = "?"
        self.upstream_url = "?"
        self.branches = []
        self.dirty = False
        self.head_detached = False

        self.init_condition = None
        self.git_operation_lock = Lock()
        self.fetch_timeout_handle = None
        self.fetch_input_recd = False

    async def initialize(self, need_fetch=True):
        if self.init_condition is not None:
            # No need to initialize multiple requests
            await self.init_condition.wait()
            return
        self.init_condition = Condition()
        try:
            await self.update_repo_status()
            self._verify_repo()
            if not self.head_detached:
                # lookup remote via git config
                self.git_remote = await self.get_config_item(
                    f"branch.{self.git_branch}.remote")

            # Populate list of current branches
            blist = await self.list_branches()
            self.branches = []
            for branch in blist:
                branch = branch.strip()
                if branch[0] == "*":
                    branch = branch[2:]
                if branch[0] == "(":
                    continue
                self.branches.append(branch)

            if need_fetch:
                await self.fetch()

            self.upstream_url = await self.remote("get-url")
            self.current_commit = await self.rev_parse("HEAD")
            self.upstream_commit = await self.rev_parse(
                f"{self.git_remote}/{self.git_branch}")
            current_version = await self.describe(
                "--always --tags --long --dirty")
            upstream_version = await self.describe(
                f"{self.git_remote}/{self.git_branch} "
                "--always --tags --long")

            # Parse GitHub Owner from URL
            owner_match = re.match(r"https?://[^/]+/([^/]+)",
                                   self.upstream_url)
            self.git_owner = "?"
            if owner_match is not None:
                self.git_owner = owner_match.group(1)
            self.dirty = current_version.endswith("dirty")

            # Parse Version Info
            versions = []
            for ver in [current_version, upstream_version]:
                tag_version = "?"
                ver_match = re.match(r"v\d+\.\d+\.\d+-\d+", ver)
                if ver_match:
                    tag_version = ver_match.group()
                versions.append(tag_version)
            self.current_version, self.upstream_version = versions
            self.log_repo_info()
        except Exception:
            logging.exception(f"Git Repo {self.alias}: Initialization failure")
            raise
        finally:
            self.init_condition.notify_all()
            self.init_condition = None

    async def wait_for_init(self):
        if self.init_condition is not None:
            await self.init_condition.wait()

    async def update_repo_status(self):
        async with self.git_operation_lock:
            if not os.path.isdir(os.path.join(self.git_path, ".git")):
                logging.info(f"Git Repo {self.alias}: path '{self.git_path}'"
                             " is not a valid git repo")
                return False
            try:
                resp = await self.cmd_helper.run_cmd_with_response(
                    f"{self.git_cmd} status -u no")
            except Exception:
                self.valid_git_repo = False
                return False
            resp = resp.strip().split('\n', 1)[0]
            if resp.startswith("fatal:"):
                # Invalid repo
                self.valid_git_repo = False
                return False
            self.head_detached = resp.startswith("HEAD detached")
            branch_info = resp.split()[-1]
            if self.head_detached:
                bparts = branch_info.split("/", 1)
                if len(bparts) == 2:
                    self.git_remote, self.git_branch = bparts
                else:
                    if self.git_remote == "?":
                        msg = "Resolve by manually checking out" \
                            " a branch via SSH."
                    else:
                        msg = "Defaulting to previously tracked " \
                            f"{self.git_remote}/{self.git_branch}."
                    logging.info(
                        f"Git Repo {self.alias}: HEAD detached on untracked "
                        f"commit {branch_info}. {msg}")
            else:
                self.git_branch = branch_info
            self.valid_git_repo = True
            return True

    def log_repo_info(self):
        logging.info(f"Git Repo {self.alias} Detected:\n"
                     f"Owner: {self.git_owner}\n"
                     f"Path: {self.git_path}\n"
                     f"Remote: {self.git_remote}\n"
                     f"Branch: {self.git_branch}\n"
                     f"Remote URL: {self.upstream_url}\n"
                     f"Current Commit SHA: {self.current_commit}\n"
                     f"Upstream Commit SHA: {self.upstream_commit}\n"
                     f"Current Version: {self.current_version}\n"
                     f"Upstream Version: {self.upstream_version}\n"
                     f"Is Dirty: {self.dirty}\n"
                     f"Is Detached: {self.head_detached}")

    def report_invalids(self, valid_origin):
        invalids = []
        upstream_url = self.upstream_url.lower()
        if upstream_url[-4:] != ".git":
            upstream_url += ".git"
        if upstream_url != valid_origin:
            invalids.append(f"Unofficial remote url: {self.upstream_url}")
        if self.git_branch != "master" or self.git_remote != "origin":
            invalids.append("Repo not on default remote branch: "
                            f"{self.git_remote}/{self.git_branch}")
        if self.head_detached:
            invalids.append("Detached HEAD detected")
        return invalids

    def _verify_repo(self, check_remote=False):
        if not self.valid_git_repo:
            raise self.server.error(
                f"Git Repo {self.alias}: repo not initialized")
        if check_remote:
            if self.git_remote == "?":
                raise self.server.error(
                    f"Git Repo {self.alias}: No valid git remote detected")

    async def fetch(self):
        self._verify_repo(check_remote=True)
        async with self.git_operation_lock:
            await self._do_fetch_pull(
                f"{self.git_cmd} fetch {self.git_remote}")

    async def pull(self):
        self._verify_repo()
        if self.head_detached:
            raise self.server.error(
                f"Git Repo {self.alias}: Cannot perform pull on a "
                "detached HEAD")
        async with self.git_operation_lock:
            await self._do_fetch_pull(f"{self.git_cmd} pull")

    async def list_branches(self):
        self._verify_repo()
        async with self.git_operation_lock:
            resp = await self.cmd_helper.run_cmd_with_response(
                f"{self.git_cmd} branch --list")
            return resp.strip().split("\n")

    async def remote(self, command):
        self._verify_repo(check_remote=True)
        async with self.git_operation_lock:
            resp = await self.cmd_helper.run_cmd_with_response(
                f"{self.git_cmd} remote {command} {self.git_remote}")
            return resp.strip()

    async def prune(self):
        self._verify_repo(check_remote=True)
        async with self.git_operation_lock:
            await self.cmd_helper.run_cmd(
                f"{self.git_cmd} remote prune {self.git_remote}", timeout=30.)

    async def describe(self, args=""):
        self._verify_repo()
        async with self.git_operation_lock:
            resp = await self.cmd_helper.run_cmd_with_response(
                f"{self.git_cmd} describe {args}".strip())
            return resp.strip()

    async def rev_parse(self, args=""):
        self._verify_repo()
        async with self.git_operation_lock:
            resp = await self.cmd_helper.run_cmd_with_response(
                f"{self.git_cmd} rev-parse {args}".strip())
            return resp.strip()

    async def get_config_item(self, item):
        self._verify_repo()
        async with self.git_operation_lock:
            resp = await self.cmd_helper.run_cmd_with_response(
                f"{self.git_cmd} config --get {item}")
            return resp.strip()

    async def checkout(self, branch=None):
        self._verify_repo()
        async with self.git_operation_lock:
            branch = branch or f"{self.git_remote}/{self.git_branch}"
            await self.cmd_helper.run_cmd_with_response(
                f"{self.git_cmd} checkout {branch} -q")

    def get_repo_status(self):
        return {
            'remote_alias': self.git_remote,
            'branch': self.git_branch,
            'owner': self.git_owner,
            'version': self.current_version,
            'remote_version': self.upstream_version,
            'current_hash': self.current_commit,
            'remote_hash': self.upstream_commit,
            'is_dirty': self.dirty,
            'detached': self.head_detached
        }

    def get_version(self, upstream=False):
        version = self.upstream_version if upstream else self.current_version
        return tuple(re.findall(r"\d+", version))

    def is_detached(self):
        return self.head_detached

    def is_dirty(self):
        return self.dirty

    def is_current(self):
        return self.current_commit == self.upstream_commit

    async def _do_fetch_pull(self, cmd, retries=5):
        # Fetch and pull require special handling.  If the request
        # gets delayed we do not want to terminate it while the command
        # is processing.
        env = os.environ.copy()
        env.update(GIT_FETCH_ENV_VARS)
        scmd = self.cmd_helper.build_shell_command(
            cmd, std_err_callback=self._handle_process_output, env=env)
        while retries:
            ioloop = IOLoop.current()
            self.fetch_input_recd = False
            self.fetch_timeout_handle = ioloop.call_later(
                GIT_FETCH_TIMEOUT, self._check_process_active, scmd)
            try:
                await scmd.run(timeout=0)
            except Exception:
                pass
            ioloop.remove_timeout(self.fetch_timeout_handle)
            ret = scmd.get_return_code()
            if ret == 0:
                return
            retries -= 1
        raise self.server.error(f"Git Command '{cmd}' failed")

    def _handle_process_output(self, output):
        self.fetch_input_recd = True
        logging.debug(f"Git Repo {self.alias}: Fetch/Pull Response\n"
                      f"{output.decode()}")

    async def _check_process_active(self, scmd):
        ret = scmd.get_return_code()
        if ret is not None:
            logging.debug(f"Git Repo {self.alias}: Fetch/Pull returned")
            return
        if self.fetch_input_recd:
            # Received some input, reschedule timeout
            logging.debug(
                f"Git Repo {self.alias}: Fetch/Pull active, rescheduling")
            ioloop = IOLoop.current()
            self.fetch_input_recd = False
            self.fetch_timeout_handle = ioloop.call_later(
                GIT_FETCH_TIMEOUT, self._check_process_active, scmd)
        else:
            # Request has timed out with no input, terminate it
            logging.debug(f"Git Repo {self.alias}: Fetch/Pull timed out")
            await scmd.cancel()
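_do_fetch_pull's timeout handling is an inactivity watchdog rather than a hard deadline: output from git re-arms the timer, while prolonged silence cancels the command. The same idea in isolation, as a sketch:

from tornado.ioloop import IOLoop

class InactivityWatchdog:
    # Cancel a long-running job only if it stays silent for `timeout`
    # seconds; any output observed via feed() re-arms the timer
    def __init__(self, timeout, cancel_cb):
        self.timeout = timeout
        self.cancel_cb = cancel_cb
        self.saw_input = False
        self.handle = None

    def start(self):
        self.handle = IOLoop.current().call_later(self.timeout, self._check)

    def feed(self):                   # call from the output callback
        self.saw_input = True

    def stop(self):
        if self.handle is not None:
            IOLoop.current().remove_timeout(self.handle)
            self.handle = None

    def _check(self):
        if self.saw_input:
            self.saw_input = False
            self.start()              # activity: re-arm
        else:
            self.cancel_cb()          # silence: give up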
Example #16
class WebUpdater:
    def __init__(self, config, cmd_helper):
        self.server = cmd_helper.get_server()
        self.cmd_helper = cmd_helper
        self.repo = config.get('repo').strip().strip("/")
        self.owner, self.name = self.repo.split("/", 1)
        if hasattr(config, "get_name"):
            self.name = config.get_name().split()[-1]
        self.path = os.path.realpath(os.path.expanduser(config.get("path")))
        self.persistent_files = []
        pfiles = config.get('persistent_files', None)
        if pfiles is not None:
            self.persistent_files = [
                pf.strip().strip("/") for pf in pfiles.split("\n")
                if pf.strip()
            ]
            if ".version" in self.persistent_files:
                raise config.error(
                    "Invalid value for option 'persistent_files': "
                    "'.version' can not be persistent")

        self.version = self.remote_version = self.dl_url = "?"
        self.etag = None
        self.init_evt = Event()
        self.refresh_condition = None
        self._get_local_version()
        logging.info(f"\nInitializing Client Updater: '{self.name}',"
                     f"\nversion: {self.version}"
                     f"\npath: {self.path}")

    def _get_local_version(self):
        version_path = os.path.join(self.path, ".version")
        if os.path.isfile(version_path):
            with open(version_path, "r") as f:
                v = f.read()
            self.version = v.strip()

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            self._get_local_version()
            await self._get_remote_version()
        except Exception:
            logging.exception("Error Refreshing Client")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def _get_remote_version(self):
        # Remote state
        url = f"https://api.github.com/repos/{self.repo}/releases/latest"
        try:
            result = await self.cmd_helper.github_api_request(url,
                                                              etag=self.etag)
        except Exception:
            logging.exception(f"Client {self.repo}: Github Request Error")
            result = {}
        if result is None:
            # No change, update not necessary
            return
        self.etag = result.get('etag', None)
        self.remote_version = result.get('name', "?")
        release_assets = result.get('assets', [{}])[0]
        self.dl_url = release_assets.get('browser_download_url', "?")
        logging.info(f"Github client Info Received:\nRepo: {self.name}\n"
                     f"Local Version: {self.version}\n"
                     f"Remote Version: {self.remote_version}\n"
                     f"url: {self.dl_url}")

    async def update(self, *args):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            # wait for refresh if in progress
            await self.refresh_condition.wait()
        if self.remote_version == "?":
            await self.refresh()
            if self.remote_version == "?":
                raise self.server.error(
                    f"Client {self.repo}: Unable to locate update")
        if self.dl_url == "?":
            raise self.server.error(
                f"Client {self.repo}: Invalid download url")
        if self.version == self.remote_version:
            # Already up to date
            return
        self.cmd_helper.notify_update_response(
            f"Downloading Client: {self.name}")
        archive = await self.cmd_helper.http_download_request(self.dl_url)
        with tempfile.TemporaryDirectory(suffix=self.name,
                                         prefix="client") as tempdir:
            if os.path.isdir(self.path):
                # find and move persistent files
                for fname in os.listdir(self.path):
                    src_path = os.path.join(self.path, fname)
                    if fname in self.persistent_files:
                        dest_dir = os.path.dirname(os.path.join(
                            tempdir, fname))
                        os.makedirs(dest_dir, exist_ok=True)
                        shutil.move(src_path, dest_dir)
                shutil.rmtree(self.path)
            os.mkdir(self.path)
            with zipfile.ZipFile(io.BytesIO(archive)) as zf:
                zf.extractall(self.path)
            # Move persistent files back into the new install
            for fname in os.listdir(tempdir):
                src_path = os.path.join(tempdir, fname)
                dest_dir = os.path.dirname(os.path.join(self.path, fname))
                os.makedirs(dest_dir, exist_ok=True)
                shutil.move(src_path, dest_dir)
        self.version = self.remote_version
        version_path = os.path.join(self.path, ".version")
        if not os.path.exists(version_path):
            with open(version_path, "w") as f:
                f.write(self.version)
        self.cmd_helper.notify_update_response(
            f"Client Update Finished: {self.name}", is_complete=True)

    def get_update_status(self):
        return {
            'name': self.name,
            'owner': self.owner,
            'version': self.version,
            'remote_version': self.remote_version
        }
Example #17
class foreach_zip(Pipeline):

    _graphviz_orientation = 270
    _graphviz_shape = 'triangle'

    def __init__(self, *upstreams, **kwargs):
        self.maxsize = kwargs.pop('maxsize', 10)
        self.condition = Condition()
        self.literals = [(i, val) for i, val in enumerate(upstreams)
                         if not isinstance(val, Pipeline)]

        self.buffers = {
            upstream: deque()
            for upstream in upstreams if isinstance(upstream, Pipeline)
        }

        upstreams2 = [
            upstream for upstream in upstreams
            if isinstance(upstream, Pipeline)
        ]

        Pipeline.__init__(self, upstreams=upstreams2, **kwargs)
        _global_sinks.add(self)

    def _add_upstream(self, upstream):
        # Override method to handle setup of buffer for new stream
        self.buffers[upstream] = deque()
        super(foreach_zip, self)._add_upstream(upstream)

    def _remove_upstream(self, upstream):
        # Override method to handle removal of buffer for stream
        self.buffers.pop(upstream)
        super(foreach_zip, self)._remove_upstream(upstream)

    def pack_literals(self, tup):
        """ Fill buffers for literals whenever we empty them """
        inp = list(tup)[::-1]
        out = []
        for i, val in self.literals:
            while len(out) < i:
                out.append(inp.pop())
            out.append(val)

        while inp:
            out.append(inp.pop())

        return out

    def update(self, x, who=None):
        L = self.buffers[who]  # get buffer for stream
        L.append(x)
        if len(L) == 1 and all(self.buffers.values()):
            tup = tuple(self.buffers[up][0] for up in self.upstreams)
            for buf in self.buffers.values():
                buf.popleft()
            self.condition.notify_all()
            if self.literals:
                tup = self.pack_literals(tup)

            tup = tuple(zipping(*tup))
            return self._emit(tup)
        elif len(L) > self.maxsize:
            return self.condition.wait()
Example #18
class PeerGroup(object):
    """A PeerGroup represents a collection of Peers.

    Requests routed through a PeerGroup can be sent to either a specific peer
    or a peer chosen at random.
    """

    def __init__(self, tchannel, score_threshold=None):
        """Initializes a new PeerGroup.

        :param tchannel:
            TChannel used for communication by this PeerGroup
        :param score_threshold:
            A value in the ``[0, 1]`` range. If specified, this requires that
            chosen peers have a score higher than this value when performing
            requests.
        """
        self.tchannel = tchannel

        self._score_threshold = score_threshold

        # Dictionary from hostport to Peer.
        self._peers = {}

        # Notified when a reset is performed. This allows multiple coroutines
        # to block on the same reset.
        self._resetting = False

        # We'll create a Condition here later. We want to avoid it right now
        # because it has a side-effect of scheduling some dummy work on the
        # ioloop, which prevents us from forking (if you're into that).
        self._reset_condition = None

    def __str__(self):
        return "<PeerGroup peers=%s>" % str(self._peers)

    @gen.coroutine
    def clear(self):
        """Reset this PeerGroup.

        This closes all connections to all known peers and forgets about
        these peers.

        :returns:
            A Future that resolves with a value of None when the operation
            has finished
        """
        if self._resetting:
            # If someone else is already resetting the PeerGroup, just block
            # on them to be finished.
            yield self._reset_condition.wait()
            raise gen.Return(None)

        self._resetting = True
        if self._reset_condition is None:
            self._reset_condition = Condition()

        try:
            for peer in self._peers.values():
                peer.close()
        finally:
            self._peers = {}
            self._resetting = False
            self._reset_condition.notify_all()

    def get(self, hostport):
        """Get a Peer for the given destination.

        A new Peer is added and returned if one does not already exist for the
        given host-port. Otherwise, the existing Peer is returned.
        """
        assert hostport, "hostport is required"
        if hostport not in self._peers:
            self._peers[hostport] = Peer(self.tchannel, hostport)
        return self._peers[hostport]

    def lookup(self, hostport):
        """Look up a Peer for the given host and port.

        Returns None if a Peer for the given host-port does not exist.
        """
        assert hostport, "hostport is required"
        return self._peers.get(hostport, None)

    def remove(self, hostport):
        """Delete the Peer for the given host port.

        Does nothing if a matching Peer does not exist.

        :returns: The removed Peer
        """
        assert hostport, "hostport is required"
        return self._peers.pop(hostport, None)

    def add(self, peer):
        """Add an existing Peer to this group.

        A peer for the given host-port must not already exist in the group.
        """
        assert peer, "peer is required"

        if isinstance(peer, basestring):
            # Assume strings are host-ports
            peer = Peer(self.tchannel, peer)

        assert peer.hostport not in self._peers, "%s already has a peer" % peer.hostport

        self._peers[peer.hostport] = peer

    @property
    def hosts(self):
        """Get all host-ports managed by this PeerGroup."""
        return self._peers.keys()

    @property
    def peers(self):
        """Get all Peers managed by this PeerGroup."""
        return self._peers.values()

    def request(self, service, hostport=None, **kwargs):
        """Initiate a new request through this PeerGroup.

        :param hostport:
            If specified, requests will be sent to the specific host.
            Otherwise, a known peer will be picked at random.
        :param service:
            Name of the service being called. Defaults to an empty string.
        :param score_threshold:
            If ``hostport`` was not specified, this specifies the score
            threshold at or below which peers will be ignored.
        """
        return PeerClientOperation(peer_group=self,
                                   service=service,
                                   hostport=hostport,
                                   **kwargs)

    def choose(self, hostport=None, score_threshold=None, blacklist=None):
        """Choose a Peer that matches the given criteria.

        The Peer with the highest score will be chosen.

        :param hostport:
            Specifies that the returned Peer must be for the given host-port.
            Without this, all peers managed by this PeerGroup are
            candidates. If this is present, ``score_threshold`` is ignored.
        :param score_threshold:
            If specified, Peers with a score equal to or below this will be
            ignored. Defaults to the value specified when the PeerGroup was
            initialized.
        :param blacklist:
            Peers on the blacklist won't be chosen.
        :returns:
            A Peer that matches all the requested criteria or None if no such
            Peer was found.
        """

        blacklist = blacklist or set()
        if hostport:
            return self.get(hostport)

        score_threshold = score_threshold or self._score_threshold or 0
        chosen_peer = None
        chosen_score = 0
        hosts = self._peers.viewkeys() - blacklist

        for host in hosts:
            peer = self.get(host)
            score = peer.state.score()

            if score <= score_threshold:
                continue

            if score > chosen_score:
                chosen_peer = peer
                chosen_score = score

        return chosen_peer
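
A minimal usage sketch for the class above (not from the original source): ``my_channel``, the service name, and the host-ports are hypothetical, and ``TChannel``, ``Peer``, and ``PeerClientOperation`` are assumed to come from the surrounding tchannel codebase.

from tornado import gen

@gen.coroutine
def demo(my_channel):
    group = PeerGroup(my_channel, score_threshold=0.5)

    # add() accepts either a Peer instance or a "host:port" string.
    group.add("10.0.0.1:4040")
    group.add("10.0.0.2:4040")

    # choose() returns the highest-scoring peer above the threshold,
    # skipping blacklisted host-ports, or None if nothing qualifies.
    peer = group.choose(blacklist={"10.0.0.2:4040"})
    if peer is not None:
        operation = group.request(service="my-service",
                                  hostport=peer.hostport)

    # clear() is a coroutine; concurrent callers all block on the same reset.
    yield group.clear()
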
class TornadoCoroutineExecutor(Executor):
    def __init__(self,
                 core_pool_size,
                 queue,
                 reject_handler,
                 coroutine_pool_name=None):
        self._core_pool_size = core_pool_size
        self._queue = queue
        self._reject_handler = reject_handler
        self._coroutine_pool_name = coroutine_pool_name or \
            'tornado-coroutine-pool-%s' % uuid.uuid1().hex
        self._core_coroutines_condition = Condition()
        self._core_coroutines = {}
        self._core_coroutines_wait_condition = Condition()
        self._shutting_down = False
        self._shuted_down = False
        self._initialize_core_coroutines()

    def _initialize_core_coroutines(self):
        # Start the fixed-size pool of long-running worker coroutines.
        for ind in range(self._core_pool_size):
            self._core_coroutines[ind] = self._core_coroutine_run(ind)
            LOGGER.info("core coroutine: %s is initialized" %
                        self._get_coroutine_name(ind))

    def _get_coroutine_name(self, ind):
        return '%s:%d' % (self._coroutine_pool_name, ind)

    @gen.coroutine
    def _core_coroutine_run(self, ind):
        coroutine_name = self._get_coroutine_name(ind)
        while not self._shutting_down and not self._shuted_down:
            try:
                task_item = self._queue.get_nowait()
            except QueueEmpty:
                # Nothing queued: park this worker until submit_task() or
                # shutdown() notifies the wait condition.
                LOGGER.debug("coroutine: %s will enter into waiting pool" %
                             coroutine_name)
                if self._shutting_down or self._shuted_down:
                    break
                yield self._core_coroutines_wait_condition.wait()
                LOGGER.debug("coroutine: %s was woken up from waiting pool" %
                             coroutine_name)
                continue

            async_result = task_item.async_result
            async_result.set_time_info("consumed_from_queue_at")
            if not async_result.set_running_or_notify_cancel():
                continue
            time_info_key = "executed_completion_at"
            try:
                result = yield task_item.function(*task_item.args,
                                                  **task_item.kwargs)
                async_result.set_time_info(time_info_key).set_result(result)
            except Exception as ex:
                async_result.set_time_info(time_info_key).set_exception(ex)

        LOGGER.info("coroutine: %s is stopped" % coroutine_name)
        self._core_coroutines.pop(ind)
        if not self._core_coroutines:
            LOGGER.info("all coroutines in %s are stopped" %
                        self._coroutine_pool_name)
            self._core_coroutines_condition.notify_all()

    def submit_task(self, function, *args, **kwargs):
        async_result = AsyncResult()
        if self._shutting_down or self._shuted_down:
            async_result.set_exception(
                ShutedDownError(self._coroutine_pool_name))
            return async_result
        if not gen.is_coroutine_function(function):
            async_result.set_exception(
                RuntimeError("function must be tornado coroutine function"))
            return async_result

        is_full = False
        task_item = TaskItem(function, args, kwargs, async_result)
        try:
            self._queue.put_nowait(task_item)
            async_result.set_time_info("submitted_to_queue_at")
        except QueueFull:
            is_full = True

        if is_full:
            return self._reject_handler(self._queue, task_item)
        else:
            self._core_coroutines_wait_condition.notify()
            return async_result

    @gen.coroutine
    def shutdown(self, wait_time=None):
        if self._shutting_down or self._shuted_down:
            raise gen.Return()

        self._shutting_down = True
        self._shuted_down = False

        LOGGER.info("begin to notify all coroutines")
        self._core_coroutines_wait_condition.notify_all()
        if self._core_coroutines:
            yield self._core_coroutines_condition.wait(wait_time)

        while True:
            try:
                task_item = self._queue.get_nowait()
            except QueueEmpty:
                break
            else:
                task_item.async_result.set_exception(
                    ShutedDownError(self._coroutine_pool_name))

        self._shutting_down = False
        self._shuted_down = True
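
A minimal usage sketch for the executor above (not from the original source): ``TaskItem``, ``AsyncResult``, and ``ShutedDownError`` come from the surrounding codebase; the pool size, queue bound, and the fail-fast reject handler below are made up, and the executor is assumed to be paired with a Tornado ``Queue``.

from tornado import gen, ioloop
from tornado.queues import Queue

def reject_by_failing(queue, task_item):
    # Hypothetical rejection policy: fail the task when the queue is full.
    task_item.async_result.set_exception(RuntimeError("task queue is full"))
    return task_item.async_result

@gen.coroutine
def double(x):
    raise gen.Return(x * 2)

@gen.coroutine
def demo():
    executor = TornadoCoroutineExecutor(core_pool_size=4,
                                        queue=Queue(maxsize=100),
                                        reject_handler=reject_by_failing)

    # submit_task() returns the AsyncResult immediately; the pool picks the
    # task up in the background. Only coroutine functions are accepted.
    async_result = executor.submit_task(double, 21)

    # Give the pool a chance to run the task before shutting down; real
    # callers would wait on the AsyncResult itself instead.
    yield gen.sleep(0.1)

    # Drain anything left in the queue and stop the workers.
    yield executor.shutdown()

ioloop.IOLoop.current().run_sync(demo)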