Code example #1
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    @tornado.gen.coroutine
    def wait_for_connect(self):
        if not self.connected_event.is_set():
            yield self.connected_event.wait()
        else:
            raise Exception("instance is already connected")

    @tornado.gen.coroutine
    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            yield self.disconnected_event.wait()
        else:
            raise Exception("instance is already disconnected")

    @tornado.gen.coroutine
    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                env = yield self.message_queue.get()
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.message_queue.task_done()

    @tornado.gen.coroutine
    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                env = yield self.presence_queue.get()
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.presence_queue.task_done()
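
The listener above is normally attached to a PubNub Tornado client and driven from a coroutine. A minimal usage sketch, assuming an already-configured pubnub client and a hypothetical channel name "demo" (neither is part of the original listing):

@tornado.gen.coroutine
def example_usage(pubnub):
    listener = SubscribeListener()
    pubnub.add_listener(listener)

    pubnub.subscribe().channels("demo").execute()
    yield listener.wait_for_connect()            # resolves once a connect status arrives

    envelope = yield listener.wait_for_message_on("demo")
    print(envelope.message)

    pubnub.unsubscribe().channels("demo").execute()
    yield listener.wait_for_disconnect()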
Code example #2
async def test_exit_callback():
    to_child = mp_context.Queue()
    from_child = mp_context.Queue()
    evt = Event()

    # FIXME: this breaks if changed to async def...
    @gen.coroutine
    def on_stop(_proc):
        assert _proc is proc
        yield gen.moment
        evt.set()

    # Normal process exit
    proc = AsyncProcess(target=feed, args=(to_child, from_child))
    evt.clear()
    proc.set_exit_callback(on_stop)
    proc.daemon = True

    await proc.start()
    await asyncio.sleep(0.05)
    assert proc.is_alive()
    assert not evt.is_set()

    to_child.put(None)
    await evt.wait(timedelta(seconds=3))
    assert evt.is_set()
    assert not proc.is_alive()

    # Process terminated
    proc = AsyncProcess(target=wait)
    evt.clear()
    proc.set_exit_callback(on_stop)
    proc.daemon = True

    await proc.start()
    await asyncio.sleep(0.05)
    assert proc.is_alive()
    assert not evt.is_set()

    await proc.terminate()
    await evt.wait(timedelta(seconds=3))
    assert evt.is_set()
Code example #3
def test_exit_callback():
    to_child = mp_context.Queue()
    from_child = mp_context.Queue()
    evt = Event()

    @gen.coroutine
    def on_stop(_proc):
        assert _proc is proc
        yield gen.moment
        evt.set()

    # Normal process exit
    proc = AsyncProcess(target=feed, args=(to_child, from_child))
    evt.clear()
    proc.set_exit_callback(on_stop)
    proc.daemon = True

    yield proc.start()
    yield gen.sleep(0.05)
    assert proc.is_alive()
    assert not evt.is_set()

    to_child.put(None)
    yield evt.wait(timedelta(seconds=3))
    assert evt.is_set()
    assert not proc.is_alive()

    # Process terminated
    proc = AsyncProcess(target=wait)
    evt.clear()
    proc.set_exit_callback(on_stop)
    proc.daemon = True

    yield proc.start()
    yield gen.sleep(0.05)
    assert proc.is_alive()
    assert not evt.is_set()

    yield proc.terminate()
    yield evt.wait(timedelta(seconds=3))
    assert evt.is_set()
Code example #4
class ClientUpdater:
    def __init__(self, umgr, repo, path):
        self.server = umgr.server
        self.github_request = umgr.github_request
        self.notify_update_response = umgr.notify_update_response
        self.repo = repo.strip().strip("/")
        self.name = self.repo.split("/")[-1]
        self.path = path
        self.version = self.remote_version = self.dl_url = "?"
        self.init_evt = Event()
        self._get_local_version()
        logging.info(f"\nInitializing Client Updater: '{self.name}',"
                     f"\nversion: {self.version}"
                     f"\npath: {self.path}")
        IOLoop.current().spawn_callback(self.refresh)

    def _get_local_version(self):
        version_path = os.path.join(self.path, ".version")
        if os.path.isfile(version_path):
            with open(version_path, "r") as f:
                v = f.read()
            self.version = v.strip()

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        to = None
        if timeout is not None:
            to = IOLoop.current().time() + timeout
        await self.init_evt.wait(to)

    async def refresh(self):
        # Local state
        self._get_local_version()

        # Remote state
        url = f"https://api.github.com/repos/{self.repo}/releases/latest"
        try:
            result = await self.github_request(url)
        except Exception:
            logging.exception(f"Client {self.repo}: Github Request Error")
            result = {}
        self.remote_version = result.get('name', "?")
        release_assets = result.get('assets', [{}])[0]
        self.dl_url = release_assets.get('browser_download_url', "?")
        logging.info(
            f"Github client Info Received: {self.name}, "
            f"version: {self.remote_version} "
            f"url: {self.dl_url}")
        self.init_evt.set()

    async def update(self, *args):
        if self.remote_version == "?":
            await self.refresh()
            if self.remote_version == "?":
                raise self.server.error(
                    f"Client {self.repo}: Unable to locate update")
        if self.dl_url == "?":
            raise self.server.error(
                f"Client {self.repo}: Invalid download url")
        if self.version == self.remote_version:
            # Already up to date
            return
        if os.path.isdir(self.path):
            shutil.rmtree(self.path)
        os.mkdir(self.path)
        self.notify_update_response(f"Downloading Client: {self.name}")
        archive = await self.github_request(self.dl_url, is_download=True)
        with zipfile.ZipFile(io.BytesIO(archive)) as zf:
            zf.extractall(self.path)
        self.version = self.remote_version
        version_path = os.path.join(self.path, ".version")
        if not os.path.exists(version_path):
            with open(version_path, "w") as f:
                f.write(self.version)
        self.notify_update_response(f"Client Updated Finished: {self.name}",
                                    is_complete=True)

    def get_update_status(self):
        return {
            'name': self.name,
            'version': self.version,
            'remote_version': self.remote_version
        }
Code example #5
class PackageUpdater:
    def __init__(self, umgr):
        self.server = umgr.server
        self.execute_cmd = umgr.execute_cmd
        self.execute_cmd_with_response = umgr.execute_cmd_with_response
        self.notify_update_response = umgr.notify_update_response
        self.available_packages = []
        self.init_evt = Event()
        self.refresh_condition = None

    async def refresh(self, fetch_packages=True):
        # TODO: Use python-apt python lib rather than command line for updates
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            if fetch_packages:
                await self.execute_cmd(f"{APT_CMD} update",
                                       timeout=300.,
                                       retries=3)
            res = await self.execute_cmd_with_response("apt list --upgradable",
                                                       timeout=60.)
            pkg_list = [p.strip() for p in res.split("\n") if p.strip()]
            if pkg_list:
                pkg_list = pkg_list[2:]
                self.available_packages = [
                    p.split("/", maxsplit=1)[0] for p in pkg_list
                ]
            pkg_list = "\n".join(self.available_packages)
            logging.info(
                f"Detected {len(self.available_packages)} package updates:"
                f"\n{pkg_list}")
        except Exception:
            logging.exception("Error Refreshing System Packages")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def update(self, *args):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            await self.refresh_condition.wait()
        self.notify_update_response("Updating packages...")
        try:
            await self.execute_cmd(f"{APT_CMD} update",
                                   timeout=300.,
                                   notify=True)
            await self.execute_cmd(f"{APT_CMD} upgrade --yes",
                                   timeout=3600.,
                                   notify=True)
        except Exception:
            raise self.server.error("Error updating system packages")
        self.available_packages = []
        self.notify_update_response("Package update finished...",
                                    is_complete=True)

    def get_update_status(self):
        return {
            'package_count': len(self.available_packages),
            'package_list': self.available_packages
        }
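
Both updaters above compute an absolute deadline before calling Event.wait(). A minimal sketch of that convention with an illustrative timeout value; tornado.locks.Event.wait() accepts either an absolute IOLoop deadline or a datetime.timedelta and raises tornado.util.TimeoutError when it expires:

from tornado.ioloop import IOLoop
from tornado.locks import Event
from tornado.util import TimeoutError

async def wait_for_init(evt: Event, timeout: float = 20.):
    try:
        # wait() accepts either an absolute IOLoop deadline (as used in
        # check_initialized() above) or a relative datetime.timedelta.
        await evt.wait(IOLoop.current().time() + timeout)
    except TimeoutError:
        pass  # the event was not set before the deadline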
Code example #6
class GitUpdater:
    def __init__(self, config, cmd_helper, path=None, env=None):
        self.server = cmd_helper.get_server()
        self.cmd_helper = cmd_helper
        self.name = config.get_name().split()[-1]
        if path is None:
            path = os.path.expanduser(config.get('path'))
        self.repo_path = path
        self.repo = GitRepo(cmd_helper, path, self.name)
        self.init_evt = Event()
        self.debug = self.cmd_helper.is_debug_enabled()
        self.env = config.get("env", env)
        dist_packages = None
        self.python_reqs = None
        if self.env is not None:
            self.env = os.path.expanduser(self.env)
            dist_packages = config.get('python_dist_packages', None)
            self.python_reqs = os.path.join(self.repo_path,
                                            config.get("requirements"))
        self.origin = config.get("origin").lower()
        self.install_script = config.get('install_script', None)
        if self.install_script is not None:
            self.install_script = os.path.abspath(
                os.path.join(self.repo_path, self.install_script))
        self.venv_args = config.get('venv_args', None)
        self.python_dist_packages = None
        self.python_dist_path = None
        self.env_package_path = None
        if dist_packages is not None:
            self.python_dist_packages = [
                p.strip() for p in dist_packages.split('\n') if p.strip()
            ]
            self.python_dist_path = os.path.abspath(
                config.get('python_dist_path'))
            env_package_path = os.path.abspath(
                os.path.join(os.path.dirname(self.env), "..",
                             config.get('env_package_path')))
            matches = glob.glob(env_package_path)
            if len(matches) == 1:
                self.env_package_path = matches[0]
            else:
                raise config.error("No match for 'env_package_path': %s" %
                                   (env_package_path, ))
        for opt in [
                "repo_path", "env", "python_reqs", "install_script",
                "python_dist_path", "env_package_path"
        ]:
            val = getattr(self, opt)
            if val is None:
                continue
            if not os.path.exists(val):
                raise config.error("Invalid path for option '%s': %s" %
                                   (val, opt))

    def _get_version_info(self):
        ver_path = os.path.join(self.repo_path, "scripts/version.txt")
        vinfo = {}
        if os.path.isfile(ver_path):
            data = ""
            with open(ver_path, 'r') as f:
                data = f.read()
            try:
                entries = [e.strip() for e in data.split('\n') if e.strip()]
                vinfo = dict([i.split('=') for i in entries])
                vinfo = {
                    k: tuple(re.findall(r"\d+", v))
                    for k, v in vinfo.items()
                }
            except Exception:
                pass
            else:
                self._log_info(f"Version Info Found: {vinfo}")
        vinfo['version'] = self.repo.get_version()
        return vinfo

    def _log_exc(self, msg, traceback=True):
        log_msg = f"Repo {self.name}: {msg}"
        if traceback:
            logging.exception(log_msg)
        else:
            logging.info(log_msg)
        return self.server.error(msg)

    def _log_info(self, msg):
        log_msg = f"Repo {self.name}: {msg}"
        logging.info(log_msg)

    def _notify_status(self, msg, is_complete=False):
        log_msg = f"Repo {self.name}: {msg}"
        logging.debug(log_msg)
        self.cmd_helper.notify_update_response(log_msg, is_complete)

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        try:
            await self._update_repo_state()
        except Exception:
            logging.exception("Error Refreshing git state")
        self.init_evt.set()

    async def _update_repo_state(self, need_fetch=True):
        self.is_valid = False
        await self.repo.initialize(need_fetch=need_fetch)
        invalids = self.repo.report_invalids(self.origin)
        if invalids:
            msgs = '\n'.join(invalids)
            self._log_info(f"Repo validation checks failed:\n{msgs}")
            if self.debug:
                self.is_valid = True
                self._log_info(
                    "Repo debug enabled, overriding validity checks")
            else:
                self._log_info("Updates on repo disabled")
        else:
            self.is_valid = True
            self._log_info("Validity check for git repo passed")

    async def update(self, update_deps=False):
        await self.check_initialized(20.)
        await self.repo.wait_for_init()
        if not self.is_valid:
            raise self._log_exc("Update aborted, repo not valid", False)
        if self.repo.is_dirty():
            raise self._log_exc("Update aborted, repo has been modified",
                                False)
        if self.repo.is_current():
            # No need to update
            return
        self._notify_status("Updating Repo...")
        try:
            if self.repo.is_detached():
                await self.repo.fetch()
                await self.repo.checkout()
            else:
                await self.repo.pull()
            # Prune stale references.  Do this separately from pull or
            # fetch to prevent a timeout during a prune
            await self.repo.prune()
        except Exception:
            raise self._log_exc("Error running 'git pull'")
        # Check Semantic Versions
        vinfo = self._get_version_info()
        cur_version = vinfo.get('version', ())
        update_deps |= cur_version < vinfo.get('deps_version', ())
        need_env_rebuild = cur_version < vinfo.get('env_version', ())
        if update_deps:
            await self._install_packages()
            await self._update_virtualenv(need_env_rebuild)
        elif need_env_rebuild:
            await self._update_virtualenv(True)
        # Refresh local repo state
        await self._update_repo_state(need_fetch=False)
        if self.name == "moonraker":
            # Launch restart async so the request can return
            # before the server restarts
            self._notify_status("Update Finished...", is_complete=True)
            IOLoop.current().call_later(.1, self.restart_service)
        else:
            await self.restart_service()
            self._notify_status("Update Finished...", is_complete=True)

    async def _install_packages(self):
        if self.install_script is None:
            return
        # Open the install script and read its contents
        inst_path = self.install_script
        if not os.path.isfile(inst_path):
            self._log_info(f"Unable to open install script: {inst_path}")
            return
        with open(inst_path, 'r') as f:
            data = f.read()
        packages = re.findall(r'PKGLIST="(.*)"', data)
        packages = [p.lstrip("${PKGLIST}").strip() for p in packages]
        if not packages:
            self._log_info(f"No packages found in script: {inst_path}")
            return
        # TODO: Log and notify that packages will be installed
        pkgs = " ".join(packages)
        logging.debug(f"Repo {self.name}: Detected Packages: {pkgs}")
        self._notify_status("Installing system dependencies...")
        # Install packages with apt-get
        try:
            await self.cmd_helper.run_cmd(f"{APT_CMD} update",
                                          timeout=300.,
                                          notify=True)
            await self.cmd_helper.run_cmd(f"{APT_CMD} install --yes {pkgs}",
                                          timeout=3600.,
                                          notify=True)
        except Exception:
            self._log_exc("Error updating packages via apt-get")
            return

    async def _update_virtualenv(self, rebuild_env=False):
        if self.env is None:
            return
        # Update python dependencies
        bin_dir = os.path.dirname(self.env)
        env_path = os.path.normpath(os.path.join(bin_dir, ".."))
        if rebuild_env:
            self._notify_status(f"Creating virtualenv at: {env_path}...")
            if os.path.exists(env_path):
                shutil.rmtree(env_path)
            try:
                await self.cmd_helper.run_cmd(
                    f"virtualenv {self.venv_args} {env_path}", timeout=300.)
            except Exception:
                self._log_exc(f"Error creating virtualenv")
                return
            if not os.path.exists(self.env):
                raise self._log_exc("Failed to create new virtualenv", False)
        reqs = self.python_reqs
        if not os.path.isfile(reqs):
            self._log_exc(f"Invalid path to requirements_file '{reqs}'")
            return
        pip = os.path.join(bin_dir, "pip")
        self._notify_status("Updating python packages...")
        try:
            await self.cmd_helper.run_cmd(f"{pip} install -r {reqs}",
                                          timeout=1200.,
                                          notify=True,
                                          retries=3)
        except Exception:
            self._log_exc("Error updating python requirements")
        self._install_python_dist_requirements()

    def _install_python_dist_requirements(self):
        dist_reqs = self.python_dist_packages
        if dist_reqs is None:
            return
        dist_path = self.python_dist_path
        site_path = self.env_package_path
        for pkg in dist_reqs:
            for f in os.listdir(dist_path):
                if f.startswith(pkg):
                    src = os.path.join(dist_path, f)
                    dest = os.path.join(site_path, f)
                    self._notify_status(f"Linking to dist package: {pkg}")
                    if os.path.islink(dest):
                        os.remove(dest)
                    elif os.path.exists(dest):
                        self._notify_status(
                            f"Error symlinking dist package: {pkg}, "
                            f"file already exists: {dest}")
                        continue
                    os.symlink(src, dest)
                    break

    async def restart_service(self):
        self._notify_status("Restarting Service...")
        try:
            await self.cmd_helper.run_cmd(f"sudo systemctl restart {self.name}"
                                          )
        except Exception:
            raise self._log_exc("Error restarting service")

    def get_update_status(self):
        status = self.repo.get_repo_status()
        status['is_valid'] = self.is_valid
        status['debug_enabled'] = self.debug
        return status
Code example #7
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()
        self.error_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()
        elif status.is_error():
            self.error_queue.put_nowait(status.error_data.exception)

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    @tornado.gen.coroutine
    def _wait_for(self, coro):
        error = self.error_queue.get()
        wi = tornado.gen.WaitIterator(coro, error)

        while not wi.done():
            result = yield wi.next()

            if wi.current_future == coro:
                raise tornado.gen.Return(result)
            elif wi.current_future == error:
                raise result
            else:
                raise Exception("Unexpected future resolved: %s" % str(wi.current_future))

    @tornado.gen.coroutine
    def wait_for_connect(self):
        if not self.connected_event.is_set():
            yield self._wait_for(self.connected_event.wait())
        else:
            raise Exception("instance is already connected")

    @tornado.gen.coroutine
    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            yield self._wait_for(self.disconnected_event.wait())
        else:
            raise Exception("instance is already disconnected")

    @tornado.gen.coroutine
    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:  # NOQA
                env = yield self._wait_for(self.message_queue.get())
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.message_queue.task_done()

    @tornado.gen.coroutine
    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                try:
                    env = yield self._wait_for(self.presence_queue.get())
                except:  # NOQA E722 pylint: disable=W0702
                    break
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.presence_queue.task_done()
Code example #8
class TornadoReconnectionManager(ReconnectionManager):
    def __init__(self, pubnub):
        self._cancelled_event = Event()
        super(TornadoReconnectionManager, self).__init__(pubnub)

    @gen.coroutine
    def _register_heartbeat_timer(self):
        self._cancelled_event.clear()

        while not self._cancelled_event.is_set():
            if self._pubnub.config.reconnect_policy == PNReconnectionPolicy.EXPONENTIAL:
                self._timer_interval = int(math.pow(2, self._connection_errors) - 1)
                if self._timer_interval > self.MAXEXPONENTIALBACKOFF:
                    self._timer_interval = self.MINEXPONENTIALBACKOFF
                    self._connection_errors = 1
                    logger.debug("timerInterval > MAXEXPONENTIALBACKOFF at: %s" % utils.datetime_now())
                elif self._timer_interval < 1:
                    self._timer_interval = self.MINEXPONENTIALBACKOFF
                logger.debug("timerInterval = %d at: %s" % (self._timer_interval, utils.datetime_now()))
            else:
                self._timer_interval = self.INTERVAL

            # >>> Wait given interval or cancel
            sleeper = tornado.gen.sleep(self._timer_interval)
            canceller = self._cancelled_event.wait()

            wi = tornado.gen.WaitIterator(canceller, sleeper)

            while not wi.done():
                try:
                    future = wi.next()
                    yield future
                except Exception as e:
                    # TODO: verify the error will not be eaten
                    logger.error(e)
                    raise
                else:
                    if wi.current_future == sleeper:
                        break
                    elif wi.current_future == canceller:
                        return
                    else:
                        raise Exception("unknown future raised")

            logger.debug("reconnect loop at: %s" % utils.datetime_now())

            # >>> Attempt to request /time/0 endpoint
            try:
                yield self._pubnub.time().result()
                self._connection_errors = 1
                self._callback.on_reconnect()
                logger.debug("reconnection manager stop due success time endpoint call: %s" % utils.datetime_now())
                break
            except Exception:
                if self._pubnub.config.reconnect_policy == PNReconnectionPolicy.EXPONENTIAL:
                    logger.debug("reconnect interval increment at: %s" % utils.datetime_now())
                    self._connection_errors += 1

    def start_polling(self):
        if self._pubnub.config.reconnect_policy == PNReconnectionPolicy.NONE:
            logger.warning("reconnection policy is disabled, please handle reconnection manually.")
            return

        self._pubnub.ioloop.spawn_callback(self._register_heartbeat_timer)

    def stop_polling(self):
        if self._cancelled_event is not None and not self._cancelled_event.is_set():
            self._cancelled_event.set()
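
The WaitIterator dance above implements a "sleep for the interval or stop early when cancelled" primitive. A minimal sketch of the same idea written with gen.with_timeout; the interval value is illustrative and not taken from the original:

from datetime import timedelta

from tornado import gen
from tornado.locks import Event
from tornado.util import TimeoutError

@gen.coroutine
def sleep_or_cancel(cancelled_event, interval):
    """Return True if cancelled before the interval elapsed, False otherwise."""
    try:
        yield gen.with_timeout(timedelta(seconds=interval),
                               cancelled_event.wait())
        raise gen.Return(True)   # cancelled_event was set
    except TimeoutError:
        raise gen.Return(False)  # the full interval elapsed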
Code example #9
class ProjectGroomer(object):
  """ Cleans up expired transactions for a project. """
  def __init__(self, project_id, coordinator, zk_client, db_access,
               thread_pool):
    """ Creates a new ProjectGroomer.

    Args:
      project_id: A string specifying a project ID.
      coordinator: A GroomingCoordinator.
      zk_client: A KazooClient.
      db_access: A DatastoreProxy.
      thread_pool: A ThreadPoolExecutor.
    """
    self.project_id = project_id

    self._coordinator = coordinator
    self._zk_client = zk_client
    self._tornado_zk = TornadoKazoo(self._zk_client)
    self._db_access = db_access
    self._thread_pool = thread_pool
    self._project_node = '/appscale/apps/{}'.format(self.project_id)
    self._containers = []
    self._inactive_containers = set()
    self._batch_resolver = BatchResolver(self.project_id, self._db_access)

    self._zk_client.ensure_path(self._project_node)
    self._zk_client.ChildrenWatch(self._project_node, self._update_containers)

    self._txid_manual_offset = 0
    self._offset_node = '/'.join([self._project_node, OFFSET_NODE])
    self._zk_client.DataWatch(self._offset_node, self._update_offset)

    self._stop_event = AsyncEvent()
    self._stopped_event = AsyncEvent()

    # Keeps track of cleanup results for each round of grooming.
    self._txids_cleaned = 0
    self._oldest_valid_tx_time = None

    self._worker_queue = AsyncQueue(maxsize=MAX_CONCURRENCY)
    for _ in range(MAX_CONCURRENCY):
      IOLoop.current().spawn_callback(self._worker)

    IOLoop.current().spawn_callback(self.start)

  @gen.coroutine
  def start(self):
    """ Starts the grooming process until the stop event is set. """
    logger.info('Grooming {}'.format(self.project_id))
    while True:
      if self._stop_event.is_set():
        break

      try:
        yield self._groom_project()
      except Exception:
        # Prevent the grooming loop from stopping if an error is encountered.
        logger.exception(
          'Unexpected error while grooming {}'.format(self.project_id))
        yield gen.sleep(MAX_TX_DURATION)

    self._stopped_event.set()

  @gen.coroutine
  def stop(self):
    """ Stops the grooming process. """
    logger.info('Stopping grooming process for {}'.format(self.project_id))
    self._stop_event.set()
    yield self._stopped_event.wait()

  @gen.coroutine
  def _worker(self):
    """ Processes items in the worker queue. """
    while True:
      tx_path, composite_indexes = yield self._worker_queue.get()
      try:
        tx_time = yield self._resolve_txid(tx_path, composite_indexes)
        if tx_time is None:
          self._txids_cleaned += 1

        if tx_time is not None and tx_time < self._oldest_valid_tx_time:
          self._oldest_valid_tx_time = tx_time
      finally:
        self._worker_queue.task_done()

  def _update_offset(self, new_offset, _):
    """ Watches for updates to the manual offset node.

    Args:
      new_offset: A string specifying the new manual offset.
    """
    self._txid_manual_offset = int(new_offset or 0)

  def _update_containers(self, nodes):
    """ Updates the list of active txid containers.

    Args:
      nodes: A list of strings specifying ZooKeeper nodes.
    """
    counters = [int(node[len(CONTAINER_PREFIX):] or 1)
                for node in nodes if node.startswith(CONTAINER_PREFIX)
                and node not in self._inactive_containers]
    counters.sort()

    containers = [CONTAINER_PREFIX + str(counter) for counter in counters]
    if containers and containers[0] == '{}1'.format(CONTAINER_PREFIX):
      containers[0] = CONTAINER_PREFIX

    self._containers = containers

  @gen.coroutine
  def _groom_project(self):
    """ Runs the grooming process. """
    index = self._coordinator.index
    worker_count = self._coordinator.total_workers

    oldest_valid_tx_time = yield self._fetch_and_clean(index, worker_count)

    # Wait until there's a reasonable chance that some transactions have
    # timed out.
    next_timeout_eta = oldest_valid_tx_time + MAX_TX_DURATION

    # The oldest ignored transaction should still be valid, but ensure that
    # the timeout is not negative.
    next_timeout = max(0, next_timeout_eta - time.time())
    time_to_wait = datetime.timedelta(
      seconds=next_timeout + (MAX_TX_DURATION / 2))

    # Allow the wait to be cut short when a project is removed.
    try:
      yield self._stop_event.wait(timeout=time_to_wait)
    except gen.TimeoutError:
      raise gen.Return()

  @gen.coroutine
  def _remove_path(self, tx_path):
    """ Removes a ZooKeeper node.

    Args:
      tx_path: A string specifying the path to delete.
    """
    try:
      yield self._tornado_zk.delete(tx_path)
    except NoNodeError:
      pass
    except NotEmptyError:
      yield self._thread_pool.submit(self._zk_client.delete, tx_path,
                                     recursive=True)

  @gen.coroutine
  def _resolve_txid(self, tx_path, composite_indexes):
    """ Cleans up a transaction if it has expired.

    Args:
      tx_path: A string specifying the location of the ZooKeeper node.
      composite_indexes: A list of CompositeIndex objects.
    Returns:
      The transaction start time if it is still valid, or None if it is
      invalid (in which case this method also deletes it).
    """
    tx_data = yield self._tornado_zk.get(tx_path)
    tx_time = float(tx_data[0])

    _, container, tx_node = tx_path.rsplit('/', 2)
    tx_node_id = int(tx_node.lstrip(COUNTER_NODE_PREFIX))
    container_count = int(container[len(CONTAINER_PREFIX):] or 1)
    if tx_node_id < 0:
      yield self._remove_path(tx_path)
      raise gen.Return()

    container_size = MAX_SEQUENCE_COUNTER + 1
    automatic_offset = (container_count - 1) * container_size
    txid = self._txid_manual_offset + automatic_offset + tx_node_id

    if txid < 1:
      yield self._remove_path(tx_path)
      raise gen.Return()

    # If the transaction is still valid, return the time it was created.
    if tx_time + MAX_TX_DURATION >= time.time():
      raise gen.Return(tx_time)

    yield self._batch_resolver.resolve(txid, composite_indexes)
    yield self._remove_path(tx_path)
    yield self._batch_resolver.cleanup(txid)

  @gen.coroutine
  def _fetch_and_clean(self, worker_index, worker_count):
    """ Cleans up expired transactions.

    Args:
      worker_index: An integer specifying this worker's index.
      worker_count: An integer specifying the number of total workers.
    Returns:
      A float specifying the time of the oldest valid transaction as a unix
      timestamp.
    """
    self._txids_cleaned = 0
    self._oldest_valid_tx_time = time.time()

    children = []
    for index, container in enumerate(self._containers):
      container_path = '/'.join([self._project_node, container])
      new_children = yield self._tornado_zk.get_children(container_path)

      if not new_children and index < len(self._containers) - 1:
        self._inactive_containers.add(container)

      children.extend(['/'.join([container_path, node])
                       for node in new_children])

    logger.debug(
      'Found {} transaction IDs for {}'.format(len(children), self.project_id))

    if not children:
      raise gen.Return(self._oldest_valid_tx_time)

    # Refresh these each time so that the indexes are fresh.
    encoded_indexes = yield self._thread_pool.submit(
      self._db_access.get_indices, self.project_id)
    composite_indexes = [CompositeIndex(index) for index in encoded_indexes]

    for tx_path in children:
      tx_node_id = int(tx_path.split('/')[-1].lstrip(COUNTER_NODE_PREFIX))
      # Only resolve transactions that this worker has been assigned.
      if tx_node_id % worker_count != worker_index:
        continue

      yield self._worker_queue.put((tx_path, composite_indexes))

    yield self._worker_queue.join()

    if self._txids_cleaned > 0:
      logger.info('Cleaned up {} expired txids for {}'.format(
        self._txids_cleaned, self.project_id))

    raise gen.Return(self._oldest_valid_tx_time)
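
The groomer above fans work out through a bounded tornado.queues.Queue: a fixed pool of coroutine workers consumes items while the producer uses join() to wait for the round to finish. A stripped-down sketch of that pattern, with an illustrative concurrency limit and a no-op work step:

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

MAX_CONCURRENCY = 4
work_queue = Queue(maxsize=MAX_CONCURRENCY)

@gen.coroutine
def worker():
    while True:
        item = yield work_queue.get()
        try:
            yield gen.moment  # real work would happen here
        finally:
            work_queue.task_done()

@gen.coroutine
def run_round(items):
    for _ in range(MAX_CONCURRENCY):
        IOLoop.current().spawn_callback(worker)
    for item in items:
        yield work_queue.put(item)   # blocks when the queue is full
    yield work_queue.join()          # resolves once every item is processed

IOLoop.current().run_sync(lambda: run_round(range(10)))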
Code example #10
File: stores.py  Project: mivade/tornadose
class RedisStore(BaseStore):
    """Publish data via a Redis backend.

    This data store works in a similar manner as
    :class:`DataStore`. The primary advantage is that external
    programs can be used to publish data to be consumed by clients.

    The ``channel`` keyword argument specifies which Redis channel to
    publish to and defaults to ``tornadose``.

    All remaining keyword arguments are passed directly to the
    ``redis.StrictRedis`` constructor. See `redis-py`__'s
    documentation for details.

    New messages are read in a background thread via a
    :class:`concurrent.futures.ThreadPoolExecutor`. This requires
    either Python >= 3.2 or the backported ``futures`` module to be
    installed.

    __ https://redis-py.readthedocs.org/en/latest/

    :raises ConnectionError: when the Redis host is not pingable

    """
    def initialize(self, channel='tornadose', **kwargs):
        if redis is None:
            raise RuntimeError("The redis module is required to use RedisStore")
        self.executor = ThreadPoolExecutor(max_workers=1)
        self.channel = channel
        self.messages = Queue()
        self._done = Event()

        self._redis = redis.StrictRedis(**kwargs)
        self._redis.ping()
        self._pubsub = self._redis.pubsub(ignore_subscribe_messages=True)
        self._pubsub.subscribe(self.channel)

        self.publish()

    def submit(self, message, debug=False):
        self._redis.publish(self.channel, message)
        if debug:
            logger.debug(message)
            self._redis.setex(self.channel, 5, message)

    def shutdown(self):
        """Stop the publishing loop."""
        self._done.set()
        self.executor.shutdown(wait=False)

    @run_on_executor
    def _get_message(self):
        data = self._pubsub.get_message(timeout=1)
        if data is not None:
            data = data['data']
        return data

    @gen.coroutine
    def publish(self):
        while not self._done.is_set():
            data = yield self._get_message()
            if self.subscribers and data is not None:
                for subscriber in self.subscribers:
                    subscriber.submit(data)
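
A minimal usage sketch for the store above. It assumes a Redis server reachable on localhost and that BaseStore forwards its constructor arguments to initialize(), as tornadose stores conventionally do; the handler wiring that adds subscribers is omitted:

store = RedisStore(channel='tornadose', host='localhost', port=6379)

# Any process with access to Redis can now push data to connected clients,
# either through the store itself or by publishing to the channel directly.
store.submit('hello from redis')

# Stop the background reader and the publish loop when finished.
store.shutdown()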
Code example #11
File: base.py  Project: nioinnovation/python-xbee
class XBeeBase(_XBeeBase):
    """
    Abstract base class providing command generation and response
    parsing methods for XBee modules.

    Constructor arguments:
        ser:    The file-like serial port to use.

        shorthand: boolean flag which determines whether shorthand command
                   calls (i.e. xbee.at(...) instead of xbee.send("at", ...))
                   are allowed.

        callback: function which should be called with frame data
                  whenever a frame arrives from the serial port.

        escaped: boolean flag which determines whether the library should
                 operate in escaped mode. In this mode, certain data bytes
                 in the output and input streams will be escaped and unescaped
                 in accordance with the XBee API. This setting must match
                 the appropriate api_mode setting of an XBee device; see your
                 XBee device's documentation for more information.

        error_callback: function which should be called with an Exception
                 whenever an exception is raised while waiting for data from
                 the serial port. This will only take effect if the callback
                 argument is also used.
    """
    def __init__(self, *args, **kwargs):
        if 'io_loop' in kwargs:
            self._ioloop = kwargs.pop('io_loop')
        else:
            self._ioloop = ioloop.IOLoop.current()

        super(XBeeBase, self).__init__(*args, **kwargs)

        self._running = Event()
        self._running.set()

        self._frame_future = None
        self._frame_queue = deque()

        if self._callback:
            # Make Non-Blocking
            self.serial.timeout = 0
            self.process_frames()

        self._ioloop.add_handler(self.serial.fd,
                                 self._process_input,
                                 ioloop.IOLoop.READ)

    def halt(self):
        """
        halt: None -> None

        Stop the event, and remove the FD from the loop handler
        """
        if self._callback:
            self._running.clear()
            self._ioloop.remove_handler(self.serial.fd)

            if self._frame_future is not None:
                self._frame_future.set_result(None)
                self._frame_future = None

    @gen.coroutine
    def process_frames(self):
        """
        process_frames: None -> None

        Wait for a frame to become available, when resolved call the callback
        """
        while self._running.is_set():
            try:
                frame = yield self._get_frame()
                info = self._split_response(frame.data)
                if info is not None:
                    self._callback(info)
            except Exception as e:
                # Unexpected quit.
                if self._error_callback:
                    self._error_callback(e)

    @gen.coroutine
    def wait_read_frame(self, timeout=None):
        frame = yield self._get_frame(timeout=timeout)
        raise gen.Return(self._split_response(frame.data))

    def _get_frame(self, timeout=None):
        future = Future()
        if self._frame_queue:
            future.set_result(self._frame_queue.popleft())
        else:
            if timeout is not None:
                def on_timeout():
                    future.set_exception(_TimeoutException())

                handle = self._ioloop.add_timeout(
                    self._ioloop.time() + timeout, on_timeout
                )
                future.add_done_callback(lambda _:
                                         self._ioloop.remove_timeout(handle))

            self._frame_future = future

        return future

    def _process_input(self, data, events):
        """
        _process_input:

        _process_input will be notified when there is data ready on the
        serial connection to be read.  It will read and process the data
        into an API Frame and then either resolve a frame future, or push
        the frame into the queue of frames needing to be processed
        """
        frame = APIFrame(escaped=self._escaped)

        byte = self.serial.read()

        if byte != APIFrame.START_BYTE:
            return

        # Save all following bytes, if they are not empty
        if len(byte) == 1:
            frame.fill(byte)

        while frame.remaining_bytes() > 0:
            byte = self.serial.read()

            if len(byte) == 1:
                frame.fill(byte)

        try:
            # Try to parse and return result
            frame.parse()

            # Ignore empty frames
            if len(frame.data) == 0:
                return

            if self._frame_future is not None:
                self._frame_future.set_result(frame)
                self._frame_future = None
            else:
                self._frame_queue.append(frame)
        except ValueError:
            return
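
A minimal usage sketch for the base class above. The concrete XBee/ZigBee classes in the library derive from it; the sketch instantiates the base class directly only to illustrate the constructor arguments described in the docstring, and the serial device path is an assumption:

import serial

def on_frame(frame_info):
    print('received frame:', frame_info)

# Passing callback= makes the serial port non-blocking and starts process_frames().
ser = serial.Serial('/dev/ttyUSB0', 9600)
xbee = XBeeBase(ser, callback=on_frame, escaped=True)

# ... later, stop the reader and detach the file descriptor from the IOLoop.
xbee.halt()
ser.close()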
Code example #12
File: base.py  Project: ksnwright/xbee
class XBeeBase(_XBeeBase):
    """
    Abstract base class providing command generation and response
    parsing methods for XBee modules.

    Constructor arguments:
        ser:    The file-like serial port to use.

        shorthand: boolean flag which determines whether shorthand command
                   calls (i.e. xbee.at(...) instead of xbee.send("at", ...))
                   are allowed.

        callback: function which should be called with frame data
                  whenever a frame arrives from the serial port.

        escaped: boolean flag which determines whether the library should
                 operate in escaped mode. In this mode, certain data bytes
                 in the output and input streams will be escaped and unescaped
                 in accordance with the XBee API. This setting must match
                 the appropriate api_mode setting of an XBee device; see your
                 XBee device's documentation for more information.

        error_callback: function which should be called with an Exception
                 whenever an exception is raised while waiting for data from
                 the serial port. This will only take effect if the callback
                 argument is also used.
    """
    def __init__(self, *args, **kwargs):
        if 'io_loop' in kwargs:
            self._ioloop = kwargs.pop('io_loop')
        else:
            self._ioloop = ioloop.IOLoop.current()

        super(XBeeBase, self).__init__(*args, **kwargs)

        self._running = Event()
        self._running.set()

        self._frame_future = None
        self._frame_queue = deque()

        if self._callback:
            # Make Non-Blocking
            self.serial.timeout = 0
            self.process_frames()

        self._ioloop.add_handler(self.serial.fd, self._process_input,
                                 ioloop.IOLoop.READ)

    def halt(self):
        """
        halt: None -> None

        Stop the event, and remove the FD from the loop handler
        """
        if self._callback:
            self._running.clear()
            self._ioloop.remove_handler(self.serial.fd)

            if self._frame_future is not None:
                self._frame_future.set_result(None)
                self._frame_future = None

    @gen.coroutine
    def process_frames(self):
        """
        process_frames: None -> None

        Wait for a frame to become available, when resolved call the callback
        """
        while self._running.is_set():
            try:
                frame = yield self._get_frame()
                info = self._split_response(frame.data)
                if info is not None:
                    self._callback(info)
            except Exception as e:
                # Unexpected quit.
                if self._error_callback:
                    self._error_callback(e)

    @gen.coroutine
    def wait_read_frame(self, timeout=None):
        frame = yield self._get_frame(timeout=timeout)
        raise gen.Return(self._split_response(frame.data))

    def _get_frame(self, timeout=None):
        future = Future()
        if self._frame_queue:
            future.set_result(self._frame_queue.popleft())
        else:
            if timeout is not None:

                def on_timeout():
                    future.set_exception(_TimeoutException())

                handle = self._ioloop.add_timeout(
                    self._ioloop.time() + timeout, on_timeout)
                future.add_done_callback(
                    lambda _: self._ioloop.remove_timeout(handle))

            self._frame_future = future

        return future

    def _process_input(self, data, events):
        """
        _process_input:

        _process_input will be notified when there is data ready on the
        serial connection to be read.  It will read and process the data
        into an API Frame and then either resolve a frame future, or push
        the frame into the queue of frames needing to be processed
        """
        frame = APIFrame(escaped=self._escaped)

        byte = self.serial.read()

        if byte != APIFrame.START_BYTE:
            return

        # Save all following bytes, if they are not empty
        if len(byte) == 1:
            frame.fill(byte)

        while frame.remaining_bytes() > 0:
            byte = self.serial.read()

            if len(byte) == 1:
                frame.fill(byte)

        try:
            # Try to parse and return result
            frame.parse()

            # Ignore empty frames
            if len(frame.data) == 0:
                return

            if self._frame_future is not None:
                self._frame_future.set_result(frame)
                self._frame_future = None
            else:
                self._frame_queue.append(frame)
        except ValueError:
            return
Code example #13
File: base.py  Project: andrey-yantsen/tobot
class Base:
    SETTINGS_PER_BOT = 1
    SETTINGS_PER_USER = 2
    SETTINGS_TYPE = SETTINGS_PER_BOT

    def __init__(self, token, stages_builder: callable, **kwargs):
        self.token = token
        self.settings = kwargs.pop('settings', {})

        self.ignore_403_in_handlers = kwargs.pop('ignore_403_in_handlers',
                                                 False)
        for key, value in kwargs.items():
            self.__dict__[key] = value

        self.api = Api(token, self.process_update)
        self.user_settings = {}
        self.commands = {}
        self.raw_commands_tree = {}
        self.cancellation_handler = None
        self.unknown_command_handler = None
        self.updates_queue = Queue(
            kwargs.get('updates_queue_handlers', 4) * 10)
        self._init_handlers()
        self._stages = stages_builder(bot_id=self.bot_id)
        self._finished = Event()
        self._supported_languages = tuple([])

    def _init_handlers(self):
        raise NotImplementedError()

    def _add_handler(self,
                     handler: callable,
                     name: pgettext = None,
                     previous_handler: callable = None,
                     is_final=True):
        if handler not in self.commands:
            self.commands[handler] = Command(self, handler, name)

        if previous_handler and previous_handler not in self.commands:
            raise BotError('Previous command is unknown')

        previous_handler_name = previous_handler.__name__ if previous_handler else 'none'

        if previous_handler_name not in self.raw_commands_tree:
            self.raw_commands_tree[previous_handler_name] = []
        else:
            for h, _ in self.raw_commands_tree[previous_handler_name]:
                if h.handler == handler and handler != self.cancellation_handler:
                    raise BotError('Command already registered')
                elif h.handler == handler:
                    return

        self.raw_commands_tree[previous_handler_name].append(
            (self.commands[handler], is_final))

        if not is_final and self.cancellation_handler:
            self._add_handler(self.cancellation_handler,
                              previous_handler=handler,
                              is_final=True)

    def _load_user_settings_per_user(self):
        return {}

    def _update_settings_for_bot(self, settings):
        pass

    def _update_settings_for_user(self, user_id, settings):
        pass

    @coroutine
    def update_settings(self, user_id, **kwargs):
        logging.info('[bot#%s] Updating settings to %s by user#%s',
                     self.bot_id, kwargs, user_id)
        if self.SETTINGS_TYPE == self.SETTINGS_PER_BOT:
            self.settings.update(kwargs)
            yield maybe_future(self._update_settings_for_bot(self.settings))
        else:
            if user_id not in self.user_settings:
                self.user_settings[user_id] = kwargs
            else:
                self.user_settings[user_id].update(kwargs)

            yield maybe_future(
                self._update_settings_for_user(user_id, self.settings))

    def get_settings(self, user_id):
        if self.SETTINGS_TYPE == self.SETTINGS_PER_BOT:
            return deepcopy(self.settings)
        else:
            return deepcopy(self.user_settings.get(user_id, {}))

    @coroutine
    def start(self):
        logging.debug('[bot#%s] Starting', self.bot_id)
        self._finished.clear()
        self.user_settings = yield maybe_future(
            self._load_user_settings_per_user())
        handlers_f = [
            self._update_processor()
            for _ in range(self.settings.get('updates_queue_handlers', 4))
        ]
        yield maybe_future(self._stages.restore())
        try:
            yield self.api.wait_commands()
        finally:
            self.stop()
            yield handlers_f

    def stop(self):
        assert not self._finished.is_set()
        logging.debug('[bot#%s] Terminating', self.bot_id)
        self._finished.set()
        if self.api.is_alive:
            self.api.stop()

    @property
    def is_alive(self):
        return not self._finished.is_set()

    @coroutine
    def process_update(self, update):
        yield self.updates_queue.put(update)

    @staticmethod
    def get_stage_key(update):
        if 'message' in update:
            chat_id = update['message']['chat']['id']
            user_id = update['message']['from']['id']
        elif 'callback_query' in update:
            if 'message' in update['callback_query']:
                chat_id = update['callback_query']['message']['chat']['id']
            else:
                chat_id = update['callback_query']['from']['id']
            user_id = update['callback_query']['from']['id']
        elif 'channel_post' in update:
            chat_id = update['channel_post']['chat']['id']
            user_id = update['channel_post']['chat']['id']
        elif 'edited_channel_post' in update:
            chat_id = update['edited_channel_post']['chat']['id']
            user_id = update['edited_channel_post']['chat']['id']
        else:
            raise BotError('Unable to get stage_key for this type of update')

        return '%s-%s' % (user_id, chat_id)

    @coroutine
    def _update_processor(self):
        while not self._finished.is_set():
            try:
                received_update = yield self.updates_queue.get(
                    timedelta(seconds=3))
            except:
                continue

            del received_update['update_id']

            try:
                stage_key = self.get_stage_key(received_update)
                current_stage = self._stages[stage_key]
                if current_stage:
                    stage_data = current_stage[1]
                    received_update.update(current_stage[1])
                    commands_tree = self.raw_commands_tree[current_stage[0]]
                else:
                    stage_data = {}
                    commands_tree = self.raw_commands_tree['none']

                processing_result = False
                for command_in_tree in commands_tree:
                    try:
                        processing_result = yield command_in_tree[0](
                            **received_update)
                    except ApiError as e:
                        if not self.ignore_403_in_handlers or str(
                                e.code) != '403':
                            raise
                        else:
                            logging.exception(
                                'Got exception in message handler')
                    if processing_result is not False:
                        if not command_in_tree[
                                1] and processing_result is not None:
                            if processing_result is True:
                                processing_result = {}
                            stage_data.update(processing_result)
                            self._stages[stage_key] = command_in_tree[
                                0].handler, stage_data
                        elif processing_result is not None:
                            del self._stages[stage_key]
                        break

                    if processing_result is not False:
                        break

                if processing_result is False:
                    logging.debug('Handler not found: %s',
                                  dumps(received_update, indent=2))
                    if self.unknown_command_handler:
                        try:
                            yield maybe_future(
                                self.unknown_command_handler(
                                    self, **received_update))
                        except ApiError as e:
                            if not self.ignore_403_in_handlers or str(
                                    e.code) != '403':
                                raise
                            else:
                                logging.exception(
                                    'Got exception in message handler')
            except:
                logging.exception(
                    '[bot#%s] Got error while processing message %s',
                    self.bot_id, dumps(received_update, indent=2))

            self.updates_queue.task_done()

    def __getattr__(self, name):
        def outer_wrapper(f):
            @wraps(f)
            def wrapper(*args, **kwargs):
                l = locale.get('en_US')
                if self.SETTINGS_TYPE == self.SETTINGS_PER_BOT:
                    l = locale.get(self.settings.get('locale', 'en_US'))
                elif self.SETTINGS_TYPE == self.SETTINGS_PER_USER:
                    chat_id = None
                    if 'reply_to_message' in kwargs:
                        if 'chat' in kwargs['reply_to_message']:
                            chat_id = kwargs['reply_to_message']['chat']['id']
                        elif 'from' in kwargs['reply_to_message']:
                            chat_id = kwargs['reply_to_message']['from']['id']
                    elif 'chat_id' in kwargs:
                        chat_id = kwargs['chat_id']

                    if chat_id in self.user_settings:
                        l = locale.get(self.user_settings[chat_id].get(
                            'locale', 'en_US'))

                return f(*set_locale_recursive(args, l),
                         **set_locale_recursive(kwargs, l))

            return wrapper

        if hasattr(self.api, name):
            attr = getattr(self.api, name)
            if isinstance(attr, type(self.stop)):
                return outer_wrapper(attr)
            else:
                return attr
        else:
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (self.__class__.__name__, name))
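
The __getattr__ above proxies unknown attributes to the underlying Api object and, for callables, wraps the call so the arguments can be localised first. A minimal sketch of that delegation pattern, stripped of the locale handling (the Proxy/_target names are illustrative, not from the project):

from functools import wraps


class Proxy:
    def __init__(self, target):
        self._target = target

    def __getattr__(self, name):
        # Only called for attributes not found on the proxy itself;
        # raises AttributeError naturally if the target lacks them too.
        attr = getattr(self._target, name)
        if not callable(attr):
            return attr

        @wraps(attr)
        def wrapper(*args, **kwargs):
            # The real bot rewrites args/kwargs here (set_locale_recursive)
            # before forwarding the call to the Api method.
            return attr(*args, **kwargs)

        return wrapper
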
Code Example #14
0
File: telegram.py Project: andrey-yantsen/tobot
class Api:
    STATE_WORKING = 0
    STATE_STOP_PENDING = 1
    STATE_STOPPED = 2

    CHAT_ACTION_TYPING = 'typing'
    CHAT_ACTION_UPLOAD_PHOTO = 'upload_photo'
    CHAT_ACTION_RECORD_VIDEO = 'record_video'
    CHAT_ACTION_UPLOAD_VIDEO = 'upload_video'
    CHAT_ACTION_RECORD_AUDIO = 'record_audio'
    CHAT_ACTION_UPLOAD_AUDIO = 'upload_audio'
    CHAT_ACTION_UPLOAD_DOC = 'upload_document'
    CHAT_ACTION_FIND_LOCATION = 'find_location'

    PARSE_MODE_NONE = None
    PARSE_MODE_MD = 'Markdown'
    PARSE_MODE_HTML = 'HTML'

    def __init__(self, token, processor):
        if ':' in token:
            self.bot_id, _ = token.split(':')
            if self.bot_id.isdigit():
                self.bot_id = int(self.bot_id)
            else:
                raise ValueError('Non well-formatted token given')
        else:
            raise ValueError('Non well-formatted token given')

        self.token = token
        self.consumption_state = self.STATE_STOPPED
        self.processor = processor
        self.__me = None
        self._finished = Event()
        self._finished.set()

    @coroutine
    def get_me(self):
        if not self.__me:
            self.__me = yield self.__request_api('getMe')

        return self.__me

    def stop(self):
        assert not self._finished.is_set()
        self._finished.set()

    @property
    def is_alive(self):
        return not self._finished.is_set()

    @coroutine
    def __request_api(self, method, body=None, request_timeout=10, retry_on_nonuser_error=False):
        def guess_filename(obj):
            """Tries to guess the filename of the given object."""
            name = getattr(obj, 'name', None)
            if name and isinstance(name, str) and name[0] != '<' and name[-1] != '>':
                return basename(name)

        url = 'https://api.telegram.org/bot{token}/{method}'.format(token=self.token, method=method)
        try:
            request = {
                'request_timeout': request_timeout,
                'headers': {},
            }

            if body:
                request['method'] = 'POST'
                request_content = {}
                has_files = False
                file_names = {}
                for key, value in body.items():
                    if hasattr(value, 'read'):
                        request_content[key] = value.read()
                        file_names[key] = guess_filename(value)
                        has_files = True
                    else:
                        request_content[key] = value

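                # If any parameter is file-like, build a multipart/form-data
                # body by hand with a generated boundary; otherwise the
                # payload is serialised as JSON (else branch below).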
                if has_files:
                    boundary = md5(str(time()).encode('utf-8')).hexdigest()
                    request['headers']['Content-type'] = 'multipart/form-data; boundary=' + boundary

                    body = []
                    for key, value in request_content.items():
                        body.append(b'--' + boundary.encode('utf-8'))
                        if key in file_names:
                            body.append(('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, file_names[key])).encode('utf-8'))
                        else:
                            body.append(('Content-Disposition: form-data; name="%s"' % key).encode('utf-8'))

                        body.append(b'')
                        if isinstance(value, int):
                            value = str(value)
                        if isinstance(value, str):
                            value = value.encode('utf-8')
                        body.append(value)
                    body.append(b'--' + boundary.encode('utf-8') + b'--')
                    body = b"\r\n" + b"\r\n".join(body) + b"\r\n"
                else:
                    request['headers']['Content-type'] = 'application/json'
                    body = ujson.dumps(request_content)
            else:
                request['method'] = 'GET'

            while True:
                try:
                    response = yield AsyncHTTPClient().fetch(url, body=body, **request)
                    break
                except HTTPError as e:
                    if not retry_on_nonuser_error or 400 <= e.code < 500:
                        raise
                    else:
                        yield sleep(5)

            if response and response.body:
                response = ujson.loads(response.body.decode('utf-8'))
                if response['ok']:
                    return response['result']
                else:
                    raise ApiError(response['error_code'], response['description'], response.get('parameters'),
                                   request_body=body)
        except HTTPError as e:
            if e.code == 599:
                logging.exception('%s request timed out', method)  # Do nothing on timeout, just return None
            elif e.response and e.response.body:
                response = ujson.loads(e.response.body.decode('utf-8'))
                raise ApiError(response['error_code'], response['description'], response.get('parameters'),
                               request_body=body)
            else:
                raise ApiError(e.code, None, request_body=body)

        return None

    @coroutine
    def get_updates(self, offset: int=None, limit: int=100, timeout: int=2, retry_on_nonuser_error: bool=False):
        assert 1 <= limit <= 100
        assert 0 <= timeout

        request = {
            'limit': limit,
            'timeout': timeout
        }

        if offset is not None:
            request['offset'] = offset

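        # Use an HTTP request timeout longer than the long-poll timeout so
        # the request is not aborted locally before Telegram responds.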
        data = yield self.__request_api('getUpdates', request, request_timeout=timeout * 1.5,
                                        retry_on_nonuser_error=retry_on_nonuser_error)

        if data is None:
            return []

        return data

    @coroutine
    def wait_commands(self, last_update_id=None):
        assert self._finished.is_set()

        self._finished.clear()

        self.consumption_state = self.STATE_WORKING

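        # getUpdates' offset must be one past the last update already
        # handled, otherwise the same updates would be delivered again.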
        if last_update_id is not None:
            last_update_id += 1

        yield self.get_me()

        while not self._finished.is_set():
            try:
                updates = yield self.get_updates(last_update_id, retry_on_nonuser_error=True)
            except:
                self._finished.set()
                raise

            for update in updates:
                yield maybe_future(self.processor(update))
                if 'update_id' in update:
                    last_update_id = update['update_id']

            if len(updates):
                last_update_id += 1

    @coroutine
    def send_chat_action(self, chat_id, action: str):
        return (yield self.__request_api('sendChatAction', {'chat_id': chat_id, 'action': action}))

    @coroutine
    def send_message(self, text: str, chat_id=None, reply_to_message: dict=None, parse_mode: str=None,
                     disable_web_page_preview: bool=False, disable_notification: bool=False,
                     reply_to_message_id: int=None, reply_markup=None):
        request = {
            'chat_id': chat_id,
            'text': text,
            'disable_web_page_preview': disable_web_page_preview,
            'disable_notification': disable_notification,
        }

        if parse_mode is not None:
            request['parse_mode'] = parse_mode

        if reply_to_message_id is not None:
            request['reply_to_message_id'] = reply_to_message_id

        if reply_to_message:
            if chat_id is None:
                request['chat_id'] = reply_to_message['chat']['id']
                if reply_to_message['chat']['id'] != reply_to_message['from']['id']:
                    request['reply_to_message_id'] = reply_to_message['message_id']
            else:
                request['reply_to_message_id'] = reply_to_message['message_id']
        else:
            assert chat_id is not None

        if reply_markup is not None:
            request['reply_markup'] = reply_markup

        try:
            return (yield self.__request_api('sendMessage', request))
        except ApiError as e:
            if e.code == 400 and e.description.startswith("Bad Request: Can\'t parse"):
                logging.exception('Got exception while sending text: %s', text)
            raise

    @coroutine
    def send_photo(self, chat_id, photo, caption: str=None, disable_notification: bool=False,
                   reply_to_message_id: int=None, reply_markup=None):
        request = {
            'chat_id': chat_id,
            'photo': photo,
            'disable_notification': disable_notification,
        }

        if caption is not None:
            request['caption'] = caption

        if reply_to_message_id is not None:
            request['reply_to_message_id'] = reply_to_message_id

        if reply_markup is not None:
            request['reply_markup'] = reply_markup

        return (yield self.__request_api('sendPhoto', request))

    @coroutine
    def forward_message(self, chat_id, from_chat_id, message_id: int, disable_notification: bool=False):
        return (yield self.__request_api('forwardMessage', {
            'chat_id': chat_id,
            'from_chat_id': from_chat_id,
            'disable_notification': disable_notification,
            'message_id': message_id,
        }))

    @staticmethod
    def _prepare_inline_message(message=None, chat_id=None, message_id=None, inline_message_id=None):
        request = {}

        if message:
            request['chat_id'] = message['chat']['id']
            request['message_id'] = message['message_id']
        elif chat_id and message_id:
            request['chat_id'] = chat_id
            request['message_id'] = message_id
        else:
            request['inline_message_id'] = inline_message_id

        return request

    @coroutine
    def edit_message_reply_markup(self, message=None, chat_id=None, message_id=None, inline_message_id=None,
                                  reply_markup=None):
        assert (chat_id and message_id) or message or inline_message_id

        request = self._prepare_inline_message(message=message, chat_id=chat_id, message_id=message_id,
                                               inline_message_id=inline_message_id)

        if reply_markup:
            request['reply_markup'] = reply_markup

        return (yield self.__request_api('editMessageReplyMarkup', request))

    @coroutine
    def edit_message_text(self, text, message=None, chat_id=None, message_id=None, inline_message_id=None,
                          parse_mode=None, disable_web_page_preview=False, reply_markup=None):
        request = self._prepare_inline_message(message=message, chat_id=chat_id, message_id=message_id,
                                               inline_message_id=inline_message_id)

        if parse_mode is not None:
            request['parse_mode'] = parse_mode

        request['disable_web_page_preview'] = disable_web_page_preview
        request['text'] = text

        if reply_markup is not None:
            request['reply_markup'] = reply_markup

        return (yield self.__request_api('editMessageText', request))

    @coroutine
    def answer_callback_query(self, callback_query_id, text=None, show_alert=False):
        request = {
            'callback_query_id': callback_query_id,
            'show_alert': show_alert
        }

        if text:
            request['text'] = text

        return (yield self.__request_api('answerCallbackQuery', request))

    @coroutine
    def get_chat_administrators(self, chat_id):
        return (yield self.__request_api('getChatAdministrators', {'chat_id': chat_id}))

    @coroutine
    def get_chat(self, chat_id):
        return (yield self.__request_api('getChat', {'chat_id': chat_id}))
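
A rough usage sketch for the Api class above: construct it with a bot token and an update processor, run the long-polling loop on a Tornado IOLoop, and stop it to let wait_commands() return. It assumes the class and its imports are available; the token value, the 60-second run time and the trivial processor are placeholders, not part of the project:

from tornado.ioloop import IOLoop


def process_update(update):
    # A trivial processor: just print the raw update dict.
    print(update)


def main():
    api = Api('123456:replace-with-a-real-token', process_update)
    # Ask the polling loop to finish after a while so run_sync returns.
    IOLoop.current().call_later(60, api.stop)
    IOLoop.current().run_sync(api.wait_commands)


if __name__ == '__main__':
    main()
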
Code Example #16
0
class MockFitsWriterClient(object):
    """
    Wrapper class for a KATCP client to an EddFitsWriterServer
    """
    def __init__(self, address, record_dest):
        """
        @brief      Construct new instance
                    If record_dest is not empty, create a folder named record_dest and record the received packages there.
        """
        self._address = address
        self.__record_dest = record_dest
        if record_dest:
            if not os.path.isdir(record_dest):
                os.makedirs(record_dest)
        self._ioloop = IOLoop.current()
        self._stop_event = Event()
        self._is_stopped = Condition()
        self._socket = None
        self.__last_package = 0

    def reset_connection(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.setblocking(False)
        try:
            self._socket.connect(self._address)
        except socket.error as error:
            if error.args[0] == errno.EINPROGRESS:
                pass
            else:
                raise error

    @coroutine
    def recv_nbytes(self, nbytes):
        received_bytes = 0
        data = b''
        while received_bytes < nbytes:
            if self._stop_event.is_set():
                raise StopEvent
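            # The socket is non-blocking, so EAGAIN/EWOULDBLOCK below just
            # means no data is available yet; back off briefly and retry.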
            try:
                log.debug("Requesting {} bytes".format(nbytes -
                                                       received_bytes))
                current_data = self._socket.recv(nbytes - received_bytes)
                received_bytes += len(current_data)
                data += current_data
                log.debug("Received {} bytes ({} of {} bytes)".format(
                    len(current_data), received_bytes, nbytes))
            except socket.error as error:
                error_id = error.args[0]
                if error_id == errno.EAGAIN or error_id == errno.EWOULDBLOCK:
                    yield sleep(0.1)
                else:
                    log.exception("Unexpected error on socket recv: {}".format(
                        str(error)))
                    raise error
        raise Return(data)

    @coroutine
    def recv_loop(self):
        while not self._stop_event.is_set():
            try:
                header, sections = yield self.recv_packet()
            except StopEvent:
                log.debug("Notifying that recv calls have stopped")
                # Wake up stop(), which is waiting on this condition.
                self._is_stopped.notify()
            except Exception as E:
                log.exception("Failure while receiving packet: {}".format(E))

    def start(self):
        self._stop_event.clear()
        self.reset_connection()
        self._ioloop.add_callback(self.recv_loop)

    @coroutine
    def stop(self, timeout=2):
        self._stop_event.set()
        try:
            success = yield self._is_stopped.wait(timeout=self._ioloop.time() +
                                                  timeout)
            if not success:
                raise TimeoutError
        except TimeoutError:
            log.error(("Could not stop the client within "
                       "the {} second limit").format(timeout))
        except Exception:
            log.exception("Fucup")

    @coroutine
    def recv_packet(self):
        log.debug("Receiving packet header")
        raw_header = yield self.recv_nbytes(C.sizeof(FWHeader))
        log.debug("Converting packet header")
        header = FWHeader.from_buffer_copy(raw_header)
        log.info("Received header: {}".format(header))
        if header.timestamp < self.__last_package:
            log.error("Timestamps out of order!")
        else:
            self.__last_package = header.timestamp

        if self.__record_dest:
            filename = os.path.join(self.__record_dest,
                                    "FWP_{}.dat".format(header.timestamp))
            while os.path.isfile(filename):
                log.warning('Filename {} already exists. Adding suffix _'.format(
                    filename))
                filename += '_'
            log.info('Recording to file {}'.format(filename))
            ofile = open(filename, 'wb')
            ofile.write(raw_header)

        fw_data_type = header.channel_data_type.strip().upper()
        c_data_type, np_data_type = TYPE_MAP[fw_data_type]
        sections = []
        for section in range(header.nsections):
            log.debug("Receiving section {} of {}".format(
                section + 1, header.nsections))
            raw_section_header = yield self.recv_nbytes(
                C.sizeof(FWSectionHeader))
            if self.__record_dest:
                ofile.write(raw_section_header)

            section_header = FWSectionHeader.from_buffer_copy(
                raw_section_header)
            log.info("Section {} header: {}".format(section, section_header))
            log.debug("Receiving section data")
            raw_bytes = yield self.recv_nbytes(
                C.sizeof(c_data_type) * section_header.nchannels)
            if self.__record_dest:
                ofile.write(raw_bytes)
            data = np.frombuffer(raw_bytes, dtype=np_data_type)
            log.info("Section {} data: {}".format(section, data[:10]))
            sections.append((section_header, data))

        if self.__record_dest:
            ofile.close()
        raise Return((header, sections))
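
A rough usage sketch for the mock client above: point it at the address the FITS writer sends to, let the receive loop run on the current IOLoop for a while, then stop it. It assumes the class and its dependencies are importable; the address, recording directory and 30-second run time are placeholders:

from tornado.ioloop import IOLoop


def run_mock_client():
    client = MockFitsWriterClient(("127.0.0.1", 5002), "recorded_packets")
    client.start()
    ioloop = IOLoop.current()

    def shutdown():
        # stop() is a coroutine; stop the IOLoop once its future resolves.
        ioloop.add_future(client.stop(), lambda _: ioloop.stop())

    ioloop.call_later(30, shutdown)
    ioloop.start()
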
Code Example #17
0
class XEngineOperations(object):

    def __init__(self, corr_obj):
        """
        A collection of x-engine operations that act on/with a correlator
        instance.
        :param corr_obj: the FxCorrelator instance
        :return:
        """
        self.corr = corr_obj
        self.hosts = corr_obj.xhosts
        self.logger = corr_obj.logger
        self.data_stream = None

        self.vacc_synch_running = IOLoopEvent()
        self.vacc_synch_running.clear()
        self.vacc_check_enabled = IOLoopEvent()
        self.vacc_check_enabled.clear()
        self.vacc_check_cb = None
        self.vacc_check_cb_data = None

    @staticmethod
    def _gberst(hosts, state):
        THREADED_FPGA_OP(
            hosts, timeout=5,
            target_function=(
                lambda fpga_:
                fpga_.registers.control.write(gbe_rst=state),))

    def initialise_post_gbe(self):
        """
        Perform post-gbe setup initialisation steps
        :return:
        """
        # write the board IDs to the xhosts
        board_id = 0
        for f in self.hosts:
            f.registers.board_id.write(reg=board_id)
            board_id += 1

        # write the data stream destination to the registers
        self.write_data_stream_destination(None)

        # clear gbe status
        THREADED_FPGA_OP(
            self.hosts, timeout=5,
            target_function=(
                lambda fpga_:
                fpga_.registers.control.write(gbe_debug_rst='pulse'),))

        # release cores from reset
        XEngineOperations._gberst(self.hosts, False)

        # simulator
        if use_xeng_sim:
            THREADED_FPGA_OP(
                self.hosts, timeout=5,
                target_function=(
                    lambda fpga_: fpga_.registers.simulator.write(en=True),))

        # set up accumulation length
        self.set_acc_len(vacc_resync=False)

        # clear general status
        THREADED_FPGA_OP(
            self.hosts, timeout=5,
            target_function=(
                lambda fpga_:
                fpga_.registers.control.write(status_clr='pulse'),))

        # check for errors
        # TODO - read status regs?

    def initialise_pre_gbe(self):
        """
        Set up x-engines on this device.
        :return:
        """
        # simulator
        if use_xeng_sim:
            THREADED_FPGA_OP(
                self.hosts, timeout=5,
                target_function=(
                    lambda fpga_: fpga_.registers.simulator.write(
                        en=False, rst='pulse'),))

        # set the gapsize register
        gapsize = int(self.corr.configd['xengine']['10gbe_pkt_gapsize'])
        self.logger.info('X-engines: setting packet gap size to %i' % gapsize)
        if 'gapsize' in self.hosts[0].registers.names():
            # these versions have the correct logic surrounding the register
            THREADED_FPGA_OP(
                self.hosts, timeout=5,
                target_function=(
                    lambda fpga_: fpga_.registers.gapsize.write_int(gapsize),))
        elif 'gap_size' in self.hosts[0].registers.names():
            # these versions do not, they need a software hack for the setting
            # to 'take'
            THREADED_FPGA_OP(
                self.hosts, timeout=5,
                target_function=(
                    lambda fpga_: fpga_.registers.gap_size.write_int(gapsize),))
            # HACK - this is a hack to overcome broken x-engine firmware in
            # versions around a2d0615bc9cd95eabf7c8ed922c1a15658c0688e.
            # The logic next to the gap_size register is broken, registering
            # the LAST value written, not the new one.
            THREADED_FPGA_OP(
                self.hosts, timeout=5,
                target_function=(
                    lambda fpga_: fpga_.registers.gap_size.write_int(
                        gapsize-1),))
            # /HACK
        else:
            _errmsg = 'X-engine image has no register gap_size/gapsize?'
            self.logger.exception(_errmsg)
            raise RuntimeError(_errmsg)

        # disable transmission, place cores in reset, and give control
        # register a known state
        self.xeng_tx_disable(None)

        XEngineOperations._gberst(self.hosts, True)

        self.clear_status_all()

    def configure(self):
        """
        Configure the xengine operations - this is done whenever a correlator
        is instantiated.
        :return:
        """
        # set up the xengine data stream
        self._setup_data_stream()

    def _setup_data_stream(self):
        """
        Set up the data stream for the xengine output
        :return:
        """
        # the x-engine output data stream setup
        _xeng_d = self.corr.configd['xengine']

        data_addr = NetAddress(_xeng_d['output_destination_ip'],
                               _xeng_d['output_destination_port'])
        meta_addr = NetAddress(_xeng_d['output_destination_ip'],
                               _xeng_d['output_destination_port'])

        xeng_stream = data_stream.DataStream(
            name=_xeng_d['output_products'][0],
            category=data_stream.XENGINE_CROSS_PRODUCTS,
            destination=data_addr,
            meta_destination=meta_addr,
            destination_cb=self.write_data_stream_destination,
            meta_destination_cb=self.spead_meta_issue_all,
            tx_enable_method=self.xeng_tx_enable,
            tx_disable_method=self.xeng_tx_disable)

        self.data_stream = xeng_stream
        self.corr.register_data_stream(xeng_stream)
        self.vacc_check_enabled.clear()
        self.vacc_synch_running.clear()
        if self.vacc_check_cb is not None:
            self.vacc_check_cb.stop()
        self.vacc_check_cb = None

    def _vacc_periodic_check(self):

        self.logger.debug('Checking vacc operation @ %s' % time.ctime())

        if not self.vacc_check_enabled.is_set():
            self.logger.info('Check logic disabled, exiting')
            return

        if self.vacc_synch_running.is_set():
            self.logger.info('vacc_sync is currently running, exiting')
            return

        def get_data():
            """
            Get the relevant data from the X-engine FPGAs
            """
            # older versions had other register names
            _OLD = 'reorderr_timeout0' in self.hosts[0].registers.names()

            def _get_reorder_data(fpga):
                rv = {}
                for _ctr in range(0, fpga.x_per_fpga):
                    if _OLD:
                        _reg = fpga.registers['reorderr_timeout%i' % _ctr]
                        rv['etim%i' % _ctr] = _reg.read()['data']['reg']
                    else:
                        _reg = fpga.registers['reorderr_timedisc%i' % _ctr]
                        rv['etim%i' % _ctr] = _reg.read()['data']['timeout']
                return rv
            reo_data = THREADED_FPGA_OP(self.hosts, timeout=5,
                                        target_function=_get_reorder_data)
            vacc_data = self.vacc_status()
            return {'reorder': reo_data, 'vacc': vacc_data}
    
        def _vacc_data_check(d0, d1):
            # check errors are not incrementing
            for host in self.hosts:
                for xeng in range(0, host.x_per_fpga):
                    status0 = d0[host.host][xeng]
                    status1 = d1[host.host][xeng]
                    if ((status1['errors'] > status0['errors']) or
                            (status0['errors'] != 0)):
                        self.logger.error('    vacc %i on %s has '
                                          'errors' % (xeng, host.host))
                        return False
            # check that the accumulations are ticking over
            for host in self.hosts:
                for xeng in range(0, host.x_per_fpga):
                    status0 = d0[host.host][xeng]
                    status1 = d1[host.host][xeng]
                    if status1['count'] == status0['count']:
                        self.logger.error('    vacc %i on %s is not '
                                          'incrementing' % (xeng, host.host))
                        return False
            return True
    
        def _reorder_data_check(d0, d1):
            for host in self.hosts:
                for ctr in range(0, host.x_per_fpga):
                    reg = 'etim%i' % ctr
                    if d0[host.host][reg] != d1[host.host][reg]:
                        self.logger.error('    %s - vacc check reorder '
                                          'reg %s error' % (host.host, reg))
                        return False
            return True
    
        new_data = get_data()
        # self.logger.info('new_data: %s' % new_data)
    
        if self.vacc_check_cb_data is not None:
            force_sync = False
            # check the vacc status data first
            if not _vacc_data_check(self.vacc_check_cb_data['vacc'],
                                    new_data['vacc']):
                force_sync = True
            # check the reorder data
            if not force_sync:
                if not _reorder_data_check(self.vacc_check_cb_data['reorder'],
                                           new_data['reorder']):
                    force_sync = True
            if force_sync:
                self.logger.error('    forcing vacc sync')
                self.vacc_sync()

        self.corr.logger.debug('scheduled check done @ %s' % time.ctime())
        self.vacc_check_cb_data = new_data

    def vacc_check_timer_stop(self):
        """
        Disable the vacc_check timer
        :return:
        """
        if self.vacc_check_cb is not None:
            self.vacc_check_cb.stop()
        self.vacc_check_cb = None
        self.vacc_check_enabled.clear()
        self.corr.logger.info('vacc check timer stopped')

    def vacc_check_timer_start(self, vacc_check_time=30):
        """
        Set up a periodic check on the vacc operation.
        :param vacc_check_time: the interval, in seconds, at which to check
        :return:
        """
        if not IOLoop.current()._running:
            raise RuntimeError('IOLoop not running, this will not work')
        self.logger.info('xeng_setup_vacc_check_timer: setting up the '
                         'vacc check timer at %i seconds' % vacc_check_time)
        if vacc_check_time < self.get_acc_time():
            raise RuntimeError('A check time smaller than the accumulation '
                               'time makes no sense.')
        if self.vacc_check_cb is not None:
            self.vacc_check_cb.stop()
        self.vacc_check_cb = PeriodicCallback(self._vacc_periodic_check,
                                              vacc_check_time * 1000)
        self.vacc_check_enabled.set()
        self.vacc_check_cb.start()
        self.corr.logger.info('vacc check timer started')

    def write_data_stream_destination(self, data_stream):
        """
        Write the x-engine data stream destination to the hosts.
        :param data_stream - the data stream on which to act
        :return:
        """
        dstrm = data_stream or self.data_stream
        txip = int(dstrm.destination.ip)
        txport = dstrm.destination.port
        try:
            THREADED_FPGA_OP(
                self.hosts, timeout=10,
                target_function=(lambda fpga_:
                                 fpga_.registers.gbe_iptx.write(reg=txip),))
            THREADED_FPGA_OP(
                self.hosts, timeout=10,
                target_function=(lambda fpga_:
                                 fpga_.registers.gbe_porttx.write(reg=txport),))
        except AttributeError:
            self.logger.warning('Writing stream %s destination to '
                                'hardware failed!' % dstrm.name)

        # update meta data on stream destination change
        self.spead_meta_update_stream_destination()
        dstrm.meta_transmit()

        self.logger.info('Wrote stream %s destination to %s in hardware' % (
            dstrm.name, dstrm.destination))

    def clear_status_all(self):
        """
        Clear the various status registers and counters on all the fengines
        :return:
        """
        THREADED_FPGA_FUNC(self.hosts, timeout=10,
                           target_function='clear_status')

    def subscribe_to_multicast(self):
        """
        Subscribe the x-engines to the f-engine output multicast groups -
        each one subscribes to only one group, with data meant only for it.
        :return:
        """
        if self.corr.fengine_output.is_multicast():
            self.logger.info('F > X is multicast from base %s' %
                             self.corr.fengine_output)
            source_address = str(self.corr.fengine_output.ip_address)
            source_bits = source_address.split('.')
            source_base = int(source_bits[3])
            source_prefix = '%s.%s.%s.' % (source_bits[0],
                                           source_bits[1],
                                           source_bits[2])
            source_ctr = 0
            for host_ctr, host in enumerate(self.hosts):
                for gbe in host.tengbes:
                    rxaddress = '%s%d' % (source_prefix,
                                          source_base + source_ctr)
                    gbe.multicast_receive(rxaddress, 0)

                    # CLUDGE
                    source_ctr += 1
                    # source_ctr += 4

                    self.logger.info('\txhost %s %s subscribing to address %s' %
                                     (host.host, gbe.name, rxaddress))
        else:
            self.logger.info('F > X is unicast from base %s' %
                             self.corr.fengine_output)

    def check_rx(self, max_waittime=30):
        """
        Check that the x hosts are receiving data correctly
        :param max_waittime:
        :return:
        """
        self.logger.info('Checking X hosts are receiving data...')
        results = THREADED_FPGA_FUNC(
            self.hosts, timeout=max_waittime+1,
            target_function=('check_rx', (max_waittime,),))
        all_okay = True
        for _v in results.values():
            all_okay = all_okay and _v
        if not all_okay:
            self.logger.error('\tERROR in X-engine rx data.')
        self.logger.info('\tdone.')
        return all_okay

    def vacc_status(self):
        """
        Get a dictionary of the vacc status registers for all
        x-engines.
        :return: {}
        """
        return THREADED_FPGA_FUNC(self.hosts, timeout=10,
                                  target_function='vacc_get_status')

    def _vacc_sync_check_reset(self):
        """
        Do the vaccs need resetting before a synch?
        :return:
        """
        vaccstat = THREADED_FPGA_FUNC(
            self.hosts, timeout=10,
            target_function='vacc_check_arm_load_counts')
        reset_required = False
        for xhost, result in vaccstat.items():
            if result:
                self.logger.info('xeng_vacc_sync: %s has a vacc that '
                                 'needs resetting' % xhost)
                reset_required = True

        if reset_required:
            THREADED_FPGA_FUNC(self.hosts, timeout=10,
                               target_function='vacc_reset')
            vaccstat = THREADED_FPGA_FUNC(
                self.hosts, timeout=10,
                target_function='vacc_check_reset_status')
            for xhost, result in vaccstat.items():
                if not result:
                    errstr = 'xeng_vacc_sync: resetting vaccs on ' \
                             '%s failed.' % xhost
                    self.logger.error(errstr)
                    raise RuntimeError(errstr)

    def _vacc_sync_create_loadtime(self, min_loadtime):
        """
        Calculate the load time for the vacc synch based on a
        given minimum load time
        :param min_loadtime:
        :return: the vacc load time, in seconds since the UNIX epoch
        """
        # how long should we wait for the vacc load
        self.logger.info('Vacc sync time not specified. Syncing in '
                         '%2.2f seconds\' time.' % (min_loadtime*2))
        t_now = time.time()
        vacc_load_time = t_now + (min_loadtime*2)

        if vacc_load_time < (t_now + min_loadtime):
            raise RuntimeError(
                'Cannot load at a time in the past. '
                'Need at least %2.2f seconds lead time. You asked for '
                '%s.%i, and it is now %s.%i.' % (
                    min_loadtime,
                    time.strftime('%H:%M:%S', time.gmtime(vacc_load_time)),
                    (vacc_load_time-int(vacc_load_time))*100,
                    time.strftime('%H:%M:%S', time.gmtime(t_now)),
                    (t_now-int(t_now))*100))

        self.logger.info('    xeng vaccs will sync at %s (in %2.2fs)'
                         % (time.ctime(t_now), vacc_load_time-t_now))
        return vacc_load_time

    def _vacc_sync_calc_load_mcount(self, vacc_loadtime):
        """
        Calculate the loadtime in clock ticks
        :param vacc_loadtime:
        :return:
        """
        ldmcnt = int(self.corr.mcnt_from_time(vacc_loadtime))
        self.logger.debug('$$$$$$$$$$$ - ldmcnt = %i' % ldmcnt)
        _ldmcnt_orig = ldmcnt
        _cfgd = self.corr.configd
        n_chans = int(_cfgd['fengine']['n_chans'])
        xeng_acc_len = int(_cfgd['xengine']['xeng_accumulation_len'])

        quantisation_bits = int(
            numpy.log2(n_chans) + 1 +
            numpy.log2(xeng_acc_len))

        self.logger.debug('$$$$$$$$$$$ - quant bits = %i' % quantisation_bits)
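        # Round the load mcount up to the next multiple of
        # 2**quantisation_bits, presumably so the load lands on an
        # x-engine accumulation boundary.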
        ldmcnt = ((ldmcnt >> quantisation_bits) + 1) << quantisation_bits
        self.logger.debug('$$$$$$$$$$$ - ldmcnt quantised = %i' % ldmcnt)
        self.logger.debug('$$$$$$$$$$$ - ldmcnt diff = %i' % (
            ldmcnt - _ldmcnt_orig))
        if _ldmcnt_orig > ldmcnt:
            raise RuntimeError('Quantising the ldmcnt has broken it: %i -> '
                               '%i, diff(%i)' % (_ldmcnt_orig, ldmcnt,
                                                 ldmcnt - _ldmcnt_orig))
        time_from_mcnt = self.corr.time_from_mcnt(ldmcnt)
        t_now = time.time()
        if time_from_mcnt <= t_now:
            self.logger.warn('    Warning: the board timestamp has probably'
                             ' wrapped! mcnt_time(%.3f) time.time(%.3f)' %
                             (time_from_mcnt, t_now))
        return ldmcnt

    def _vacc_sync_print_vacc_statuses(self, vstatus):
        """
        Print the vacc statuses to the logger
        :param vstatus:
        :return:
        """
        self.logger.info('vacc statii:')
        for _host in self.hosts:
            self.logger.info('    %s:' % _host.host)
            for _ctr, _status in enumerate(vstatus[_host.host]):
                self.logger.info('        %i: %s' % (_ctr, _status))

    def _vacc_sync_check_counts_initial(self):
        """
        Check the arm and load counts initially
        :return:
        """
        # read the current arm and load counts
        vacc_status = self.vacc_status()
        arm_count0 = vacc_status[self.hosts[0].host][0]['armcount']
        load_count0 = vacc_status[self.hosts[0].host][0]['loadcount']
        # check the xhosts load and arm counts
        for host in self.hosts:
            for status in vacc_status[host.host]:
                _bad_ldcnt = status['loadcount'] != load_count0
                _bad_armcnt = status['armcount'] != arm_count0
                if _bad_ldcnt or _bad_armcnt:
                    _err = 'All hosts do not have matching arm and ' \
                           'load counts.'
                    self.logger.error(_err)
                    self._vacc_sync_print_vacc_statuses(vacc_status)
                    raise RuntimeError(_err)
        self.logger.info('    Before arming: arm_count(%i) load_count(%i)' %
                         (arm_count0, load_count0))
        return arm_count0, load_count0

    def _vacc_sync_check_arm_count(self, armcount_initial):
        """
        Check that the arm count increased
        :return:
        """
        vacc_status = self.vacc_status()
        arm_count_new = vacc_status[self.hosts[0].host][0]['armcount']
        for host in self.hosts:
            for status in vacc_status[host.host]:
                if ((status['armcount'] != arm_count_new) or
                        (status['armcount'] != armcount_initial + 1)):
                    _err = 'xeng_vacc_sync: all hosts do not have ' \
                           'matching arm counts or arm count did ' \
                           'not increase.'
                    self.logger.error(_err)
                    self._vacc_sync_print_vacc_statuses(vacc_status)
                    return False
        self.logger.info('    Done arming')
        return True

    def _vacc_sync_check_loadtimes(self):
        """

        :return:
        """
        lsws = THREADED_FPGA_OP(
            self.hosts, timeout=10,
            target_function=(
                lambda x: x.registers.vacc_time_lsw.read()['data']),)
        msws = THREADED_FPGA_OP(
            self.hosts, timeout=10,
            target_function=(
                lambda x: x.registers.vacc_time_msw.read()['data']),)
        _host0 = self.hosts[0].host
        for host in self.hosts:
            if ((lsws[host.host]['lsw'] != lsws[_host0]['lsw']) or
                    (msws[host.host]['msw'] != msws[_host0]['msw'])):
                _err = 'xeng_vacc_sync: all hosts do not have matching ' \
                       'vacc LSWs and MSWs'
                self.logger.error(_err)
                self.logger.error('LSWs: %s' % lsws)
                self.logger.error('MSWs: %s' % msws)
                vacc_status = self.vacc_status()
                self._vacc_sync_print_vacc_statuses(vacc_status)
                return False
        lsw = lsws[self.hosts[0].host]['lsw']
        msw = msws[self.hosts[0].host]['msw']
        xldtime = (msw << 32) | lsw
        self.logger.info('    x engines have vacc ld time %i' % xldtime)
        return True

    def _vacc_sync_wait_for_arm(self, load_mcount):
        """

        :param load_mcount:
        :return:
        """
        t_now = time.time()
        time_from_mcnt = self.corr.time_from_mcnt(load_mcount)
        wait_time = time_from_mcnt - t_now + 0.2
        if wait_time <= 0:
            self.logger.error('    This is wonky - why is the wait_time '
                              'less than zero? %.3f' % wait_time)
            self.logger.error('    corr synch epoch: %i' %
                              self.corr.get_synch_time())
            self.logger.error('    time.time(): %.10f' % t_now)
            self.logger.error('    time_from_mcnt: %.10f' % time_from_mcnt)
            self.logger.error('    ldmcnt: %i' % load_mcount)
            # hack: fall back to a short fixed wait instead of a (bogus)
            # absolute timestamp
            wait_time = 4

        self.logger.info('    Waiting %2.2f seconds for arm to '
                         'trigger.' % wait_time)
        time.sleep(wait_time)

    def _vacc_sync_check_load_count(self, load_count0):
        """
        Did the vaccs load counts increment correctly?
        :param load_count0:
        :return:
        """
        vacc_status = self.vacc_status()
        load_count_new = vacc_status[self.hosts[0].host][0]['loadcount']
        for host in self.hosts:
            for status in vacc_status[host.host]:
                if ((status['loadcount'] != load_count_new) or
                        (status['loadcount'] != load_count0 + 1)):
                    self.logger.error('vacc did not trigger!')
                    self._vacc_sync_print_vacc_statuses(vacc_status)
                    return False
        self.logger.info('    All vaccs triggered correctly.')
        return True

    def _vacc_sync_final_check(self):
        """
        Check the vacc status, errors and accumulations
        :return:
        """
        self.logger.info('\tChecking for errors & accumulations...')
        vac_okay = self._vacc_check_okay_initial()
        if not vac_okay:
            vacc_status = self.vacc_status()
            vacc_error_detail = THREADED_FPGA_FUNC(
                self.hosts, timeout=5,
                target_function='vacc_get_error_detail')
            self.logger.error('\t\txeng_vacc_sync: exited on vacc error')
            self.logger.error('\t\txeng_vacc_sync: vacc statii:')
            for host, item in vacc_status.items():
                self.logger.error('\t\t\t%s: %s' % (host, str(item)))
            self.logger.error('\t\txeng_vacc_sync: vacc errors:')
            for host, item in vacc_error_detail.items():
                self.logger.error('\t\t\t%s: %s' % (host, str(item)))
            self.logger.error('\t\txeng_vacc_sync: exited on vacc error')
            return False
        self.logger.info('\t...accumulations rolling in without error.')
        return True

    def _vacc_check_okay_initial(self):
        """
        After an initial setup, is the vacc okay?
        Are the error counts zero and the counters
        ticking over?
        :return: True or False
        """
        vacc_status = self.vacc_status()
        note_errors = False
        for host in self.hosts:
            for xeng_ctr, status in enumerate(vacc_status[host.host]):
                _msgpref = '{h}:{x} - '.format(h=host, x=xeng_ctr)
                errs = status['errors']
                thresh = self.corr.qdr_vacc_error_threshold
                if (errs > 0) and (errs < thresh):
                    self.logger.warn(
                        '\t\t{pref}{thresh} > vacc errors > 0. Que '
                        'pasa?'.format(pref=_msgpref, thresh=thresh))
                    note_errors = True
                elif (errs > 0) and (errs >= thresh):
                    self.logger.error(
                        '\t\t{pref}vacc errors > {thresh}. Problems.'.format(
                            pref=_msgpref, thresh=thresh))
                    return False
                if status['count'] <= 0:
                    self.logger.error(
                        '\t\t{}vacc counts <= 0. Que pasa?'.format(_msgpref))
                    return False
        if note_errors:
            # investigate the errors further, what caused them?
            if self._vacc_non_parity_errors():
                self.logger.error('\t\t\tsome vacc errors, but they\'re not '
                                  'parity errors. Problems.')
                return False
            self.logger.info('\t\tvacc_check_okay_initial: mostly okay, some '
                             'QDR parity errors')
        else:
            self.logger.info('\t\tvacc_check_okay_initial: all okay')
        return True

    def _vacc_non_parity_errors(self):
        """
        Are VACC errors other than parity errors occurring?
        :return:
        """
        _loops = 2
        parity_errors = 0
        for ctr in range(_loops):
            detail = THREADED_FPGA_FUNC(
                self.hosts, timeout=5, target_function='vacc_get_error_detail')
            for xhost in detail:
                for vals in detail[xhost]:
                    for field in vals:
                        if vals[field] > 0:
                            if field != 'parity':
                                return True
                            else:
                                parity_errors += 1
            if ctr < _loops - 1:
                time.sleep(self.get_acc_time() * 1.1)
        if parity_errors == 0:
            self.logger.error('\t\tThat\'s odd, VACC errors reported but '
                              'nothing caused them?')
            return True
        return False

    def vacc_sync(self):
        """
        Sync the vector accumulators on all the x-engines.
        Assumes that the x-engines are all receiving data.
        :return: the vacc synch time, in seconds since the UNIX epoch
        """

        if self.vacc_synch_running.is_set():
            self.logger.error('vacc_sync called when it was already running?')
            return
        self.vacc_synch_running.set()
        min_load_time = 2

        attempts = 0
        try:
            while True:
                attempts += 1

                if attempts > MAX_VACC_SYNCH_ATTEMPTS:
                    raise VaccSynchAttemptsMaxedOut(
                        'Reached maximum vacc synch attempts, aborting')

                # check if the vaccs need resetting
                self._vacc_sync_check_reset()

                # estimate the sync time, if needed
                self._vacc_sync_calc_load_mcount(time.time())

                # work out the load time
                vacc_load_time = self._vacc_sync_create_loadtime(min_load_time)

                # set the vacc load time on the xengines
                load_mcount = self._vacc_sync_calc_load_mcount(vacc_load_time)

                # set the load mcount on the x-engines
                self.logger.info('    Applying load time: %i.' % load_mcount)
                THREADED_FPGA_FUNC(
                    self.hosts, timeout=10,
                    target_function=('vacc_set_loadtime', (load_mcount,),))

                # check the current counts
                (arm_count0,
                 load_count0) = self._vacc_sync_check_counts_initial()

                # arm the xhosts
                THREADED_FPGA_FUNC(
                    self.hosts, timeout=10, target_function='vacc_arm')

                # did the arm count increase?
                if not self._vacc_sync_check_arm_count(arm_count0):
                    continue

                # check that the load time was stored correctly
                if not self._vacc_sync_check_loadtimes():
                    continue

                # wait for the vaccs to arm
                self._vacc_sync_wait_for_arm(load_mcount)

                # check the status to see that the load count increased
                if not self._vacc_sync_check_load_count(load_count0):
                    continue

                # allow vacc to flush and correctly populate parity bits:
                self.logger.info('    Waiting %2.2fs for an accumulation to '
                                 'flush, to correctly populate parity bits.' %
                                 self.get_acc_time())
                time.sleep(self.get_acc_time() + 0.2)

                self.logger.info('    Clearing status and resetting counters.')
                THREADED_FPGA_FUNC(self.hosts, timeout=10,
                                   target_function='clear_status')

                # wait for a good accumulation to finish.
                self.logger.info('    Waiting %2.2fs for an accumulation to '
                                 'flush before checking counters.' %
                                 self.get_acc_time())
                time.sleep(self.get_acc_time() + 0.2)

                # check the vacc status, errors and accumulations
                if not self._vacc_sync_final_check():
                    continue

                # done
                synch_time = self.corr.time_from_mcnt(load_mcount)
                self.vacc_synch_running.clear()
                return synch_time
        except KeyboardInterrupt:
            self.vacc_synch_running.clear()
        except VaccSynchAttemptsMaxedOut as e:
            self.vacc_synch_running.clear()
            self.logger.error(e.message)
            raise e

    def set_acc_time(self, acc_time_s, vacc_resync=True):
        """
        Set the vacc accumulation length based on a required dump time,
        in seconds
        :param acc_time_s: new dump time, in seconds
        :param vacc_resync: force a vacc resynchronisation
        :return:
        """
        if use_xeng_sim:
            raise RuntimeError('That\'s not an option anymore.')
        new_acc_len = (
            (self.corr.sample_rate_hz * acc_time_s) /
            (self.corr.xeng_accumulation_len * self.corr.n_chans * 2.0))
        new_acc_len = round(new_acc_len)
        self.corr.logger.info('set_acc_time: %.3fs -> new_acc_len(%i)' %
                              (acc_time_s, new_acc_len))
        self.set_acc_len(new_acc_len, vacc_resync)
        if self.corr.sensor_manager:
            sensor = self.corr.sensor_manager.sensor_get('integration-time')
            sensor.set_value(self.get_acc_time())

    def get_acc_time(self):
        """
        Get the dump time currently being used.

        Note: Will only be correct if the accumulation time was set using
        this correlator object instance, since cached values are used for
        the calculation. I.e. the number of accumulations is _not_ read
        from the FPGAs.
        :return:
        """
        return (self.corr.xeng_accumulation_len * self.corr.accumulation_len *
                self.corr.n_chans * 2.0) / self.corr.sample_rate_hz
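
    # A worked example of the conversion used by set_acc_time() and
    # get_acc_time() above (illustrative numbers only, not taken from any
    # real config): with sample_rate_hz = 1.7e9, xeng_accumulation_len = 256
    # and n_chans = 4096, a requested dump time of 0.5 s gives
    #   acc_len = (1.7e9 * 0.5) / (256 * 4096 * 2.0) ~ 405
    # and get_acc_time() then reports the rounded value back as
    #   (256 * 405 * 4096 * 2.0) / 1.7e9 ~ 0.4996 s.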

    def get_acc_len(self):
        """
        Read the acc len currently programmed into the FPGA.
        :return:
        """
        return self.hosts[0].registers.acc_len.read_uint()

    def set_acc_len(self, acc_len=None, vacc_resync=True):
        """
        Set the QDR vector accumulation length.
        :param acc_len:
        :param vacc_resync: force a vacc resynchronisation
        :return:
        """
        if (acc_len is not None) and (acc_len <= 0):
            _err = 'new acc_len of %i makes no sense' % acc_len
            self.logger.error(_err)
            raise RuntimeError(_err)
        reenable_timer = False
        if self.vacc_check_enabled.is_set():
            self.vacc_check_timer_stop()
            reenable_timer = True
        if acc_len is not None:
            self.corr.accumulation_len = acc_len
        THREADED_FPGA_OP(
            self.hosts, timeout=10,
            target_function=(
                lambda fpga_:
                fpga_.registers.acc_len.write_int(self.corr.accumulation_len),))
        if self.corr.sensor_manager:
            sensor = self.corr.sensor_manager.sensor_get('n-accs')
            sensor.set_value(self.corr.accumulation_len)
        self.logger.info('Set vacc accumulation length %d system-wide '
                         '(%.2f seconds)' %
                         (self.corr.accumulation_len, self.get_acc_time()))
        self.corr.speadops.update_metadata([0x1015, 0x1016])
        if vacc_resync:
            self.vacc_sync()
        if reenable_timer:
            self.vacc_check_timer_start()

    def xeng_tx_enable(self, data_stream):
        """
        Start transmission of data streams from the x-engines
        :param data_stream - the data stream on which to act
        :return:
        """
        dstrm = data_stream or self.data_stream
        THREADED_FPGA_OP(
                self.hosts, timeout=5,
                target_function=(
                    lambda fpga_:
                    fpga_.registers.control.write(gbe_txen=True),))
        self.logger.info('X-engine output enabled')

    def xeng_tx_disable(self, data_stream):
        """
        Stop transmission of data streams from the x-engines
        :param data_stream - the data stream on which to act
        :return:
        """
        dstrm = data_stream or self.data_stream
        THREADED_FPGA_OP(
                self.hosts, timeout=5,
                target_function=(
                    lambda fpga_:
                    fpga_.registers.control.write(gbe_txen=False),))
        self.logger.info('X-engine output disabled')

    def spead_meta_update_stream_destination(self):
        """

        :return:
        """
        meta_ig = self.data_stream.meta_ig
        self.corr.speadops.add_item(
            meta_ig,
            name='rx_udp_port', id=0x1022,
            description='Destination UDP port for %s data '
                        'output.' % self.data_stream.name,
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=self.data_stream.destination.port)

        ipstr = numpy.array(str(self.data_stream.destination.ip))
        self.corr.speadops.add_item(
            meta_ig,
            name='rx_udp_ip_str', id=0x1024,
            description='Destination IP address for %s data '
                        'output.' % self.data_stream.name,
            shape=ipstr.shape,
            dtype=ipstr.dtype,
            value=ipstr)

    # x-engine-specific SPEAD operations
    def spead_meta_update_all(self):
        """
        Update metadata for this correlator's xengine output.
        :return:
        """
        meta_ig = self.data_stream.meta_ig

        self.corr.speadops.item_0x1007(meta_ig)

        self.corr.speadops.add_item(
            meta_ig,
            name='n_bls', id=0x1008,
            description='Number of baselines in the data stream.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=len(self.corr.baselines))

        self.corr.speadops.add_item(
            meta_ig,
            name='n_chans', id=0x1009,
            description='Number of frequency channels in an integration.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=self.corr.n_chans)

        self.corr.speadops.item_0x100a(meta_ig)

        n_xengs = len(self.corr.xhosts) * self.corr.x_per_fpga
        self.corr.speadops.add_item(
            meta_ig,
            name='n_xengs', id=0x100B,
            description='The number of x-engines in the system.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=n_xengs)

        bls_ordering = numpy.array(
            [baseline for baseline in self.corr.baselines])
        # this is a list of the baseline stream pairs, e.g. ['ant0x' 'ant0y']
        self.corr.speadops.add_item(
            meta_ig,
            name='bls_ordering', id=0x100C,
            description='The baseline ordering in the output data stream.',
            shape=bls_ordering.shape,
            dtype=bls_ordering.dtype,
            value=bls_ordering)

        self.corr.speadops.item_0x100e(meta_ig)

        self.corr.speadops.add_item(
            meta_ig,
            name='center_freq', id=0x1011,
            description='The on-sky centre-frequency.',
            shape=[], format=[('f', 64)],
            value=int(self.corr.configd['fengine']['true_cf']))

        self.corr.speadops.add_item(
            meta_ig,
            name='bandwidth', id=0x1013,
            description='The input (analogue) bandwidth of the system.',
            shape=[], format=[('f', 64)],
            value=int(self.corr.configd['fengine']['bandwidth']))

        self.corr.speadops.item_0x1015(meta_ig)
        self.corr.speadops.item_0x1016(meta_ig)
        self.corr.speadops.item_0x101e(meta_ig)

        self.corr.speadops.add_item(
            meta_ig,
            name='xeng_acc_len', id=0x101F,
            description='Number of spectra accumulated inside X engine. '
                        'Determines minimum integration time and '
                        'user-configurable integration time stepsize. '
                        'X-engine correlator internals.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=self.corr.xeng_accumulation_len)

        self.corr.speadops.item_0x1020(meta_ig)

        pkt_len = int(self.corr.configd['fengine']['10gbe_pkt_len'])
        self.corr.speadops.add_item(
            meta_ig,
            name='feng_pkt_len', id=0x1021,
            description='Payload size of 10GbE packet exchange between '
                        'F and X engines in 64 bit words. Usually equal '
                        'to the number of spectra accumulated inside X '
                        'engine. F-engine correlator internals.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=pkt_len)

        self.spead_meta_update_stream_destination()

        port = int(self.corr.configd['fengine']['10gbe_port'])
        self.corr.speadops.add_item(
            meta_ig,
            name='feng_udp_port', id=0x1023,
            description='Port for F-engines 10Gbe links in the system.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=port)

        ipstr = numpy.array(self.corr.configd['fengine']['10gbe_start_ip'])
        self.corr.speadops.add_item(
            meta_ig,
            name='feng_start_ip', id=0x1025,
            description='Start IP address for F-engines in the system.',
            shape=ipstr.shape,
            dtype=ipstr.dtype,
            value=ipstr)

        self.corr.speadops.add_item(
            meta_ig,
            name='xeng_rate', id=0x1026,
            description='Target clock rate of processing engines (xeng).',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=self.corr.xeng_clk)

        self.corr.speadops.item_0x1027(meta_ig)

        x_per_fpga = int(self.corr.configd['xengine']['x_per_fpga'])
        self.corr.speadops.add_item(
            meta_ig,
            name='x_per_fpga', id=0x1041,
            description='Number of X engines per FPGA host.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=x_per_fpga)

        self.corr.speadops.add_item(
            meta_ig,
            name='ddc_mix_freq', id=0x1043,
            description='Digital downconverter mixing frequency as a fraction '
                        'of the ADC sampling frequency. eg: 0.25. Set to zero '
                        'if no DDC is present.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=0)

        self.corr.speadops.item_0x1045(meta_ig)
        self.corr.speadops.item_0x1046(meta_ig)

        self.corr.speadops.add_item(
            meta_ig,
            name='xeng_out_bits_per_sample', id=0x1048,
            description='The number of bits per value of the xeng '
                        'accumulator output. Note this is for a '
                        'single value, not the combined complex size.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=self.corr.xeng_outbits)

        self.corr.speadops.add_item(
            meta_ig,
            name='f_per_fpga', id=0x1049,
            description='Number of F engines per FPGA host.',
            shape=[], format=[('u', SPEAD_ADDRSIZE)],
            value=self.corr.f_per_fpga)

        self.corr.speadops.item_0x104a(meta_ig)
        self.corr.speadops.item_0x104b(meta_ig)

        self.corr.speadops.item_0x1400(meta_ig)

        self.corr.speadops.item_0x1600(meta_ig)

        self.corr.speadops.add_item(
            meta_ig,
            name='flags_xeng_raw', id=0x1601,
            description='Flags associated with xeng_raw data output. '
                        'bit 34 - corruption or data missing during integration '
                        'bit 33 - overrange in data path '
                        'bit 32 - noise diode on during integration '
                        'bits 0 - 31 reserved for internal debugging',
            shape=[], format=[('u', SPEAD_ADDRSIZE)])

        self.corr.speadops.add_item(
            meta_ig,
            name='xeng_raw', id=0x1800,
            description='Raw data for %i xengines in the system. This item '
                        'represents a full spectrum (all frequency channels) '
                        'assembled from lowest frequency to highest '
                        'frequency. Each frequency channel contains the data '
                        'for all baselines (n_bls given by SPEAD ID 0x100b). '
                        'Each value is a complex number - two (real and '
                        'imaginary) unsigned integers.' % n_xengs,
            # dtype=numpy.int32,
            dtype=numpy.dtype('>i4'),
            shape=[self.corr.n_chans, len(self.corr.baselines), 2])
            # shape=[self.corr.n_chans * len(self.corr.baselines), 2])

    def spead_meta_issue_all(self, data_stream):
        """
        Issue = update the metadata then send it.
        :param data_stream: The DataStream object for which to send metadata
        :return: True if the callback transmits the metadata as well
        """
        dstrm = data_stream or self.data_stream
        self.spead_meta_update_all()
        dstrm.meta_transmit()
        self.logger.info('Issued SPEAD data descriptor for data stream %s '
                         'to %s.' % (dstrm.name,
                                     dstrm.meta_destination))
        return True
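
A minimal usage sketch of the accumulation-control and output methods above, assuming xops is an already-initialised instance of this x-engine operations class (the variable name is hypothetical):

xops.set_acc_time(0.5)                  # converts to an acc_len and, by
                                        # default, triggers vacc_sync()
print('dump time: %.3f s' % xops.get_acc_time())
print('acc_len:   %d' % xops.get_acc_len())
xops.xeng_tx_enable(None)               # None falls back to self.data_stream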
Code example #18
0
File: async_task_manager.py Project: rydzykje/aucote
class AsyncTaskManager(object):
    """
    Aucote uses asynchronous tasks executed in the ioloop. Some of them,
    especially scanners, should finish before the ioloop stops.

    This class should be accessed via the instance() class method, which
    returns the global instance of the task manager.

    """
    _instances = {}

    TASKS_POLITIC_WAIT = 0
    TASKS_POLITIC_KILL_WORKING_FIRST = 1
    TASKS_POLITIC_KILL_PROPORTIONS = 2
    TASKS_POLITIC_KILL_WORKING = 3

    def __init__(self, parallel_tasks=10):
        self._shutdown_condition = Event()
        self._stop_condition = Event()
        self._cron_tasks = {}
        self._parallel_tasks = parallel_tasks
        self._tasks = Queue()
        self._task_workers = {}
        self._events = {}
        self._limit = self._parallel_tasks
        self._next_task_number = 0
        self._toucan_keys = {}

    @classmethod
    def instance(cls, name=None, **kwargs):
        """
        Return instance of AsyncTaskManager

        Returns:
            AsyncTaskManager

        """
        if cls._instances.get(name) is None:
            cls._instances[name] = AsyncTaskManager(**kwargs)
        return cls._instances[name]

    @property
    def shutdown_condition(self):
        """
        Event which is set when every job is done and the AsyncTaskManager is ready to shut down

        Returns:
            Event
        """
        return self._shutdown_condition

    def start(self):
        """
        Start CronTabCallback tasks

        Returns:
            None

        """
        for task in self._cron_tasks.values():
            task.start()

        for number in range(self._parallel_tasks):
            self._task_workers[number] = IOLoop.current().add_callback(
                partial(self.process_tasks, number))

        self._next_task_number = self._parallel_tasks

    def add_crontab_task(self, task, cron, event=None):
        """
        Add a function to the scheduler to be executed at the given cron time

        Args:
            task (function):
            cron (str): crontab value
            event (Event): event which prevents tasks with a similar aim (e.g. security scans) from running concurrently

        Returns:
            None

        """
        if event is not None:
            event = self._events.setdefault(event, Event())
        self._cron_tasks[task] = AsyncCrontabTask(cron, task, event)

    @gen.coroutine
    def stop(self):
        """
        Stop CronTabCallback tasks and wait on them to finish

        Returns:
            None

        """
        for task in self._cron_tasks.values():
            task.stop()
        IOLoop.current().add_callback(self._prepare_shutdown)
        yield [self._stop_condition.wait(), self._tasks.join()]
        self._shutdown_condition.set()

    def _prepare_shutdown(self):
        """
        Check if ioloop can be stopped

        Returns:
            None

        """
        if any(task.is_running() for task in self._cron_tasks.values()):
            IOLoop.current().add_callback(self._prepare_shutdown)
            return

        self._stop_condition.set()

    def clear(self):
        """
        Clear list of tasks

        Returns:
            None

        """
        self._cron_tasks = {}
        self._shutdown_condition.clear()
        self._stop_condition.clear()

    async def process_tasks(self, number):
        """
        Execute the queue. Every task is executed in a separate thread (_Executor)

        """
        log.info("Starting worker %s", number)
        while True:
            try:
                item = self._tasks.get_nowait()
                try:
                    log.debug("Worker %s: starting %s", number, item)
                    thread = _Executor(task=item, number=number)
                    self._task_workers[number] = thread
                    thread.start()

                    while thread.is_alive():
                        await sleep(0.5)
                except:
                    log.exception("Worker %s: exception occurred", number)
                finally:
                    log.debug("Worker %s: %s finished", number, item)
                    self._tasks.task_done()
                    tasks_per_scan = (
                        '{}: {}'.format(scanner, len(tasks))
                        for scanner, tasks in self.tasks_by_scan.items())
                    log.debug("Tasks left in queue: %s (%s)",
                              self.unfinished_tasks, ', '.join(tasks_per_scan))
                    self._task_workers[number] = None
            except QueueEmpty:
                await gen.sleep(0.5)
                if self._stop_condition.is_set() and self._tasks.empty():
                    return
            finally:
                if self._limit < len(self._task_workers):
                    break

        del self._task_workers[number]

        log.info("Closing worker %s", number)

    def add_task(self, task):
        """
        Add task to the queue

        Args:
            task:

        Returns:
            None

        """
        self._tasks.put(task)

    @property
    def unfinished_tasks(self):
        """
        Tasks which are still being processed or are waiting in the queue

        Returns:
            int

        """
        return self._tasks._unfinished_tasks

    @property
    def tasks_by_scan(self):
        """
        Returns queued tasks grouped by scan
        """
        tasks = self._tasks._queue

        return_value = {}

        for task in tasks:
            return_value.setdefault(task.context.scanner.NAME, []).append(task)

        return return_value

    @property
    def cron_tasks(self):
        """
        List of cron tasks

        Returns:
            list

        """
        return self._cron_tasks.values()

    def cron_task(self, name):
        for task in self._cron_tasks.values():
            if task.func.NAME == name:
                return task

    def change_throttling_toucan(self, key, value):
        self.change_throttling(value)

    def change_throttling(self, new_value):
        """
        Change throttling value. Keeps throttling value between 0 and 1.

        Behaviour of algorithm is described in docs/throttling.md

        Only working tasks are stopped here. Idle workers stop by themselves

        """
        if new_value > 1:
            new_value = 1
        if new_value < 0:
            new_value = 0

        new_value = round(new_value * 100) / 100

        old_limit = self._limit
        self._limit = round(self._parallel_tasks * float(new_value))

        working_tasks = [
            number for number, task in self._task_workers.items()
            if task is not None
        ]
        current_tasks = len(self._task_workers)

        task_politic = cfg['service.scans.task_politic']

        if task_politic == self.TASKS_POLITIC_KILL_WORKING_FIRST:
            tasks_to_kill = current_tasks - self._limit
        elif task_politic == self.TASKS_POLITIC_KILL_PROPORTIONS:
            tasks_to_kill = round((old_limit - self._limit) *
                                  len(working_tasks) / self._parallel_tasks)
        elif task_politic == self.TASKS_POLITIC_KILL_WORKING:
            tasks_to_kill = (old_limit - self._limit) - (
                len(self._task_workers) - len(working_tasks))
        else:
            tasks_to_kill = 0

        log.debug('%s tasks will be killed', tasks_to_kill)

        for number in working_tasks:
            if tasks_to_kill <= 0:
                break
            self._task_workers[number].stop()
            tasks_to_kill -= 1

        self._limit = round(self._parallel_tasks * float(new_value))

        current_tasks = len(self._task_workers)

        for number in range(self._limit - current_tasks):
            self._task_workers[self._next_task_number] = None
            IOLoop.current().add_callback(
                partial(self.process_tasks, self._next_task_number))
            self._next_task_number += 1
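
A minimal driver sketch for the manager above, assuming aucote-style task objects (the my_scan callable and some_task object are hypothetical; real queued tasks carry the context attribute used by tasks_by_scan):

manager = AsyncTaskManager.instance(parallel_tasks=4)
manager.add_crontab_task(my_scan, '*/5 * * * *', event='scan')
manager.start()                     # spawn the worker callbacks on the IOLoop
manager.add_task(some_task)         # picked up by an _Executor worker thread
# later, from a coroutine: yield manager.stop()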
Code example #19
0
class MockFitsWriterClient(object):
    """
    Wrapper class for a KATCP client to an EddFitsWriterServer
    """
    def __init__(self, address):
        """
        @brief      Construct new instance
        """
        self._address = address
        self._ioloop = IOLoop.current()
        self._stop_event = Event()
        self._is_stopped = Condition()
        self._socket = None

    def reset_connection(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.setblocking(False)
        try:
            self._socket.connect(self._address)
        except socket.error as error:
            if error.args[0] == errno.EINPROGRESS:
                pass
            else:
                raise error

    @coroutine
    def recv_nbytes(self, nbytes):
        received_bytes = 0
        data = b''
        while received_bytes < nbytes:
            if self._stop_event.is_set():
                raise StopEvent
            try:
                log.debug("Requesting {} bytes".format(nbytes -
                                                       received_bytes))
                current_data = self._socket.recv(nbytes - received_bytes)
                received_bytes += len(current_data)
                data += current_data
                log.debug("Received {} bytes ({} of {} bytes)".format(
                    len(current_data), received_bytes, nbytes))
            except socket.error as error:
                error_id = error.args[0]
                if error_id == errno.EAGAIN or error_id == errno.EWOULDBLOCK:
                    yield sleep(0.1)
                else:
                    log.exception("Unexpected error on socket recv: {}".format(
                        str(error)))
                    raise error
        raise Return(data)

    @coroutine
    def recv_loop(self):
        try:
            header, sections = yield self.recv_packet()
        except StopEvent:
            log.debug("Notifying that recv calls have stopped")
            self._is_stopped.notify()
        except Exception:
            log.exception("Failure while receiving packet")
        else:
            self._ioloop.add_callback(self.recv_loop)

    def start(self):
        self._stop_event.clear()
        self.reset_connection()
        self._ioloop.add_callback(self.recv_loop)

    @coroutine
    def stop(self, timeout=2):
        self._stop_event.set()
        try:
            success = yield self._is_stopped.wait(timeout=self._ioloop.time() +
                                                  timeout)
            if not success:
                raise TimeoutError
        except TimeoutError:
            log.error(("Could not stop the client within "
                       "the {} second limit").format(timeout))
        except Exception:
            log.exception("Fucup")

    @coroutine
    def recv_packet(self):
        log.debug("Receiving packet header")
        raw_header = yield self.recv_nbytes(C.sizeof(FWHeader))
        log.debug("Converting packet header")
        header = FWHeader.from_buffer_copy(raw_header)
        log.info("Received header: {}".format(header))
        fw_data_type = header.channel_data_type.strip().upper()
        c_data_type, np_data_type = TYPE_MAP[fw_data_type]
        sections = []
        for section in range(header.nsections):
            log.debug("Receiving section {} of {}".format(
                section + 1, header.nsections))
            raw_section_header = yield self.recv_nbytes(
                C.sizeof(FWSectionHeader))
            section_header = FWSectionHeader.from_buffer_copy(
                raw_section_header)
            log.info("Section {} header: {}".format(section, section_header))
            log.debug("Receiving section data")
            raw_bytes = yield self.recv_nbytes(
                C.sizeof(c_data_type) * section_header.nchannels)
            data = np.frombuffer(raw_bytes, dtype=np_data_type)
            log.info("Section {} data: {}".format(section, data[:10]))
            sections.append((section_header, data))
        raise Return((header, sections))
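
A minimal usage sketch, assuming a FITS-writer server is already listening on the given address (the host and port below are placeholders):

client = MockFitsWriterClient(('127.0.0.1', 5002))
client.start()                              # connect and schedule recv_loop
# ... keep the IOLoop running while packets arrive ...
# IOLoop.current().run_sync(lambda: client.stop(timeout=2))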
Code example #20
0
File: tornado.py Project: FloFaber/Sudoku
class DBusRouter:
    def __init__(self, conn: DBusConnection):
        self.conn = conn
        self._replies = ReplyMatcher()
        self._filters = MessageFilters()
        self._stop_receiving = Event()
        IOLoop.current().add_callback(self._receiver)

        # For backwards compatibility - old-style signal callbacks
        self.router = Router(Future)

    async def send(self, message, *, serial=None):
        await self.conn.send(message, serial=serial)

    async def send_and_get_reply(self, message):
        check_replyable(message)
        if self._stop_receiving.is_set():
            raise RouterClosed("This DBusRouter has stopped")

        serial = next(self.conn.outgoing_serial)

        with self._replies.catch(serial, Future()) as reply_fut:
            await self.send(message, serial=serial)
            return (await reply_fut)

    def filter(self, rule, *, queue: Optional[Queue] = None, bufsize=1):
        """Create a filter for incoming messages

        Usage::

            with router.filter(rule) as queue:
                matching_msg = await queue.get()

        :param jeepney.MatchRule rule: Catch messages matching this rule
        :param tornado.queues.Queue queue: Matched messages will be added to this
        :param int bufsize: If no queue is passed in, create one with this size
        """
        return FilterHandle(self._filters, rule, queue or Queue(bufsize))

    def stop(self):
        self._stop_receiving.set()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        return False

    # Backwards compatible interface (from old DBusConnection) --------

    @property
    def unique_name(self):
        return self.conn.unique_name

    async def send_message(self, message: Message):
        if (message.header.message_type == MessageType.method_return and
                not (message.header.flags & MessageFlag.no_reply_expected)):
            return unwrap_msg(await self.send_and_get_reply(message))
        else:
            await self.send(message)

    # Code to run in receiver task ------------------------------------

    def _dispatch(self, msg: Message):
        """Handle one received message"""
        if self._replies.dispatch(msg):
            return

        for filter in self._filters.matches(msg):
            try:
                filter.queue.put_nowait(msg)
            except QueueFull:
                pass

    async def _receiver(self):
        """Receiver loop - runs in a separate task"""
        try:
            while True:
                for coro in as_completed(
                    [self.conn.receive(),
                     self._stop_receiving.wait()]):
                    msg = await coro
                    if msg is None:
                        return  # Stopped
                    self._dispatch(msg)
                    self.router.incoming(msg)
        finally:
            self.is_running = False
            # Send errors to any tasks still waiting for a message.
            self._replies.drop_all()
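
A minimal sketch of driving this router, assuming conn is an already-open DBusConnection for this Tornado integration; the bus address and the org.freedesktop.DBus ListNames call below use jeepney's standard message helpers purely as an illustration:

from jeepney import DBusAddress, new_method_call

bus = DBusAddress('/org/freedesktop/DBus',
                  bus_name='org.freedesktop.DBus',
                  interface='org.freedesktop.DBus')

async def list_names(conn):
    # The router matches the reply to send_and_get_reply via the serial.
    with DBusRouter(conn) as router:
        reply = await router.send_and_get_reply(
            new_method_call(bus, 'ListNames'))
        return reply.body[0]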
Code example #21
0
File: pubnub_tornado.py Project: pubnub/python
class TornadoReconnectionManager(ReconnectionManager):
    def __init__(self, pubnub):
        self._cancelled_event = Event()
        super(TornadoReconnectionManager, self).__init__(pubnub)

    @gen.coroutine
    def _register_heartbeat_timer(self):
        self._cancelled_event.clear()

        while not self._cancelled_event.is_set():
            if self._pubnub.config.reconnect_policy == PNReconnectionPolicy.EXPONENTIAL:
                self._timer_interval = int(math.pow(2, self._connection_errors) - 1)
                if self._timer_interval > self.MAXEXPONENTIALBACKOFF:
                    self._timer_interval = self.MINEXPONENTIALBACKOFF
                    self._connection_errors = 1
                    logger.debug("timerInterval > MAXEXPONENTIALBACKOFF at: %s" % utils.datetime_now())
                elif self._timer_interval < 1:
                    self._timer_interval = self.MINEXPONENTIALBACKOFF
                logger.debug("timerInterval = %d at: %s" % (self._timer_interval, utils.datetime_now()))
            else:
                self._timer_interval = self.INTERVAL

            # >>> Wait given interval or cancel
            sleeper = tornado.gen.sleep(self._timer_interval)
            canceller = self._cancelled_event.wait()

            wi = tornado.gen.WaitIterator(canceller, sleeper)

            while not wi.done():
                try:
                    future = wi.next()
                    yield future
                except Exception as e:
                    # TODO: verify the error will not be eaten
                    logger.error(e)
                    raise
                else:
                    if wi.current_future == sleeper:
                        break
                    elif wi.current_future == canceller:
                        return
                    else:
                        raise Exception("unknown future raised")

            logger.debug("reconnect loop at: %s" % utils.datetime_now())

            # >>> Attempt to request /time/0 endpoint
            try:
                yield self._pubnub.time().result()
                self._connection_errors = 1
                self._callback.on_reconnect()
                logger.debug("reconnection manager stop due success time endpoint call: %s" % utils.datetime_now())
                break
            except Exception:
                if self._pubnub.config.reconnect_policy == PNReconnectionPolicy.EXPONENTIAL:
                    logger.debug("reconnect interval increment at: %s" % utils.datetime_now())
                    self._connection_errors += 1

    def start_polling(self):
        if self._pubnub.config.reconnect_policy == PNReconnectionPolicy.NONE:
            logger.warn("reconnection policy is disabled, please handle reconnection manually.")
            return

        self._pubnub.ioloop.spawn_callback(self._register_heartbeat_timer)

    def stop_polling(self):
        if self._cancelled_event is not None and not self._cancelled_event.is_set():
            self._cancelled_event.set()
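
The exponential policy above retries after 2**n - 1 seconds, where n is the running count of connection errors. A short illustration, assuming the SDK's usual MINEXPONENTIALBACKOFF = 1 and MAXEXPONENTIALBACKOFF = 32 seconds:

# errors = 1  ->  interval = 2**1 - 1 = 1 s
# errors = 3  ->  interval = 2**3 - 1 = 7 s
# errors = 5  ->  interval = 2**5 - 1 = 31 s
# errors = 6  ->  2**6 - 1 = 63 s exceeds the cap, so the interval resets to
#                 MINEXPONENTIALBACKOFF and _connection_errors goes back to 1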
Code example #23
0
File: pubnub_tornado.py Project: pubnub/python
class TornadoSubscriptionManager(SubscriptionManager):
    def __init__(self, pubnub_instance):

        subscription_manager = self

        self._message_queue = Queue()
        self._consumer_event = Event()
        self._cancellation_event = Event()
        self._subscription_lock = Semaphore(1)
        # self._current_request_key_object = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = TornadoReconnectionManager(pubnub_instance)

        super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class TornadoReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(pn_status)

        self._reconnection_listener = TornadoReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(self._reconnection_listener)

    def _set_consumer_event(self):
        self._consumer_event.set()

    def _message_queue_put(self, message):
        self._message_queue.put(message)

    def _start_worker(self):
        self._consumer = TornadoSubscribeMessageWorker(self._pubnub,
                                                       self._listener_manager,
                                                       self._message_queue,
                                                       self._consumer_event)
        run = stack_context.wrap(self._consumer.run)
        self._pubnub.ioloop.spawn_callback(run)

    def reconnect(self):
        self._should_stop = False
        self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
        # self._register_heartbeat_timer()

    def disconnect(self):
        self._should_stop = True
        self._stop_heartbeat_timer()
        self._stop_subscribe_loop()

    @tornado.gen.coroutine
    def _start_subscribe_loop(self):
        self._stop_subscribe_loop()

        yield self._subscription_lock.acquire()

        self._cancellation_event.clear()

        combined_channels = self._subscription_state.prepare_channel_list(True)
        combined_groups = self._subscription_state.prepare_channel_group_list(True)

        if len(combined_channels) == 0 and len(combined_groups) == 0:
            return

        envelope_future = Subscribe(self._pubnub) \
            .channels(combined_channels).channel_groups(combined_groups) \
            .timetoken(self._timetoken).region(self._region) \
            .filter_expression(self._pubnub.config.filter_expression) \
            .cancellation_event(self._cancellation_event) \
            .future()

        canceller_future = self._cancellation_event.wait()

        wi = tornado.gen.WaitIterator(envelope_future, canceller_future)

        # iterates 2 times: one for result one for cancelled
        while not wi.done():
            try:
                result = yield wi.next()
            except Exception as e:
                # TODO: verify the error will not be eaten
                logger.error(e)
                raise
            else:
                if wi.current_future == envelope_future:
                    e = result
                elif wi.current_future == canceller_future:
                    return
                else:
                    raise Exception("Unexpected future resolved: %s" % str(wi.current_future))

                if e.is_error():
                    # the 599 status code is not useful on its own - tornado
                    # uses it for a wide range of errors, e.g.:
                    # HTTP Server Error (599): [Errno -2] Name or service not known
                    if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
                        self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
                        return

                    logger.error("Exception in subscribe loop: %s" % str(e))

                    if e.status is not None and e.status.category == PNStatusCategory.PNAccessDeniedCategory:
                        e.status.operation = PNOperationType.PNUnsubscribeOperation

                    self._listener_manager.announce_status(e.status)

                    self._reconnection_manager.start_polling()
                    self.disconnect()
                    return
                else:
                    self._handle_endpoint_call(e.result, e.status)

                    self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)

            finally:
                self._cancellation_event.set()
                yield tornado.gen.moment
                self._subscription_lock.release()
                self._cancellation_event.clear()
                break

    def _stop_subscribe_loop(self):
        if self._cancellation_event is not None and not self._cancellation_event.is_set():
            self._cancellation_event.set()

    def _stop_heartbeat_timer(self):
        if self._heartbeat_periodic_callback is not None:
            self._heartbeat_periodic_callback.stop()

    def _register_heartbeat_timer(self):
        super(TornadoSubscriptionManager, self)._register_heartbeat_timer()
        self._heartbeat_periodic_callback = PeriodicCallback(
            stack_context.wrap(self._perform_heartbeat_loop),
            self._pubnub.config.heartbeat_interval * TornadoSubscriptionManager.HEARTBEAT_INTERVAL_MULTIPLIER,
            self._pubnub.ioloop)
        self._heartbeat_periodic_callback.start()

    @tornado.gen.coroutine
    def _perform_heartbeat_loop(self):
        if self._heartbeat_call is not None:
            # TODO: cancel call
            pass

        cancellation_event = Event()
        state_payload = self._subscription_state.state_payload()
        presence_channels = self._subscription_state.prepare_channel_list(False)
        presence_groups = self._subscription_state.prepare_channel_group_list(False)

        if len(presence_channels) == 0 and len(presence_groups) == 0:
            return

        try:
            envelope = yield self._pubnub.heartbeat() \
                .channels(presence_channels) \
                .channel_groups(presence_groups) \
                .state(state_payload) \
                .cancellation_event(cancellation_event) \
                .future()

            heartbeat_verbosity = self._pubnub.config.heartbeat_notification_options
            if envelope.status.is_error:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL or \
                        heartbeat_verbosity == PNHeartbeatNotificationOptions.FAILURES:
                    self._listener_manager.announce_status(envelope.status)
            else:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL:
                    self._listener_manager.announce_status(envelope.status)

        except PubNubTornadoException:
            pass
            # TODO: check correctness
            # if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
            #     self._start_subscribe_loop()
            # else:
            #     self._listener_manager.announce_status(e.status)
        except Exception as e:
            print(e)
        finally:
            cancellation_event.set()

    @tornado.gen.coroutine
    def _send_leave(self, unsubscribe_operation):
        envelope = yield Leave(self._pubnub) \
            .channels(unsubscribe_operation.channels) \
            .channel_groups(unsubscribe_operation.channel_groups).future()
        self._listener_manager.announce_status(envelope.status)
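
The subscribe loop above races a request future against a cancellation event with tornado.gen.WaitIterator. A stripped-down, self-contained sketch of that pattern (the coroutine and variable names are illustrative):

import tornado.gen
import tornado.ioloop
from tornado.locks import Event

@tornado.gen.coroutine
def race(work_future, cancel_event):
    canceller = cancel_event.wait()
    wi = tornado.gen.WaitIterator(work_future, canceller)
    while not wi.done():
        result = yield wi.next()
        if wi.current_future is canceller:
            raise tornado.gen.Return(None)    # cancelled before work finished
        raise tornado.gen.Return(result)      # the work future resolved first

if __name__ == '__main__':
    evt = Event()  # never set, so the 0.1 s sleep always "wins"
    loop = tornado.ioloop.IOLoop.current()
    print(loop.run_sync(lambda: race(tornado.gen.sleep(0.1), evt)))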
Code example #24
0
class WebUpdater:
    def __init__(self, config, cmd_helper):
        self.server = cmd_helper.get_server()
        self.cmd_helper = cmd_helper
        self.repo = config.get('repo').strip().strip("/")
        self.owner, self.name = self.repo.split("/", 1)
        if hasattr(config, "get_name"):
            self.name = config.get_name().split()[-1]
        self.path = os.path.realpath(os.path.expanduser(config.get("path")))
        self.persistent_files = []
        pfiles = config.get('persistent_files', None)
        if pfiles is not None:
            self.persistent_files = [
                pf.strip().strip("/") for pf in pfiles.split("\n")
                if pf.strip()
            ]
            if ".version" in self.persistent_files:
                raise config.error(
                    "Invalid value for option 'persistent_files': "
                    "'.version' can not be persistent")

        self.version = self.remote_version = self.dl_url = "?"
        self.etag = None
        self.init_evt = Event()
        self.refresh_condition = None
        self._get_local_version()
        logging.info(f"\nInitializing Client Updater: '{self.name}',"
                     f"\nversion: {self.version}"
                     f"\npath: {self.path}")

    def _get_local_version(self):
        version_path = os.path.join(self.path, ".version")
        if os.path.isfile(os.path.join(self.path, ".version")):
            with open(version_path, "r") as f:
                v = f.read()
            self.version = v.strip()

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            self._get_local_version()
            await self._get_remote_version()
        except Exception:
            logging.exception("Error Refreshing Client")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def _get_remote_version(self):
        # Remote state
        url = f"https://api.github.com/repos/{self.repo}/releases/latest"
        try:
            result = await self.cmd_helper.github_api_request(url,
                                                              etag=self.etag)
        except Exception:
            logging.exception(f"Client {self.repo}: Github Request Error")
            result = {}
        if result is None:
            # No change, update not necessary
            return
        self.etag = result.get('etag', None)
        self.remote_version = result.get('name', "?")
        release_assets = result.get('assets', [{}])[0]
        self.dl_url = release_assets.get('browser_download_url', "?")
        logging.info(f"Github client Info Received:\nRepo: {self.name}\n"
                     f"Local Version: {self.version}\n"
                     f"Remote Version: {self.remote_version}\n"
                     f"url: {self.dl_url}")

    async def update(self, *args):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            # wait for the refresh if one is in progress
            await self.refresh_condition.wait()
        if self.remote_version == "?":
            await self.refresh()
            if self.remote_version == "?":
                raise self.server.error(
                    f"Client {self.repo}: Unable to locate update")
        if self.dl_url == "?":
            raise self.server.error(
                f"Client {self.repo}: Invalid download url")
        if self.version == self.remote_version:
            # Already up to date
            return
        self.cmd_helper.notify_update_response(
            f"Downloading Client: {self.name}")
        archive = await self.cmd_helper.http_download_request(self.dl_url)
        with tempfile.TemporaryDirectory(suffix=self.name,
                                         prefix="client") as tempdir:
            if os.path.isdir(self.path):
                # find and move persistent files
                for fname in os.listdir(self.path):
                    src_path = os.path.join(self.path, fname)
                    if fname in self.persistent_files:
                        dest_dir = os.path.dirname(os.path.join(
                            tempdir, fname))
                        os.makedirs(dest_dir, exist_ok=True)
                        shutil.move(src_path, dest_dir)
                shutil.rmtree(self.path)
            os.mkdir(self.path)
            with zipfile.ZipFile(io.BytesIO(archive)) as zf:
                zf.extractall(self.path)
            # Move the stashed persistent files back into the client path
            for fname in os.listdir(tempdir):
                src_path = os.path.join(tempdir, fname)
                dest_dir = os.path.dirname(os.path.join(self.path, fname))
                os.makedirs(dest_dir, exist_ok=True)
                shutil.move(src_path, dest_dir)
        self.version = self.remote_version
        version_path = os.path.join(self.path, ".version")
        if not os.path.exists(version_path):
            with open(version_path, "w") as f:
                f.write(self.version)
        self.cmd_helper.notify_update_response(
            f"Client Update Finished: {self.name}", is_complete=True)

    def get_update_status(self):
        return {
            'name': self.name,
            'owner': self.owner,
            'version': self.version,
            'remote_version': self.remote_version
        }
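
The update() method above uses a stash-and-restore pattern for the entries listed in persistent_files: they are moved out to a temporary directory before the client directory is deleted, then moved back after the release archive has been extracted. Below is a minimal, self-contained sketch of that pattern for top-level entries only; the function name and the simplified handling of nested paths are illustrative and not part of the original class.

import io
import os
import shutil
import tempfile
import zipfile

def replace_tree_keeping(path, archive_bytes, persist):
    # Sketch of the stash-and-restore flow used by WebUpdater.update();
    # only top-level names are handled here for brevity.
    with tempfile.TemporaryDirectory() as tempdir:
        if os.path.isdir(path):
            # Stash persistent entries outside the tree before wiping it
            for fname in os.listdir(path):
                if fname in persist:
                    shutil.move(os.path.join(path, fname), tempdir)
            shutil.rmtree(path)
        os.mkdir(path)
        # Unpack the new release into the now-empty directory
        with zipfile.ZipFile(io.BytesIO(archive_bytes)) as zf:
            zf.extractall(path)
        # Restore the stashed entries, replacing anything from the archive
        for fname in os.listdir(tempdir):
            dest = os.path.join(path, fname)
            if os.path.isdir(dest):
                shutil.rmtree(dest)
            elif os.path.exists(dest):
                os.remove(dest)
            shutil.move(os.path.join(tempdir, fname), path)
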
Code example #25
0
File: pubnub_tornado.py Project: pubnub/python
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()
        self.error_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()
        elif status.is_error():
            self.error_queue.put_nowait(status.error_data.exception)

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    @tornado.gen.coroutine
    def _wait_for(self, coro):
        # Race the awaited future against the error queue so that errors
        # reported via status() propagate to the waiter instead of hanging it
        error = self.error_queue.get()
        wi = tornado.gen.WaitIterator(coro, error)

        while not wi.done():
            result = yield wi.next()

            if wi.current_future == coro:
                raise tornado.gen.Return(result)
            elif wi.current_future == error:
                raise result
            else:
                raise Exception("Unexpected future resolved: %s" % str(wi.current_future))

    @tornado.gen.coroutine
    def wait_for_connect(self):
        if not self.connected_event.is_set():
            yield self._wait_for(self.connected_event.wait())
        else:
            raise Exception("instance is already connected")

    @tornado.gen.coroutine
    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            yield self._wait_for(self.disconnected_event.wait())
        else:
            raise Exception("instance is already disconnected")

    @tornado.gen.coroutine
    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try: # NOQA
                env = yield self._wait_for(self.message_queue.get())
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.message_queue.task_done()

    @tornado.gen.coroutine
    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                try:
                    env = yield self._wait_for(self.presence_queue.get())
                except: # NOQA E722 pylint: disable=W0702
                    break
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.presence_queue.task_done()
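
For context, a hedged usage sketch of the listener above in a Tornado test coroutine: the keys, uuid and channel name are placeholders, and the fluent pubnub calls (add_listener, the subscribe/publish/unsubscribe builders) are shown as commonly used with the PubNub Tornado client rather than quoted from this file.

import tornado.gen
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub_tornado import PubNubTornado

@tornado.gen.coroutine
def listener_example():
    # Placeholder credentials and channel name
    config = PNConfiguration()
    config.publish_key = "demo"
    config.subscribe_key = "demo"
    config.uuid = "listener-example"
    pubnub = PubNubTornado(config)

    listener = SubscribeListener()
    pubnub.add_listener(listener)

    pubnub.subscribe().channels("my_channel").execute()
    yield listener.wait_for_connect()            # resolves on the connect status

    yield pubnub.publish().channel("my_channel").message("hello").future()
    envelope = yield listener.wait_for_message_on("my_channel")
    print(envelope.message)

    pubnub.unsubscribe().channels("my_channel").execute()
    yield listener.wait_for_disconnect()
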
Code example #26
0
class GitUpdater:
    def __init__(self, umgr, config, path=None, env=None):
        self.server = umgr.server
        self.execute_cmd = umgr.execute_cmd
        self.execute_cmd_with_response = umgr.execute_cmd_with_response
        self.notify_update_response = umgr.notify_update_response
        self.name = config.get_name().split()[-1]
        self.repo_path = path
        if path is None:
            self.repo_path = config.get('path')
        self.env = config.get("env", env)
        dist_packages = None
        if self.env is not None:
            self.env = os.path.expanduser(self.env)
            dist_packages = config.get('python_dist_packages', None)
            self.python_reqs = os.path.join(self.repo_path,
                                            config.get("requirements"))
        self.origin = config.get("origin").lower()
        self.install_script = config.get('install_script', None)
        if self.install_script is not None:
            self.install_script = os.path.abspath(
                os.path.join(self.repo_path, self.install_script))
        self.venv_args = config.get('venv_args', None)
        self.python_dist_packages = None
        self.python_dist_path = None
        self.env_package_path = None
        if dist_packages is not None:
            self.python_dist_packages = [
                p.strip() for p in dist_packages.split('\n') if p.strip()
            ]
            self.python_dist_path = os.path.abspath(
                config.get('python_dist_path'))
            if not os.path.exists(self.python_dist_path):
                raise config.error(
                    "Invalid path for option 'python_dist_path'")
            self.env_package_path = os.path.abspath(
                os.path.join(os.path.dirname(self.env), "..",
                             config.get('env_package_path')))
        for opt in [
                "repo_path", "env", "python_reqs", "install_script",
                "python_dist_path", "env_package_path"
        ]:
            val = getattr(self, opt)
            if val is None:
                continue
            if not os.path.exists(val):
                raise config.error("Invalid path for option '%s': %s" %
                                   (opt, val))

        self.version = self.cur_hash = "?"
        self.remote_version = self.remote_hash = "?"
        self.init_evt = Event()
        self.refresh_condition = None
        self.debug = umgr.repo_debug
        self.remote = "origin"
        self.branch = "master"
        self.is_valid = self.is_dirty = self.detached = False

    def _get_version_info(self):
        ver_path = os.path.join(self.repo_path, "scripts/version.txt")
        vinfo = {}
        if os.path.isfile(ver_path):
            data = ""
            with open(ver_path, 'r') as f:
                data = f.read()
            try:
                entries = [e.strip() for e in data.split('\n') if e.strip()]
                vinfo = dict([i.split('=') for i in entries])
                vinfo = {
                    k: tuple(re.findall(r"\d+", v))
                    for k, v in vinfo.items()
                }
            except Exception:
                pass
            else:
                self._log_info(f"Version Info Found: {vinfo}")
        vinfo['version'] = tuple(re.findall(r"\d+", self.version))
        return vinfo

    def _log_exc(self, msg, traceback=True):
        log_msg = f"Repo {self.name}: {msg}"
        if traceback:
            logging.exception(log_msg)
        else:
            logging.info(log_msg)
        return self.server.error(msg)

    def _log_info(self, msg):
        log_msg = f"Repo {self.name}: {msg}"
        logging.info(log_msg)

    def _notify_status(self, msg, is_complete=False):
        log_msg = f"Repo {self.name}: {msg}"
        logging.debug(log_msg)
        self.notify_update_response(log_msg, is_complete)

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            # Wait for the in-progress refresh to complete
            await self.refresh_condition.wait()
            return
        try:
            await self._check_version()
        except Exception:
            logging.exception("Error Refreshing git state")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def _check_version(self, need_fetch=True):
        self.is_valid = self.detached = False
        self.cur_hash = self.branch = self.remote = "?"
        self.version = self.remote_version = "?"
        try:
            blist = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} branch --list")
            if blist.startswith("fatal:"):
                self._log_info(f"Invalid git repo at path '{self.repo_path}'")
                return
            branch = None
            for b in blist.split("\n"):
                b = b.strip()
                if b[0] == "*":
                    branch = b[2:]
                    break
            if branch is None:
                self._log_info(
                    "Unable to retreive current branch from branch list\n"
                    f"{blist}")
                return
            if "HEAD detached" in branch:
                bparts = branch.split()[-1].strip("()")
                self.remote, self.branch = bparts.split("/")
                self.detached = True
            else:
                self.branch = branch.strip()
                self.remote = await self.execute_cmd_with_response(
                    f"git -C {self.repo_path} config --get"
                    f" branch.{self.branch}.remote")
            if need_fetch:
                await self.execute_cmd(
                    f"git -C {self.repo_path} fetch {self.remote} --prune -q",
                    retries=3)
            remote_url = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} remote get-url {self.remote}")
            cur_hash = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} rev-parse HEAD")
            remote_hash = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} rev-parse "
                f"{self.remote}/{self.branch}")
            repo_version = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} describe --always "
                "--tags --long --dirty")
            remote_version = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} describe {self.remote}/{self.branch}"
                " --always --tags --long")
        except Exception:
            self._log_exc("Error retreiving git info")
            return

        self.is_dirty = repo_version.endswith("dirty")
        versions = []
        for ver in [repo_version, remote_version]:
            tag_version = "?"
            ver_match = re.match(r"v\d+\.\d+\.\d-\d+", ver)
            if ver_match:
                tag_version = ver_match.group()
            versions.append(tag_version)
        self.version, self.remote_version = versions
        self.cur_hash = cur_hash.strip()
        self.remote_hash = remote_hash.strip()
        self._log_info(
            f"Repo Detected:\nPath: {self.repo_path}\nRemote: {self.remote}\n"
            f"Branch: {self.branch}\nRemote URL: {remote_url}\n"
            f"Current SHA: {self.cur_hash}\n"
            f"Remote SHA: {self.remote_hash}\nVersion: {self.version}\n"
            f"Remote Version: {self.remote_version}\n"
            f"Is Dirty: {self.is_dirty}\nIs Detached: {self.detached}")
        if self.debug:
            self.is_valid = True
            self._log_info("Debug enabled, bypassing official repo check")
        elif self.branch == "master" and self.remote == "origin":
            if self.detached:
                self._log_info("Detached HEAD detected, repo invalid")
                return
            remote_url = remote_url.lower()
            if remote_url[-4:] != ".git":
                remote_url += ".git"
            if remote_url == self.origin:
                self.is_valid = True
                self._log_info("Validity check for git repo passed")
            else:
                self._log_info(f"Invalid git origin url '{remote_url}'")
        else:
            self._log_info("Git repo not on offical remote/branch: "
                           f"{self.remote}/{self.branch}")

    async def update(self, update_deps=False):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            # wait for refresh if in progress
            await self.refresh_condition.wait()
        if not self.is_valid:
            raise self._log_exc("Update aborted, repo is not valid", False)
        if self.is_dirty:
            raise self._log_exc("Update aborted, repo has been modified",
                                False)
        if self.remote_hash == self.cur_hash:
            # No need to update
            return
        self._notify_status("Updating Repo...")
        try:
            if self.detached:
                await self.execute_cmd(
                    f"git -C {self.repo_path} fetch {self.remote} -q",
                    retries=3)
                await self.execute_cmd(f"git -C {self.repo_path} checkout"
                                       f" {self.remote}/{self.branch} -q")
            else:
                await self.execute_cmd(f"git -C {self.repo_path} pull -q",
                                       retries=3)
        except Exception:
            raise self._log_exc("Error running 'git pull'")
        # Check Semantic Versions
        vinfo = self._get_version_info()
        cur_version = vinfo.get('version', ())
        update_deps |= cur_version < vinfo.get('deps_version', ())
        need_env_rebuild = cur_version < vinfo.get('env_version', ())
        if update_deps:
            await self._install_packages()
            await self._update_virtualenv(need_env_rebuild)
        elif need_env_rebuild:
            await self._update_virtualenv(True)
        # Refresh local repo state
        await self._check_version(need_fetch=False)
        if self.name == "moonraker":
            # Launch restart async so the request can return
            # before the server restarts
            self._notify_status("Update Finished...", is_complete=True)
            IOLoop.current().call_later(.1, self.restart_service)
        else:
            await self.restart_service()
            self._notify_status("Update Finished...", is_complete=True)

    async def _install_packages(self):
        if self.install_script is None:
            return
        # Open the install script and read its contents
        inst_path = self.install_script
        if not os.path.isfile(inst_path):
            self._log_info(f"Unable to open install script: {inst_path}")
            return
        with open(inst_path, 'r') as f:
            data = f.read()
        packages = re.findall(r'PKGLIST="(.*)"', data)
        packages = [p.lstrip("${PKGLIST}").strip() for p in packages]
        if not packages:
            self._log_info(f"No packages found in script: {inst_path}")
            return
        # TODO: Log and notify that packages will be installed
        pkgs = " ".join(packages)
        logging.debug(f"Repo {self.name}: Detected Packages: {pkgs}")
        self._notify_status("Installing system dependencies...")
        # Install packages with apt-get
        try:
            await self.execute_cmd(f"{APT_CMD} update",
                                   timeout=300.,
                                   notify=True)
            await self.execute_cmd(f"{APT_CMD} install --yes {pkgs}",
                                   timeout=3600.,
                                   notify=True)
        except Exception:
            self._log_exc("Error updating packages via apt-get")
            return

    async def _update_virtualenv(self, rebuild_env=False):
        if self.env is None:
            return
        # Update python dependencies
        bin_dir = os.path.dirname(self.env)
        env_path = os.path.normpath(os.path.join(bin_dir, ".."))
        if rebuild_env:
            self._notify_status(f"Creating virtualenv at: {env_path}...")
            if os.path.exists(env_path):
                shutil.rmtree(env_path)
            try:
                await self.execute_cmd(
                    f"virtualenv {self.venv_args} {env_path}", timeout=300.)
            except Exception:
                self._log_exc(f"Error creating virtualenv")
                return
            if not os.path.exists(self.env):
                raise self._log_exc("Failed to create new virtualenv", False)
        reqs = self.python_reqs
        if not os.path.isfile(reqs):
            self._log_exc(f"Invalid path to requirements_file '{reqs}'")
            return
        pip = os.path.join(bin_dir, "pip")
        self._notify_status("Updating python packages...")
        try:
            await self.execute_cmd(f"{pip} install -r {reqs}",
                                   timeout=1200.,
                                   notify=True,
                                   retries=3)
        except Exception:
            self._log_exc("Error updating python requirements")
        self._install_python_dist_requirements()

    def _install_python_dist_requirements(self):
        dist_reqs = self.python_dist_packages
        if dist_reqs is None:
            return
        dist_path = self.python_dist_path
        site_path = self.env_package_path
        for pkg in dist_reqs:
            for f in os.listdir(dist_path):
                if f.startswith(pkg):
                    src = os.path.join(dist_path, f)
                    dest = os.path.join(site_path, f)
                    self._notify_status(f"Linking to dist package: {pkg}")
                    if os.path.islink(dest):
                        os.remove(dest)
                    elif os.path.exists(dest):
                        self._notify_status(
                            f"Error symlinking dist package: {pkg}, "
                            f"file already exists: {dest}")
                        continue
                    os.symlink(src, dest)
                    break

    async def restart_service(self):
        self._notify_status("Restarting Service...")
        try:
            await self.execute_cmd(f"sudo systemctl restart {self.name}")
        except Exception:
            raise self._log_exc("Error restarting service")

    def get_update_status(self):
        return {
            'remote_alias': self.remote,
            'branch': self.branch,
            'version': self.version,
            'remote_version': self.remote_version,
            'current_hash': self.cur_hash,
            'remote_hash': self.remote_hash,
            'is_dirty': self.is_dirty,
            'is_valid': self.is_valid,
            'detached': self.detached,
            'debug_enabled': self.debug
        }
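
The dependency gate in update() above rests on _get_version_info(): each 'key=value' line in scripts/version.txt is turned into a tuple of digit strings, and plain tuple comparison against the current tag decides whether packages or the virtualenv must be rebuilt. Here is a small sketch of that comparison in isolation; the file contents and version string are made up for illustration.

import re

def parse_version_info(data, current_version):
    # Mirrors _get_version_info(): 'key=value' lines become tuples of digit
    # strings, which Python compares element-wise.
    entries = [e.strip() for e in data.split('\n') if e.strip()]
    vinfo = dict(i.split('=') for i in entries)
    vinfo = {k: tuple(re.findall(r"\d+", v)) for k, v in vinfo.items()}
    vinfo['version'] = tuple(re.findall(r"\d+", current_version))
    return vinfo

vinfo = parse_version_info("deps_version=0.2.0\nenv_version=0.3.1\n",
                           current_version="v0.2.5-113")
cur = vinfo['version']                                   # ('0', '2', '5', '113')
update_deps = cur < vinfo.get('deps_version', ())        # False: deps unchanged
need_env_rebuild = cur < vinfo.get('env_version', ())    # True: env too old
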
Code example #27
0
class ProjectGroomer(object):
    """ Cleans up expired transactions for a project. """
    def __init__(self, project_id, coordinator, zk_client, db_access,
                 thread_pool):
        """ Creates a new ProjectGroomer.

    Args:
      project_id: A string specifying a project ID.
      coordinator: A GroomingCoordinator.
      zk_client: A KazooClient.
      db_access: A DatastoreProxy.
      thread_pool: A ThreadPoolExecutor.
    """
        self.project_id = project_id

        self._coordinator = coordinator
        self._zk_client = zk_client
        self._tornado_zk = TornadoKazoo(self._zk_client)
        self._db_access = db_access
        self._thread_pool = thread_pool
        self._project_node = '/appscale/apps/{}'.format(self.project_id)
        self._containers = []
        self._inactive_containers = set()
        self._batch_resolver = BatchResolver(self.project_id, self._db_access)

        self._zk_client.ensure_path(self._project_node)
        self._zk_client.ChildrenWatch(self._project_node,
                                      self._update_containers)

        self._txid_manual_offset = 0
        self._offset_node = '/'.join([self._project_node, OFFSET_NODE])
        self._zk_client.DataWatch(self._offset_node, self._update_offset)

        self._stop_event = AsyncEvent()
        self._stopped_event = AsyncEvent()

        # Keeps track of cleanup results for each round of grooming.
        self._txids_cleaned = 0
        self._oldest_valid_tx_time = None

        self._worker_queue = AsyncQueue(maxsize=MAX_CONCURRENCY)
        for _ in range(MAX_CONCURRENCY):
            IOLoop.current().spawn_callback(self._worker)

        IOLoop.current().spawn_callback(self.start)

    @gen.coroutine
    def start(self):
        """ Starts the grooming process until the stop event is set. """
        logger.info('Grooming {}'.format(self.project_id))
        while True:
            if self._stop_event.is_set():
                break

            try:
                yield self._groom_project()
            except Exception:
                # Prevent the grooming loop from stopping if an error is encountered.
                logger.exception('Unexpected error while grooming {}'.format(
                    self.project_id))
                yield gen.sleep(MAX_TX_DURATION)

        self._stopped_event.set()

    @gen.coroutine
    def stop(self):
        """ Stops the grooming process. """
        logger.info('Stopping grooming process for {}'.format(self.project_id))
        self._stop_event.set()
        yield self._stopped_event.wait()

    @gen.coroutine
    def _worker(self):
        """ Processes items in the worker queue. """
        while True:
            tx_path, composite_indexes = yield self._worker_queue.get()
            try:
                tx_time = yield self._resolve_txid(tx_path, composite_indexes)
                if tx_time is None:
                    self._txids_cleaned += 1

                if tx_time is not None and tx_time < self._oldest_valid_tx_time:
                    self._oldest_valid_tx_time = tx_time
            except Exception:
                logger.exception(
                    'Unexpected error while resolving {}'.format(tx_path))
            finally:
                self._worker_queue.task_done()

    def _update_offset(self, new_offset, _):
        """ Watches for updates to the manual offset node.

    Args:
      new_offset: A string specifying the new manual offset.
    """
        self._txid_manual_offset = int(new_offset or 0)

    def _update_containers(self, nodes):
        """ Updates the list of active txid containers.

    Args:
      nodes: A list of strings specifying ZooKeeper nodes.
    """
        counters = [
            int(node[len(CONTAINER_PREFIX):] or 1) for node in nodes
            if node.startswith(CONTAINER_PREFIX)
            and node not in self._inactive_containers
        ]
        counters.sort()

        containers = [CONTAINER_PREFIX + str(counter) for counter in counters]
        if containers and containers[0] == '{}1'.format(CONTAINER_PREFIX):
            containers[0] = CONTAINER_PREFIX

        self._containers = containers

    @gen.coroutine
    def _groom_project(self):
        """ Runs the grooming process. """
        index = self._coordinator.index
        worker_count = self._coordinator.total_workers

        oldest_valid_tx_time = yield self._fetch_and_clean(index, worker_count)

        # Wait until there's a reasonable chance that some transactions have
        # timed out.
        next_timeout_eta = oldest_valid_tx_time + MAX_TX_DURATION

        # The oldest ignored transaction should still be valid, but ensure that
        # the timeout is not negative.
        next_timeout = max(0, next_timeout_eta - time.time())
        time_to_wait = datetime.timedelta(seconds=next_timeout +
                                          (MAX_TX_DURATION / 2))

        # Allow the wait to be cut short when a project is removed.
        try:
            yield self._stop_event.wait(timeout=time_to_wait)
        except gen.TimeoutError:
            return

    @gen.coroutine
    def _remove_locks(self, txid, tx_path):
        """ Removes entity locks involved with the transaction.

    Args:
      txid: An integer specifying the transaction ID.
      tx_path: A string specifying the location of the transaction node.
    """
        groups_path = '/'.join([tx_path, 'groups'])
        try:
            groups_data = yield self._tornado_zk.get(groups_path)
        except NoNodeError:
            # If the group list does not exist, the locks have not been acquired.
            return

        group_paths = json.loads(groups_data[0])
        for group_path in group_paths:
            try:
                contenders = yield self._tornado_zk.get_children(group_path)
            except NoNodeError:
                # The lock may have been cleaned up or not acquired in the first place.
                continue

            for contender in contenders:
                contender_path = '/'.join([group_path, contender])
                contender_data = yield self._tornado_zk.get(contender_path)
                contender_txid = int(contender_data[0])
                if contender_txid != txid:
                    continue

                yield self._tornado_zk.delete(contender_path)
                break

    @gen.coroutine
    def _remove_path(self, tx_path):
        """ Removes a ZooKeeper node.

    Args:
      tx_path: A string specifying the path to delete.
    """
        try:
            yield self._tornado_zk.delete(tx_path)
        except NoNodeError:
            pass
        except NotEmptyError:
            yield self._thread_pool.submit(self._zk_client.delete,
                                           tx_path,
                                           recursive=True)

    @gen.coroutine
    def _resolve_txid(self, tx_path, composite_indexes):
        """ Cleans up a transaction if it has expired.

    Args:
      tx_path: A string specifying the location of the ZooKeeper node.
      composite_indexes: A list of CompositeIndex objects.
    Returns:
      The transaction start time if still valid, None if invalid because this
      method will also delete it.
    """
        try:
            tx_data = yield self._tornado_zk.get(tx_path)
        except NoNodeError:
            return

        tx_time = float(tx_data[0])

        _, container, tx_node = tx_path.rsplit('/', 2)
        tx_node_id = int(tx_node.lstrip(COUNTER_NODE_PREFIX))
        container_count = int(container[len(CONTAINER_PREFIX):] or 1)
        if tx_node_id < 0:
            yield self._remove_path(tx_path)
            return

        container_size = MAX_SEQUENCE_COUNTER + 1
        automatic_offset = (container_count - 1) * container_size
        txid = self._txid_manual_offset + automatic_offset + tx_node_id

        if txid < 1:
            yield self._remove_path(tx_path)
            return

        # If the transaction is still valid, return the time it was created.
        if tx_time + MAX_TX_DURATION >= time.time():
            raise gen.Return(tx_time)

        yield self._batch_resolver.resolve(txid, composite_indexes)
        yield self._remove_locks(txid, tx_path)
        yield self._remove_path(tx_path)
        yield self._batch_resolver.cleanup(txid)

    @gen.coroutine
    def _fetch_and_clean(self, worker_index, worker_count):
        """ Cleans up expired transactions.

    Args:
      worker_index: An integer specifying this worker's index.
      worker_count: An integer specifying the number of total workers.
    Returns:
      A float specifying the time of the oldest valid transaction as a unix
      timestamp.
    """
        self._txids_cleaned = 0
        self._oldest_valid_tx_time = time.time()

        children = []
        for index, container in enumerate(self._containers):
            container_path = '/'.join([self._project_node, container])
            new_children = yield self._tornado_zk.get_children(container_path)

            if not new_children and index < len(self._containers) - 1:
                self._inactive_containers.add(container)

            children.extend(
                ['/'.join([container_path, node]) for node in new_children])

        logger.debug('Found {} transaction IDs for {}'.format(
            len(children), self.project_id))

        if not children:
            raise gen.Return(self._oldest_valid_tx_time)

        # Refresh these each time so that the indexes are fresh.
        encoded_indexes = yield self._thread_pool.submit(
            self._db_access.get_indices, self.project_id)
        composite_indexes = [
            CompositeIndex(index) for index in encoded_indexes
        ]

        for tx_path in children:
            tx_node_id = int(
                tx_path.split('/')[-1].lstrip(COUNTER_NODE_PREFIX))
            # Only resolve transactions that this worker has been assigned.
            if tx_node_id % worker_count != worker_index:
                continue

            yield self._worker_queue.put((tx_path, composite_indexes))

        yield self._worker_queue.join()

        if self._txids_cleaned > 0:
            logger.info('Cleaned up {} expired txids for {}'.format(
                self._txids_cleaned, self.project_id))

        raise gen.Return(self._oldest_valid_tx_time)
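
The transaction ID that _resolve_txid() reconstructs is the sum of a manual offset stored in ZooKeeper, an automatic offset derived from which counter container the node lives in, and the node's own sequence number. A standalone sketch of that arithmetic follows; the constants and the sample path are illustrative stand-ins for the values the real datastore modules provide.

# Illustrative stand-ins for the constants imported by the real module
MAX_SEQUENCE_COUNTER = 2 ** 31 - 1
CONTAINER_PREFIX = 'txids'
COUNTER_NODE_PREFIX = 'tx'

def txid_from_path(tx_path, manual_offset=0):
    # Same arithmetic as _resolve_txid(): container block + node counter
    _, container, tx_node = tx_path.rsplit('/', 2)
    tx_node_id = int(tx_node.lstrip(COUNTER_NODE_PREFIX))
    # The first container has no numeric suffix and counts as 1
    container_count = int(container[len(CONTAINER_PREFIX):] or 1)
    container_size = MAX_SEQUENCE_COUNTER + 1
    automatic_offset = (container_count - 1) * container_size
    return manual_offset + automatic_offset + tx_node_id

# A node in the second container yields an ID one container-size further on
txid = txid_from_path('/appscale/apps/guestbook/txids2/tx0000000042')
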
Code example #28
0
class WebUpdater:
    def __init__(self, umgr, config):
        self.umgr = umgr
        self.server = umgr.server
        self.notify_update_response = umgr.notify_update_response
        self.repo = config.get('repo').strip().strip("/")
        self.name = self.repo.split("/")[-1]
        if hasattr(config, "get_name"):
            self.name = config.get_name().split()[-1]
        self.path = os.path.realpath(os.path.expanduser(config.get("path")))
        self.version = self.remote_version = self.dl_url = "?"
        self.etag = None
        self.init_evt = Event()
        self.refresh_condition = None
        self._get_local_version()
        logging.info(f"\nInitializing Client Updater: '{self.name}',"
                     f"\nversion: {self.version}"
                     f"\npath: {self.path}")

    def _get_local_version(self):
        version_path = os.path.join(self.path, ".version")
        if os.path.isfile(version_path):
            with open(version_path, "r") as f:
                v = f.read()
            self.version = v.strip()

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            # Wait for the in-progress refresh to complete
            await self.refresh_condition.wait()
            return
        try:
            self._get_local_version()
            await self._get_remote_version()
        except Exception:
            logging.exception("Error Refreshing Client")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def _get_remote_version(self):
        # Remote state
        url = f"https://api.github.com/repos/{self.repo}/releases/latest"
        try:
            result = await self.umgr.github_api_request(url, etag=self.etag)
        except Exception:
            logging.exception(f"Client {self.repo}: Github Request Error")
            result = {}
        if result is None:
            # No change, update not necessary
            return
        self.etag = result.get('etag', None)
        self.remote_version = result.get('name', "?")
        release_assets = result.get('assets', [{}])[0]
        self.dl_url = release_assets.get('browser_download_url', "?")
        logging.info(f"Github client Info Received:\nRepo: {self.name}\n"
                     f"Local Version: {self.version}\n"
                     f"Remote Version: {self.remote_version}\n"
                     f"url: {self.dl_url}")

    async def update(self, *args):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            # wait for refresh if in progress
            await self.refresh_condition.wait()
        if self.remote_version == "?":
            await self.refresh()
            if self.remote_version == "?":
                raise self.server.error(
                    f"Client {self.repo}: Unable to locate update")
        if self.dl_url == "?":
            raise self.server.error(
                f"Client {self.repo}: Invalid download url")
        if self.version == self.remote_version:
            # Already up to date
            return
        if os.path.isdir(self.path):
            shutil.rmtree(self.path)
        os.mkdir(self.path)
        self.notify_update_response(f"Downloading Client: {self.name}")
        archive = await self.umgr.http_download_request(self.dl_url)
        with zipfile.ZipFile(io.BytesIO(archive)) as zf:
            zf.extractall(self.path)
        self.version = self.remote_version
        version_path = os.path.join(self.path, ".version")
        if not os.path.exists(version_path):
            with open(version_path, "w") as f:
                f.write(self.version)
        self.notify_update_response(f"Client Update Finished: {self.name}",
                                    is_complete=True)

    def get_update_status(self):
        return {
            'name': self.name,
            'version': self.version,
            'remote_version': self.remote_version
        }
Code example #29
0
class GitUpdater:
    def __init__(self, umgr, name, path, env):
        self.server = umgr.server
        self.execute_cmd = umgr.execute_cmd
        self.execute_cmd_with_response = umgr.execute_cmd_with_response
        self.notify_update_response = umgr.notify_update_response
        self.github_request = umgr.github_request
        self.name = name
        self.repo_path = path
        self.env = env
        self.version = self.cur_hash = self.remote_hash = "?"
        self.init_evt = Event()
        self.is_valid = self.is_dirty = False
        IOLoop.current().spawn_callback(self.refresh)

    def _get_version_info(self):
        ver_path = os.path.join(self.repo_path, "scripts/version.txt")
        vinfo = {}
        if os.path.isfile(ver_path):
            data = ""
            with open(ver_path, 'r') as f:
                data = f.read()
            try:
                entries = [e.strip() for e in data.split('\n') if e.strip()]
                vinfo = dict([i.split('=') for i in entries])
                vinfo = {k: tuple(re.findall(r"\d+", v)) for k, v in
                         vinfo.items()}
            except Exception:
                pass
            else:
                self._log_info(f"Version Info Found: {vinfo}")
        vinfo['version'] = tuple(re.findall(r"\d+", self.version))
        return vinfo

    def _log_exc(self, msg, traceback=True):
        log_msg = f"Repo {self.name}: {msg}"
        if traceback:
            logging.exception(log_msg)
        else:
            logging.info(log_msg)
        return self.server.error(msg)

    def _log_info(self, msg):
        log_msg = f"Repo {self.name}: {msg}"
        logging.info(log_msg)

    def _notify_status(self, msg, is_complete=False):
        log_msg = f"Repo {self.name}: {msg}"
        logging.debug(log_msg)
        self.notify_update_response(log_msg, is_complete)

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        to = None
        if timeout is not None:
            to = IOLoop.current().time() + timeout
        await self.init_evt.wait(to)

    async def refresh(self):
        await self._check_local_version()
        await self._check_remote_version()
        self.init_evt.set()

    async def _check_local_version(self):
        self.is_valid = False
        self.cur_hash = "?"
        try:
            branch = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} rev-parse --abbrev-ref HEAD")
            origin = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} remote get-url origin")
            hash = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} rev-parse HEAD")
            repo_version = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} describe --always "
                "--tags --long --dirty")
        except Exception:
            self._log_exc("Error retreiving git info")
            return

        self.is_dirty = repo_version.endswith("dirty")
        tag_version = "?"
        ver_match = re.match(r"v\d+\.\d+\.\d-\d+", repo_version)
        if ver_match:
            tag_version = ver_match.group()
        self.version = tag_version

        if not branch.startswith("fatal:"):
            self.cur_hash = hash
            if branch == "master":
                origin = origin.lower()
                if origin[-4:] != ".git":
                    origin += ".git"
                if origin == REPO_DATA[self.name]['origin']:
                    self.is_valid = True
                    self._log_info("Validity check for git repo passed")
                else:
                    self._log_info(f"Invalid git origin '{origin}'")
            else:
                self._log_info("Git repo not on master branch")
        else:
            self._log_info(f"Invalid git repo at path '{self.repo_path}'")

    async def _check_remote_version(self):
        repo_url = REPO_DATA[self.name]['repo_url']
        try:
            branch_info = await self.github_request(repo_url)
        except Exception:
            raise self._log_exc(f"Error retreiving github info")
        commit_hash = branch_info.get('commit', {}).get('sha', None)
        if commit_hash is None:
            self.is_valid = False
            self.upstream_version = "?"
            raise self._log_exc(f"Invalid github response", False)
        self._log_info(f"Received latest commit hash: {commit_hash}")
        self.remote_hash = commit_hash

    async def update(self, update_deps=False):
        if not self.is_valid:
            raise self._log_exc("Update aborted, repo is not valid", False)
        if self.is_dirty:
            raise self._log_exc(
                "Update aborted, repo has been modified", False)
        if self.remote_hash == "?":
            await self._check_remote_version()
        if self.remote_hash == self.cur_hash:
            # No need to update
            return
        self._notify_status("Updating Repo...")
        try:
            await self.execute_cmd(f"git -C {self.repo_path} pull -q")
        except Exception:
            raise self._log_exc("Error running 'git pull'")
        # Check Semantic Versions
        vinfo = self._get_version_info()
        cur_version = vinfo.get('version', ())
        update_deps |= cur_version < vinfo.get('deps_version', ())
        need_env_rebuild = cur_version < vinfo.get('env_version', ())
        if update_deps:
            await self._install_packages()
            await self._update_virtualenv(need_env_rebuild)
        elif need_env_rebuild:
            await self._update_virtualenv(True)
        # Refresh local repo state
        await self._check_local_version()
        if self.name == "moonraker":
            # Launch restart async so the request can return
            # before the server restarts
            self._notify_status("Update Finished...",
                                is_complete=True)
            IOLoop.current().call_later(.1, self.restart_service)
        else:
            await self.restart_service()
            self._notify_status("Update Finished...", is_complete=True)

    async def _install_packages(self):
        # Open the install script and read its contents
        inst_script = REPO_DATA[self.name]['install_script']
        inst_path = os.path.join(self.repo_path, inst_script)
        if not os.path.isfile(inst_path):
            self._log_info(f"Unable to open install script: {inst_path}")
            return
        with open(inst_path, 'r') as f:
            data = f.read()
        packages = re.findall(r'PKGLIST="(.*)"', data)
        packages = [p.lstrip("${PKGLIST}").strip() for p in packages]
        if not packages:
            self._log_info(f"No packages found in script: {inst_path}")
            return
        # TODO: Log and notify that packages will be installed
        pkgs = " ".join(packages)
        logging.debug(f"Repo {self.name}: Detected Packages: {pkgs}")
        self._notify_status("Installing system dependencies...")
        # Install packages with apt-get
        try:
            await self.execute_cmd(
                f"{APT_CMD} update", timeout=300., notify=True)
            await self.execute_cmd(
                f"{APT_CMD} install --yes {pkgs}", timeout=3600.,
                notify=True)
        except Exception:
            self._log_exc("Error updating packages via apt-get")
            return

    async def _update_virtualenv(self, rebuild_env=False):
        # Update python dependencies
        bin_dir = os.path.dirname(self.env)
        if rebuild_env:
            env_path = os.path.normpath(os.path.join(bin_dir, ".."))
            env_args = REPO_DATA[self.name]['venv_args']
            self._notify_status(f"Creating virtualenv at: {env_path}...")
            if os.path.exists(env_path):
                shutil.rmtree(env_path)
            try:
                await self.execute_cmd(
                    f"virtualenv {env_args} {env_path}", timeout=300.)
            except Exception:
                self._log_exc(f"Error creating virtualenv")
                return
            if not os.path.exists(self.env):
                raise self._log_exc("Failed to create new virtualenv", False)
            dist_pkgs = REPO_DATA[self.name]['dist_packages']
            dist_dir = REPO_DATA[self.name]['dist_dir']
            site_path = REPO_DATA[self.name]['site_pkg_path']
            for pkg in dist_pkgs:
                for f in os.listdir(dist_dir):
                    if f.startswith(pkg):
                        src = os.path.join(dist_dir, f)
                        dest = os.path.join(env_path, site_path, f)
                        self._notify_status(f"Linking to dist package: {pkg}")
                        os.symlink(src, dest)
                        break
        reqs = os.path.join(
            self.repo_path, REPO_DATA[self.name]['requirements'])
        if not os.path.isfile(reqs):
            self._log_exc(f"Invalid path to requirements_file '{reqs}'")
            return
        pip = os.path.join(bin_dir, "pip")
        self._notify_status("Updating python packages...")
        try:
            await self.execute_cmd(
                f"{pip} install -r {reqs}", timeout=1200., notify=True,
                retries=3)
        except Exception:
            self._log_exc("Error updating python requirements")

    async def restart_service(self):
        self._notify_status("Restarting Service...")
        try:
            await self.execute_cmd(f"sudo systemctl restart {self.name}")
        except Exception:
            raise self._log_exc("Error restarting service")

    def get_update_status(self):
        return {
            'version': self.version,
            'current_hash': self.cur_hash,
            'remote_hash': self.remote_hash,
            'is_dirty': self.is_dirty,
            'is_valid': self.is_valid}
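
This older GitUpdater reads its per-repo settings from a module-level REPO_DATA dict rather than a config file. Below is a hypothetical entry, inferred only from the keys the class above references ('origin', 'repo_url', 'install_script', 'requirements', 'venv_args', 'dist_packages', 'dist_dir', 'site_pkg_path'); the values are placeholders, not the project's real settings.

# Hypothetical shape of a REPO_DATA entry; all values are placeholders
REPO_DATA = {
    'moonraker': {
        'origin': "https://github.com/arksine/moonraker.git",
        'repo_url': "https://api.github.com/repos/arksine/moonraker/branches/master",
        'install_script': "scripts/install-moonraker.sh",
        'requirements': "scripts/moonraker-requirements.txt",
        'venv_args': "-p /usr/bin/python3",
        'dist_packages': [],  # system packages to symlink into the venv, if any
        'dist_dir': "/usr/lib/python3/dist-packages",
        'site_pkg_path': "lib/python3.7/site-packages",
    }
}
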