Example #1
    def test_generic_callback_ok(self):
        from kazoo.client import _generic_callback
        from kazoo.handlers.threading import SequentialThreadingHandler
        handler = SequentialThreadingHandler()

        # No args
        asy = handler.async_result()
        _generic_callback(asy, 0, zookeeper.OK)
        eq_(asy.get(), None)

        # One arg that's not a dict
        asy = handler.async_result()
        _generic_callback(asy, 0, zookeeper.OK, 12)
        eq_(asy.get(), 12)

        # One arg that's a node struct
        asy = handler.async_result()
        _generic_callback(asy, 0, zookeeper.OK, dummy_dict)
        eq_(asy.get().acl_version, 1)

        # Two args, the second is a struct
        asy = handler.async_result()
        _generic_callback(asy, 0, zookeeper.OK, 11, dummy_dict)
        val = asy.get()
        eq_(val[1].acl_version, 1)
        eq_(val[0], 11)
Example #2
    def test_generic_callback_error(self):
        from kazoo.client import _generic_callback
        from kazoo.handlers.threading import SequentialThreadingHandler
        handler = SequentialThreadingHandler()

        asy = handler.async_result()
        _generic_callback(asy, 0, zookeeper.CONNECTIONLOSS)
        self.assertRaises(zookeeper.ConnectionLossException, asy.get)
Example #3
    def test_exists_callback(self):
        from kazoo.client import _exists_callback
        from kazoo.handlers.threading import SequentialThreadingHandler
        handler = SequentialThreadingHandler()
        asy = handler.async_result()
        _exists_callback(asy, 0, zookeeper.OK, True)
        eq_(asy.get(), True)

        asy = handler.async_result()
        _exists_callback(asy, 0, zookeeper.CONNECTIONLOSS, False)
        self.assertRaises(zookeeper.ConnectionLossException, asy.get)
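
The three tests above exercise kazoo's internal callback helpers (the leading underscore marks them as private); the stable piece they rely on is the handler's async_result() object, which the callbacks resolve. A minimal standalone sketch of that object's behaviour, independent of the test suite above: set() fulfils a later get(), and set_exception() makes get() raise.

from kazoo.handlers.threading import SequentialThreadingHandler

handler = SequentialThreadingHandler()

result = handler.async_result()
result.set(42)                        # fulfil the result
assert result.get() == 42             # get() returns the stored value

failed = handler.async_result()
failed.set_exception(RuntimeError("boom"))
try:
    failed.get()                      # get() re-raises the stored exception
except RuntimeError:
    pass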
Example #4
    def setUp(self):
        self._driver = FakeDriver()
        self._storage = FakeStorage(SequentialThreadingHandler())
        self._zk_client = FakeClient(storage=self._storage)
        self._zk_client.start()

        self._framework_id = mesos_pb2.FrameworkID()
        self._framework_id.value = "framework_id_0"

        self._offer = mesos_pb2.Offer()
        self._offer.id.value = "offer_id_0"
        self._offer.framework_id.value = self._framework_id.value
        self._offer.slave_id.value = "slave_id_0"
        self._offer.hostname = "localhost"

        resources = create_resources(cpus=4,
                                     mem=512 * 3,
                                     ports=set([10000, 10001, 10002]))
        self._offer.resources.extend(resources)

        self._framework_user = "******"

        self._zk_url = "zk://host/mysos/test"
        self._cluster = MySQLCluster("cluster0", "user", "pass", 3)

        self._tmpdir = tempfile.mkdtemp()
        self._state_provider = LocalStateProvider(self._tmpdir)

        framework_info = mesos_pb2.FrameworkInfo(user=getpass.getuser(),
                                                 name="mysos",
                                                 checkpoint=False)
        self._state = Scheduler(framework_info)
Example #5
    def setup(self, request):
        self._driver = FakeDriver()
        self._storage = FakeStorage(SequentialThreadingHandler())
        self._zk_client = FakeClient(storage=self._storage)
        self._zk_client.start()

        self._offer = mesos_pb2.Offer()
        self._offer.id.value = "offer_id_0"
        self._offer.framework_id.value = "framework_id_0"
        self._offer.slave_id.value = "slave_id_0"
        self._offer.hostname = "localhost"

        # Enough memory and ports to fit three tasks.
        resources = create_resources(cpus=4,
                                     mem=512 * 3,
                                     ports=set([10000, 10001, 10002]))
        self._offer.resources.extend(resources)

        self._framework_user = "******"

        # Some tests use the default launcher; some don't.
        self._zk_url = "zk://host/mysos/test"
        self._cluster = MySQLCluster("cluster0", "user", "pass", 3)

        # Construct the state provider based on the test parameter.
        if request.param == LocalStateProvider:
            tmpdir = tempfile.mkdtemp()
            self._state_provider = LocalStateProvider(tmpdir)
            # Clean up after ourselves.
            request.addfinalizer(lambda: shutil.rmtree(tmpdir, True))
        elif request.param == ZooKeeperStateProvider:
            self._state_provider = ZooKeeperStateProvider(
                self._zk_client, "/mysos/test")

        self._launcher = MySQLClusterLauncher(
            self._driver,
            self._cluster,
            self._state_provider,
            self._zk_url,
            self._zk_client,
            self._framework_user,
            "./executor.pex",
            "cmd.sh",
            Amount(5, Time.SECONDS),
            "/etc/mysos/admin_keyfile.yml",
            query_interval=Amount(150, Time.MILLISECONDS))  # Short interval.

        self._elected = threading.Event()
        self._launchers = [self._launcher]  # See teardown().

        request.addfinalizer(self.teardown)
Example #6
def test_scheduler_runs():
    """
    Verifies that the scheduler successfully launches 3 "no-op" MySQL tasks.
    NOTE: Due to a limitation of zake, the scheduler's ZK operations are not propagated to
    executors in separate processes, but they are unit-tested separately.
    """
    import mesos.native

    # Make sure fake_mysos_executor.pex is available to be fetched by the Mesos slave.
    assert os.path.isfile('dist/fake_mysos_executor.pex')

    storage = FakeStorage(SequentialThreadingHandler())
    zk_client = FakeClient(storage=storage)
    zk_client.start()

    zk_url = "zk://fake_host/home/mysos/clusters"
    cluster_name = "test_cluster"
    num_nodes = 3

    state_provider = LocalStateProvider(safe_mkdtemp())

    framework_info = FrameworkInfo(user=getpass.getuser(),
                                   name="mysos",
                                   checkpoint=False)

    state = Scheduler(framework_info)

    scheduler = MysosScheduler(state, state_provider, getpass.getuser(),
                               os.path.abspath("dist/fake_mysos_executor.pex"),
                               "./fake_mysos_executor.pex", zk_client, zk_url,
                               Amount(40, Time.SECONDS), "/fakepath",
                               gen_encryption_key())

    scheduler_driver = mesos.native.MesosSchedulerDriver(
        scheduler, framework_info, "local")
    scheduler_driver.start()

    # Wait until the scheduler is connected and becomes available.
    assert scheduler.connected.wait(30)

    scheduler.create_cluster(cluster_name, "mysql_user", num_nodes)

    # A slave is promoted to be the master.
    deadline(
        lambda: wait_for_master(
            get_cluster_path(posixpath.join(zk_url, 'discover'), cluster_name),
            zk_client), Amount(40, Time.SECONDS))

    assert scheduler_driver.stop() == DRIVER_STOPPED
Example #7
def zk_conn():
    conn_retry_policy = KazooRetry(max_tries=-1,
                                   delay=0.1,
                                   backoff=2,
                                   max_delay=3600)
    handler = SequentialThreadingHandler()
    conn = KazooClient(hosts="127.0.0.1:2181",
                       timeout=60,
                       handler=handler,
                       connection_retry=conn_retry_policy,
                       command_retry=conn_retry_policy)

    conn.start()
    yield conn
    conn.stop()
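
Assuming zk_conn above is registered as a pytest fixture (the decorator is not shown in the snippet), a hypothetical test could consume the yielded client like this; the test name and znode paths are illustrative only.

def test_roundtrip(zk_conn):
    zk_conn.ensure_path("/example")                 # create parent nodes as needed
    zk_conn.create("/example/node", b"payload")     # write a znode
    data, stat = zk_conn.get("/example/node")       # read it back
    assert data == b"payload"
    zk_conn.delete("/example", recursive=True)      # clean up after the test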
Example #8
    def __init__(self, port):
        """
        Read config and spawn child processes.
        :type port: int
        """
        self._log = logging.getLogger('sent.daemon')
        self._log.info('Creating Sentinel')

        self._port = port
        self.children = dict()
        self._settings = None
        self._system = get_system()
        self._hostname = socket.getfqdn()
        self._prev_state = None
        self.listener_lock = Lock()
        self.version = get_version()
        self.task_client = None

        self.zkclient = KazooClient(hosts=get_zk_conn_string(),
                                    timeout=60.0,
                                    handler=SequentialThreadingHandler(),
                                    logger=logging.getLogger('kazoo.daemon'))

        self.zkclient.add_listener(self._zk_listener)
        # this will run self._reset_after_connection_loss
        self.zkclient.start()
        while not self._settings:
            self._log.info('Waiting for settings.')
            time.sleep(1)

        self._tmp_dir = os.path.join(
            self._settings.get('zookeeper').get('temp_directory', '/'), 'ruok')

        self.task_client = ZKTaskClient(
            self.children, self.zkclient,
            self._settings.get('zookeeper', {}).get('task'))

        self._rest_server = tornado.httpserver.HTTPServer(
            RestServer(self.children, self.version, self._tmp_dir,
                       self._hostname, self.zkclient))

        signal.signal(signal.SIGINT, self._handle_sigint)
        signal.signal(signal.SIGTERM, self._handle_sigint)
        self._log.info('Created Sentinel')
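
For context, add_listener() above expects a callable that receives a KazooState value on every connection state change; _zk_listener itself is not shown in this snippet, but a minimal sketch of such a listener (names and messages are illustrative) looks like this:

from kazoo.client import KazooClient, KazooState

def zk_listener(state):
    # Invoked by kazoo on every connection state change.
    if state == KazooState.LOST:
        print("session expired; ephemeral nodes are gone")
    elif state == KazooState.SUSPENDED:
        print("connection interrupted; waiting to reconnect")
    else:  # KazooState.CONNECTED
        print("(re)connected")

zk = KazooClient(hosts="127.0.0.1:2181")
zk.add_listener(zk_listener)
zk.start()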
Example #9
 def _makeOne(self, *args):
     from kazoo.handlers.threading import SequentialThreadingHandler
     return SequentialThreadingHandler(*args)
Example #10
 def _makeHandler(self):
     from kazoo.handlers.threading import SequentialThreadingHandler
     return SequentialThreadingHandler()
Example #11
    def __init__(self,
                 hosts='127.0.0.1:2181',
                 timeout=10.0,
                 client_id=None,
                 handler=None,
                 default_acl=None,
                 auth_data=None,
                 read_only=None,
                 randomize_hosts=True,
                 connection_retry=None,
                 command_retry=None,
                 logger=None,
                 **kwargs):
        """Create a :class:`KazooClient` instance. All time arguments
        are in seconds.

        :param hosts: Comma-separated list of hosts to connect to
                      (e.g. 127.0.0.1:2181,127.0.0.1:2182,[::1]:2183).
        :param timeout: The longest to wait for a Zookeeper connection.
        :param client_id: A Zookeeper client id, used when
                          re-establishing a prior session connection.
        :param handler: An instance of a class implementing the
                        :class:`~kazoo.interfaces.IHandler` interface
                        for callback handling.
        :param default_acl: A default ACL used on node creation.
        :param auth_data:
            A list of authentication credentials to use for the
            connection. Should be a list of (scheme, credential)
            tuples as :meth:`add_auth` takes.
        :param read_only: Allow connections to read only servers.
        :param randomize_hosts: By default randomize host selection.
        :param connection_retry:
            A :class:`kazoo.retry.KazooRetry` object to use for
            retrying the connection to Zookeeper. Also can be a dict of
            options which will be used for creating one.
        :param command_retry:
            A :class:`kazoo.retry.KazooRetry` object to use for
            the :meth:`KazooClient.retry` method. Also can be a dict of
            options which will be used for creating one.
        :param logger: A custom logger to use instead of the module
            global `log` instance.

        Basic Example:

        .. code-block:: python

            zk = KazooClient()
            zk.start()
            children = zk.get_children('/')
            zk.stop()

        As a convenience all recipe classes are available as attributes
        and get automatically bound to the client. For example::

            zk = KazooClient()
            zk.start()
            lock = zk.Lock('/lock_path')

        .. versionadded:: 0.6
            The read_only option. Requires Zookeeper 3.4+

        .. versionadded:: 0.6
            The retry_max_delay option.

        .. versionadded:: 0.6
            The randomize_hosts option.

        .. versionchanged:: 0.8
            Removed the unused watcher argument (was second argument).

        .. versionadded:: 1.2
            The connection_retry, command_retry and logger options.

        """
        self.logger = logger or log

        # Record the handler strategy used
        self.handler = handler if handler else SequentialThreadingHandler()
        if inspect.isclass(self.handler):
            raise ConfigurationError("Handler must be an instance of a class, "
                                     "not the class: %s" % self.handler)

        self.auth_data = auth_data if auth_data else set([])
        self.default_acl = default_acl
        self.randomize_hosts = randomize_hosts
        self.hosts = None
        self.chroot = None
        self.set_hosts(hosts)

        # Curator like simplified state tracking, and listeners for
        # state transitions
        self._state = KeeperState.CLOSED
        self.state = KazooState.LOST
        self.state_listeners = set()

        self._reset()
        self.read_only = read_only

        if client_id:
            self._session_id = client_id[0]
            self._session_passwd = client_id[1]
        else:
            self._reset_session()

        # ZK uses milliseconds
        self._session_timeout = int(timeout * 1000)

        # We use events like twitter's client to track current and
        # desired state (connected, and whether to shutdown)
        self._live = self.handler.event_object()
        self._writer_stopped = self.handler.event_object()
        self._stopped = self.handler.event_object()
        self._stopped.set()
        self._writer_stopped.set()

        self.retry = self._conn_retry = None

        if type(connection_retry) is dict:
            self._conn_retry = KazooRetry(**connection_retry)
        elif type(connection_retry) is KazooRetry:
            self._conn_retry = connection_retry

        if type(command_retry) is dict:
            self.retry = KazooRetry(**command_retry)
        elif type(command_retry) is KazooRetry:
            self.retry = command_retry

        if type(self._conn_retry) is KazooRetry:
            if self.handler.sleep_func != self._conn_retry.sleep_func:
                raise ConfigurationError("Retry handler and event handler "
                                         " must use the same sleep func")

        if type(self.retry) is KazooRetry:
            if self.handler.sleep_func != self.retry.sleep_func:
                raise ConfigurationError(
                    "Command retry handler and event "
                    "handler must use the same sleep func")

        if self.retry is None or self._conn_retry is None:
            old_retry_keys = dict(_RETRY_COMPAT_DEFAULTS)
            for key in old_retry_keys:
                try:
                    old_retry_keys[key] = kwargs.pop(key)
                    warnings.warn(
                        'Passing retry configuration param %s to the'
                        ' client directly is deprecated, please pass a'
                        ' configured retry object (using param %s)' %
                        (key, _RETRY_COMPAT_MAPPING[key]),
                        DeprecationWarning,
                        stacklevel=2)
                except KeyError:
                    pass

            retry_keys = {}
            for oldname, value in old_retry_keys.items():
                retry_keys[_RETRY_COMPAT_MAPPING[oldname]] = value

            if self._conn_retry is None:
                self._conn_retry = KazooRetry(
                    sleep_func=self.handler.sleep_func, **retry_keys)
            if self.retry is None:
                self.retry = KazooRetry(sleep_func=self.handler.sleep_func,
                                        **retry_keys)

        self._conn_retry.interrupt = lambda: self._stopped.is_set()
        self._connection = ConnectionHandler(self,
                                             self._conn_retry.copy(),
                                             logger=self.logger)

        # Every retry call should have its own copy of the retry helper
        # to avoid shared retry counts
        self._retry = self.retry

        def _retry(*args, **kwargs):
            return self._retry.copy()(*args, **kwargs)

        self.retry = _retry

        self.Barrier = partial(Barrier, self)
        self.Counter = partial(Counter, self)
        self.DoubleBarrier = partial(DoubleBarrier, self)
        self.ChildrenWatch = partial(ChildrenWatch, self)
        self.DataWatch = partial(DataWatch, self)
        self.Election = partial(Election, self)
        self.Lock = partial(Lock, self)
        self.Party = partial(Party, self)
        self.Queue = partial(Queue, self)
        self.LockingQueue = partial(LockingQueue, self)
        self.SetPartitioner = partial(SetPartitioner, self)
        self.Semaphore = partial(Semaphore, self)
        self.ShallowParty = partial(ShallowParty, self)

        # If we got any unhandled keywords, complain like python would
        if kwargs:
            raise TypeError('__init__() got unexpected keyword arguments: %s' %
                            (kwargs.keys(), ))
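
The docstring above notes that connection_retry and command_retry may be passed either as KazooRetry instances or as dicts of KazooRetry options. A minimal construction sketch of the dict form, assuming a local ZooKeeper on 127.0.0.1:2181:

from kazoo.client import KazooClient
from kazoo.handlers.threading import SequentialThreadingHandler

zk = KazooClient(
    hosts="127.0.0.1:2181",
    timeout=10.0,
    handler=SequentialThreadingHandler(),
    connection_retry={"max_tries": -1, "delay": 0.1, "max_delay": 60},  # retry forever
    command_retry={"max_tries": 3},
)
zk.start()
print(zk.get_children("/"))
zk.stop()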
Example #12
 def setUp(self):
   self._storage = FakeStorage(SequentialThreadingHandler())
   self._client = FakeClient(storage=self._storage)
   self._client.start()
   self._state_provider = ZooKeeperStateProvider(self._client, '/mysos')
Example #13
# cluster for the client to utilize it.
zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
print("Client started!\n")

# Clear the nodes at the end, or else you might get a NodeExistsError next time
zk.delete("/app", recursive=True)

# Ensure a path, create if it doesn't exist
zk.ensure_path("/app")

# Initialize queue at specified location
zk_queue = LockingQueue(zk, "/app/queue")

# Thread handler
handler = SequentialThreadingHandler()


# Watchers ---------------------------------------------------------------
@zk.DataWatch("/app/queue")
def watch_queue(data, stat):
    if data == "":
        pass
    elif data is not None or stat is not None:
        global input_done
        # print("Queue Node:")
        # print("%s\n" % data)

        # Distribute incoming data to the nodes
        if input_done:  # If the user is done with entering inputs
            zk.set("/app/addition", data)
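
The snippet above (which is cut off mid-watcher) initializes a LockingQueue recipe. A minimal standalone sketch of that recipe's basic cycle, separate from the watcher logic above: put() enqueues an item, get() locks the head item, and consume() removes it once processed.

from kazoo.client import KazooClient
from kazoo.recipe.queue import LockingQueue

zk = KazooClient(hosts="127.0.0.1:2181")
zk.start()

queue = LockingQueue(zk, "/app/queue")
queue.put(b"task-1")          # entries must be bytes
item = queue.get()            # blocks until an item is available, then locks it
assert item == b"task-1"
queue.consume()               # remove the locked item once it has been processed
zk.stop()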
Example #14
    def __init__(self, config, settings, queue, system, application_type,
                 cancel_flag):
        """
        :type config: dict (xml)
        :type settings: dict
        :type queue: zoom.agent.entities.unique_queue.UniqueQueue
        :type system: zoom.common.types.PlatformType
        :type application_type: zoom.common.types.ApplicationType
        :type cancel_flag: zoom.agent.entities.thread_safe_object.ThreadSafeObject
        """
        self.config = config
        self._settings = settings
        self.name = verify_attribute(self.config, 'id', none_allowed=False)
        self._log = logging.getLogger('sent.{0}.app'.format(self.name))
        # informational attributes
        self._host = socket.getfqdn()
        self._system = system
        self._predicates = list()
        self._running = True  # used to manually stop the run loop
        self._prev_state = None
        self._actions = dict()  # created in _reset_watches on zk connect
        self._env = os.environ.get('EnvironmentToUse', 'Staging')
        self._apptype = application_type
        self._restart_on_crash = \
            verify_attribute(self.config, 'restart_on_crash', none_allowed=True)
        self._post_stop_sleep = verify_attribute(self.config,
                                                 'post_stop_sleep',
                                                 none_allowed=True,
                                                 cast=int,
                                                 default=5)

        # tool-like attributes
        self.listener_lock = Lock()
        self._action_queue = queue
        self._mode = ApplicationMode(
            ApplicationMode.MANUAL,
            callback=self._update_agent_node_with_app_details)
        self._state = ThreadSafeObject(
            ApplicationState.OK,
            callback=self._update_agent_node_with_app_details)
        self._start_stop_time = ''  # Default to empty string for comparison
        self._login_user = '******'  # Default to Zoom
        self._user_set_in_react = False
        self._run_check_mode = False
        self._pd_svc_key = verify_attribute(config,
                                            'pagerduty_service',
                                            none_allowed=True)

        restartmax = verify_attribute(config,
                                      'restartmax',
                                      none_allowed=True,
                                      cast=int,
                                      default=3)
        self._rl = RestartLogic(
            self.name,
            restartmax,
            count_callback=self._update_agent_node_with_app_details)

        self._read_only = False

        self._paths = self._init_paths(self.config, settings, application_type)

        # clients
        self.zkclient = KazooClient(hosts=get_zk_conn_string(),
                                    timeout=60.0,
                                    handler=SequentialThreadingHandler(),
                                    logger=logging.getLogger(
                                        'kazoo.app.{0}'.format(self.name)))

        self.zkclient.add_listener(self._zk_listener)
        self._proc_client = self._init_proc_client(self.config,
                                                   application_type,
                                                   cancel_flag)

        self._actions = self._init_actions(settings)
        self._work_manager = self._init_work_manager(self._action_queue)
Example #15
    def __init__(self, hosts='127.0.0.1:2181',
                 timeout=10.0, client_id=None, max_retries=None,
                 retry_delay=0.1, retry_backoff=2, retry_jitter=0.8,
                 retry_max_delay=3600, handler=None, default_acl=None,
                 auth_data=None, read_only=None, randomize_hosts=True):
        """Create a :class:`KazooClient` instance. All time arguments
        are in seconds.

        :param hosts: Comma-separated list of hosts to connect to
                      (e.g. 127.0.0.1:2181,127.0.0.1:2182).
        :param timeout: The longest to wait for a Zookeeper connection.
        :param client_id: A Zookeeper client id, used when
                          re-establishing a prior session connection.
        :param max_retries: Maximum retries when using the
                            :meth:`KazooClient.retry` method.
        :param retry_delay: Initial delay when retrying a call.
        :param retry_backoff:
            Backoff multiplier between retry attempts. Defaults to 2
            for exponential back-off.
        :param retry_jitter:
            How much jitter delay to introduce per call. An amount of
            time up to this will be added per retry call to avoid
            hammering the server.
        :param retry_max_delay:
            Maximum delay in seconds, regardless of other backoff
            settings. Defaults to one hour.
        :param handler: An instance of a class implementing the
                        :class:`~kazoo.interfaces.IHandler` interface
                        for callback handling.
        :param default_acl: A default ACL used on node creation.
        :param auth_data:
            A list of authentication credentials to use for the
            connection. Should be a list of (scheme, credential)
            tuples as :meth:`add_auth` takes.
        :param read_only: Allow connections to read only servers.
        :param randomize_hosts: By default randomize host selection.

        Retry parameters will be used for connection establishment
        attempts and reconnects.

        Basic Example:

        .. code-block:: python

            zk = KazooClient()
            zk.start()
            children = zk.get_children('/')
            zk.stop()

        As a convenience all recipe classes are available as attributes
        and get automatically bound to the client. For example::

            zk = KazooClient()
            zk.start()
            lock = zk.Lock('/lock_path')

        .. versionadded:: 0.6
            The read_only option. Requires Zookeeper 3.4+

        .. versionadded:: 0.6
            The retry_max_delay option.

        .. versionadded:: 0.6
            The randomize_hosts option.

        .. versionchanged:: 0.8
            Removed the unused watcher argument (was second argument).

        """
        self.log_debug = logging.DEBUG >= log.getEffectiveLevel()

        # Record the handler strategy used
        self.handler = handler if handler else SequentialThreadingHandler()
        if inspect.isclass(self.handler):
            raise ConfigurationError("Handler must be an instance of a class, "
                                     "not the class: %s" % self.handler)

        self.auth_data = auth_data if auth_data else set([])
        self.default_acl = default_acl
        self.randomize_hosts = randomize_hosts
        self.hosts, chroot = collect_hosts(hosts, randomize_hosts)
        if chroot:
            self.chroot = normpath(chroot)
        else:
            self.chroot = ''

        # Curator like simplified state tracking, and listeners for
        # state transitions
        self._state = KeeperState.CLOSED
        self.state = KazooState.LOST
        self.state_listeners = set()

        self._reset()
        self.read_only = read_only

        if client_id:
            self._session_id = client_id[0]
            self._session_passwd = client_id[1]
        else:
            self._reset_session()

        # ZK uses milliseconds
        self._session_timeout = int(timeout * 1000)

        # We use events like twitter's client to track current and
        # desired state (connected, and whether to shutdown)
        self._live = self.handler.event_object()
        self._writer_stopped = self.handler.event_object()
        self._stopped = self.handler.event_object()
        self._stopped.set()
        self._writer_stopped.set()

        self.retry = KazooRetry(
            max_tries=max_retries,
            delay=retry_delay,
            backoff=retry_backoff,
            max_jitter=retry_jitter,
            max_delay=retry_max_delay,
            sleep_func=self.handler.sleep_func
        )
        self.retry_sleeper = self.retry.retry_sleeper.copy()

        self._connection = ConnectionHandler(
            self, self.retry.retry_sleeper.copy(), log_debug=self.log_debug)

        # convenience API
        from kazoo.recipe.barrier import Barrier
        from kazoo.recipe.barrier import DoubleBarrier
        from kazoo.recipe.counter import Counter
        from kazoo.recipe.election import Election
        from kazoo.recipe.lock import Lock
        from kazoo.recipe.lock import Semaphore
        from kazoo.recipe.partitioner import SetPartitioner
        from kazoo.recipe.party import Party
        from kazoo.recipe.party import ShallowParty
        from kazoo.recipe.queue import Queue
        from kazoo.recipe.queue import LockingQueue
        from kazoo.recipe.watchers import ChildrenWatch
        from kazoo.recipe.watchers import DataWatch

        self.Barrier = partial(Barrier, self)
        self.Counter = partial(Counter, self)
        self.DoubleBarrier = partial(DoubleBarrier, self)
        self.ChildrenWatch = partial(ChildrenWatch, self)
        self.DataWatch = partial(DataWatch, self)
        self.Election = partial(Election, self)
        self.Lock = partial(Lock, self)
        self.Party = partial(Party, self)
        self.Queue = partial(Queue, self)
        self.LockingQueue = partial(LockingQueue, self)
        self.SetPartitioner = partial(SetPartitioner, self)
        self.Semaphore = partial(Semaphore, self)
        self.ShallowParty = partial(ShallowParty, self)
Example #16
 def setUp(self):
   self._storage = FakeStorage(SequentialThreadingHandler())
   self._client = FakeClient(storage=self._storage)
   self._client.start()
   self._self_instance = ServiceInstance(Endpoint("host", 10000))
   self._state_manager = FakeStateManager()
Example #17
 def setUp(self):
     self.storage = FakeStorage(SequentialThreadingHandler())
     self.client = FakeClient(storage=self.storage)
     self.client.start()