Example #1
    def _get_mysql_connection(self, host, port, user, passwd, db=None):
        """
        Returns a ``Deferred`` which fires with a PyMySQL connection when one
        has been created.

        Parameters are passed directly to PyMySQL:
        https://github.com/PyMySQL/PyMySQL

        Any exception raised while trying to connect is re-raised if it
        indicates that MySQL has already started (i.e. anything other than a
        connection-refused error).
        """
        def connect_to_mysql():
            try:
                return connect(
                    host=host,
                    port=MYSQL_EXTERNAL_PORT,
                    user=user,
                    passwd=passwd,
                    db=db,
                )
            except OperationalError as e:
                # PyMySQL doesn't provide a structured way to get this.
                # https://github.com/PyMySQL/PyMySQL/issues/274
                if "Connection refused" in str(e):
                    return False
                else:
                    raise

        d = loop_until(connect_to_mysql)
        return d
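Every example on this page drives the same ``loop_until`` helper. Below is a minimal sketch of such a polling loop built on Twisted. It is not the real flocker implementation, which (as the tests in Examples #4 and #25 show) also wraps each call in an Eliot ``LOOP_UNTIL_ACTION`` and logs a ``LOOP_UNTIL_ITERATION_MESSAGE`` per poll; the ``interval`` parameter is an assumption matching the ``Clock.advance(0.1)`` used in Example #4.

# A rough sketch only; the real helper also emits Eliot log actions.
from twisted.internet.defer import maybeDeferred
from twisted.internet.task import deferLater


def loop_until(predicate, reactor=None, interval=0.1):
    """
    Call ``predicate`` every ``interval`` seconds until it returns something
    truthy, then fire the returned ``Deferred`` with that value.
    """
    if reactor is None:
        from twisted.internet import reactor

    def check(result):
        if result:
            # Truthy: stop looping and pass the value along.
            return result
        # Falsey: wait, then run the predicate again.
        return deferLater(reactor, interval, lambda: None).addCallback(
            lambda _: loop_until(predicate, reactor, interval))

    # maybeDeferred lets predicates return plain values or Deferreds, as the
    # dataset and container polling examples below do.
    return maybeDeferred(predicate).addCallback(check)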
Example #2
    def wait_for_dataset(self, dataset_properties):
        """
        Poll the dataset state API until the supplied dataset exists.

        :param dict dataset_properties: The attributes of the dataset that
            we're waiting for.
        :returns: A ``Deferred`` which fires with a 2-tuple of ``Cluster`` and
            API response when a dataset with the supplied properties appears in
            the cluster.
        """
        def created():
            """
            Check the dataset state list for the expected dataset.
            """
            request = self.datasets_state()

            def got_body(body):
                # State listing doesn't have metadata or deleted, but does
                # have unpredictable path.
                expected_dataset = dataset_properties.copy()
                del expected_dataset[u"metadata"]
                del expected_dataset[u"deleted"]
                for dataset in body:
                    dataset.pop("path")
                return expected_dataset in body
            request.addCallback(got_body)
            return request

        waiting = loop_until(created)
        waiting.addCallback(lambda ignored: (self, dataset_properties))
        return waiting
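A hypothetical call site for ``wait_for_dataset`` (the ``create_dataset`` helper and its two-tuple result are assumptions, not shown on this page):

# Hypothetical usage: create a dataset through the configuration API, then
# block the test until the state API reports it.
creating = cluster.create_dataset(dataset_properties)
creating.addCallback(
    lambda (cluster, dataset): cluster.wait_for_dataset(dataset))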
Example #3
    def test_kibana_connects_es(self):
        """
        Kibana can connect to Elasticsearch.
        """
        try:
            driver = webdriver.PhantomJS()
            self.addCleanup(driver.quit)
        except WebDriverException:
            raise SkipTest("PhantomJS must be installed.")

        url = "http://{ip}:{port}".format(
            ip=self.node_1,
            port=KIBANA_EXTERNAL_PORT)
        no_connect_error = "Could not contact Elasticsearch"
        success = "No results"

        waiting_for_es = self._get_elasticsearch(self.node_1)

        def wait_for_banner():
            """
            After a short amount of time, a banner will be displayed either
            saying that there are no results, or that Kibana cannot connect
            to Elasticsearch. This test can succeed or fail when this
            banner is shown.
            """
            source = driver.page_source
            if no_connect_error in source:
                self.fail("Kibana cannot connect to Elasticsearch.")
            elif success in source:
                return True
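            # Neither banner yet: fall through and return None (falsey), so
            # ``loop_until`` polls the page source again.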

        waiting_for_es.addCallback(lambda _: driver.get(url))
        return waiting_for_es.addCallback(
            lambda _: loop_until(wait_for_banner))
Example #4
    def test_iterates(self, logger):
        """
        If the predicate returns something falsey followed by something truthy,
        then ``loop_until`` fires with the truthy value as soon as it is
        returned.
        """
        result = object()
        results = [None, result]

        def predicate():
            return results.pop(0)
        clock = Clock()

        d = loop_until(predicate, reactor=clock)

        self.assertNoResult(d)

        clock.advance(0.1)
        self.assertEqual(
            self.successResultOf(d),
            result)

        action = LoggedAction.of_type(logger.messages, LOOP_UNTIL_ACTION)[0]
        assertContainsFields(self, action.start_message, {
            'predicate': predicate,
        })
        assertContainsFields(self, action.end_message, {
            'result': result,
        })
        self.assertTrue(action.succeeded)
        message = LoggedMessage.of_type(
            logger.messages, LOOP_UNTIL_ITERATION_MESSAGE)[0]
        self.assertEqual(action.children, [message])
        assertContainsFields(self, message.message, {
            'result': None,
        })
Example #5
def get_mongo_client(host, port=27017):
    """
    Returns a ``Deferred`` which fires with a ``MongoClient`` when one has been
    created.

    See http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#
        pymongo.mongo_client.MongoClient
    for more parameter information.

    :param bytes host: Hostname or IP address of the instance to connect to.
    :param int port: Port number on which to connect.

    The tutorial says "If you get a connection refused error try again after a
    few seconds; the application might take some time to fully start up."
    and so here we wait until the client can be created.
    """
    def create_mongo_client():
        try:
            client = MongoClient(host=host, port=port)
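            # Perform a trivial write so we know the server actually accepts
            # operations, not just that a client object could be constructed.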
            client.areyoualive.posts.insert({"ping": 1})
            return client
        except PyMongoError:
            return False

    d = loop_until(create_mongo_client)
    return d
Example #6
    def test_kibana_connects_es(self):
        """
        Kibana can connect to Elasticsearch.
        """
        try:
            driver = webdriver.PhantomJS()
            self.addCleanup(driver.quit)
        except WebDriverException:
            raise SkipTest("PhantomJS must be installed.")

        url = "http://{ip}:{port}".format(ip=self.node_1,
                                          port=KIBANA_EXTERNAL_PORT)
        no_connect_error = "Could not contact Elasticsearch"
        success = "No results"

        waiting_for_es = self._get_elasticsearch(self.node_1)

        def wait_for_banner():
            """
            After a short amount of time, a banner will be displayed either
            saying that there are no results, or that Kibana cannot connect
            to Elasticsearch. This test can succeed or fail when this
            banner is shown.
            """
            source = driver.page_source
            if no_connect_error in source:
                self.fail("Kibana cannot connect to Elasticsearch.")
            elif success in source:
                return True

        waiting_for_es.addCallback(lambda _: driver.get(url))
        return waiting_for_es.addCallback(
            lambda _: loop_until(wait_for_banner))
Example #7
def perform_run_remotely(base_dispatcher, intent):
    connection_helper = get_connection_helper(username=intent.username,
                                              address=intent.address,
                                              port=intent.port)

    context = Message.new(username=intent.username,
                          address=intent.address,
                          port=intent.port)

    def connect():
        connection = connection_helper.secureConnection()
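        # If the connection attempt fails, turn the failure into False so
        # that ``loop_until`` simply retries.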
        connection.addErrback(lambda _: False)
        return connection

    connection = yield loop_until(connect)

    dispatcher = ComposedDispatcher([
        get_ssh_dispatcher(
            connection=connection,
            context=context,
        ),
        base_dispatcher,
    ])

    yield perform(dispatcher, intent.commands)

    yield connection_helper.cleanupConnection(connection, False)
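The bare ``yield`` statements only work because, in the surrounding source, this generator is wrapped by something that resolves each yielded ``Deferred`` before resuming it; in Twisted that is conventionally ``inlineCallbacks``. A self-contained illustration of that mechanism (an assumption about the context, not flocker code):

# Each ``yield`` suspends the generator until the Deferred fires and then
# evaluates to its result -- the mechanism perform_run_remotely relies on.
from twisted.internet.defer import inlineCallbacks, returnValue, succeed


@inlineCallbacks
def example():
    value = yield succeed(6 * 7)
    returnValue(value)  # the returned Deferred fires with 42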
Example #8
    def wait_for_container(self, container_properties):
        """
        Poll the container state API until a container exists with all the
        supplied ``container_properties``.

        :param dict container_properties: The attributes of the container that
            we're waiting for. All the keys, values and those of nested
            dictionaries must match.
        :returns: A ``Deferred`` which fires with a 2-tuple of ``Cluster`` and
            API response when a container with the supplied properties appears
            in the cluster.
        """
        def created():
            """
            Check the container state list for the expected container
            properties.
            """
            request = self.current_containers()

            def got_response(result):
                cluster, containers = result
                expected_container = container_properties.copy()
                for container in containers:
                    container_items = container.items()
                    if all([
                        item in container_items
                        for item in expected_container.items()
                    ]):
                        # Return cluster and container state
                        return self, container
                return False
            request.addCallback(got_response)
            return request

        return loop_until(created)
Example #9
def perform_run_remotely(base_dispatcher, intent):
    connection_helper = get_connection_helper(
        username=intent.username, address=intent.address, port=intent.port)

    context = Message.new(
        username=intent.username, address=intent.address, port=intent.port)

    def connect():
        connection = connection_helper.secureConnection()
        connection.addErrback(lambda _: False)
        return connection

    connection = yield loop_until(connect)

    dispatcher = ComposedDispatcher([
        get_ssh_dispatcher(
            connection=connection,
            context=context,
        ),
        base_dispatcher,
    ])

    yield perform(dispatcher, intent.commands)

    yield connection_helper.cleanupConnection(
        connection, False)
Example #10
        def wait_for_hits(elasticsearch):
            def get_hits():
                try:
                    num_hits = elasticsearch.search()[u'hits'][u'total']
                except TransportError:
                    return False

                if num_hits == len(expected_messages):
                    return elasticsearch

            waiting_for_hits = loop_until(get_hits)
            return waiting_for_hits
Example #11
        def wait_for_hits(elasticsearch):
            def get_hits():
                try:
                    num_hits = elasticsearch.search()[u'hits'][u'total']
                except TransportError:
                    return False

                if num_hits == len(expected_messages):
                    return elasticsearch

            waiting_for_hits = loop_until(get_hits)
            return waiting_for_hits
Example #12
def get_test_cluster(node_count=0):
    """
    Build a ``Cluster`` instance with at least ``node_count`` nodes.

    :param int node_count: The number of nodes to ensure in the cluster.

    :returns: A ``Deferred`` which fires with a ``Cluster`` instance.
    """
    control_node = environ.get('FLOCKER_ACCEPTANCE_CONTROL_NODE')

    if control_node is None:
        raise SkipTest(
            "Set acceptance testing control node IP address using the " +
            "FLOCKER_ACCEPTANCE_CONTROL_NODE environment variable.")

    agent_nodes_env_var = environ.get('FLOCKER_ACCEPTANCE_AGENT_NODES')

    if agent_nodes_env_var is None:
        raise SkipTest(
            "Set acceptance testing node IP addresses using the " +
            "FLOCKER_ACCEPTANCE_AGENT_NODES environment variable and a " +
            "colon separated list.")

    agent_nodes = filter(None, agent_nodes_env_var.split(':'))

    if len(agent_nodes) < node_count:
        raise SkipTest("This test requires a minimum of {necessary} nodes, "
                       "{existing} node(s) are set.".format(
                           necessary=node_count, existing=len(agent_nodes)))

    cluster = Cluster(
        control_node=ControlService(address=control_node),
        nodes=[]
    )

    # Wait until nodes are up and running:
    def nodes_available():
        d = cluster.current_nodes()
        d.addCallback(lambda (cluster, nodes): len(nodes) >= node_count)
        return d
    agents_connected = loop_until(nodes_available)

    # Extract node hostnames from API that lists nodes. Currently we
    # happen to know these in advance, but in FLOC-1631 node identification
    # will switch to UUIDs instead.
    agents_connected.addCallback(lambda _: cluster.current_nodes())
    agents_connected.addCallback(lambda (cluster, nodes): cluster.set(
        "nodes", [Node(uuid=node[u"uuid"],
                       address=node["host"].encode("ascii"))
                  for node in nodes]))
    return agents_connected
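A hypothetical caller inside a trial test (the test name and node count are illustrative only):

    # Hypothetical usage: trial waits on the returned Deferred, so the
    # assertion only runs once enough agent nodes have connected.
    def test_cluster_is_ready(self):
        d = get_test_cluster(node_count=2)
        d.addCallback(lambda cluster: self.assertTrue(len(cluster.nodes) >= 2))
        return d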
Example #13
    def _get_postgres_connection(self, host, user, port, database=None):
        """
        Returns a ``Deferred`` which fires with a pg8000 connection when one
        has been created.

        See http://pythonhosted.org//pg8000/dbapi.html#pg8000.connect for
        parameter information.
        """
        def connect_to_postgres():
            try:
                return connect(host=host, user=user, port=port,
                               database=database)
            except (InterfaceError, ProgrammingError):
                return False

        d = loop_until(connect_to_postgres)
        return d
Example #14
    def _get_postgres_connection(self, host, user, port, database=None):
        """
        Returns a ``Deferred`` which fires with a pg8000 connection when one
        has been created.

        See http://pythonhosted.org//pg8000/dbapi.html#pg8000.connect for
        parameter information.
        """
        def connect_to_postgres():
            try:
                return connect(host=host, user=user, port=port,
                               database=database)
            except (InterfaceError, ProgrammingError):
                return False

        d = loop_until(connect_to_postgres)
        return d
Example #15
def mysql_connect(result):
    def mysql_can_connect():
        try:
            return connect(
                host=host,
                port=port,
                user=user,
                passwd=passwd,
                db=db,
            )
        except Error as e:
            Message.new(
                message_type="acceptance:mysql_connect_error",
                error=str(e)).write(Logger())
            return False
    dl = loop_until(mysql_can_connect)
    return dl
Example #16
    def got_cluster(cluster):
        def got_results(results):
            cluster, existing_containers = results
            expected = []
            for hostname, apps in expected_deployment.items():
                expected += [container_configuration_response(app, hostname)
                             for app in apps]
            for app in expected:
                app[u"running"] = True
            return sorted(existing_containers) == sorted(expected)

        def configuration_matches_state():
            d = cluster.current_containers()
            d.addCallback(got_results)
            return d

        return loop_until(configuration_matches_state)
Example #17
def wait_for_socket(hostname, port):
    # TODO: upstream this modified version into flocker (it was copied from
    # flocker.acceptance.test_api)
    """
    Wait until remote TCP socket is available.

    :param str hostname: The host where the remote service is running.
    :param int port: The port on which the remote service listens.

    :return Deferred: Fires when socket is available.
    """
    def api_available():
        try:
            s = socket.socket()
            s.connect((hostname, port))
            return True
        except socket.error:
            return False
    return loop_until(api_available)
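A hypothetical call site (the address and port are assumed values):

# Hypothetical usage: block the test until the service's TCP port accepts
# connections on the given host.
waiting = wait_for_socket("203.0.113.10", 4523)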
Example #18
def mysql_connect(result):
    def mysql_can_connect():
        try:
            return connect(
                host=host,
                port=MYSQL_EXTERNAL_PORT,
                user=user,
                passwd=passwd,
                db=db,
            )
        except OperationalError as e:
            # PyMySQL doesn't provide a structured way to get this.
            # https://github.com/PyMySQL/PyMySQL/issues/274
            if "Connection refused" in str(e):
                return False
            else:
                raise
    dl = loop_until(mysql_can_connect)
    return dl
Example #19
    def _get_elasticsearch(self, node):
        """
        Get an Elasticsearch instance on a node once one is available.

        :param node: The node hosting, or soon-to-be hosting, an Elasticsearch
            instance.
        :return: A running ``Elasticsearch`` instance.
        """
        elasticsearch = Elasticsearch(
            hosts=[{"host": node, "port": ELASTICSEARCH_EXTERNAL_PORT}],
        )

        def wait_for_ping():
            if elasticsearch.ping():
                return elasticsearch
            else:
                return False

        waiting_for_ping = loop_until(wait_for_ping)
        return waiting_for_ping
Example #20
    def _send_messages_to_logstash(self, node, messages):
        """
        Wait for logstash to start up and then send messages to it using
        Telnet.

        :param node: The node hosting, or soon-to-be hosting, a logstash
            instance.
        :param set messages: A set of strings to send to logstash.
        """
        def get_telnet_connection_to_logstash():
            try:
                return Telnet(host=node, port=LOGSTASH_EXTERNAL_PORT)
            except error:
                return False

        waiting_for_logstash = loop_until(get_telnet_connection_to_logstash)

        def send_messages(telnet):
            for message in messages:
                telnet.write(message + "\n")

        return waiting_for_logstash.addCallback(send_messages)
Example #21
    def _send_messages_to_logstash(self, node, messages):
        """
        Wait for logstash to start up and then send messages to it using
        Telnet.

        :param node: The node hosting, or soon-to-be hosting, a logstash
            instance.
        :param set messages: A set of strings to send to logstash.
        """
        def get_telnet_connection_to_logstash():
            try:
                return Telnet(host=node, port=LOGSTASH_EXTERNAL_PORT)
            except error:
                return False

        waiting_for_logstash = loop_until(get_telnet_connection_to_logstash)

        def send_messages(telnet):
            for message in messages:
                telnet.write(message + "\n")

        return waiting_for_logstash.addCallback(send_messages)
Example #22
    def _get_elasticsearch(self, node):
        """
        Get an Elasticsearch instance on a node once one is available.

        :param node: The node hosting, or soon-to-be hosting, an Elasticsearch
            instance.
        :return: A running ``Elasticsearch`` instance.
        """
        elasticsearch = Elasticsearch(
            hosts=[{"host": node, "port": ELASTICSEARCH_EXTERNAL_PORT}],
        )

        def wait_for_ping():
            if elasticsearch.ping():
                return elasticsearch
            else:
                return False

        waiting_for_ping = loop_until(wait_for_ping)
        return waiting_for_ping
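Taken together, the helpers in Examples #10/#11, #19/#22 and #20/#21 suggest a test flow roughly like the sketch below (the test name is invented, and ``wait_for_hits`` and ``expected_messages`` are assumed to be in scope exactly as in Example #10):

    # Loose sketch of the chaining: push messages through logstash, wait for
    # Elasticsearch to answer pings, then poll until every message is indexed.
    def test_messages_reach_elasticsearch(self):
        sending = self._send_messages_to_logstash(
            self.node_1, expected_messages)
        sending.addCallback(lambda _: self._get_elasticsearch(self.node_1))
        sending.addCallback(wait_for_hits)
        return sending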
Example #23
    def got_cluster(cluster):
        ip_to_uuid = {node.address: node.uuid for node in cluster.nodes}
        uuid_to_ip = {node.uuid: node.address for node in cluster.nodes}

        def got_results(results):
            cluster, existing_containers = results
            expected = []
            for hostname, apps in expected_deployment.items():
                node_uuid = ip_to_uuid[hostname]
                expected += [container_configuration_response(app, node_uuid)
                             for app in apps]
            for app in expected:
                app[u"running"] = True
                app[u"host"] = uuid_to_ip[app["node_uuid"]]

            return sorted(existing_containers) == sorted(expected)

        def configuration_matches_state():
            d = cluster.current_containers()
            d.addCallback(got_results)
            return d

        return loop_until(configuration_matches_state)
Example #24
def get_mongo_client(host, port=27017):
    """
    Returns a ``Deferred`` which fires with a ``MongoClient`` when one has been
    created.

    See http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#
        pymongo.mongo_client.MongoClient
    for more parameter information.

    :param bytes host: Hostname or IP address of the instance to connect to.
    :param int port: Port number on which to connect.

    The tutorial says "If you get a connection refused error try again after a
    few seconds; the application might take some time to fully start up."
    and so here we wait until the client can be created.
    """
    def create_mongo_client():
        try:
            return MongoClient(host=host, port=port)
        except ConnectionFailure:
            return False

    d = loop_until(create_mongo_client)
    return d
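A hypothetical call site (the address and the follow-up command are illustrative):

# Hypothetical usage: wait until mongod in the container accepts connections,
# then issue a cheap command against it.
d = get_mongo_client(b"203.0.113.10", 27017)
d.addCallback(lambda client: client.admin.command("ping"))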
Example #25
    def test_immediate_success(self, logger):
        """
        If the predicate returns something truthy immediately, then
        ``loop_until`` returns a deferred that has already fired with that
        value.
        """
        result = object()

        def predicate():
            return result
        clock = Clock()
        d = loop_until(predicate, reactor=clock)
        self.assertEqual(
            self.successResultOf(d),
            result)

        action = LoggedAction.of_type(logger.messages, LOOP_UNTIL_ACTION)[0]
        assertContainsFields(self, action.start_message, {
            'predicate': predicate,
        })
        assertContainsFields(self, action.end_message, {
            'action_status': 'succeeded',
            'result': result,
        })
Example #26
def get_nodes(test_case, num_nodes):
    """
    Create or get ``num_nodes`` nodes with no Docker containers on them.

    This is an alternative to
    http://doc-dev.clusterhq.com/gettingstarted/tutorial/
    vagrant-setup.html#creating-vagrant-vms-needed-for-flocker

    XXX This pretends to be asynchronous because, in some circumstances,
    num_nodes Docker containers will be created to replace it instead; see
    https://clusterhq.atlassian.net/browse/FLOC-900

    :param test_case: The ``TestCase`` running this unit test.
    :param int num_nodes: The number of nodes to start up.

    :return: A ``Deferred`` which fires with a set of IP addresses.
    """

    nodes_env_var = environ.get("FLOCKER_ACCEPTANCE_NODES")

    if nodes_env_var is None:
        raise SkipTest(
            "Set acceptance testing node IP addresses using the " +
            "FLOCKER_ACCEPTANCE_NODES environment variable and a colon " +
            "separated list.")

    # Remove any empty strings, for example if the list has ended with a colon
    nodes = filter(None, nodes_env_var.split(':'))

    if len(nodes) < num_nodes:
        raise SkipTest("This test requires a minimum of {necessary} nodes, "
                       "{existing} node(s) are set.".format(
                           necessary=num_nodes, existing=len(nodes)))

    reachable_nodes = set()

    for node in nodes:
        sock = socket()
        try:
            can_connect = not sock.connect_ex((node, 22))
        except gaierror:
            can_connect = False
        finally:
            if can_connect:
                reachable_nodes.add(node)
            sock.close()

    if len(reachable_nodes) < num_nodes:
        unreachable_nodes = set(nodes) - reachable_nodes
        test_case.fail(
            "At least {min} node(s) must be running and reachable on port 22. "
            "The following node(s) are reachable: {reachable}. "
            "The following node(s) are not reachable: {unreachable}.".format(
                min=num_nodes,
                reachable=", ".join(str(node) for node in reachable_nodes),
                unreachable=", ".join(str(node) for node in unreachable_nodes),
            )
        )

    # Only return the desired number of nodes
    reachable_nodes = set(sorted(reachable_nodes)[:num_nodes])

    # Remove all existing containers; we make sure to pass in node
    # hostnames since we still rely on flocker-deploy to distribute SSH
    # keys for now.
    clean_deploy = {u"version": 1,
                    u"nodes": {node: [] for node in reachable_nodes}}
    clean_applications = {u"version": 1,
                          u"applications": {}}
    flocker_deploy(test_case, clean_deploy, clean_applications)
    getting = get_test_cluster()

    def no_containers(cluster):
        d = cluster.current_containers()
        d.addCallback(lambda result: len(result[1]) == 0)
        return d
    getting.addCallback(lambda cluster:
                        loop_until(lambda: no_containers(cluster)))

    def clean_zfs(_):
        for node in reachable_nodes:
            _clean_node(test_case, node)
    getting.addCallback(clean_zfs)
    getting.addCallback(lambda _: reachable_nodes)
    return getting
def wait_for_plugin(hostname):
    """
    Wait until a non-zero number of plugins are loaded.
    """
    return loop_until(
        lambda: "flocker.sock" in shell(
            hostname, "ls -alh %s" % (PLUGIN_DIR,)))