def rebalance_out(self, cluster_servers, server_to_remove):
        """
        Issues a call to the admin server to remove a server from a pool,
        then waits for the rebalance to complete.

        cluster_servers: list of Couchbase Server endpoints, e.g.
            ["http://192.168.33.10:8091", ...] (https endpoints on :18091
            are also accepted, matching rebalance_in).
        server_to_remove: CouchbaseServer instance to eject.
        Returns True on success; raises on HTTP error.
        """
        if not isinstance(server_to_remove, CouchbaseServer):
            raise TypeError("'server_to_remove' must be a 'CouchbaseServer'")

        # Build knownNodes from ALL cluster servers. The node being removed
        # stays in knownNodes and is listed separately in ejectedNodes --
        # that is how ns_server expects a rebalance-out request.
        # Strip scheme and port: nodes are addressed as ns_1@<host>.
        hosts = []
        for server in cluster_servers:
            if "https" in server:
                server = server.replace("https://", "")
                server = server.replace(":18091", "")
            else:
                server = server.replace("http://", "")
                server = server.replace(":8091", "")
            hosts.append("ns_1@{}".format(server))
        # join() avoids the trailing comma the old concatenation produced
        known_nodes = "knownNodes=" + ",".join(hosts)

        # The node to eject goes in ejectedNodes
        ejected_node = "ejectedNodes=ns_1@{}".format(server_to_remove.host)
        data = "{}&{}".format(ejected_node, known_nodes)

        log_info("Starting rebalance out: {} with nodes {}".format(server_to_remove.host, data))
        # Override session headers for this one off request
        resp = self._session.post(
            "{}/controller/rebalance".format(self.url),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            data=data
        )
        log_r(resp)
        resp.raise_for_status()

        self._wait_for_rebalance_complete()

        return True
    def _create_internal_rbac_bucket_user(self, bucketname):
        # Create a local RBAC user whose username matches the bucket name and
        # grant it 'ro_admin' plus 'bucket_full_access[<bucket>]' so clients
        # can authenticate against the bucket (Couchbase Server 5.0+ removed
        # bucket passwords in favor of RBAC users).
        roles = "ro_admin,bucket_full_access[{}]".format(bucketname)
        # NOTE(review): this literal looks redacted ('******') by the paste
        # source -- confirm the real password value before relying on it.
        password = '******'

        data_user_params = {
            "name": bucketname,
            "roles": roles,
            "password": password
        }

        log_info("Creating RBAC user {} with password {} and roles {}".format(
            bucketname, password, roles))

        # PUT to /settings/rbac/users/local/<name> creates (or replaces) the
        # local user; uses the cluster admin credentials, not the session auth
        rbac_url = "{}/settings/rbac/users/local/{}".format(
            self.url, bucketname)

        # resp is pre-initialized so the except block can log it even when the
        # request itself failed before assignment
        resp = None
        try:
            resp = self._session.put(rbac_url,
                                     data=data_user_params,
                                     auth=('Administrator', 'password'))
            log_r(resp)
            resp.raise_for_status()
        except HTTPError as h:
            log_info("resp code: {}; error: {}".format(resp, h))
            raise RBACUserCreationError(h)
    def delete_buckets(self):
        """
        Delete every bucket on the server via the REST API.

        Retries up to 3 times because Couchbase Server occasionally returns a
        500 for a bucket delete issued over REST. Raises CBServerError if the
        buckets could not all be deleted within the retries.
        """
        max_retries = 3
        count = 0
        while count < max_retries:
            resp = self._session.get("{}/pools/default/buckets".format(self.url))
            log_r(resp)
            resp.raise_for_status()

            obj = json.loads(resp.text)

            existing_bucket_names = [entry["name"] for entry in obj]

            log_info("Existing buckets: {}".format(existing_bucket_names))
            log_info("Deleting buckets: {}".format(existing_bucket_names))

            # HACK around Couchbase Server issue where issuing a bucket delete via REST occasionally returns 500 error
            delete_num = 0
            # Delete existing buckets; count only the deletes that succeeded
            for bucket_name in existing_bucket_names:
                resp = self._session.delete("{0}/pools/default/buckets/{1}".format(self.url, bucket_name))
                log_r(resp)
                if resp.status_code == 200:
                    delete_num += 1

            if delete_num == len(existing_bucket_names):
                break
            # A 500 error may have occurred: re-query the buckets and retry
            time.sleep(5)
            count += 1

        # Check that max retries did not occur
        if count == max_retries:
            # Message fixed: this method deletes buckets (was "bucket creation")
            raise CBServerError("Max retries for bucket deletion hit. Could not delete buckets!")
# Beispiel #4 (0 votes) -- non-code separator left over from the snippet scrape
 def stop(self):
     """
     1. Flush and close the logfile capturing the LiteServ output
     2. Kill the LiteServ process
     3. Verify that no service is running on http://<host>:<port>
     """
     if self._verify_running():
         log_info("Stopping LiteServ: http://{}:{}".format(self.host, self.port))
         log_info("Stopping LiteServ: {}".format(self.liteserv_admin_url))
         try:
             resp = self.session.put("{}/stop".format(self.liteserv_admin_url))
         except ConnectionError:
             # The admin endpoint was unreachable: relaunch the app, wait for
             # it to answer, then retry the stop request once.
             # NOTE(review): 59850 is presumably the LiteServ admin port used
             # elsewhere in this harness -- confirm it matches
             # self.liteserv_admin_url.
             self.open_app()
             self._wait_until_reachable(port=59850)
             resp = self.session.put("{}/stop".format(self.liteserv_admin_url))
         log_r(resp)
         resp.raise_for_status()
     else:
         log_info("LiteServ is not running")
     # Using --exit in ios-sim means, --log has no effect
     # Have to separately copy the simulator logs
     if self.logfile_name and self.device_id:
         home = os.environ['HOME']
         ios_log_file = "{}/Library/Logs/CoreSimulator/{}/system.log".format(home, self.device_id)
         copyfile(ios_log_file, self.logfile_name)
         # Empty the simulator logs so that the next test run
         # will only have logs for that run
         open(ios_log_file, 'w').close()
     self._verify_not_running()
    def get_bucket_names(self):
        """ Returns list of the bucket names for a given Couchbase Server."""

        max_retries = 5
        attempt = 0

        # Retry to avoid intermittent Connection issues when getting buckets
        while True:
            if attempt == max_retries:
                raise CBServerError(
                    "Error! Could not get buckets after retries.")
            try:
                resp = self._session.get("{}/pools/default/buckets".format(
                    self.url))
                log_r(resp)
                resp.raise_for_status()
                break
            except ConnectionError:
                log_info(
                    "Hit a ConnectionError while trying to get buckets. Retrying ..."
                )
                attempt += 1
                time.sleep(1)

        # Each entry in the response body is one bucket definition
        bucket_names = [entry["name"] for entry in json.loads(resp.text)]

        log_info("Found buckets: {}".format(bucket_names))
        return bucket_names
def test_openidconnect_no_session(params_from_base_test_setup, sg_conf_name):
    """Authenticate with a test openid provider that is configured to NOT add a Set-Cookie header"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_no_session'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    # Reuse the helper created above (a second, redundant ClusterKeywords
    # instance used to be constructed here)
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    authenticate_url = discover_authenticate_url(sg_url, sg_db, "testnosessions")

    # Make the request to _oidc_testing; the provider is configured with no
    # sessions, so the response must not carry a Set-Cookie header
    response = requests.post(authenticate_url, files=formdata)
    log_r(response)
    assert "Set-Cookie" not in response.headers
    def start(self, logfile_name):
        """
        1. Starts a LiteServ with logging to provided logfile file object.
           The running LiteServ process will be stored in the self.process property.
        2. The method will poll on the endpoint to make sure LiteServ is available.
        3. The expected version will be compared with the version reported by http://<host>:<port>
        4. Return the url of the running LiteServ

        NOTE(review): 'logfile_name' is never referenced in this body -- the
        logging described in step 1 does not happen here. Confirm whether the
        log capture was meant to occur elsewhere or is missing.
        """

        # Only SQLite-backed LiteServ is wired up in this implementation
        if self.storage_engine != "SQLite":
            raise NotImplementedError("Need to make sure to support other storage types")

        self._verify_not_running()

        # 59850 is the LiteServ admin port on iOS, so the data port must differ
        if self.port == 59850:
            raise LiteServError("On iOS, port 59850 is reserved for the admin port")

        liteserv_admin_url = "http://{}:59850".format(self.host)
        log_info("Starting LiteServ: {}".format(liteserv_admin_url))

        # Admin API expects the listener port as a JSON body
        data = {
            "port": int(self.port)
        }

        resp = self.session.put("{}/start".format(liteserv_admin_url), data=json.dumps(data))
        log_r(resp)
        resp.raise_for_status()

        self._verify_launched()

        return "http://{}:{}".format(self.host, self.port)
# Beispiel #8 (0 votes) -- non-code separator left over from the snippet scrape
def test_openidconnect_no_session(params_from_base_test_setup, sg_conf_name):
    """Authenticate with a test openid provider that is configured to NOT add a Set-Cookie header"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # Discover the public Sync Gateway endpoint from the cluster topology
    topology = ClusterKeywords().get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_no_session'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    # Reset the cluster to the requested sync gateway config
    ClusterKeywords().reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    authenticate_url = discover_authenticate_url(sg_url, sg_db, "testnosessions")

    # Make the request to _oidc_testing and verify no session cookie is set
    response = requests.post(authenticate_url, files=formdata)
    log_r(response)
    assert "Set-Cookie" not in response.headers
    def start(self, timeout=1000, heartbeat=None, request_timeout=None):
        """
        Start a longpoll changes feed and store the results via
        self.process_changes.

        timeout: longpoll timeout in ms, passed through to Sync Gateway.
        heartbeat: optional heartbeat interval in ms.
        request_timeout: optional client-side timeout in ms (converted to
            seconds for the requests library).

        Loops until self.cancel is set or a client-side Timeout occurs.
        """

        # convert to seconds for use with requests lib api
        if request_timeout is not None:
            request_timeout /= 1000

        auth_type = get_auth_type(self.auth)
        current_seq_num = 0

        log_info("[Changes Tracker] Changes Tracker Starting ...")

        while not self.cancel:

            data = {
                "feed": "longpoll",
                "style": "all_docs",
                "since": current_seq_num
            }

            if timeout is not None:
                data["timeout"] = timeout

            if heartbeat is not None:
                data["heartbeat"] = heartbeat

            # Build the auth-specific kwargs once; previously the POST +
            # except Timeout block was duplicated three times, once per
            # auth mode, with identical bodies.
            request_kwargs = {
                "data": json.dumps(data),
                "timeout": request_timeout
            }
            if auth_type == AuthType.session:
                request_kwargs["cookies"] = dict(SyncGatewaySession=self.auth[1])
            elif auth_type == AuthType.http_basic:
                request_kwargs["auth"] = self.auth

            try:
                resp = requests.post("{}/_changes".format(self.endpoint), **request_kwargs)
            except Timeout as to:
                log_info("Request timed out. Exiting longpoll loop ...")
                logging.debug(to)
                break

            log_r(resp)
            resp.raise_for_status()
            resp_obj = resp.json()

            self.process_changes(resp_obj["results"])
            current_seq_num = resp_obj["last_seq"]

        log_info("[Changes Tracker] End of longpoll changes loop")
    def _get_tasks(self):
        """
        Returns the current tasks from the server
        """
        tasks_url = "{}/pools/default/tasks".format(self.url)
        resp = self._session.get(tasks_url)
        log_r(resp)
        resp.raise_for_status()
        return resp.json()
    def _get_tasks(self):
        """
        Returns the current tasks from the server
        """
        response = self._session.get("{}/pools/default/tasks".format(self.url))
        log_r(response)
        response.raise_for_status()
        current_tasks = response.json()

        return current_tasks
# Beispiel #12 (0 votes) -- non-code separator left over from the snippet scrape
def test_openidconnect_large_scope(params_from_base_test_setup, sg_conf_name):
    """Authenticate against a test provider config that only has a larger scope than the default,
    and make sure things like the nickname are returned in the jwt token returned back"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_large_scope'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    # Reuse the helper created above (a second, redundant ClusterKeywords
    # instance used to be constructed here)
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, "testlargescope")

    # build the full url
    url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    response = requests.post(url, files=formdata)
    log_r(response)

    # extract the token from the response
    response_json = response.json()
    id_token = response_json["id_token"]

    # {u'iss': u'http://localhost:4984/db/_oidc_testing', u'iat': 1466050188, u'aud': u'sync_gateway', u'exp': 1466053788, u'sub': u'testuser'}
    decoded_id_token = jwt.decode(id_token, verify=False)

    log_info("decoded_id_token: {}".format(decoded_id_token))

    # dict membership directly; .keys() call was redundant
    assert "nickname" in decoded_id_token
def test_openidconnect_large_scope(params_from_base_test_setup, sg_conf_name):
    """Authenticate against a test provider config that only has a larger scope than the default,
    and make sure things like the nickname are returned in the jwt token returned back"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # Discover the public Sync Gateway endpoint from the cluster topology
    topology = ClusterKeywords().get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_large_scope'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    # Reset the cluster to the requested sync gateway config
    ClusterKeywords().reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, "testlargescope")

    # build the full url
    url = "{}/{}/_oidc_testing/{}".format(sg_url, sg_db, authenticate_endpoint)

    # Make the request to _oidc_testing
    response = requests.post(url, files=formdata)
    log_r(response)

    # extract the token from the response
    id_token = response.json()["id_token"]

    # {u'iss': u'http://localhost:4984/db/_oidc_testing', u'iat': 1466050188, u'aud': u'sync_gateway', u'exp': 1466053788, u'sub': u'testuser'}
    decoded_id_token = jwt.decode(id_token, verify=False)

    log_info("decoded_id_token: {}".format(decoded_id_token))

    # The larger scope must surface the nickname claim in the token
    assert "nickname" in decoded_id_token
def get_server_version(host):
    """Return the Couchbase Server version on 'host' as '<version>-<build>'.

    NOTE(review): the basic-auth credentials in the scraped source were
    redacted to '*****:*****'. 'Administrator'/'password' is the admin login
    used everywhere else in this codebase (RBAC user creation, addNode) --
    confirm it matches the target cluster.
    """
    resp = requests.get("http://{}:{}@{}:8091/pools".format("Administrator", "password", host))
    log_r(resp)
    resp.raise_for_status()
    resp_obj = resp.json()

    # Actual version is the following format 4.1.1-5914-enterprise
    running_server_version = resp_obj["implementationVersion"]
    running_server_version_parts = running_server_version.split("-")

    # Return version in the format 4.1.1-5487 (version-build, edition dropped)
    return "{}-{}".format(running_server_version_parts[0], running_server_version_parts[1])
    def _verify_not_running(self):
        """
        Verifies that the endpoint does not return a 200 from a running service
        """
        root_url = "http://{}:{}/".format(self.host, self.port)
        try:
            resp = self.session.get(root_url)
        except ConnectionError:
            # Nothing is listening on the port -- exactly what we expect
            return

        log_r(resp)
        raise LiteServError("There should be no service running on the port")
    def delete_bucket(self, name):
        """ Delete a Couchbase Server bucket with the given 'name' """
        # Server 5.0+ pairs each bucket with an internal RBAC user, which
        # must be removed alongside the bucket
        major_version = int(get_server_version(self.host, self.cbs_ssl).split(".")[0])
        if major_version >= 5:
            self._delete_internal_rbac_bucket_user(name)

        bucket_url = "{0}/pools/default/buckets/{1}".format(self.url, name)
        resp = self._session.delete(bucket_url)
        log_r(resp)
        resp.raise_for_status()
    def _verify_not_running(self):
        """
        Verifies that no running service answers with a 200 on the
        LiteServ host/port.
        """
        try:
            response = self.session.get("http://{}:{}/".format(self.host, self.port))
        except ConnectionError:
            # A refused connection means nothing is listening, which is
            # the expected outcome here
            return

        log_r(response)
        raise LiteServError("There should be no service running on the port")
    def create_bucket(self, name, ramQuotaMB=1024):
        """
        1. Create CBS bucket via REST
        2. Create client connection and poll until bucket is available
           Catch all connection exception and break when KeyNotFound error is thrown
        3. Verify all server nodes are in a 'healthy' state before proceeding

        Followed the docs below that suggested this approach.
        http://docs.couchbase.com/admin/admin/REST/rest-bucket-create.html

        name: bucket name to create; returned on success.
        ramQuotaMB: per-node RAM quota for the bucket, in MB.
        Raises Exception if the bucket is not reachable within
        CLIENT_REQUEST_TIMEOUT seconds.
        """

        log_info("Creating bucket {} with RAM {}".format(name, ramQuotaMB))

        # Form-encoded params for the REST bucket-create endpoint
        data = {
            "name": name,
            "ramQuotaMB": str(ramQuotaMB),
            "authType": "sasl",
            "proxyPort": "11211",
            "bucketType": "couchbase",
            "flushEnabled": "1"
        }

        resp = self._session.post("{}/pools/default/buckets".format(self.url), data=data)
        log_r(resp)
        resp.raise_for_status()

        # Create a client and retry until a KeyNotFound error is thrown:
        # a NotFoundError for a missing key proves the bucket itself is up,
        # whereas ProtocolError / TemporaryFailError mean it is still warming
        start = time.time()
        while True:

            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise Exception("TIMEOUT while trying to create server buckets.")
            try:
                # Fresh SDK connection each attempt; 'foo' is a probe key that
                # is not expected to exist
                bucket = Bucket("couchbase://{}/{}".format(self.host, name))
                bucket.get('foo')
            except ProtocolError:
                log_info("Client Connection failed: Retrying ...")
                time.sleep(1)
                continue
            except TemporaryFailError:
                log_info("Failure from server: Retrying ...")
                time.sleep(1)
                continue
            except NotFoundError:
                log_info("Key not found error: Bucket is ready!")
                break

        self.wait_for_ready_state()

        return name
def get_sg_accel_version(host):
    """Return the running SG Accel version, either as '<version>-<build>'
    (release builds) or a bare commit hash (HEAD builds)."""
    resp = requests.get("http://{}:4985".format(host))
    log_r(resp)
    resp.raise_for_status()

    version_string = resp.json()["version"]
    parts = re.split("[ /(;)]", version_string)

    # HEAD (development) builds only expose a commit hash; release builds
    # expose "<version>(<build>;<commit>)"
    if parts[3] == "HEAD":
        running_version_formatted = parts[6]
    else:
        running_version_formatted = "{}-{}".format(parts[3], parts[4])

    # Returns the version as 338493 commit format or 1.2.1-4 version format
    return running_version_formatted
    def recover(self, server_to_recover):
        """
        Set delta recovery mode for a previously failed-over node so the next
        rebalance re-adds it using its existing data.

        server_to_recover: CouchbaseServer instance to recover.
        """
        if not isinstance(server_to_recover, CouchbaseServer):
            # Message fixed: previously referenced the wrong parameter name
            # ('server_to_add')
            raise TypeError("'server_to_recover' must be a 'CouchbaseServer'")

        log_info("Setting recover mode to 'delta' for server {}".format(server_to_recover.host))
        data = "otpNode=ns_1@{}&recoveryType=delta".format(server_to_recover.host)
        # Override session headers for this one off request
        resp = self._session.post(
            "{}/controller/setRecoveryType".format(self.url),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            data=data
        )

        log_r(resp)
        resp.raise_for_status()
    def stop(self):
        """
        1. Flush and close the logfile capturing the LiteServ output
        2. Kill the LiteServ process
        3. Verify that no service is running on http://<host>:<port>
        """

        log_info("Stopping LiteServ: http://{}:{}".format(self.host, self.port))

        # The LiteServ admin listener always runs on port 59850
        admin_url = "http://{}:59850".format(self.host)
        log_info("Stopping LiteServ: {}".format(admin_url))
        response = self.session.put("{}/stop".format(admin_url))
        log_r(response)
        response.raise_for_status()

        self._verify_not_running()
def get_sg_accel_version(host):
    """Return the running SG Accel version string.

    Release builds come back as '<version>-<build>'; development (HEAD)
    builds come back as a bare commit hash.
    """
    resp = requests.get("http://{}:4985".format(host))
    log_r(resp)
    resp.raise_for_status()

    raw_version = resp.json()["version"]
    tokens = re.split("[ /(;)]", raw_version)

    if tokens[3] == "HEAD":
        # Only a commit hash is available for HEAD builds
        return tokens[6]

    # Returns the version as 338493 commit format or 1.2.1-4 version format
    return "{}-{}".format(tokens[3], tokens[4])
    def recover(self, server_to_recover):
        """
        Set delta recovery mode for a previously failed-over node so the next
        rebalance re-adds it using its existing data.

        server_to_recover: CouchbaseServer instance to recover.
        """
        if not isinstance(server_to_recover, CouchbaseServer):
            # Message fixed: previously referenced the wrong parameter name
            # ('server_to_add')
            raise TypeError("'server_to_recover' must be a 'CouchbaseServer'")

        log_info("Setting recover mode to 'delta' for server {}".format(
            server_to_recover.host))
        data = "otpNode=ns_1@{}&recoveryType=delta".format(
            server_to_recover.host)
        # Override session headers for this one off request
        resp = self._session.post(
            "{}/controller/setRecoveryType".format(self.url),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            data=data)

        log_r(resp)
        resp.raise_for_status()
    def add_node(self, server_to_add, services="kv"):
        """
        Add the server_to_add to a Couchbase Server cluster.

        server_to_add: CouchbaseServer instance to join to the cluster.
        services: comma-separated Couchbase services for the node (default "kv").
        Raises Exception if the node cannot be added within
        CLIENT_REQUEST_TIMEOUT seconds.
        """

        if not isinstance(server_to_add, CouchbaseServer):
            raise TypeError("'server_to_add' must be a 'CouchbaseServer'")

        log_info("Adding server node {} to cluster ...".format(
            server_to_add.host))
        data = "hostname={}&user=Administrator&password=password&services={}".format(
            server_to_add.host, services)

        # HACK: Retry below addresses the following problem:
        #  1. Rebalance a node out
        #  2. Try to to immediately add node back into the cluster
        #  3. Fails because node is in state where it can't be add in yet
        # To work around this:
        #  1. Retry / wait until add node POST command is successful
        start = time.time()
        while True:

            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                # Message fixed: this is add_node, not
                # wait_for_rebalance_complete
                raise Exception("add_node: TIMEOUT")

            # Override session headers for this one off request
            resp = self._session.post(
                "{}/controller/addNode".format(self.url),
                headers={"Content-Type": "application/x-www-form-urlencoded"},
                data=data)

            log_r(resp)

            # If status of the POST is not 200, retry the request after a second
            if resp.status_code == 200:
                log_info("{} added to cluster successfully".format(
                    server_to_add.host))
                break
            else:
                # The error body is not guaranteed to be JSON (e.g. an HTML
                # error page); fall back to raw text so the retry loop survives
                try:
                    error_body = resp.json()
                except ValueError:
                    error_body = resp.text
                log_info(
                    "{}: {}: Could not add {} to cluster. Retrying ...".format(
                        resp.status_code, error_body, server_to_add.host))
                time.sleep(1)
    def rebalance_in(self, cluster_servers, server_to_add):
        """
        Adds a server from a pool and waits for rebalance to complete.
        cluster_servers should be a list of endpoints running Couchbase server.
            ex. ["http:192.168.33.10:8091", "http:192.168.33.11:8091", ...]
        """

        if not isinstance(server_to_add, CouchbaseServer):
            raise TypeError("'server_to_add' must be a 'CouchbaseServer'")

        # Collect every node except the one being added; scheme and port are
        # stripped because ns_server addresses nodes as ns_1@<host>
        known_nodes = "knownNodes="
        for endpoint in cluster_servers:
            if "https" in endpoint:
                bare_host = endpoint.replace("https://", "").replace(":18091", "")
            else:
                bare_host = endpoint.replace("http://", "").replace(":8091", "")

            if bare_host != server_to_add.host:
                known_nodes += "ns_1@{},".format(bare_host)

        # Append the node being added as the final knownNodes entry
        data = "{}ns_1@{}".format(known_nodes, server_to_add.host)

        # Rebalance nodes
        log_info("Starting rebalance in for {}".format(server_to_add.host))
        log_info("Known nodes: {}".format(data))

        # Override session headers for this one off request
        resp = self._session.post(
            "{}/controller/rebalance".format(self.url),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            data=data)
        log_r(resp)
        resp.raise_for_status()

        self._wait_for_rebalance_complete()

        return True
    def add_node(self, server_to_add):
        """
        Add the server_to_add to a Couchbase Server cluster (kv service only).

        server_to_add: CouchbaseServer instance to join to the cluster.
        Raises Exception if the node cannot be added within
        CLIENT_REQUEST_TIMEOUT seconds.
        """

        if not isinstance(server_to_add, CouchbaseServer):
            raise TypeError("'server_to_add' must be a 'CouchbaseServer'")

        # Log the host, not the object repr (consistent with the other
        # add_node variant in this codebase)
        log_info("Adding server node {} to cluster ...".format(server_to_add.host))
        data = "hostname={}&user=Administrator&password=password&services=kv".format(
            server_to_add.host
        )

        # HACK: Retry below addresses the following problem:
        #  1. Rebalance a node out
        #  2. Try to to immediately add node back into the cluster
        #  3. Fails because node is in state where it can't be add in yet
        # To work around this:
        #  1. Retry / wait until add node POST command is successful
        start = time.time()
        while True:

            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                # Message fixed: this is add_node, not
                # wait_for_rebalance_complete
                raise Exception("add_node: TIMEOUT")

            # Override session headers for this one off request
            resp = self._session.post(
                "{}/controller/addNode".format(self.url),
                headers={"Content-Type": "application/x-www-form-urlencoded"},
                data=data
            )

            log_r(resp)

            # If status of the POST is not 200, retry the request after a second
            if resp.status_code == 200:
                log_info("{} added to cluster successfully".format(server_to_add.host))
                break
            else:
                log_info("{}: Could not add {} to cluster. Retrying ...".format(resp.status_code, server_to_add.host))
                time.sleep(1)
    def _verify_stopped(self):
        """Polls until the server url is unreachable.

        Breaks when a ConnectionError shows the node is offline; raises
        TimeoutError if the node is still reachable after
        CLIENT_REQUEST_TIMEOUT seconds.
        """

        start = time.time()
        while True:
            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise TimeoutError("Waiting for server to be unreachable but it never was!")
            try:
                resp = self._session.get("{}/pools".format(self.url))
                log_r(resp)
                resp.raise_for_status()
            except ConnectionError:
                # This is expected and used to determine if a server node has gone offline
                break

            except HTTPError as e:
                # 500 errors may happen as a result of the node going down.
                # Fall through to the sleep below -- the previous 'continue'
                # here skipped it and busy-looped against the dying node.
                log_error(e)

            time.sleep(1)
def get_sync_gateway_version(host):
    """Return (version, vendor_version) for the Sync Gateway on 'host'.

    version is '<version>-<build>' for release builds or a bare commit hash
    for HEAD builds; vendor_version is the vendor version as a string.
    """
    resp = requests.get("http://{}:4984".format(host))
    log_r(resp)
    resp.raise_for_status()
    body = resp.json()

    version_parts = re.split("[ /(;)]", body["version"])

    # Vendor version is parsed as a float, convert so it can be compared with full version strings
    running_vendor_version = str(body["vendor"]["version"])

    if version_parts[3] == "HEAD":
        # Example: resp_obj["version"] = Couchbase Sync Gateway/HEAD(nobranch)(e986c8a)
        running_version_formatted = version_parts[6]
    else:
        # Example: resp_obj["version"] = "Couchbase Sync Gateway/1.3.0(183;bfe61c7)"
        running_version_formatted = "{}-{}".format(version_parts[3], version_parts[4])

    # Returns the version as 338493 commit format or 1.2.1-4 version format
    return running_version_formatted, running_vendor_version
    def wait_for_ready_state(self):
        """
        Verify all server nodes are in a "healthy" state to avoid sync_gateway startup failures
        Work around for this - https://github.com/couchbase/sync_gateway/issues/1745

        Raises Exception if the nodes are not all healthy within
        CLIENT_REQUEST_TIMEOUT seconds.
        """
        start = time.time()
        while True:

            # Bug fix: 'elapsed' used to hold time.time() (an absolute epoch
            # timestamp), so the comparison below effectively always fired
            # and the message printed a nonsense value. Measure the elapsed
            # seconds instead.
            elapsed = time.time() - start
            if elapsed > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise Exception(
                    "Timeout: Server not in ready state! {}s".format(elapsed))

            # Verify the server is in a "healthy", not "warmup" state
            try:
                resp = self._session.get("{}/pools/nodes".format(self.url))
                log_r(resp)
            except ConnectionError:
                # If bringing a server online, there may be some connnection issues. Continue and try again.
                time.sleep(1)
                continue

            resp_obj = resp.json()

            all_nodes_healthy = True
            for node in resp_obj["nodes"]:
                if node["status"] != "healthy":
                    all_nodes_healthy = False
                    log_info(
                        "Node is still not healthy. Status: {} Retrying ...".
                        format(node["status"]))
                    time.sleep(1)

            if not all_nodes_healthy:
                continue

            log_info("All nodes are healthy")
            log_debug(resp_obj)
            # All nodes are heathy if it made it to here
            break
    def verfiy_no_running_services(self, cluster_config):
        """Assert that no Couchbase Server, Sync Gateway, or SG Accel service
        answers on any host in the cluster config.

        Name typo ('verfiy') kept for backward compatibility with callers.
        NOTE(review): the CBS credentials in the scraped source were redacted
        to '*****:*****'; 'Administrator'/'password' is the admin login used
        elsewhere in this codebase -- confirm it matches the environment.
        """

        with open("{}.json".format(cluster_config)) as f:
            cluster_obj = json.loads(f.read())

        running_services = []
        for host in cluster_obj["hosts"]:

            # Couchbase Server (admin REST on 8091, basic auth required)
            try:
                resp = requests.get("http://{}:{}@{}:8091/pools".format("Administrator", "password", host["ip"]))
                log_r(resp)
                running_services.append(resp.url)
            except ConnectionError as he:
                log_info(he)

            # Sync Gateway (public port)
            try:
                resp = requests.get("http://{}:4984".format(host["ip"]))
                log_r(resp)
                running_services.append(resp.url)
            except ConnectionError as he:
                log_info(he)

            # Sg Accel (admin port)
            try:
                resp = requests.get("http://{}:4985".format(host["ip"]))
                log_r(resp)
                running_services.append(resp.url)
            except ConnectionError as he:
                log_info(he)

        assert len(running_services) == 0, "Running Services Found: {}".format(running_services)
def get_sync_gateway_version(host):
    """Return (version, vendor_version) reported by the Sync Gateway at 'host'.

    'version' is either a '<version>-<build>' string (e.g. '1.3.0-183') or,
    for unreleased HEAD builds, the bare commit hash. 'vendor_version' is the
    vendor version coerced to str so it can be compared against full version
    strings (the JSON value parses as a float).
    """
    resp = requests.get("http://{}:4984".format(host))
    log_r(resp)
    resp.raise_for_status()
    body = resp.json()

    # Split the version banner on its delimiters, e.g.
    #   "Couchbase Sync Gateway/1.3.0(183;bfe61c7)"
    #   "Couchbase Sync Gateway/HEAD(nobranch)(e986c8a)"
    parts = re.split("[ /(;)]", body["version"])

    vendor_version = str(body["vendor"]["version"])

    if parts[3] == "HEAD":
        # Development build: report the commit hash
        formatted = parts[6]
    else:
        # Released build: report '<version>-<build number>'
        formatted = "{}-{}".format(parts[3], parts[4])

    return formatted, vendor_version
def verify_sync_gateway_product_info(host):
    """Check that the service at 'host':4984 identifies itself as Sync Gateway.

    Validates both the 'Server' response header and the vendor name from the
    GET / JSON body.

    Raises:
        ProvisioningError: on either mismatch.
    """
    resp = requests.get("http://{}:4984".format(host))
    log_r(resp)
    resp.raise_for_status()
    body = resp.json()

    # The Server header must announce the product
    server_header = resp.headers["server"]
    log_info("'server' header: {}".format(server_header))
    if not server_header.startswith("Couchbase Sync Gateway"):
        raise ProvisioningError(
            "Wrong product info. Expected 'Couchbase Sync Gateway'")

    # The JSON body's vendor name must match exactly
    vendor_name = body["vendor"]["name"]
    log_info("vendor name: {}".format(vendor_name))
    if vendor_name != "Couchbase Sync Gateway":
        raise ProvisioningError(
            "Wrong vendor name. Expected 'Couchbase Sync Gateway'")
    def _verify_stopped(self):
        """Poll '<url>/pools' until the server is unreachable.

        A ConnectionError is the success signal (the node has gone offline).
        HTTP errors (e.g. 500s) are tolerated while the node shuts down.

        Raises:
            TimeoutError: if the server stays reachable past
                CLIENT_REQUEST_TIMEOUT.
        """

        start = time.time()
        while True:
            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise TimeoutError(
                    "Waiting for server to be unreachable but it never was!")
            try:
                resp = self._session.get("{}/pools".format(self.url))
                log_r(resp)
                resp.raise_for_status()
            except ConnectionError:
                # This is expected and used to determine if a server node has gone offline
                break

            except HTTPError as e:
                # 500 errors may happen as a result of the node going down.
                # Sleep before retrying -- the bare 'continue' previously
                # skipped the sleep and hammered the dying node in a busy loop.
                log_error(e)
                time.sleep(1)
                continue

            time.sleep(1)
    def rebalance_in(self, cluster_servers, server_to_add):
        """
        Adds a server to a pool and waits for rebalance to complete.
        cluster_servers should be a list of endpoints running Couchbase server.
            ex. ["http:192.168.33.10:8091", "http:192.168.33.11:8091", ...]

        Handles both http (:8091) and https (:18091) endpoints, matching the
        scheme handling in rebalance_out.

        Raises:
            TypeError: if server_to_add is not a CouchbaseServer.

        Returns True once rebalance has completed.
        """

        if not isinstance(server_to_add, CouchbaseServer):
            raise TypeError("'server_to_add' must be a 'CouchbaseServer'")

        # Add all servers except server_to_add to known nodes
        known_nodes = "knownNodes="
        for server in cluster_servers:
            # Strip scheme + admin port; handle ssl endpoints the same way
            # rebalance_out does (previously only http was handled, producing
            # malformed node names on an SSL-enabled cluster)
            if "https" in server:
                server = server.replace("https://", "")
                server = server.replace(":18091", "")
            else:
                server = server.replace("http://", "")
                server = server.replace(":8091", "")

            if server_to_add.host != server:
                known_nodes += "ns_1@{},".format(server)

        # Add server_to_add to known nodes
        data = "{}ns_1@{}".format(known_nodes, server_to_add.host)

        # Rebalance nodes
        log_info("Starting rebalance in for {}".format(server_to_add))
        log_info("Known nodes: {}".format(data))

        # Override session headers for this one off request
        resp = self._session.post(
            "{}/controller/rebalance".format(self.url),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            data=data
        )
        log_r(resp)
        resp.raise_for_status()

        self._wait_for_rebalance_complete()

        return True
    def _delete_internal_rbac_bucket_user(self, bucketname):
        """Delete the internal RBAC user whose username equals 'bucketname'.

        A 404 from the server (user never existed) is treated as success;
        any other HTTP error is re-raised as RBACUserDeletionError.
        """
        data_user_params = {"name": bucketname}

        log_info("Deleting RBAC user {}".format(bucketname))

        rbac_url = "{}/settings/rbac/users/local/{}".format(
            self.url, bucketname)

        resp = None
        try:
            resp = self._session.delete(rbac_url,
                                        data=data_user_params,
                                        auth=('Administrator', 'password'))
            log_r(resp)
            resp.raise_for_status()
        except HTTPError as h:
            log_info("resp code: {}; error: {}".format(resp, h))
            # str(h) instead of h.message: exceptions have no '.message'
            # attribute on Python 3, so the old code raised AttributeError here
            if '404 Client Error: Object Not Found for url' in str(h):
                log_info(
                    "RBAC user does not exist, no need to delete RBAC bucket user {}"
                    .format(bucketname))
            else:
                raise RBACUserDeletionError(h)
    def rebalance_out(self, cluster_servers, server_to_remove):
        """Remove 'server_to_remove' from the pool via the REST admin API,
        then block until the rebalance finishes.

        Raises:
            TypeError: if server_to_remove is not a CouchbaseServer.

        Returns True once rebalance has completed.
        """
        if not isinstance(server_to_remove, CouchbaseServer):
            raise TypeError("'server_to_remove' must be a 'CouchbaseServer'")

        # Build the knownNodes form field from every cluster endpoint,
        # stripping the scheme and admin port (ssl or plain).
        known_nodes = "knownNodes="
        for endpoint in cluster_servers:
            if "https" in endpoint:
                node_host = endpoint.replace("https://", "").replace(":18091", "")
            else:
                node_host = endpoint.replace("http://", "").replace(":8091", "")
            known_nodes += "ns_1@{},".format(node_host)

        # The node being removed goes in the ejectedNodes field
        ejected_node = "ejectedNodes=ns_1@{}".format(server_to_remove.host)
        data = "{}&{}".format(ejected_node, known_nodes)

        log_info("Starting rebalance out: {} with nodes {}".format(
            server_to_remove.host, data))

        # Override session headers for this one off request
        resp = self._session.post(
            "{}/controller/rebalance".format(self.url),
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            data=data)
        log_r(resp)
        resp.raise_for_status()

        self._wait_for_rebalance_complete()

        return True
    def wait_for_ready_state(self):
        """Block until every server node reports a "healthy" status.

        Works around sync_gateway startup failures when server nodes are still
        warming up: https://github.com/couchbase/sync_gateway/issues/1745

        Raises:
            Exception: on CLIENT_REQUEST_TIMEOUT.
        """
        start = time.time()
        while True:

            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise Exception("Verify Docs Present: TIMEOUT")

            # Fetch the node statuses; the server may be mid-restart, so
            # tolerate connection errors and just try again.
            try:
                resp = self._session.get("{}/pools/nodes".format(self.url))
                log_r(resp)
            except ConnectionError:
                time.sleep(1)
                continue

            pools_nodes = resp.json()

            healthy = True
            for node in pools_nodes["nodes"]:
                if node["status"] != "healthy":
                    healthy = False
                    log_info("Node is still not healthy. Status: {} Retrying ...".format(node["status"]))
                    # Back off once per unhealthy node before re-polling
                    time.sleep(1)

            if healthy:
                log_info("All nodes are healthy")
                log_debug(pools_nodes)
                break
def get_server_version(host, cbs_ssl=False):
    """ Gets the server version in the format '4.1.1-5487' for a running Couchbase Server"""

    # Admin endpoint scheme/port depend on whether SSL is enabled
    scheme, port = ("https", 18091) if cbs_ssl else ("http", 8091)

    # verify=False: test clusters use self-signed certificates
    resp = requests.get(
        "{}://Administrator:password@{}:{}/pools".format(scheme, host, port),
        verify=False)
    log_r(resp)
    resp.raise_for_status()

    # implementationVersion looks like '4.1.1-5914-enterprise';
    # keep only '<version>-<build>'
    version, build = resp.json()["implementationVersion"].split("-")[:2]
    return "{}-{}".format(version, build)
# --- Beispiel #39 (scraped example separator; original vote count: 0) ---
    def verfiy_no_running_services(self, cluster_config):
        """Assert that no Couchbase Server, Sync Gateway, or Sg Accel service
        responds on any host listed in '<cluster_config>.json'.

        Honors 'cbs_ssl_enabled' in the cluster config for the Couchbase
        Server admin endpoint. A ConnectionError means the service is down
        (the expected state); any successful response is collected.

        Raises:
            AssertionError: listing every URL that did respond.

        NOTE: the 'verfiy' typo in the name is preserved for caller compatibility.
        """

        with open("{}.json".format(cluster_config)) as f:
            cluster_obj = json.loads(f.read())

        # Couchbase Server admin endpoint depends on whether ssl is enabled
        server_port = 8091
        server_scheme = "http"

        if cluster_obj["cbs_ssl_enabled"]:
            server_port = 18091
            server_scheme = "https"

        running_services = []
        for host in cluster_obj["hosts"]:

            # One endpoint per service that should be stopped
            endpoints = [
                # Couchbase Server (admin credentials embedded)
                "{}://Administrator:password@{}:{}/pools".format(server_scheme, host["ip"], server_port),
                # Sync Gateway
                "http://{}:4984".format(host["ip"]),
                # Sg Accel
                "http://{}:4985".format(host["ip"]),
            ]

            for endpoint in endpoints:
                try:
                    resp = requests.get(endpoint)
                    log_r(resp)
                    # Got a response -> the service is still running
                    running_services.append(resp.url)
                except ConnectionError as he:
                    # Expected when the service is stopped
                    log_info(he)

        assert len(running_services) == 0, "Running Services Found: {}".format(running_services)
# --- Beispiel #40 (scraped example separator; original vote count: 0) ---
    def start_device(self, logfile_name):
        """
        1. Starts a LiteServ with logging to provided logfile file object.
           The running LiteServ process will be stored in the self.process property.
        2. The method will poll on the endpoint to make sure LiteServ is available.
        3. The expected version will be compared with the version reported by http://<host>:<port>
        4. Return the url of the running LiteServ

        NOTE(review): despite the docstring, this body does not assign
        self.process or compare versions directly; _verify_launched()
        presumably performs the version check -- confirm against that helper.
        """

        data = {}
        encryption_enabled = False
        self.logfile_name = logfile_name

        # Pick the app bundle matching the configured storage engine
        package_name = "LiteServ-iOS-Device.app"
        app_dir = "LiteServ-iOS"

        if self.storage_engine == "SQLCipher":
            package_name = "LiteServ-iOS-SQLCipher-Device.app"
            app_dir = "LiteServ-iOS-SQLCipher"

        self.app_path = "{}/{}/{}".format(BINARY_DIR, app_dir, package_name)

        # Deploy and launch the bundle on the attached iOS device
        # (--justlaunch: start the app and exit without attaching a debugger)
        output = subprocess.check_output([
            "ios-deploy", "--justlaunch", "--bundle", self.app_path
        ])
        log_info(output)

        # Map the configured storage engine to the storage type LiteServ expects
        if self.storage_engine == "SQLite" or self.storage_engine == "SQLCipher":
            data["storage"] = "SQLite"
        elif self.storage_engine == "ForestDB" or self.storage_engine == "ForestDB+Encryption":
            data["storage"] = "ForestDB"

        # Encrypted variants additionally need per-db passwords (below)
        if self.storage_engine == "ForestDB+Encryption" or self.storage_engine == "SQLCipher":
            encryption_enabled = True

        self._verify_not_running()

        # 59850 is reserved for the LiteServ admin API used below
        if self.port == 59850:
            raise LiteServError("On iOS, port 59850 is reserved for the admin port")

        data["port"] = int(self.port)

        if encryption_enabled:
            log_info("Encryption enabled ...")

            # Build a '<db>:pass' entry for every registered client db
            db_flags = []
            for db_name in REGISTERED_CLIENT_DBS:
                db_flags.append("{}:pass".format(db_name))
            db_flags = ",".join(db_flags)

            log_info("Running with db_flags: {}".format(db_flags))
            data["dbpasswords"] = db_flags

        # Wait for the admin port, then ask LiteServ to start its listener
        self._wait_until_reachable(port=59850)
        log_info("Starting LiteServ: {}".format(self.liteserv_admin_url))
        resp = self.session.put("{}/start".format(self.liteserv_admin_url), data=json.dumps(data))
        log_r(resp)
        resp.raise_for_status()
        self._verify_launched()

        return "http://{}:{}".format(self.host, self.port)
def test_openidconnect_expired_token(params_from_base_test_setup, sg_conf_name):
    """Authenticate and create an ID token that only lasts for 5 seconds, wait 10 seconds
       and make sure the token is rejected
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_expired_token'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    token_expiry_seconds = 5

    # multipart/form data fields for the test OIDC provider
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user'),
        'tokenttl': ('', "{}".format(token_expiry_seconds)),
    }

    # Discover the authenticate endpoint (includes query params such as
    # client_id and redirect_uri) and build the _oidc_testing url from it
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)
    url = "{}/{}/_oidc_testing/{}".format(sg_url, sg_db, authenticate_endpoint)

    # Request a short-lived ID token from the test provider
    response = requests.post(url, files=formdata)
    log_r(response)
    id_token = response.json()["id_token"]

    # Let the token expire
    time.sleep(token_expiry_seconds + 1)

    # A request with the expired token must be rejected
    resp = requests.get("{}/{}".format(sg_url, sg_db),
                        headers={"Authorization": "Bearer {}".format(id_token)})
    log_r(resp)
    assert resp.status_code != 200, "Expected non-200 response"
def test_openidconnect_garbage_token(params_from_base_test_setup, sg_conf_name):
    """Send a garbage/invalid token and make sure it cannot be used"""

    # WARNING!!!! SHOULD THERE BE A RESET?

    cluster_config = params_from_base_test_setup["cluster_config"]

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_garbage_token'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    token_expiry_seconds = 5

    # multipart/form data fields for the test OIDC provider
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user'),
        'tokenttl': ('', "{}".format(token_expiry_seconds)),
    }

    # Build the _oidc_testing url from the discovered authenticate endpoint
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)
    url = "{}/{}/_oidc_testing/{}".format(sg_url, sg_db, authenticate_endpoint)

    # Obtain a valid ID token so we can corrupt it below
    response = requests.post(url, files=formdata)
    log_r(response)
    id_token = response.json()["id_token"]

    db_url = "{}/{}".format(sg_url, sg_db)

    # Complete garbage token: must be rejected
    resp = requests.get(db_url,
                        headers={"Authorization": "Bearer {}".format("garbage")})
    log_r(resp)
    assert resp.status_code != 200, "Expected non-200 response"

    # Partially garbage token: replace the last '.'-separated component of the
    # real token with junk and expect rejection as well
    partial_garbage_token = ".".join(id_token.split(".")[:-1] + ["garbage"])

    resp = requests.get(db_url,
                        headers={"Authorization": "Bearer {}".format(partial_garbage_token)})
    log_r(resp)
    assert resp.status_code != 200, "Expected non-200 response"
def test_openidconnect_public_session_endpoint(params_from_base_test_setup, sg_conf_name):
    """Create a new session from the OpenID Connect token returned by hitting
    the public _session endpoint and make sure the response contains the Set-Cookie header."""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_public_session_endpoint'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data fields for the test OIDC provider
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    # Build the _oidc_testing url from the discovered authenticate endpoint
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)
    url = "{}/{}/_oidc_testing/{}".format(sg_url, sg_db, authenticate_endpoint)

    # Obtain an ID token from the test provider
    response = requests.post(url, files=formdata)
    log_r(response)
    id_token = response.json()["id_token"]

    # Exchange the bearer token for a session on the public _session endpoint
    session_url = "{}/{}/_session".format(sg_url, sg_db)
    response = requests.post(session_url, headers={
        "Authorization": "Bearer {}".format(id_token),
        "Content-Type": "application/json"
    })

    # A session cookie must be returned
    assert "Set-Cookie" in response.headers.keys()
    set_cookie_response = response.headers['Set-Cookie']
    assert "SyncGatewaySession" in set_cookie_response
# --- Beispiel #44 (scraped example separator; original vote count: 0) ---
def test_openidconnect_expired_token(params_from_base_test_setup, sg_conf_name):
    """Authenticate and create an ID token that only lasts for 5 seconds, wait 10 seconds
       and make sure the token is rejected
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_expired_token'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    token_expiry_seconds = 5

    # multipart/form data fields for the test OIDC provider
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user'),
        'tokenttl': ('', "{}".format(token_expiry_seconds)),
    }

    # Discover the authenticate endpoint (includes query params such as
    # client_id and redirect_uri) and build the _oidc_testing url from it
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)
    url = "{}/{}/_oidc_testing/{}".format(sg_url, sg_db, authenticate_endpoint)

    # Request a short-lived ID token from the test provider
    response = requests.post(url, files=formdata)
    log_r(response)
    id_token = response.json()["id_token"]

    # Let the token expire
    time.sleep(token_expiry_seconds + 1)

    # A request with the expired token must be rejected
    resp = requests.get("{}/{}".format(sg_url, sg_db),
                        headers={"Authorization": "Bearer {}".format(id_token)})
    log_r(resp)
    assert resp.status_code != 200, "Expected non-200 response"
# --- Beispiel #45 (scraped example separator; original vote count: 0) ---
def test_openidconnect_garbage_token(params_from_base_test_setup, sg_conf_name):
    """Send a garbage/invalid token and make sure it cannot be used"""

    # WARNING!!!! SHOULD THERE BE A RESET?

    cluster_config = params_from_base_test_setup["cluster_config"]

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_garbage_token'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    token_expiry_seconds = 5

    # multipart/form data fields for the test OIDC provider
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user'),
        'tokenttl': ('', "{}".format(token_expiry_seconds)),
    }

    # Build the _oidc_testing url from the discovered authenticate endpoint
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)
    url = "{}/{}/_oidc_testing/{}".format(sg_url, sg_db, authenticate_endpoint)

    # Obtain a valid ID token so we can corrupt it below
    response = requests.post(url, files=formdata)
    log_r(response)
    id_token = response.json()["id_token"]

    db_url = "{}/{}".format(sg_url, sg_db)

    # Complete garbage token: must be rejected
    resp = requests.get(db_url,
                        headers={"Authorization": "Bearer {}".format("garbage")})
    log_r(resp)
    assert resp.status_code != 200, "Expected non-200 response"

    # Partially garbage token: replace the last '.'-separated component of the
    # real token with junk and expect rejection as well
    partial_garbage_token = ".".join(id_token.split(".")[:-1] + ["garbage"])

    resp = requests.get(db_url,
                        headers={"Authorization": "Bearer {}".format(partial_garbage_token)})
    log_r(resp)
    assert resp.status_code != 200, "Expected non-200 response"
    def start(self, timeout=1000, heartbeat=None, request_timeout=None):
        """Run a longpoll _changes feed loop, passing each batch of results to
        self.process_changes.

        timeout: longpoll timeout in milliseconds, forwarded to the server.
        heartbeat: optional heartbeat interval in milliseconds, forwarded to
            the server.
        request_timeout: client-side request timeout in milliseconds
            (converted to seconds for the requests library).

        Loops until self.cancel is set, the loop timeout elapses, or a
        client-side request timeout fires.
        """

        # convert to seconds for use with requests lib api
        if request_timeout is not None:
            request_timeout /= 1000

        auth_type = get_auth_type(self.auth)
        # _changes 'since' value; advanced to last_seq after each response
        current_seq_num = 0

        start = time.time()
        # Overall loop budget: 10x the longpoll timeout (in seconds) when a
        # custom timeout is given, otherwise 60 seconds
        if timeout > 1000:
            loop_timeout = (timeout // 1000) * 10
        else:
            loop_timeout = 60

        log_info(
            "[Changes Tracker] Changes Tracker Starting for {} ...".format(
                loop_timeout))

        while not self.cancel:
            # This if condition will run this method until the timeout and break and come out of this method.
            if time.time() - start > loop_timeout:
                logging.info("[Changes Tracker] : TIMEOUT")
                break
            # longpoll request body; 'all_docs' style returns all conflicting
            # revisions per doc
            data = {
                "feed": "longpoll",
                "style": "all_docs",
                "since": current_seq_num
            }

            if timeout is not None:
                data["timeout"] = timeout

            if heartbeat is not None:
                data["heartbeat"] = heartbeat

            # Issue the request with the appropriate auth mechanism; a
            # client-side Timeout in any branch ends the loop
            if auth_type == AuthType.session:
                try:
                    resp = requests.post(
                        "{}/_changes".format(self.endpoint),
                        data=json.dumps(data),
                        cookies=dict(SyncGatewaySession=self.auth[1]),
                        timeout=request_timeout)
                except Timeout as to:
                    log_info("Request timed out. Exiting longpoll loop ...")
                    logging.debug(to)
                    break
            elif auth_type == AuthType.http_basic:
                try:
                    resp = requests.post("{}/_changes".format(self.endpoint),
                                         data=json.dumps(data),
                                         auth=self.auth,
                                         timeout=request_timeout)
                except Timeout as to:
                    log_info("Request timed out. Exiting longpoll loop ...")
                    logging.debug(to)
                    break
            else:
                # No auth
                try:
                    resp = requests.post("{}/_changes".format(self.endpoint),
                                         data=json.dumps(data),
                                         timeout=request_timeout)
                except Timeout as to:
                    log_info("Request timed out. Exiting longpoll loop ...")
                    logging.debug(to)
                    break

            log_r(resp)
            resp.raise_for_status()
            resp_obj = resp.json()

            # Record this batch and resume the feed from the last sequence
            self.process_changes(resp_obj["results"])
            current_seq_num = resp_obj["last_seq"]

        log_info("[Changes Tracker] End of longpoll changes loop")
    def create_bucket(self, name, ram_quota_mb=1024):
        """
        1. Create CBS bucket via REST
        2. Create client connection and poll until bucket is available
           Catch all connection exception and break when KeyNotFound error is thrown
        3. Verify all server nodes are in a 'healthy' state before proceeding

        Followed the docs below that suggested this approach.
        http://docs.couchbase.com/admin/admin/REST/rest-bucket-create.html

        name: bucket name to create (also used as the RBAC username on 5.x+).
        ram_quota_mb: bucket RAM quota in MB.
        Returns the bucket name.
        """

        log_info("Creating bucket {} with RAM {}".format(name, ram_quota_mb))

        # Bucket-creation payload differs between server 4.x and 5.x+
        server_version = get_server_version(self.host, self.cbs_ssl)
        server_major_version = int(server_version.split(".")[0])

        data = {
            "name": name,
            "ramQuotaMB": str(ram_quota_mb),
            "authType": "sasl",
            "bucketType": "couchbase",
            "flushEnabled": "1"
        }

        if server_major_version <= 4:
            # Create a bucket with password for server_major_version < 5
            # proxyPort should not be passed for 5.0.0 onwards for bucket creation
            data["saslPassword"] = "******"
            data["proxyPort"] = "11211"

        resp = None
        try:
            resp = self._session.post("{}/pools/default/buckets".format(
                self.url),
                                      data=data)
            log_r(resp)
            resp.raise_for_status()
        except HTTPError as h:
            # Log the failing response body before re-raising for diagnosis
            log_info("resp code: {}; resp text: {}; error: {}".format(
                resp, resp.json(), h))
            raise

        # Create a user with username=bucketname
        # (5.x+ uses RBAC users instead of bucket passwords)
        if server_major_version >= 5:
            self._create_internal_rbac_bucket_user(name)

        # Create client an retry until KeyNotFound error is thrown:
        # a NotFoundError for a missing key proves the bucket itself is
        # up and serving requests
        start = time.time()
        while True:

            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise Exception(
                    "TIMEOUT while trying to create server buckets.")
            try:
                bucket = Bucket("couchbase://{}/{}".format(self.host, name),
                                password='******')
                bucket.get('foo')
            except NotFoundError:
                log_info("Key not found error: Bucket is ready!")
                break
            except CouchbaseError as e:
                # Any other client error means the bucket is still warming up
                log_info("Error from server: {}, Retrying ...".format(e))
                time.sleep(1)
                continue

        self.wait_for_ready_state()

        return name
# --- Beispiel #48 (scraped example separator; original vote count: 0) ---
def test_openidconnect_public_session_endpoint(params_from_base_test_setup, sg_conf_name):
    """Create a new session from the OpenID Connect token returned by hitting
    the public _session endpoint and make sure the response contains the Set-Cookie header."""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_public_session_endpoint'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data fields for the test OIDC provider
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    # Build the _oidc_testing url from the discovered authenticate endpoint
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)
    url = "{}/{}/_oidc_testing/{}".format(sg_url, sg_db, authenticate_endpoint)

    # Obtain an ID token from the test provider
    response = requests.post(url, files=formdata)
    log_r(response)
    id_token = response.json()["id_token"]

    # Exchange the bearer token for a session on the public _session endpoint
    session_url = "{}/{}/_session".format(sg_url, sg_db)
    response = requests.post(session_url, headers={
        "Authorization": "Bearer {}".format(id_token),
        "Content-Type": "application/json"
    })

    # A session cookie must be returned
    assert "Set-Cookie" in response.headers.keys()
    set_cookie_response = response.headers['Set-Cookie']
    assert "SyncGatewaySession" in set_cookie_response
# --- Beispiel #49 (scraped example separator; original vote count: 0) ---
def test_openidconnect_basic_test(params_from_base_test_setup, sg_conf_name, is_admin_port, expect_signed_id_token):
    """Tests the basic OpenIDConnect login flow against the non-admin port when is_admin_port=False
    Tests the basic OpenIDConnect login flow against the admin port when is_admin_port=True

    Flow exercised:
      1. (public port only) an unauthenticated db request returns 401
      2. authenticate via the _oidc_testing provider to obtain an id_token,
         refresh_token, session cookie and session_id
      3. the id_token carries an "email" claim
      4. the db accepts the bearer id_token (200 when expect_signed_id_token,
         401 otherwise), the session cookie, and the body session_id
      5. _oidc_refresh yields a unique, usable id_token on each of 3 refreshes
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_basic_test'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using is_admin_port: {}".format(is_admin_port))
    log_info("Using expect_signed_id_token: {}".format(expect_signed_id_token))

    # Reuse the ClusterKeywords instance created above (previously a second,
    # redundant instance was constructed here).
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # make a request against the db and expect a 401 response since we haven't authenticated yet.
    # (but there's no point in doing this on the admin port since we'll never get a 401)
    db_url = "{}/{}".format(sg_url, sg_db)
    if not is_admin_port:
        resp = requests.get(db_url)
        assert resp.status_code == 401, "Expected 401 response"

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)

    # build the full url
    authenticate_endpoint_url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    # multipart/form data content ('' filename => plain form field)
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }
    authenticate_response = requests.post(authenticate_endpoint_url, files=formdata)
    set_cookie_response_header = authenticate_response.headers['Set-Cookie']
    log_r(authenticate_response)

    # extract the token from the response
    authenticate_response_json = authenticate_response.json()
    id_token = authenticate_response_json["id_token"]
    refresh_token = authenticate_response_json["refresh_token"]

    # make sure the id token has the email field in it
    # (signature deliberately not verified; we only inspect the claims)
    decoded_id_token = jwt.decode(id_token, verify=False)
    assert "email" in decoded_id_token

    # make a request using the ID token against the db and expect a 200 response
    headers = {"Authorization": "Bearer {}".format(id_token)}
    resp = requests.get(db_url, headers=headers)
    log_r(resp)
    if expect_signed_id_token:
        assert resp.status_code == 200, "Expected 200 response for bearer ID token"
    else:
        assert resp.status_code == 401, "Expected 401 response for bearer ID token"

    # make a request using the cookie against the db and expect a 200 response
    resp = requests.get(db_url, cookies=extract_cookie(set_cookie_response_header))
    log_r(resp)
    assert resp.status_code == 200, "Expected 200 response when using session cookie"

    # make a request using the session_id that's sent in the body
    resp = requests.get(db_url, cookies={"SyncGatewaySession": authenticate_response_json["session_id"]})
    assert resp.status_code == 200, "Expected 200 response using session_id from body"

    # try to use the refresh token to get a few new id_tokens
    id_tokens = [id_token]
    # range (not Python-2-only xrange) keeps this Python 3 compatible
    for _ in range(3):

        # This pause is required because according to @ajres:
        # The id_token will only be unique if the two calls are more than a second apart.
        # It would be easy to add an atomically incrementing nonce claim to each token to ensure that they are always unique
        time.sleep(2)

        refresh_token_url = "{}/{}/_oidc_refresh?refresh_token={}&provider={}".format(sg_url, sg_db, refresh_token, "test")
        authenticate_response = requests.get(refresh_token_url)
        authenticate_response_json = authenticate_response.json()
        id_token_refresh = authenticate_response_json["id_token"]
        # make sure we get a unique id token each time
        assert id_token_refresh not in id_tokens

        # make a request using the ID token against the db and expect a 200 response
        headers = {"Authorization": "Bearer {}".format(id_token_refresh)}
        resp = requests.get(db_url, headers=headers)
        log_r(resp)
        if expect_signed_id_token:
            assert resp.status_code == 200, "Expected 200 response for bearer ID token on refresh"
        else:
            assert resp.status_code == 401, "Expected 401 response for bearer ID token on refresh"

        id_tokens.append(id_token_refresh)
# NOTE(review): this function is a byte-identical duplicate of an earlier
# definition of test_openidconnect_basic_test in this file; at import time
# this later definition shadows the earlier one, so the earlier copy never
# runs. One of the two should be removed — confirm which and delete it.
def test_openidconnect_basic_test(params_from_base_test_setup, sg_conf_name, is_admin_port, expect_signed_id_token):
    """Tests the basic OpenIDConnect login flow against the non-admin port when is_admin_port=False
    Tests the basic OpenIDConnect login flow against the admin port when is_admin_port=True

    Flow exercised:
      1. (public port only) an unauthenticated db request returns 401
      2. authenticate via the _oidc_testing provider to obtain an id_token,
         refresh_token, session cookie and session_id
      3. the id_token carries an "email" claim
      4. the db accepts the bearer id_token (200 when expect_signed_id_token,
         401 otherwise), the session cookie, and the body session_id
      5. _oidc_refresh yields a unique, usable id_token on each of 3 refreshes
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_basic_test'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using is_admin_port: {}".format(is_admin_port))
    log_info("Using expect_signed_id_token: {}".format(expect_signed_id_token))

    # Reuse the ClusterKeywords instance created above (previously a second,
    # redundant instance was constructed here).
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # make a request against the db and expect a 401 response since we haven't authenticated yet.
    # (but there's no point in doing this on the admin port since we'll never get a 401)
    db_url = "{}/{}".format(sg_url, sg_db)
    if not is_admin_port:
        resp = requests.get(db_url)
        assert resp.status_code == 401, "Expected 401 response"

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)

    # build the full url
    authenticate_endpoint_url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    # multipart/form data content ('' filename => plain form field)
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }
    authenticate_response = requests.post(authenticate_endpoint_url, files=formdata)
    set_cookie_response_header = authenticate_response.headers['Set-Cookie']
    log_r(authenticate_response)

    # extract the token from the response
    authenticate_response_json = authenticate_response.json()
    id_token = authenticate_response_json["id_token"]
    refresh_token = authenticate_response_json["refresh_token"]

    # make sure the id token has the email field in it
    # (signature deliberately not verified; we only inspect the claims)
    decoded_id_token = jwt.decode(id_token, verify=False)
    assert "email" in decoded_id_token

    # make a request using the ID token against the db and expect a 200 response
    headers = {"Authorization": "Bearer {}".format(id_token)}
    resp = requests.get(db_url, headers=headers)
    log_r(resp)
    if expect_signed_id_token:
        assert resp.status_code == 200, "Expected 200 response for bearer ID token"
    else:
        assert resp.status_code == 401, "Expected 401 response for bearer ID token"

    # make a request using the cookie against the db and expect a 200 response
    resp = requests.get(db_url, cookies=extract_cookie(set_cookie_response_header))
    log_r(resp)
    assert resp.status_code == 200, "Expected 200 response when using session cookie"

    # make a request using the session_id that's sent in the body
    resp = requests.get(db_url, cookies={"SyncGatewaySession": authenticate_response_json["session_id"]})
    assert resp.status_code == 200, "Expected 200 response using session_id from body"

    # try to use the refresh token to get a few new id_tokens
    id_tokens = [id_token]
    # range (not Python-2-only xrange) keeps this Python 3 compatible
    for _ in range(3):

        # This pause is required because according to @ajres:
        # The id_token will only be unique if the two calls are more than a second apart.
        # It would be easy to add an atomically incrementing nonce claim to each token to ensure that they are always unique
        time.sleep(2)

        refresh_token_url = "{}/{}/_oidc_refresh?refresh_token={}&provider={}".format(sg_url, sg_db, refresh_token, "test")
        authenticate_response = requests.get(refresh_token_url)
        authenticate_response_json = authenticate_response.json()
        id_token_refresh = authenticate_response_json["id_token"]
        # make sure we get a unique id token each time
        assert id_token_refresh not in id_tokens

        # make a request using the ID token against the db and expect a 200 response
        headers = {"Authorization": "Bearer {}".format(id_token_refresh)}
        resp = requests.get(db_url, headers=headers)
        log_r(resp)
        if expect_signed_id_token:
            assert resp.status_code == 200, "Expected 200 response for bearer ID token on refresh"
        else:
            assert resp.status_code == 401, "Expected 401 response for bearer ID token on refresh"

        id_tokens.append(id_token_refresh)