Example 1
 def monitor_bucket(self, bucket_name, staging_UUID):
     # Monitor XDCR replication progress for the given bucket
     logger.debug(
         "Monitoring the replication for bucket {} ".format(bucket_name))
     kwargs = {
         ENV_VAR_KEY: {
             'password': self.staged_source.parameters.xdcr_admin_password
         }
     }
     command = CommandFactory.monitor_replication(
         source_username=self.staged_source.parameters.xdcr_admin,
         source_hostname=self.source_config.couchbase_src_host,
         source_port=self.source_config.couchbase_src_port,
         bucket_name=bucket_name,
         uuid=staging_UUID)
     stdout, stderr, exit_code = utilities.execute_bash(
         self.connection, command, **kwargs)
     logger.debug("stdout: {}".format(stdout))
     content = json.loads(stdout)
     pending_docs = self._get_last_value_of_node_stats(
         content["nodeStats"].values()[0])
     while pending_docs != 0:
         logger.debug(
             "Documents pending for replication: {}".format(pending_docs))
         helper_lib.sleepForSecond(30)
         stdout, stderr, exit_code = utilities.execute_bash(
             self.connection, command, **kwargs)
         content = json.loads(stdout)
         pending_docs = self._get_last_value_of_node_stats(
             content["nodeStats"].values()[0])
     logger.debug(
         "Replication for bucket {} completed".format(bucket_name))
Example 2
def _do_provision(provision_process, snapshot):
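    # Parse the "name,size" pairs stored in the snapshot, create each bucket,
    # and record the bucket list in the plugin config file.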
    bucket_list_and_size = snapshot.bucket_list

    if not bucket_list_and_size:
        raise FailedToReadBucketDataFromSnapshot("Snapshot Data is empty.")
    else:
        logger.debug(
            "snapshot bucket data is: {}".format(bucket_list_and_size))

    for item in bucket_list_and_size.split(':'):
        logger.debug("Creating bucket is: {}".format(item))
        bucket_name = item.split(',')[0]
        bkt_size_mb = int(item.split(',')[1].strip()) // 1024 // 1024
        provision_process.bucket_create(bucket_name, bkt_size_mb)
        helper_lib.sleepForSecond(2)

    # getting config directory path
    directory = provision_process.get_config_directory()

    # making directory and changing permission to 755.
    provision_process.make_directory(directory)
    # This file stores the bucket information captured in the snapshot
    config_file_path = provision_process.get_config_file_path()

    content = "BUCKET_LIST=" + _find_bucket_name_from_snapshot(snapshot)

    # Write the bucket list to the .config file inside the .delphix folder
    helper_lib.write_file(provision_process.connection, content,
                          config_file_path)
Example 3
 def bucket_remove(self, bucket_name):
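     # Enable flush on the bucket, flush its data, disable flush again,
     # and finally delete the bucket definition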
     logger.debug("Removing bucket: {} ".format(bucket_name))
     self.bucket_edit(bucket_name, flush_value=1)
     self.bucket_flush(bucket_name)
     self.bucket_edit(bucket_name, flush_value=0)
     self.bucket_delete(bucket_name)
     helper_lib.sleepForSecond(2)
Example 4
    def check_index_build(self):
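        # Poll the index build status until no indexes remain unbuilt or the
        # ~12 hour timeout expires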
        # env = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}}
        # cmd = CommandFactory.check_index_build(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path),self.connection.environment.host.name, self.parameters.couchbase_port, self.parameters.couchbase_admin)
        # logger.debug("check_index_build cmd: {}".format(cmd))

        # set timeout to 12 hours
        end_time = time.time() + 3660 * 12

        tobuild = 1

        # Loop until no indexes remain unbuilt or the timeout is reached
        while time.time() < end_time and tobuild != 0:

            command_output, std_err, exit_code = self.run_couchbase_command(
                couchbase_command='check_index_build',
                base_path=helper_lib.get_base_directory_of_given_path(
                    self.repository.cb_shell_path))

            #command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd, **env)
            logger.debug("command_output is {}".format(command_output))
            logger.debug("std_err is {}".format(std_err))
            logger.debug("exit_code is {}".format(exit_code))
            try:
                command_output_dict = json.loads(command_output)
                logger.debug("dict {}".format(command_output_dict))
                tobuild = command_output_dict['results'][0]['unbuilt']
                logger.debug("to_build is {}".format(tobuild))
                helper_lib.sleepForSecond(30)  # wait 30 seconds before polling again
            except Exception as e:
                logger.debug(str(e))
Example 5
    def stop_couchbase(self):
        """ stop the couchbase service"""
        try:
            logger.debug("Stopping couchbase services")
            self.run_couchbase_command('stop_couchbase')

            end_time = time.time() + 60
            server_status = Status.ACTIVE
            while time.time() < end_time and server_status == Status.ACTIVE:
                helper_lib.sleepForSecond(1)  # waiting for 1 second
                server_status = self.status()  # fetching status

            logger.debug("Leaving stop loop")
            if server_status == Status.ACTIVE:
                logger.debug("Have failed to stop couchbase server")
                raise CouchbaseServicesError(
                    "Have failed to stop couchbase server")
        except CouchbaseServicesError as err:
            logger.debug("Error: {}".format(err))
            raise err
        except Exception as err:
            logger.debug("Exception Error: {}".format(err))
            if self.status() == Status.INACTIVE:
                logger.debug(
                    "Seems like couchbase service is not running. {}".format(
                        err.message))
            else:
                raise CouchbaseServicesError(err.message)
Example 6
    def start_couchbase(self, provision=False, no_wait=False):
        """ start the couchbase service"""
        logger.debug("Starting couchbase services")

        self.run_couchbase_command('start_couchbase')
        server_status = Status.INACTIVE

        helper_lib.sleepForSecond(10)

        if no_wait:
            logger.debug("no wait - leaving start procedure")
            return

        # Wait up to roughly one hour for the server to start
        # for prox to investigate
        end_time = time.time() + 3660

        # Loop until the server reports ACTIVE or the timeout is reached
        while time.time() < end_time and server_status == Status.INACTIVE:
            helper_lib.sleepForSecond(1)  # waiting for 1 second
            server_status = self.status(provision)  # fetching status
            logger.debug("server status {}".format(server_status))

        # If the server is still not running after the timeout, stop further execution
        if server_status == Status.INACTIVE:
            raise CouchbaseServicesError(
                "Failed to start couchbase server")
Example 7
def _bucket_common_task(provision_process, bucket_list):
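    # For each bucket: enable flush, flush its contents, disable flush,
    # then delete the bucket definition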
    for bkt in bucket_list:
        bkt = bkt.strip()
        logger.debug("Deletion of bucket {} started".format(bkt))
        provision_process.bucket_edit(bkt, flush_value=1)
        provision_process.bucket_flush(bkt)
        provision_process.bucket_edit(bkt, flush_value=0)
        provision_process.bucket_delete(bkt)
        helper_lib.sleepForSecond(2)
Example 8
def _do_provision(provision_process, snapshot):
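    # Create any buckets missing on the staging host from the snapshot
    # metadata, stop Couchbase, restore the bucket data folders, restart the
    # server, and prepare the plugin config directory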
    bucket_list_and_size = snapshot.bucket_list

    if not bucket_list_and_size:
        raise FailedToReadBucketDataFromSnapshot("Snapshot Data is empty.")
    else:
        logger.debug(
            "snapshot bucket data is: {}".format(bucket_list_and_size))

    bucket_list_and_size = json.loads(bucket_list_and_size)

    try:
        bucket_list = provision_process.bucket_list()
        bucket_list = helper_lib.filter_bucket_name_from_output(bucket_list)
        logger.debug(bucket_list)
    except Exception as err:
        logger.debug("Failed to get bucket list. Error is " + err.message)

    renamed_folders = []

    for item in bucket_list_and_size:
        logger.debug("Checking bucket: {}".format(item))
        bucket_name = item['name']
        bkt_size = item['ram']
        bkt_type = item['bucketType']
        bkt_compression = item['compressionMode']
        bkt_size_mb = helper_lib.get_bucket_size_in_MB(0, bkt_size)
        if bucket_name not in bucket_list:
            # a new bucket needs to be created
            logger.debug("Creating bucket: {}".format(bucket_name))
            provision_process.bucket_create(bucket_name, bkt_size_mb, bkt_type,
                                            bkt_compression)
            helper_lib.sleepForSecond(2)
        else:
            logger.debug(
                "Bucket {} exist - no need to rename directory".format(
                    bucket_name))

    provision_process.stop_couchbase()

    for item in helper_lib.filter_bucket_name_from_output(
            bucket_list_and_size):
        logger.debug("Checking bucket: {}".format(item))
        bucket_name = item.split(',')[0]
        logger.debug("restoring folders")
        provision_process.move_bucket(bucket_name, 'restore')

    provision_process.start_couchbase()

    # getting config directory path
    directory = provision_process.get_config_directory()

    # making directory and changing permission to 755.
    provision_process.make_directory(directory)
    # This file stores the bucket information captured in the snapshot
    config_file_path = provision_process.get_config_file_path()
Example 9
def _bucket_modify_task(provision_process, bucket_list,
                        snapshot_bucket_list_and_size):
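    # Update each bucket's RAM quota to the size recorded in the snapshot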
    for bkt in bucket_list:
        bkt = bkt.strip()
        logger.debug("Modification of bucket {} started".format(bkt))
        ramquotasize = _find_bucket_size_byname(bkt,
                                                snapshot_bucket_list_and_size)
        logger.debug("Update bucket {} with ramsize {}MB".format(
            bkt, ramquotasize))
        provision_process.bucket_edit_ramquota(bkt, _ramsize=ramquotasize)
        helper_lib.sleepForSecond(2)
Example 10
    def start_node_bootstrap(self):
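        # Start Couchbase without waiting for full startup, then poll the
        # bootstrap status until the node is ACTIVE or the timeout expires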
        logger.debug("start start_node_bootstrap")
        self.start_couchbase(no_wait=True)
        end_time = time.time() + 3660
        server_status = Status.INACTIVE

        # Loop until the node reports ACTIVE or the ~1 hour timeout is reached
        while time.time() < end_time and server_status != Status.ACTIVE:
            helper_lib.sleepForSecond(1)  # waiting for 1 second
            server_status = self.staging_bootstrap_status()  # fetching status
            logger.debug("server status {}".format(server_status))
Example 11
 def xdcr_setup(self):
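     # Run the xdcr_setup command against the staging cluster; the source and
     # staging admin passwords are passed through the environment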
     logger.debug("Started XDCR set up ...")
     kwargs = {
         ENV_VAR_KEY: {
             'source_password': self.parameters.xdcr_admin_password,
             'password': self.parameters.couchbase_admin_password
         }
     }
     env = _XDCrMixin.generate_environment_map(self)
     cmd = CommandFactory.xdcr_setup(
         cluster_name=self.parameters.stg_cluster_name, **env)
     stdout, stderr, exit_code = utilities.execute_bash(
         self.connection, cmd, **kwargs)
     helper_lib.sleepForSecond(3)
Example 12
    def cluster_init(self):
        # Cluster initialization
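        # Enable the query service plus optional fts/eventing services, run
        # cluster-init, and fall back to cluster_setting() if the cluster is
        # already initialized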
        logger.debug("Cluster Initialization started")
        fts_service = self.parameters.fts_service
        #analytics_service = self.parameters.analytics_service
        eventing_service = self.parameters.eventing_service
        cluster_name = self._get_cluster_name()
        kwargs = {
            ENV_VAR_KEY: {
                'password': self.parameters.couchbase_admin_password
            }
        }
        additional_service = "query"
        if fts_service == True:
            additional_service = additional_service + ",fts"
        # if analytics_service:
        #     additional_service = additional_service + ",analytics"
        if eventing_service == True:
            additional_service = additional_service + ",eventing"

        logger.debug("additional services : {}".format(additional_service))
        lambda_expr = lambda output: bool(
            re.search(ALREADY_CLUSTER_INIT, output))
        env = _ClusterMixin.generate_environment_map(self)
        env['additional_services'] = additional_service
        cmd = CommandFactory.cluster_init(cluster_name=cluster_name, **env)
        logger.debug("Cluster init: {}".format(cmd))
        stdout, stderr, exit_code = utilities.execute_bash(
            self.connection,
            command_name=cmd,
            callback_func=lambda_expr,
            **kwargs)
        if re.search(r"ERROR", str(stdout)):
            if re.search(r"ERROR: Cluster is already initialized", stdout):
                logger.debug(
                    "Performing cluster setting as cluster is already initialized"
                )
                self.cluster_setting()
            else:
                logger.error("Cluster init failed. Throwing exception")
                raise Exception(stdout)
        else:
            logger.debug("Cluster init succeeded")

        # here we should wait for indexer to start
        sleepForSecond(10)
        return [stdout, stderr, exit_code]
Example 13
    def start_couchbase(self):
        """ start the couchbase service"""
        logger.debug("Starting couchbase services")
        command = CommandFactory.start_couchbase(self.repository.cb_install_path)
        utilities.execute_bash(self.connection, command)
        server_status = Status.INACTIVE

        # Wait up to one minute for the server to start
        end_time = time.time() + 60

        # Loop until the server starts or the one-minute timeout is reached
        while time.time() < end_time and server_status == Status.INACTIVE:
            helper_lib.sleepForSecond(1) # waiting for 1 second
            server_status = self.status() # fetching status

        # If the server is still not running after 60 seconds, stop further execution
        if server_status == Status.INACTIVE:
            raise CouchbaseServicesError("Failed to start couchbase server")
Example 14
 def stop_couchbase(self):
     """ stop the couchbase service"""
     try:
         logger.debug("Stopping couchbase services")
         command = CommandFactory.stop_couchbase(self.repository.cb_install_path)
         utilities.execute_bash(self.connection, command)
         end_time = time.time() + 60
         server_status = Status.ACTIVE
         while time.time() < end_time and server_status == Status.ACTIVE:
             helper_lib.sleepForSecond(1)  # waiting for 1 second
             server_status = self.status()  # fetching status
         if server_status == Status.ACTIVE:
             raise CouchbaseServicesError("Have failed to stop couchbase server")
     except CouchbaseServicesError as err:
         raise err
     except Exception as err:
         if self.status() == Status.INACTIVE:
             logger.debug("Seems like couchbase service is not running. {}".format(err.message))
         else:
             raise CouchbaseServicesError(err.message)
Example 15
    def bucket_create(self, bucket_name, ram_size, bucket_type,
                      bucket_compression):
        logger.debug("Creating bucket: {} ".format(bucket_name))
        # To create the bucket with given ram size
        self.__validate_bucket_name(bucket_name)
        if ram_size is None:
            logger.debug(
                "ram_size is required for bucket_create but got: {}".format(
                    ram_size))
            return

        if bucket_type == 'membase':
            # The bucket-list API reports couchbase buckets as 'membase'; map it back
            bucket_type = 'couchbase'

        if bucket_compression is not None:
            bucket_compression = '--compression-mode {}'.format(
                bucket_compression)
        else:
            bucket_compression = ''

        policy = self.parameters.bucket_eviction_policy
        env = _BucketMixin.generate_environment_map(self)
        command = CommandFactory.bucket_create(
            bucket_name=bucket_name,
            ramsize=ram_size,
            evictionpolicy=policy,
            bucket_type=bucket_type,
            bucket_compression=bucket_compression,
            **env)
        kwargs = {
            ENV_VAR_KEY: {
                'password': self.parameters.couchbase_admin_password
            }
        }
        logger.debug("create bucket {}".format(command))
        output, error, exit_code = utilities.execute_bash(
            self.connection, command, **kwargs)
        logger.debug("create bucket output: {} {} {}".format(
            output, error, exit_code))
        helper_lib.sleepForSecond(2)
Example 16
def configure_cluster(couchbase_obj):
    # configure Couchbase cluster
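    # If a saved configuration exists in the Delphix directory, restore it and
    # restart; otherwise bootstrap the node and initialize a new cluster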

    logger.debug("Checking cluster config")
    if couchbase_obj.check_config():
        logger.debug("cluster config found - restoring")
        couchbase_obj.stop_couchbase()
        couchbase_obj.restore_config()
        couchbase_obj.start_couchbase()
    else:
        logger.debug("cluster config not found - preparing node")
        # no config in delphix directory
        # initial cluster setup
        couchbase_obj.stop_couchbase()
        # We can't use the normal status monitor because the server is not configured yet
        couchbase_obj.start_couchbase(no_wait=True)

        end_time = time.time() + 3660

        server_status = Status.INACTIVE

        # Loop until the server starts or the ~1 hour timeout is reached
        while time.time() < end_time and server_status != Status.ACTIVE:
            helper_lib.sleepForSecond(1) # waiting for 1 second
            server_status = couchbase_obj.staging_bootstrap_status() # fetching status
            logger.debug("server status {}".format(server_status))

        # If the node is not configured, initialize a new cluster; otherwise verify the existing configuration
        if couchbase_obj.check_cluster_notconfigured():
            logger.debug("Node not configured - creating a new cluster")
            couchbase_obj.node_init()
            couchbase_obj.cluster_init()
            logger.debug("Cluster configured")
        else:
            logger.debug("Node configured but no configuration in Delphix - ???????")
            if couchbase_obj.check_cluster_configured():
                logger.debug("Configured with staging user/password and alive so not a problem - continue")
            else:
                logger.debug("Cluster configured but not with user/password given in Delphix potentially another cluster")
                raise UserError("Cluster configured but not with user/password given in Delphix potentially another cluster")
Example 17
 def bucket_create(self, bucket_name, ram_size=0):
     logger.debug("Creating bucket: {} ".format(bucket_name))
     # To create the bucket with given ram size
     self.__validate_bucket_name(bucket_name)
     if ram_size is None:
         logger.debug(
             "ram_size is required for bucket_create but got: {}".format(
                 ram_size))
         return
     policy = self.parameters.bucket_eviction_policy
     env = _BucketMixin.generate_environment_map(self)
     command = CommandFactory.bucket_create(bucket_name=bucket_name,
                                            ramsize=ram_size,
                                            evictionpolicy=policy,
                                            **env)
     kwargs = {
         ENV_VAR_KEY: {
             'password': self.parameters.couchbase_admin_password
         }
     }
     output, error, exit_code = utilities.execute_bash(
         self.connection, command, **kwargs)
     helper_lib.sleepForSecond(2)
Example 18
 def xdcr_replicate(self, src, tgt):
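     # Create an XDCR replication from the source bucket to the target bucket
     # on the staging cluster reference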
     try:
         logger.debug("Started XDCR replication for bucket {}".format(src))
         kwargs = {
             ENV_VAR_KEY: {
                 'source_password': self.parameters.xdcr_admin_password
             }
         }
         env = _XDCrMixin.generate_environment_map(self)
         cmd = CommandFactory.xdcr_replicate(
             source_bucket_name=src,
             target_bucket_name=tgt,
             cluster_name=self.parameters.stg_cluster_name,
             **env)
         stdout, stderr, exit_code = utilities.execute_bash(
             self.connection, cmd, **kwargs)
         if exit_code != 0:
             logger.debug("XDCR replication create failed")
             raise Exception(stdout)
         logger.debug("{} : XDCR replication create succeeded".format(tgt))
         helper_lib.sleepForSecond(2)
     except Exception as e:
         logger.debug("XDCR error {}".format(e.message))
Example 19
def resync_cbbkpmgr(staged_source, repository, source_config,
                    input_parameters):
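    # Configure the staging cluster, pre-create the buckets found on the
    # source, take a full cbbackupmgr backup, build indexes, then stop
    # Couchbase and save the parent node configuration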
    dsource_type = input_parameters.d_source_type
    dsource_name = source_config.pretty_name
    couchbase_host = input_parameters.couchbase_host
    bucket_size = staged_source.parameters.bucket_size
    rx_connection = staged_source.staged_connection
    resync_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())

    linking.check_for_concurrent(resync_process, dsource_type, dsource_name,
                                 couchbase_host)

    # validate if this works as well for backup
    linking.configure_cluster(resync_process)

    logger.debug("Finding source and staging bucket list")
    bucket_details_source = resync_process.source_bucket_list_offline()
    bucket_details_staged = helper_lib.filter_bucket_name_from_output(
        resync_process.bucket_list())

    buckets_toprocess = linking.buckets_precreation(resync_process,
                                                    bucket_details_source,
                                                    bucket_details_staged)

    csv_bucket_list = ",".join(buckets_toprocess)
    logger.debug("Started CB backup manager")
    helper_lib.sleepForSecond(30)
    resync_process.cb_backup_full(csv_bucket_list)
    helper_lib.sleepForSecond(30)

    linking.build_indexes(resync_process)
    logger.info("Stopping Couchbase")
    resync_process.stop_couchbase()
    resync_process.save_config('parent')
Example 20
def vdb_reconfigure(virtual_source, repository, source_config, snapshot):
    # Restore the saved node configuration on every node, restart the
    # Couchbase services, and wait until all nodes report ACTIVE

    logger.debug("In vdb_reconfigure...")
    provision_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source).
        set_repository(repository).set_source_config(source_config).build())

    provision_process.stop_couchbase()

    if provision_process.parameters.node_list is not None and len(
            provision_process.parameters.node_list) > 0:
        multinode = True
        server_count = len(provision_process.parameters.node_list) + 1
    else:
        multinode = False

    nodeno = 1
    provision_process.restore_config(what='current', nodeno=nodeno)
    provision_process.start_couchbase(no_wait=multinode)

    if provision_process.parameters.node_list is not None and len(
            provision_process.parameters.node_list) > 0:
        for node in provision_process.parameters.node_list:
            nodeno = nodeno + 1
            logger.debug("+++++++++++++++++++++++++++")
            logger.debug(node)
            logger.debug(nodeno)
            logger.debug("+++++++++++++++++++++++++++")
            addnode = CouchbaseOperation(
                Resource.ObjectBuilder.set_virtual_source(
                    virtual_source).set_repository(
                        repository).set_source_config(source_config).build(),
                make_nonprimary_connection(provision_process.connection,
                                           node['environment'],
                                           node['environmentUser']))
            addnode.stop_couchbase()
            addnode.restore_config(what='current', nodeno=nodeno)
            addnode.start_couchbase(no_wait=multinode)

    logger.debug("reconfigure for multinode: {}".format(multinode))

    if multinode == True:

        active_servers = {}
        logger.debug("wait for nodes")
        logger.debug("server count: {} active servers: {}".format(
            server_count, sum(active_servers.values())))

        end_time = time.time() + 3660

        # Loop until all nodes report ACTIVE or the ~1 hour timeout is reached
        while time.time() < end_time and sum(
                active_servers.values()) != server_count:
            logger.debug("server count 2: {} active servers: {}".format(
                server_count, sum(active_servers.values())))
            nodeno = 1
            helper_lib.sleepForSecond(1)  # waiting for 1 second
            server_status = provision_process.status()  # fetching status
            logger.debug("server status {}".format(server_status))
            if server_status == Status.ACTIVE:
                active_servers[nodeno] = 1

            for node in provision_process.parameters.node_list:
                nodeno = nodeno + 1
                logger.debug("+++++++++++++++++++++++++++")
                logger.debug(node)
                logger.debug(nodeno)
                logger.debug("+++++++++++++++++++++++++++")
                addnode = CouchbaseOperation(
                    Resource.ObjectBuilder.set_virtual_source(virtual_source).
                    set_repository(repository).set_source_config(
                        source_config).build(),
                    make_nonprimary_connection(provision_process.connection,
                                               node['environment'],
                                               node['environmentUser']))
                server_status = addnode.status()  # fetching status
                logger.debug("server status {}".format(server_status))
                if server_status == Status.ACTIVE:
                    active_servers[nodeno] = 1

    return _source_config(virtual_source, repository, source_config, snapshot)
Example 21
    def addnode(self, nodeno, node_def):
        logger.debug("start addnode")

        self.delete_config()

        self.start_node_bootstrap()

        self.node_init(nodeno)

        helper_lib.sleepForSecond(10)

        services = ['data', 'index', 'query']

        if "fts_service" in node_def and node_def["fts_service"] == True:
            services.append('fts')

        if "eventing_service" in node_def and node_def[
                "eventing_service"] == True:
            services.append('eventing')

        if "analytics_service" in node_def and node_def[
                "analytics_service"] == True:
            services.append('analytics')

        logger.debug("services to add: {}".format(services))

        # hostip_command = CommandFactory.get_ip_of_hostname()
        # logger.debug("host ip command: {}".format(hostip_command))
        # host_ip_output, std_err, exit_code = utilities.execute_bash(self.connection, hostip_command)
        # logger.debug("host ip Output {} ".format(host_ip_output))

        logger.debug("node host name / IP: {}".format(node_def["node_addr"]))

        resolve_name_command = CommandFactory.resolve_name(
            hostname=node_def["node_addr"])
        logger.debug(
            "resolve_name_command command: {}".format(resolve_name_command))
        resolve_name_output, std_err, exit_code = utilities.execute_bash(
            self.connection, resolve_name_command)
        logger.debug(
            "resolve_name_command Output {} ".format(resolve_name_output))

        command_output, std_err, exit_code = self.run_couchbase_command(
            couchbase_command='server_add',
            hostname=self.connection.environment.host.name,
            newhost=resolve_name_output,
            services=','.join(services))

        logger.debug("Add node Output {} stderr: {} exit_code: {} ".format(
            command_output, std_err, exit_code))

        if exit_code != 0:
            logger.debug("Adding node error")
            raise UserError(
                "Problem with adding node",
                "Check an output and fix problem before retrying to provision a VDB",
                "stdout: {} stderr:{}".format(command_output, std_err))

        command_output, std_err, exit_code = self.run_couchbase_command(
            couchbase_command='rebalance',
            hostname=self.connection.environment.host.name)

        logger.debug("Rebalance Output {} stderr: {} exit_code: {} ".format(
            command_output, std_err, exit_code))

        if exit_code != 0:
            logger.debug("Rebalancing error")
            raise UserError(
                "Problem with rebalancing cluster",
                "Check an output and fix problem before retrying to provision a VDB",
                "stdout: {} stderr:{}".format(command_output, std_err))