def move_bucket(self, bucket_name, direction):
    logger.debug("Rename folder")

    if direction == 'save':
        src = join(self.virtual_source.parameters.mount_path, 'data',
                   bucket_name)
        dst = join(self.virtual_source.parameters.mount_path, 'data',
                   ".{}.delphix".format(bucket_name))
        command = CommandFactory.os_mv(src, dst, self.need_sudo, self.uid)
        logger.debug("rename command: {}".format(command))
        stdout, error, exit_code = utilities.execute_bash(
            self.connection, command)
    elif direction == 'restore':
        dst = join(self.virtual_source.parameters.mount_path, 'data',
                   bucket_name)
        src = join(self.virtual_source.parameters.mount_path, 'data',
                   ".{}.delphix".format(bucket_name))
        command = CommandFactory.delete_dir(dst, self.need_sudo, self.uid)
        logger.debug("delete command: {}".format(command))
        stdout, error, exit_code = utilities.execute_bash(
            self.connection, command)
        command = CommandFactory.os_mv(src, dst, self.need_sudo, self.uid)
        logger.debug("rename command: {}".format(command))
        stdout, error, exit_code = utilities.execute_bash(
            self.connection, command)
def monitor_bucket(self, bucket_name, staging_UUID):
    # To monitor the replication
    logger.debug(
        "Monitoring the replication for bucket {}".format(bucket_name))
    kwargs = {
        ENV_VAR_KEY: {
            'password': self.staged_source.parameters.xdcr_admin_password
        }
    }
    command = CommandFactory.monitor_replication(
        source_username=self.staged_source.parameters.xdcr_admin,
        source_hostname=self.source_config.couchbase_src_host,
        source_port=self.source_config.couchbase_src_port,
        bucket_name=bucket_name,
        uuid=staging_UUID)
    stdout, stderr, exit_code = utilities.execute_bash(
        self.connection, command, **kwargs)
    logger.debug("stdout: {}".format(stdout))
    content = json.loads(stdout)
    pending_docs = self._get_last_value_of_node_stats(
        content["nodeStats"].values()[0])
    while pending_docs != 0:
        logger.debug(
            "Documents pending for replication: {}".format(pending_docs))
        helper_lib.sleepForSecond(30)
        stdout, stderr, exit_code = utilities.execute_bash(
            self.connection, command, **kwargs)
        content = json.loads(stdout)
        pending_docs = self._get_last_value_of_node_stats(
            content["nodeStats"].values()[0])
    logger.debug("Replication for bucket {} completed".format(bucket_name))
def unmount_file_system(rx_connection, path):
    """Unmount the file system used by cbbackupmgr after the post-snapshot step."""
    try:
        utilities.execute_bash(rx_connection,
                               CommandFactory.unmount_file_system(path))
    except Exception as err:
        logger.debug("Error while unmounting file system: {}".format(err.message))
        raise UnmountFileSystemError(err.message)
def write_file(connection, content, filename):
    """Write the given data into the passed filename."""
    logger.debug("writing data {} in file {}".format(content, filename))
    try:
        utilities.execute_bash(
            connection,
            CommandFactory.write_file(data=content, filename=filename))
    except Exception as e:
        logger.debug("Failed to write into file")
        raise FileIOError("Failed to write into file")
def make_directory(self, directory_path):
    """
    Create a directory and set its permissions to 775.
    :param directory_path: The directory path
    :return: None
    """
    logger.debug("Creating directory {}".format(directory_path))
    command = CommandFactory.make_directory(directory_path)
    utilities.execute_bash(self.connection, command)
    logger.debug(
        "Changing permission of directory path {}".format(directory_path))
    command = CommandFactory.change_permission(directory_path)
    utilities.execute_bash(self.connection, command)
    logger.debug("Changed the permission of directory")
def cb_backup_full(self, csv_bucket):
    logger.debug("Starting Restore via Backup file...")
    logger.debug("csv_bucket_list: {}".format(csv_bucket))
    kwargs = {
        ENV_VAR_KEY: {
            'password': self.parameters.couchbase_admin_password
        }
    }
    env = _CBBackupMixin.generate_environment_map(self)
    cmd = CommandFactory.cb_backup_full(
        backup_location=self.parameters.couchbase_bak_loc,
        csv_bucket_list=csv_bucket,
        backup_repo=self.parameters.couchbase_bak_repo,
        **env)
    utilities.execute_bash(self.connection, cmd, **kwargs)
def delete_config(self):
    # TODO: error handling
    logger.debug("start delete_config")

    filename = "{}/../var/lib/couchbase/config/config.dat".format(
        helper_lib.get_base_directory_of_given_path(
            self.repository.cb_shell_path))

    cmd = CommandFactory.check_file(filename, self.need_sudo, self.uid)
    logger.debug("check file cmd: {}".format(cmd))
    command_output, std_err, exit_code = utilities.execute_bash(
        self.connection, command_name=cmd, callback_func=self.ignore_err)

    if exit_code == 0 and "Found" in command_output:
        cmd = CommandFactory.os_mv(filename, "{}.bak".format(filename),
                                   self.need_sudo, self.uid)
        logger.debug("rename config cmd: {}".format(cmd))
        command_output, std_err, exit_code = utilities.execute_bash(
            self.connection, command_name=cmd)
        logger.debug(
            "rename config.dat to bak - exit_code: {} stdout: {} std_err: {}"
            .format(exit_code, command_output, std_err))

    filename = "{}/../etc/couchdb/local.ini".format(
        helper_lib.get_base_directory_of_given_path(
            self.repository.cb_shell_path))

    command_output, std_err, exit_code = self.run_os_command(
        os_command='sed', filename=filename, regex='s/view_index_dir.*//')
    logger.debug(
        "clean local.ini index - exit_code: {} stdout: {} std_err: {}".format(
            exit_code, command_output, std_err))

    command_output, std_err, exit_code = self.run_os_command(
        os_command='sed', filename=filename, regex='s/database_dir.*//')
    logger.debug(
        "clean local.ini data - exit_code: {} stdout: {} std_err: {}".format(
            exit_code, command_output, std_err))

    command_output, std_err, exit_code = self.run_os_command(
        os_command='change_permission', path=filename)
    logger.debug(
        "fix local.ini permission - exit_code: {} stdout: {} std_err: {}".format(
            exit_code, command_output, std_err))
def check_server_is_used(connection, path):
    ret = Status.INACTIVE

    output, stderr, exit_code = utilities.execute_bash(
        connection, CommandFactory.mount())
    if exit_code != 0:
        logger.error("mount returned an error")
        logger.error("stdout: {} stderr: {} exit_code: {}".format(
            output.encode('utf-8'), stderr.encode('utf-8'), exit_code))
        raise UserError("Problem with reading mounted file systems",
                        "Ask OS admin to check mount", stderr)
    else:
        # Parse the mount output to find other Delphix mount points.
        fs_re = re.compile(r'(\S*)\son\s(\S*)\stype\s(\S*)')
        for i in output.split("\n"):
            match = re.search(fs_re, i)
            if match is not None:
                groups = match.groups()
                if groups[2] == 'nfs':
                    if path == groups[1]:
                        # This is our mount point - skip it.
                        ret = Status.ACTIVE
                        continue
                    if "domain0" in groups[0] and "timeflow" in groups[0]:
                        # This is a Delphix mount point but it's not ours,
                        # so raise an exception.
                        raise UserError(
                            "Another database (VDB or staging) is using this server.",
                            "Disable another one to provision or enable this one",
                            "{} {}".format(groups[0], groups[1]))

    return ret
def delete_replication(self):
    logger.debug("Deleting replication...")
    stream_id, cluster_name = self.get_stream_id()
    logger.debug("stream_id: {} and cluster_name: {}".format(
        stream_id, cluster_name))
    if stream_id is None or stream_id == "":
        logger.debug("No replication found to delete.")
        return False, cluster_name

    kwargs = {
        ENV_VAR_KEY: {
            'source_password': self.parameters.xdcr_admin_password
        }
    }
    env = _ReplicationMixin.generate_environment_map(self)

    for id in stream_id:
        cmd = CommandFactory.delete_replication(cluster_name=cluster_name,
                                                id=id, **env)
        stdout, stderr, exit_code = utilities.execute_bash(
            self.connection, cmd, **kwargs)
        if exit_code != 0:
            logger.warn("stream_id: {} deletion failed".format(id))
        else:
            logger.debug("stream_id: {} deletion succeeded".format(id))

    return True, cluster_name
def find_version(source_connection, install_path):
    """Return the Couchbase version installed on the host."""
    cb_version, std_err, exit_code = utilities.execute_bash(
        source_connection, CommandFactory.get_version(install_path))
    version = re.search(r"\d.*$", cb_version).group()
    logger.debug("Couchbase version installed {}".format(version))
    return version
def get_replication_uuid(self):
    # is_ip_or_string: False when the cluster is matched by hostname,
    # True when it is matched by host IP.
    logger.debug("Finding the replication uuid through host name")
    is_ip_or_string = False
    kwargs = {
        ENV_VAR_KEY: {
            'source_password': self.parameters.xdcr_admin_password
        }
    }
    cluster_name = self.parameters.stg_cluster_name
    env = _ReplicationMixin.generate_environment_map(self)
    cmd = CommandFactory.get_replication_uuid(**env)
    try:
        stdout, stderr, exit_code = utilities.execute_bash(
            self.connection, cmd, **kwargs)
        if stdout is None or stdout == "":
            logger.debug("No replication ID identified")
            return None, None

        logger.debug("xdcr remote references: {}".format(stdout))
        hostname = self.connection.environment.host.name
        logger.debug("Environment hostname {}".format(hostname))
        host_ip = ""
        if not re.search(r"{}".format(hostname), stdout):
            logger.debug(
                "cluster for hostname {} doesn't exist".format(hostname))
            logger.debug("Finding the ip for this host")
            host_ip = self.get_ip()
            logger.debug("Finding the replication uuid through host ip")
            if not re.search(r"{}".format(host_ip), stdout):
                logger.debug(
                    "cluster for host_ip {} doesn't exist".format(host_ip))
                return None, None
            else:
                is_ip_or_string = True
                logger.debug("cluster for host_ip {} exists".format(host_ip))
        else:
            logger.debug("cluster for hostname {} exists".format(hostname))

        if not is_ip_or_string:
            uuid = re.search(r"uuid:.*(?=\s.*{})".format(hostname),
                             stdout).group()
        else:
            uuid = re.search(r"uuid:.*(?=\s.*{})".format(host_ip),
                             stdout).group()

        uuid = uuid.split(":")[1].strip()
        cluster_name_staging = re.search(
            r"cluster name:.*(?=\s.*{})".format(uuid), stdout).group()
        cluster_name_staging = cluster_name_staging.split(":")[1].strip()
        logger.debug("uuid for cluster {}: {}".format(
            cluster_name_staging, uuid))
        if cluster_name_staging == cluster_name:
            return uuid, cluster_name
        else:
            return uuid, cluster_name_staging
    except Exception as err:
        logger.warn("Error identified: {}".format(err.message))
        logger.warn("UUID is None. Not able to find any cluster")
        return None, None
def check_duplicate_replication(self, cluster_name):
    logger.debug("Searching cluster name")
    kwargs = {
        ENV_VAR_KEY: {
            'source_password': self.parameters.xdcr_admin_password
        }
    }
    env = _ReplicationMixin.generate_environment_map(self)
    cmd = CommandFactory.get_replication_uuid(**env)
    try:
        stdout, stderr, exit_code = utilities.execute_bash(
            self.connection, cmd, **kwargs)
        all_clusters = re.findall(r'cluster name:.*', stdout)
        stream_id, cluster = self.get_stream_id()
        logger.debug("stream_id: {} and cluster: {}".format(
            stream_id, cluster))
        if stream_id:
            # XDCR is already set up between these nodes:
            # setup present, no cluster-name mismatch.
            logger.debug(
                "XDCR is already set up between the source and staging server")
            return True, False

        logger.debug(
            "No XDCR for staging host. Now validating the cluster name...")
        for each_cluster_pair in all_clusters:
            each_cluster = each_cluster_pair.split(':')[1].strip()
            logger.debug("Listed cluster: {} and input is: {}".format(
                each_cluster, cluster_name))
            if each_cluster == cluster_name:
                # No setup, but the cluster name is already in use.
                logger.debug("Duplicate cluster name issue identified")
                return False, True
        return False, False
    except Exception as err:
        logger.debug(
            "Failed to verify the duplicate name: {}".format(err.message))
def bucket_list(self, return_type=list):
    # List all buckets, including details such as RAM used and RAM quota.
    logger.debug("Finding staged bucket list")
    env = _BucketMixin.generate_environment_map(self)
    command = CommandFactory.bucket_list(**env)
    kwargs = {
        ENV_VAR_KEY: {
            'password': self.parameters.couchbase_admin_password
        }
    }
    logger.debug("list bucket {}".format(command))
    bucket_list, error, exit_code = utilities.execute_bash(
        self.connection, command, **kwargs)
    logger.debug("list bucket output: {}".format(bucket_list))
    if return_type == list:
        if bucket_list == "[]" or bucket_list is None:
            logger.debug("empty list")
            return []
        else:
            logger.debug("clean up json")
            bucket_list = bucket_list.replace("u'", "'")
            bucket_list = bucket_list.replace("'", "\"")
            bucket_list = bucket_list.replace("True", "\"True\"")
            bucket_list = bucket_list.replace("False", "\"False\"")
            logger.debug("parse json")
            bucket_list_dict = json.loads(bucket_list)
            logger.debug("remap json")
            bucket_list_dict = map(helper_lib.remap_bucket_json,
                                   bucket_list_dict)
    logger.debug(
        "Bucket details in staged environment: {}".format(bucket_list))
    return bucket_list_dict
def build_index(self, index_name):
    logger.debug("Building indexes....")
    # The admin password is passed to execute_bash through ENV_VAR_KEY,
    # matching the other helpers in this class.
    kwargs = {
        ENV_VAR_KEY: {
            'password': self.parameters.couchbase_admin_password
        }
    }
    env = _CBBackupMixin.generate_environment_map(self)
    cmd = CommandFactory.build_index(index_name=index_name, **env)
    command_output, std_err, exit_code = utilities.execute_bash(
        self.connection, command_name=cmd, **kwargs)
    logger.debug("command_output is {}".format(command_output))
    return command_output
def start_couchbase(self):
    """Start the couchbase service."""
    logger.debug("Starting couchbase services")
    command = CommandFactory.start_couchbase(self.repository.cb_install_path)
    utilities.execute_bash(self.connection, command)
    server_status = Status.INACTIVE

    # Wait for up to one minute for the server to start.
    end_time = time.time() + 60

    # Break the loop when either the timeout is exceeded or the server
    # has started successfully.
    while time.time() < end_time and server_status == Status.INACTIVE:
        helper_lib.sleepForSecond(1)  # waiting for 1 second
        server_status = self.status()  # fetching status

    # If the server is still not running after 60 seconds, stop further
    # execution.
    if server_status == Status.INACTIVE:
        raise CouchbaseServicesError("Failed to start the couchbase server")
def run_os_command(self, os_command, **kwargs):
    # Look up the command builder on CommandFactory by name and run it
    # with the sudo/uid settings of this object.
    method_to_call = getattr(CommandFactory, os_command)
    command = method_to_call(sudo=self.need_sudo, uid=self.uid, **kwargs)
    logger.debug("os command to run: {}".format(command))
    stdout, stderr, exit_code = utilities.execute_bash(
        self.connection, command)
    return [stdout.encode('utf-8'), stderr.encode('utf-8'), exit_code]
def pause_replication(self):
    logger.debug("Pausing replication ...")
    stream_id, cluster_name = self.get_stream_id()
    kwargs = {
        ENV_VAR_KEY: {
            'source_password': self.parameters.xdcr_admin_password
        }
    }
    env = _ReplicationMixin.generate_environment_map(self)
    cmd = CommandFactory.pause_replication(cluster_name=cluster_name, **env)
    for replication_id in stream_id:
        kwargs["id"] = replication_id
        stdout, stderr, exit_code = utilities.execute_bash(
            self.connection, cmd, **kwargs)
        logger.debug(stdout)
def clean_stale_mountpoint(connection, path):
    umount_std, umount_stderr, umount_exit_code = utilities.execute_bash(
        connection,
        CommandFactory.unmount_file_system(mount_path=path, options='-lf'))
    if umount_exit_code != 0:
        logger.error("Problem with cleaning mount path")
        logger.error("stderr {}".format(umount_stderr))
        raise UserError("Problem with cleaning mount path",
                        "Ask OS admin to check mount points",
                        umount_stderr)
def node_init(self):
    """
    Initialize the Couchbase server node and set the required data paths.
    :return: None
    """
    logger.debug("Initializing the NODE")
    kwargs = {
        ENV_VAR_KEY: {
            'password': self.parameters.couchbase_admin_password
        }
    }
    command = CommandFactory.node_init(self.repository.cb_shell_path,
                                       self.parameters.couchbase_port,
                                       self.parameters.couchbase_admin,
                                       self.parameters.mount_path)
    command_output, std_err, exit_code = utilities.execute_bash(
        self.connection, command, **kwargs)
    logger.debug("Command Output {}".format(command_output))
def check_file_present(connection, config_file_path):
    """Return True if the file is present, otherwise return False."""
    try:
        stdout, stderr, exit_code = utilities.execute_bash(
            connection, CommandFactory.check_file(config_file_path))
        if stdout == "Found":
            logger.debug("file path exists {}".format(config_file_path))
            return True
    except Exception as e:
        logger.debug("file path does not exist {}".format(config_file_path))
    return False
def check_dir_present(connection, dir):
    """Return True if the directory is present, otherwise return False."""
    try:
        stdout, stderr, exit_code = utilities.execute_bash(
            connection, CommandFactory.check_directory(dir))
        if stdout == "Found":
            logger.debug("dir path found {}".format(dir))
            return True
    except Exception as err:
        logger.debug("directory path is absent: {}".format(err.message))
    return False
def stop_couchbase(self):
    """Stop the couchbase service."""
    try:
        logger.debug("Stopping couchbase services")
        command = CommandFactory.stop_couchbase(
            self.repository.cb_install_path)
        utilities.execute_bash(self.connection, command)

        # Wait for up to one minute for the server to stop.
        end_time = time.time() + 60
        server_status = Status.ACTIVE
        while time.time() < end_time and server_status == Status.ACTIVE:
            helper_lib.sleepForSecond(1)  # waiting for 1 second
            server_status = self.status()  # fetching status
        if server_status == Status.ACTIVE:
            raise CouchbaseServicesError(
                "Failed to stop the couchbase server")
    except CouchbaseServicesError as err:
        raise err
    except Exception as err:
        if self.status() == Status.INACTIVE:
            logger.debug(
                "Seems like the couchbase service is not running. {}".format(
                    err.message))
        else:
            raise CouchbaseServicesError(err.message)
def bucket_flush(self, bucket_name):
    # Required before the bucket is deleted.
    logger.debug("Flushing bucket: {}".format(bucket_name))
    self.__validate_bucket_name(bucket_name)
    env = _BucketMixin.generate_environment_map(self)
    command = CommandFactory.bucket_flush(bucket_name=bucket_name, **env)
    kwargs = {
        ENV_VAR_KEY: {
            'password': self.parameters.couchbase_admin_password
        }
    }
    return utilities.execute_bash(self.connection, command, **kwargs)
def check_config(self):
    filename = os.path.join(self.get_config_directory(), "config.dat")
    cmd = CommandFactory.check_file(filename)
    logger.debug("check file cmd: {}".format(cmd))
    command_output, std_err, exit_code = utilities.execute_bash(
        self.connection, command_name=cmd, callback_func=self.ignore_err)
    return exit_code == 0 and "Found" in command_output
def find_shell_path(source_connection, binary_path):
    """
    :param source_connection: Connection for the source environment
    :param binary_path: Couchbase binary path
    :return: path of the cluster management utility (couchbase-cli)
    """
    logger.debug("Finding Shell Path...")
    shell_path, std_err, exit_code = utilities.execute_bash(
        source_connection, CommandFactory.find_shell_path(binary_path))
    if shell_path == "":
        message = "Shell path {}/couchbase-cli not found".format(binary_path)
        raise RepositoryDiscoveryError(message)
    return shell_path
def bucket_delete(self, bucket_name):
    # To delete the bucket
    logger.debug("Deleting bucket: {}".format(bucket_name))
    self.__validate_bucket_name(bucket_name)
    env = _BucketMixin.generate_environment_map(self)
    command = CommandFactory.bucket_delete(bucket_name=bucket_name, **env)
    kwargs = {
        ENV_VAR_KEY: {
            'password': self.parameters.couchbase_admin_password
        }
    }
    logger.debug("delete bucket {}".format(command))
    return utilities.execute_bash(self.connection, command, **kwargs)
def get_indexes_name(self, index_name):
    logger.debug("Finding indexes....")
    # The admin password is passed to execute_bash through ENV_VAR_KEY;
    # the environment map is only used to build the command itself.
    kwargs = {
        ENV_VAR_KEY: {
            'password': self.parameters.couchbase_admin_password
        }
    }
    env = _CBBackupMixin.generate_environment_map(self)
    cmd = CommandFactory.get_indexes_name(**env)
    logger.debug("env detail is: {}".format(env))
    command_output, std_err, exit_code = utilities.execute_bash(
        self.connection, command_name=cmd, **kwargs)
    logger.debug("Indexes are {}".format(command_output))
    return command_output
def find_whoami(source_connection):
    """Return the uid and gid of the environment user."""
    std_out, std_err, exit_code = utilities.execute_bash(
        source_connection, CommandFactory.whoami())
    logger.debug("find whoami output: {}".format(std_out))
    ids = re.search(r"uid=([\d]+).*gid=([\d]+)", std_out)
    if ids:
        uid = int(ids.group(1))
        gid = int(ids.group(2))
    else:
        uid = -1
        gid = -1
    logger.debug("Delphix user uid {} gid {}".format(uid, gid))
    return (uid, gid)
def find_ids(source_connection, install_path):
    """Return the Couchbase uid and gid."""
    std_out, std_err, exit_code = utilities.execute_bash(
        source_connection, CommandFactory.get_ids(install_path))
    logger.debug("find ids output: {}".format(std_out))
    ids = re.search(r"[-rwx.]+\s\d\s([\d]+)\s([\d]+).*", std_out)
    if ids:
        uid = int(ids.group(1))
        gid = int(ids.group(2))
    else:
        uid = -1
        gid = -1
    logger.debug("Couchbase user uid {} gid {}".format(uid, gid))
    return (uid, gid)
def xdcr_setup(self):
    logger.debug("Started XDCR set up ...")
    kwargs = {
        ENV_VAR_KEY: {
            'source_password': self.parameters.xdcr_admin_password,
            'password': self.parameters.couchbase_admin_password
        }
    }
    env = _XDCrMixin.generate_environment_map(self)
    cmd = CommandFactory.xdcr_setup(
        cluster_name=self.parameters.stg_cluster_name, **env)
    stdout, stderr, exit_code = utilities.execute_bash(
        self.connection, cmd, **kwargs)
    helper_lib.sleepForSecond(3)