Esempio n. 1
0
def vdb_status(virtual_source, repository, source_config):
    """Return the current status of the Couchbase server backing this VDB."""
    status_checker = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source)
        .set_repository(repository)
        .set_source_config(source_config)
        .build())
    current_status = status_checker.status()
    logger.debug("VDB Status is {}".format(current_status))
    return current_status
Esempio n. 2
0
def post_snapshot_xdcr(staged_source, repository, source_config, dsource_type):
    """Build and return the SnapshotDefinition after an XDCR sync.

    Starts Couchbase on the staging host, records bucket names/sizes plus
    host, port, mount path and a fresh snapshot id, then removes the
    snap-sync lock file created during pre-snapshot.
    """
    logger.info("In Post snapshot...")
    snapshot_builder = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())

    snapshot_builder.start_couchbase()
    snapshot = SnapshotDefinition(validate=False)
    bucket_details = snapshot_builder.bucket_list()

    configured_buckets = staged_source.parameters.config_settings_prov
    if configured_buckets:
        # Record only the buckets the user explicitly configured.
        bucket_list = [
            helper_lib.get_bucket_name_with_size(bucket_details,
                                                 setting["bucketName"])
            for setting in configured_buckets
        ]
    else:
        # No explicit configuration: record every staging bucket with size.
        bucket_list = helper_lib.get_stg_all_bucket_list_with_ramquota_size(
            bucket_details)

    snapshot.db_path = staged_source.parameters.mount_path
    snapshot.couchbase_port = source_config.couchbase_src_port
    snapshot.couchbase_host = source_config.couchbase_src_host
    snapshot.bucket_list = ":".join(bucket_list)
    snapshot.time_stamp = helper_lib.current_time()
    snapshot.snapshot_id = str(helper_lib.get_snapshot_id())
    logger.debug("snapshot schema: {}".format(snapshot))
    logger.debug("Deleting the snap sync lock file {}".format(
        config.SNAP_SYNC_FILE_NAME))
    helper_lib.delete_file(staged_source.staged_connection,
                           config.SNAP_SYNC_FILE_NAME)
    return snapshot
Esempio n. 3
0
def d_source_status_xdcr(staged_source, repository, source_config):
    """Return the Couchbase status of an XDCR-based dSource."""
    logger.debug("Checking status for D_SOURCE: {}".format(
        source_config.pretty_name))
    status_checker = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source)
        .set_repository(repository)
        .set_source_config(source_config)
        .build())
    return status_checker.status()
Esempio n. 4
0
def post_snapshot(virtual_source, repository, source_config):
    """Collect VDB metadata into a SnapshotDefinition after provisioning.

    Reads the bucket list from the provision config file and records it
    with the mount path, host, port, timestamp and a fresh snapshot id.

    :return: populated SnapshotDefinition
    :raises: re-raises any error after logging it
    """
    try:
        logger.debug("Taking Post Snapshot...")
        provision_process = CouchbaseOperation(
            Resource.ObjectBuilder.set_virtual_source(
                virtual_source).set_repository(repository).set_source_config(
                    source_config).build())
        config_file = provision_process.get_config_file_path()

        stdout, stderr, exit_code = helper_lib.read_file(
            virtual_source.connection, config_file)
        # The config file stores a single "BUCKET_LIST=<value>" entry;
        # strip the key prefix to keep only the value.
        bucket_list = re.sub('BUCKET_LIST=', '', stdout)
        logger.debug("BUCKET_LIST={}".format(bucket_list))
        db_path = virtual_source.parameters.mount_path
        time_stamp = helper_lib.current_time()
        couchbase_port = virtual_source.parameters.couchbase_port
        couchbase_host = virtual_source.connection.environment.host.name
        snapshot_id = str(helper_lib.get_snapshot_id())
        snapshot = SnapshotDefinition(db_path=db_path,
                                      couchbase_port=couchbase_port,
                                      couchbase_host=couchbase_host,
                                      bucket_list=bucket_list,
                                      time_stamp=time_stamp,
                                      snapshot_id=snapshot_id)
        logger.info("snapshot schema: {}".format(snapshot))
        return snapshot
    except Exception as err:
        # str(err) instead of err.message: .message is deprecated since
        # Python 2.6 and absent on many exception types.
        logger.debug("Snap shot is failed with error {}".format(str(err)))
        raise
Esempio n. 5
0
def vdb_start(virtual_source, repository, source_config):
    """Start the Couchbase service for the VDB.

    :raises CouchbaseServicesError: (as a user error) if the start fails.
    """
    provision_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source).
        set_repository(repository).set_source_config(source_config).build())
    logger.debug("Starting couchbase server")
    try:
        provision_process.start_couchbase()
    except Exception:
        # Python 2 three-expression raise: surface a user-facing error while
        # preserving the original traceback.
        raise CouchbaseServicesError(
            " Start").to_user_error(), None, sys.exc_info()[2]
Esempio n. 6
0
def d_source_status_cbbkpmgr(staged_source, repository, source_config):
    """Return the status of a backup-manager based dSource.

    Delegates to CouchbaseOperation.status(); an earlier directory-presence
    check was abandoned in favor of querying the server directly.
    """
    status_obj = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())
    logger.debug("Checking status for D_SOURCE: {}".format(
        source_config.pretty_name))
    return status_obj.status()
Esempio n. 7
0
def unmount_file_system_in_error_case(staged_source, repository, source_config):
    """Best-effort cleanup after a failed operation: stop Couchbase and
    unmount the staging file system.

    Any failure here is only logged, so the original error that triggered
    the cleanup is not masked.
    """
    try:
        logger.debug("Un-mounting file system as last operation was not successful")
        obj = CouchbaseOperation(
            Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config(
                source_config).build())
        obj.stop_couchbase()
        helper_lib.unmount_file_system(staged_source.staged_connection, staged_source.parameters.mount_path)
        logger.debug("Un mounting completed")
    except Exception as err:
        # Deliberate swallow (error-path cleanup). str(err) instead of
        # err.message, which is deprecated/absent on many exception types.
        logger.debug("Un-mounting failed, reason: " + str(err))
Esempio n. 8
0
def vdb_pre_snapshot(virtual_source, repository, source_config):
    """Persist the current Couchbase configuration of every cluster node
    (primary first, then each additional node) before a snapshot."""
    logger.debug("In Pre snapshot...")
    primary = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source)
        .set_repository(repository)
        .set_source_config(source_config)
        .build())

    primary.save_config(what='current', nodeno=1)

    extra_nodes = primary.parameters.node_list or []
    for offset, node in enumerate(extra_nodes):
        node_number = offset + 2  # node 1 is the primary
        logger.debug("+++++++++++++++++++++++++++")
        logger.debug(node)
        logger.debug(node_number)
        logger.debug("+++++++++++++++++++++++++++")
        secondary = CouchbaseOperation(
            Resource.ObjectBuilder.set_virtual_source(virtual_source)
            .set_repository(repository)
            .set_source_config(source_config)
            .build(),
            make_nonprimary_connection(primary.connection,
                                       node['environment'],
                                       node['environmentUser']))
        secondary.save_config(what='current', nodeno=node_number)
Esempio n. 9
0
def stop_staging_cbbkpmgr(staged_source, repository, source_config):
    """Disable a backup-manager dSource: stop the server, save the current
    configuration, then delete the active config."""
    staging_ops = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source)
        .set_repository(repository)
        .set_source_config(source_config)
        .build())
    staging_ops.stop_couchbase()
    staging_ops.save_config(what='current')
    staging_ops.delete_config()
Esempio n. 10
0
def check_mount_path(staged_source, repository):
    mount_path_check = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).build())
    snapsync_filename = mount_path_check.create_config_dir(
    ) + "/" + db_commands.constants.LOCK_SNAPSYNC_OPERATION
    sync_filename = mount_path_check.create_config_dir(
    ) + "/" + db_commands.constants.LOCK_SYNC_OPERATION
    if helper_lib.check_file_present(staged_source.staged_connection,
                                     snapsync_filename):
        raise MountPathError("Another Snap-Sync process is in progress "
                             ).to_user_error(), None, sys.exc_info()[2]
    if helper_lib.check_file_present(staged_source.staged_connection,
                                     sync_filename):
        raise MountPathError("Another Sync process is in progress "
                             ).to_user_error(), None, sys.exc_info()[2]
    return True
Esempio n. 11
0
def vdb_status(virtual_source, repository, source_config):
    """Report the VDB status, downgrading ACTIVE to INACTIVE when the
    mount point is stale."""
    checker = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source)
        .set_repository(repository)
        .set_source_config(source_config)
        .build())
    cb_status = checker.status()
    logger.debug("VDB Status is {}".format(cb_status))

    if cb_status != Status.ACTIVE:
        return cb_status

    # Server claims to be up; also make sure its mount point is healthy.
    logger.debug("Checking mount point")
    mount_is_stale = helper_lib.check_stale_mountpoint(
        checker.connection, virtual_source.parameters.mount_path)
    if mount_is_stale:
        logger.debug("error with mount point - report inactive")
        return Status.INACTIVE
    return Status.ACTIVE
Esempio n. 12
0
def virtual_mount_specification(virtual_source, repository):
    """Build the MountSpecification for a VDB across all cluster nodes.

    Cleans up stale mounts (stopping Couchbase first) on the primary and on
    every node in node_list, verifies none of the hosts is already using the
    mount path, and returns mounts owned by the repository's uid/gid.
    """
    mount_path = virtual_source.parameters.mount_path

    # Primary host: clear any leftover mount from a previous job.
    if check_stale_mountpoint(virtual_source.connection, mount_path):
        cleanup_process = CouchbaseOperation(
            Resource.ObjectBuilder.set_virtual_source(
                virtual_source).set_repository(repository).build())
        cleanup_process.stop_couchbase()
        clean_stale_mountpoint(virtual_source.connection, mount_path)

    check_server_is_used(virtual_source.connection, mount_path)

    mounts = [Mount(virtual_source.connection.environment, mount_path)]
    logger.debug("Mounting path {}".format(mount_path))
    logger.debug("Setting ownership to uid {} and gid {}".format(
        repository.uid, repository.gid))
    ownership_spec = OwnershipSpecification(repository.uid, repository.gid)

    logger.debug("in mounting: {}".format(
        str(virtual_source.parameters.node_list)))

    if virtual_source.parameters.node_list is not None and len(
            virtual_source.parameters.node_list) > 0:
        # Multi-node cluster: add a mount (and run the same stale-mount
        # cleanup) for every additional node.
        for m in virtual_source.parameters.node_list:
            logger.debug("in loop: {}".format(str(m)))
            # NOTE(review): the reference appears to be derived by stripping
            # the '_ENVIRONMENT' suffix from the environment id — confirm
            # this convention against the platform's object naming.
            node_host = RemoteHost(name='foo',
                                   reference=m["environment"].replace(
                                       '_ENVIRONMENT', ''),
                                   binary_path="",
                                   scratch_path="")
            e = RemoteEnvironment("foo", m["environment"], node_host)
            mount = Mount(e, mount_path)
            mounts.append(mount)

            # Build a throwaway connection to the node for cleanup checks.
            user = RemoteUser(name="unused", reference=m['environmentUser'])
            environment = RemoteEnvironment(name="unused",
                                            reference=m['environment'],
                                            host=node_host)
            clean_node_conn = RemoteConnection(environment=environment,
                                               user=user)

            if check_stale_mountpoint(clean_node_conn, mount_path):
                clean_node = CouchbaseOperation(
                    Resource.ObjectBuilder.set_virtual_source(
                        virtual_source).set_repository(repository).build(),
                    clean_node_conn)
                clean_node.stop_couchbase()
                clean_stale_mountpoint(clean_node_conn, mount_path)

            check_server_is_used(clean_node_conn, mount_path)

    return MountSpecification(mounts, ownership_spec)
Esempio n. 13
0
def linked_mount_specification(staged_source, repository):
    """Build the MountSpecification for a staged (linked) source.

    Clears any stale mount first (stopping Couchbase if needed), checks the
    path is not already in use and that no other sync holds a lock, then
    returns a mount owned by the repository's uid/gid.
    """
    mount_path = staged_source.parameters.mount_path
    staged_connection = staged_source.staged_connection

    if check_stale_mountpoint(staged_connection, mount_path):
        # Leftover mount from a previous job: stop the server and clean it.
        cleanup_process = CouchbaseOperation(
            Resource.ObjectBuilder.set_staged_source(staged_source)
            .set_repository(repository)
            .build())
        cleanup_process.stop_couchbase()
        clean_stale_mountpoint(staged_connection, mount_path)

    check_server_is_used(staged_connection, mount_path)

    linked.check_mount_path(staged_source, repository)
    logger.debug("Mounting path {}".format(mount_path))
    mounts = [Mount(staged_connection.environment, mount_path)]
    logger.debug("Setting ownership to uid {} and gid {}".format(
        repository.uid, repository.gid))
    ownership_spec = OwnershipSpecification(repository.uid, repository.gid)
    return MountSpecification(mounts, ownership_spec)
Esempio n. 14
0
def pre_snapshot_xdcr(staged_source, repository, source_config,
                      input_parameters):
    """Acquire the snap-sync lock and stop Couchbase before an XDCR snapshot.

    :raises MultipleSyncError: if another snap-sync already holds the lock.
    """
    logger.info("In Pre snapshot...")
    snapshot_prep = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())
    config.SNAP_SYNC_FILE_NAME = (
        snapshot_prep.create_config_dir() + "/" +
        db_commands.constants.LOCK_SNAPSYNC_OPERATION)
    # The sync.lck file is intentionally ignored here; it is never deleted,
    # even in post snapshot.
    lock_already_held = helper_lib.check_file_present(
        staged_source.staged_connection, config.SNAP_SYNC_FILE_NAME)
    if lock_already_held:
        # Another job owns the lock file, so cleanup must not remove it.
        config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
        raise MultipleSyncError()
    logger.debug("Creating lock file...")
    msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(
        source_config.pretty_name, input_parameters.couchbase_host)
    helper_lib.write_file(staged_source.staged_connection, msg,
                          config.SNAP_SYNC_FILE_NAME)
    logger.info("Stopping Couchbase")
    snapshot_prep.stop_couchbase()
Esempio n. 15
0
def post_snapshot(virtual_source, repository, source_config):
    """Collect VDB metadata into a SnapshotDefinition after provisioning.

    Captures the bucket list (JSON-encoded), mount path, host/port, a fresh
    snapshot id and the admin credentials needed for later restores.

    :return: populated SnapshotDefinition
    :raises: re-raises any error after logging it
    """
    try:
        logger.debug("Taking Post Snapshot...")
        provision_process = CouchbaseOperation(
            Resource.ObjectBuilder.set_virtual_source(
                virtual_source).set_repository(repository).set_source_config(
                    source_config).build())

        # Index definitions are currently not captured for VDB snapshots.
        ind = []

        bucket_details = json.dumps(provision_process.bucket_list())
        logger.debug("BUCKET_LIST={}".format(bucket_details))
        db_path = virtual_source.parameters.mount_path
        time_stamp = helper_lib.current_time()
        couchbase_port = virtual_source.parameters.couchbase_port
        couchbase_host = virtual_source.connection.environment.host.name
        snapshot_id = str(helper_lib.get_snapshot_id())
        snapshot = SnapshotDefinition(db_path=db_path,
                                      couchbase_port=couchbase_port,
                                      couchbase_host=couchbase_host,
                                      bucket_list=bucket_details,
                                      time_stamp=time_stamp,
                                      snapshot_id=snapshot_id,
                                      indexes=ind)

        snapshot.couchbase_admin = provision_process.parameters.couchbase_admin
        snapshot.couchbase_admin_password = provision_process.parameters.couchbase_admin_password

        return snapshot
    except Exception as err:
        # str(err) instead of err.message: .message is deprecated since
        # Python 2.6 and absent on many exception types.
        logger.debug("Snap shot is failed with error {}".format(str(err)))
        raise
Esempio n. 16
0
def vdb_unconfigure(virtual_source, repository, source_config):
    """Unconfigure the VDB: stop Couchbase everywhere and delete the saved
    configuration on the primary and on every additional cluster node."""
    primary = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source)
        .set_repository(repository)
        .set_source_config(source_config)
        .build())

    vdb_stop(virtual_source, repository, source_config)
    primary.delete_config()

    extra_nodes = primary.parameters.node_list or []
    for node in extra_nodes:
        logger.debug("+++++++++++++++++++++++++++")
        logger.debug(node)
        logger.debug("+++++++++++++++++++++++++++")
        secondary = CouchbaseOperation(
            Resource.ObjectBuilder.set_virtual_source(virtual_source)
            .set_repository(repository)
            .set_source_config(source_config)
            .build(),
            make_nonprimary_connection(primary.connection,
                                       node['environment'],
                                       node['environmentUser']))
        secondary.delete_config()
        secondary.stop_couchbase()
Esempio n. 17
0
def resync_cbbkpmgr(staged_source, repository, source_config,
                    input_parameters):
    """Resynchronize a backup-manager dSource.

    Guards against concurrent jobs, configures the staging cluster,
    pre-creates buckets from the source backup, runs a full cb-backup
    restore, rebuilds indexes, then stops Couchbase and saves the parent
    configuration.
    """
    dsource_type = input_parameters.d_source_type
    dsource_name = source_config.pretty_name
    couchbase_host = input_parameters.couchbase_host
    resync_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())

    linking.check_for_concurrent(resync_process, dsource_type, dsource_name,
                                 couchbase_host)

    # validate if this works as well for backup
    linking.configure_cluster(resync_process)

    logger.debug("Finding source and staging bucket list")
    bucket_details_source = resync_process.source_bucket_list_offline()
    bucket_details_staged = helper_lib.filter_bucket_name_from_output(
        resync_process.bucket_list())

    buckets_toprocess = linking.buckets_precreation(resync_process,
                                                    bucket_details_source,
                                                    bucket_details_staged)

    csv_bucket_list = ",".join(buckets_toprocess)
    logger.debug("Started CB backup manager")
    helper_lib.sleepForSecond(30)
    resync_process.cb_backup_full(csv_bucket_list)
    helper_lib.sleepForSecond(30)

    linking.build_indexes(resync_process)
    logger.info("Stopping Couchbase")
    resync_process.stop_couchbase()
    resync_process.save_config('parent')
Esempio n. 18
0
def stop_staging_xdcr(staged_source, repository, source_config):
    """Disable an XDCR dSource: remove replication (and XDCR setup if
    present), delete the sync lock file and stop Couchbase on staging."""
    staging_ops = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())

    logger.debug("Disabling the D_SOURCE:{}".format(source_config.pretty_name))
    dsource_type = staged_source.parameters.d_source_type
    rx_connection = staged_source.staged_connection
    logger.info("Deleting Existing Replication")
    is_xdcr_setup, cluster_name = staging_ops.delete_replication()
    if is_xdcr_setup:
        logger.info("Deleting XDCR")
        staging_ops.xdcr_delete(cluster_name)
    lock_file = helper_lib.get_sync_lock_file_name(dsource_type,
                                                   source_config.pretty_name)
    sync_lock_path = staging_ops.create_config_dir() + "/" + lock_file
    helper_lib.delete_file(rx_connection, sync_lock_path)
    staging_ops.stop_couchbase()
    logger.debug("D_SOURCE:{} disabled".format(source_config.pretty_name))
Esempio n. 19
0
def vdb_configure(virtual_source, snapshot, repository):
    """Provision the VDB from a snapshot and return its source config.

    Restarts Couchbase, initializes the node and cluster, provisions the
    data, cleans up, and builds the resulting source config object.

    :raises FailedToReadBucketDataFromSnapshot: (as a user error) when the
        snapshot's bucket data cannot be read.
    :raises: re-raises any other error after logging it.
    """
    try:
        provision_process = CouchbaseOperation(
            Resource.ObjectBuilder.set_virtual_source(virtual_source).
            set_repository(repository).set_snapshot(snapshot).build())

        provision_process.restart_couchbase()
        provision_process.node_init()
        provision_process.cluster_init()
        _do_provision(provision_process, snapshot)
        _cleanup(provision_process, snapshot)
        src_cfg_obj = _source_config(virtual_source, repository, None,
                                     snapshot)

        return src_cfg_obj
    except FailedToReadBucketDataFromSnapshot as err:
        # Python 2 three-expression raise: convert to a user error while
        # preserving the original traceback.
        raise FailedToReadBucketDataFromSnapshot("Provision is failed. " + err.message).to_user_error(), None, \
            sys.exc_info()[2]
    except Exception as err:
        logger.debug("Provision is failed {}".format(err.message))
        raise
Esempio n. 20
0
def vdb_start(virtual_source, repository, source_config):
    """Start the Couchbase service on the primary VDB host and on every
    additional node listed in node_list.

    :raises CouchbaseServicesError: (as a user error) if any start fails.
    """
    provision_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source).
        set_repository(repository).set_source_config(source_config).build())
    logger.debug("Starting couchbase server")
    try:
        provision_process.start_couchbase()
        if provision_process.parameters.node_list is not None and len(
                provision_process.parameters.node_list) > 0:
            # Multi-node cluster: start Couchbase on each extra node via a
            # non-primary connection.
            for node in provision_process.parameters.node_list:
                logger.debug("+++++++++++++++++++++++++++")
                logger.debug(node)
                logger.debug("+++++++++++++++++++++++++++")
                addnode = CouchbaseOperation(
                    Resource.ObjectBuilder.set_virtual_source(virtual_source).
                    set_repository(repository).set_source_config(
                        source_config).build(),
                    make_nonprimary_connection(provision_process.connection,
                                               node['environment'],
                                               node['environmentUser']))
                addnode.start_couchbase()
    except Exception:
        # Python 2 three-expression raise: surface a user-facing error while
        # preserving the original traceback.
        raise CouchbaseServicesError(
            " Start").to_user_error(), None, sys.exc_info()[2]
Esempio n. 21
0
def post_snapshot_cbbkpmgr(staged_source, repository, source_config,
                           dsource_type):
    """Build the SnapshotDefinition after a backup-manager ingest.

    Starts Couchbase, records the bucket list (JSON), index definitions,
    connection metadata and admin credentials, then removes both lock files.

    :return: populated SnapshotDefinition
    """
    logger.info("In Post snapshot...")
    post_snapshot_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())
    rx_connection = staged_source.staged_connection
    post_snapshot_process.start_couchbase()
    snapshot = SnapshotDefinition(validate=False)
    bucket_details = post_snapshot_process.bucket_list()

    # Capture index definitions so they can be rebuilt on provision.
    ind = post_snapshot_process.get_indexes_definition()
    logger.debug("indexes definition : {}".format(ind))

    snapshot.indexes = ind
    snapshot.db_path = staged_source.parameters.mount_path
    snapshot.couchbase_port = source_config.couchbase_src_port
    snapshot.couchbase_host = source_config.couchbase_src_host
    snapshot.bucket_list = json.dumps(bucket_details)
    snapshot.time_stamp = helper_lib.current_time()
    snapshot.snapshot_id = str(helper_lib.get_snapshot_id())
    snapshot.couchbase_admin = post_snapshot_process.parameters.couchbase_admin
    snapshot.couchbase_admin_password = post_snapshot_process.parameters.couchbase_admin_password
    logger.debug("Deleting the lock files")
    helper_lib.delete_file(rx_connection, config.SNAP_SYNC_FILE_NAME)
    helper_lib.delete_file(rx_connection, config.SYNC_FILE_NAME)
    # NOTE: Couchbase is intentionally left running and the staging path
    # left mounted here.
    return snapshot
Esempio n. 22
0
def pre_snapshot_cbbkpmgr(staged_source, repository, source_config,
                          input_parameters):
    """Pre-snapshot for a backup-manager dSource (normal snapshot path).

    Guards against concurrent jobs, re-ingests the latest backup into the
    staging buckets, rebuilds indexes, then stops Couchbase and saves the
    parent configuration.
    """
    pre_snapshot_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())
    # NOTE(review): bucket_size and rx_connection are never used below.
    bucket_size = input_parameters.bucket_size
    rx_connection = staged_source.staged_connection

    dsource_type = input_parameters.d_source_type
    dsource_name = source_config.pretty_name
    couchbase_host = input_parameters.couchbase_host
    linking.check_for_concurrent(pre_snapshot_process, dsource_type,
                                 dsource_name, couchbase_host)

    logger.debug("Finding source and staging bucket list")
    # NOTE(review): bucket_details_source is unused and the next assignment
    # is immediately overwritten below; retained in case the remote calls
    # have side effects — confirm before removing.
    bucket_details_source = pre_snapshot_process.source_bucket_list_offline()
    bucket_details_staged = helper_lib.filter_bucket_name_from_output(
        pre_snapshot_process.bucket_list())

    bucket_details_staged = pre_snapshot_process.bucket_list()
    filter_bucket_list = helper_lib.filter_bucket_name_from_output(
        bucket_details_staged)
    csv_bucket_list = ",".join(filter_bucket_list)
    pre_snapshot_process.cb_backup_full(csv_bucket_list)
    logger.info("Re-ingesting from latest backup complete.")

    linking.build_indexes(pre_snapshot_process)
    logger.info("Stopping Couchbase")
    pre_snapshot_process.stop_couchbase()
    pre_snapshot_process.save_config('parent')
Esempio n. 23
0
def resync_cbbkpmgr(staged_source, repository, source_config, input_parameters):
    """Full resync of a backup-manager dSource.

    Acquires the sync lock file, (re)creates the staging buckets to match
    the source backup — honoring explicit config_settings_prov when given —
    removes extra buckets, then runs a full cb-backup restore.

    :raises MultipleSyncError: if another sync already holds the lock file.
    """
    dsource_type = input_parameters.d_source_type
    bucket_size = staged_source.parameters.bucket_size
    rx_connection = staged_source.staged_connection
    resync_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config(
            source_config).build())

    config_dir = resync_process.create_config_dir()
    config.SYNC_FILE_NAME = config_dir + "/" + get_sync_lock_file_name(dsource_type, source_config.pretty_name)
    src_bucket_info_filename = db_commands.constants.SRC_BUCKET_INFO_FILENAME
    src_bucket_info_filename = os.path.dirname(config_dir) + "/" + src_bucket_info_filename
    logger.debug("src_bucket_info_filename = {}".format(src_bucket_info_filename))

    if helper_lib.check_file_present(rx_connection, config.SYNC_FILE_NAME):
        logger.debug("Sync file is already created by other process")
        # Another job owns the lock, so cleanup must not remove it.
        config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
        raise MultipleSyncError("Sync file is already created by other process")
    else:
        # creating sync  file
        msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(source_config.pretty_name,
                                                                                            input_parameters.couchbase_host)
        helper_lib.write_file(rx_connection, msg, config.SYNC_FILE_NAME)

    resync_process.restart_couchbase()
    resync_process.node_init()
    resync_process.cluster_init()
    logger.debug("Finding source and staging bucket list")
    bucket_details_source = resync_process.source_bucket_list_offline(filename=src_bucket_info_filename)
    bucket_details_staged = resync_process.bucket_list()

    config_setting = staged_source.parameters.config_settings_prov
    logger.debug("Bucket names passed for configuration: {}".format(config_setting))

    bucket_configured_staged = []
    if len(config_setting) > 0:
        # Explicit bucket configuration: (re)create exactly those buckets,
        # sized from the source backup's bucket info.
        logger.debug("Getting bucket information from config")
        for config_bucket in config_setting:
            bucket_configured_staged.append(config_bucket["bucketName"])
            logger.debug("Filtering bucket name with size only from above output")
            bkt_name_size = helper_lib.get_bucket_name_with_size(bucket_details_source, config_bucket["bucketName"])
            bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_name_size.split(",")[1])

            if config_bucket["bucketName"] not in bucket_details_staged:
                resync_process.bucket_create(config_bucket["bucketName"], bkt_size_mb)
            else:
                # Drop and recreate so the staged bucket starts empty.
                logger.debug("Bucket {} already present in staged environment. Recreating bucket ".format(
                    config_bucket["bucketName"]))
                resync_process.bucket_remove(config_bucket["bucketName"])
                resync_process.bucket_create(config_bucket["bucketName"], bkt_size_mb)

        # Remove staged buckets that are not part of the configuration.
        logger.debug("Finding buckets present at staged server")
        bucket_details_staged = resync_process.bucket_list()
        filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
        extra_bucket = list(set(filter_bucket_list) - set(bucket_configured_staged))

        logger.debug("Extra bucket found to delete:{} ".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)
    else:
        # No explicit configuration: mirror every source bucket.
        logger.debug("Finding buckets present at staged server with size")
        all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(bucket_details_source)
        logger.debug("Filtering bucket name with size only from above output")
        filter_source_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_source)
        for items in all_bkt_list_with_size:
            if items:
                logger.debug("Running bucket operations for {}".format(items))
                bkt_name, bkt_size = items.split(',')

                bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_size)
                if bkt_name not in bucket_details_staged:
                    resync_process.bucket_create(bkt_name, bkt_size_mb)
                else:
                    # Drop and recreate so the staged bucket starts empty.
                    logger.debug(
                        "Bucket {} already present in staged environment. Recreating bucket ".format(bkt_name))
                    resync_process.bucket_remove(bkt_name)
                    resync_process.bucket_create(bkt_name, bkt_size_mb)

        # Remove staged buckets that do not exist on the source.
        bucket_details_staged = resync_process.bucket_list()
        filter_staged_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
        extra_bucket = list(set(filter_staged_bucket) - set(filter_source_bucket))
        logger.info("Extra bucket found to delete:{}".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)

    # Restore all (now existing) staged buckets from the full backup.
    bucket_details_staged = resync_process.bucket_list()
    filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
    csv_bucket_list = ",".join(filter_bucket_list)
    logger.debug("Started CB backup manager")
    resync_process.cb_backup_full(csv_bucket_list)
Esempio n. 24
0
def vdb_stop(virtual_source, repository, source_config):
    """Stop the Couchbase server backing this VDB."""
    stopper = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source)
        .set_repository(repository)
        .set_source_config(source_config)
        .build())
    logger.debug("Stopping couchbase server")
    stopper.stop_couchbase()
Esempio n. 25
0
def resync_xdcr(staged_source, repository, source_config, input_parameters):
    """Resynchronize a staged XDCR dSource from its source cluster.

    Guards against a concurrent sync on the same host, configures the
    staging cluster, pre-creates the source buckets on staging, sets up
    XDCR replication, waits for every pre-created bucket to finish
    replicating, rebuilds indexes, then stops Couchbase and saves the
    'parent' configuration for later provisioning.

    :param staged_source: staged source object (parameters + connection)
    :param repository: repository definition for the Couchbase install
    :param source_config: source config (pretty_name used as dSource name)
    :param input_parameters: linking parameters (d_source_type, host, ...)
    :raises UserError: if no replication UUID can be found after setup
    """
    logger.debug("START resync_xdcr")
    dsource_type = input_parameters.d_source_type
    dsource_name = source_config.pretty_name
    # NOTE: removed unused locals `bucket_size` and `rx_connection`;
    # bucket sizing is handled inside linking.buckets_precreation here.
    resync_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())

    couchbase_host = input_parameters.couchbase_host

    # Fail fast if another dSource sync is already running on this host.
    linking.check_for_concurrent(resync_process, dsource_type, dsource_name,
                                 couchbase_host)

    linking.configure_cluster(resync_process)

    # common steps for both XDCR & CB back up
    bucket_details_source = resync_process.source_bucket_list()
    bucket_details_staged = resync_process.bucket_list()
    buckets_toprocess = linking.buckets_precreation(resync_process,
                                                    bucket_details_source,
                                                    bucket_details_staged)

    # run this for all buckets
    resync_process.setup_replication()

    logger.debug("Finding staging_uuid & cluster_name on staging")
    staging_uuid = resync_process.get_replication_uuid()

    if staging_uuid is None:
        logger.debug("Can't find a replication UUID after setting it up")
        raise UserError("Can't find a replication UUID after setting it up")

    # Block until each pre-created bucket has fully replicated.
    for bkt in buckets_toprocess:
        resync_process.monitor_bucket(bkt, staging_uuid)

    linking.build_indexes(resync_process)

    logger.info("Stopping Couchbase")
    resync_process.stop_couchbase()
    resync_process.save_config('parent')
Esempio n. 26
0
def start_staging_xdcr(staged_source, repository, source_config):
    """Re-enable a disabled XDCR dSource.

    Restores the saved 'current' configuration, restarts Couchbase,
    re-establishes replication and writes the sync-lock marker file.
    """
    start_staging = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source)
        .set_repository(repository)
        .set_source_config(source_config)
        .build())

    logger.debug("Enabling the D_SOURCE:{}".format(source_config.pretty_name))
    dsource_type = staged_source.parameters.d_source_type
    rx_connection = staged_source.staged_connection

    # Bring the staging server back up on the saved 'current' config.
    start_staging.stop_couchbase()
    start_staging.delete_config()
    # TODO error handling
    start_staging.restore_config(what='current')
    start_staging.start_couchbase()

    start_staging.setup_replication()

    # Drop the sync-lock marker so snapsync can detect an in-flight job.
    config_dir = start_staging.create_config_dir()
    msg = "dSource Creation / Snapsync for dSource {} is in progress".format(
        source_config.pretty_name)
    sync_lock_path = config_dir + "/" + helper_lib.get_sync_lock_file_name(
        dsource_type, source_config.pretty_name)
    helper_lib.write_file(rx_connection, msg, sync_lock_path)
    logger.debug("D_SOURCE:{} enabled".format(source_config.pretty_name))
Esempio n. 27
0
def resync_xdcr(staged_source, repository, source_config, input_parameters):
    """Resync a staged XDCR dSource.

    Acquires the sync-lock file (failing if another dSource holds it),
    restarts and initializes the staging cluster, ensures the XDCR
    remote-cluster reference exists, then mirrors the source buckets on
    staging: either only those listed in config_settings_prov, or every
    source bucket when no explicit configuration is given. Buckets on
    staging that are not expected are removed. Finally waits for
    replication of every staged bucket to complete.

    :raises MultipleXDCRSyncError: another dSource already holds the lock
    :raises DuplicateClusterError: a conflicting XDCR cluster name exists
    """
    dsource_type = input_parameters.d_source_type
    dsource_name = source_config.pretty_name
    bucket_size = staged_source.parameters.bucket_size
    rx_connection = staged_source.staged_connection
    resync_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())
    config_dir = resync_process.create_config_dir()
    # Module-level path so cleanup code elsewhere can find the lock file.
    config.SYNC_FILE_NAME = config_dir + "/" + helper_lib.get_sync_lock_file_name(
        dsource_type, dsource_name)

    # Only one XDCR sync may run at a time against this staging host.
    if not verify_sync_lock_file_for_this_job(rx_connection,
                                              config.SYNC_FILE_NAME):
        # Another job owns the lock: make sure our cleanup won't remove it.
        config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
        logger.debug("Sync file is already created by other dSource")
        raise MultipleXDCRSyncError(
            "Sync file is already created by other dSource")
    else:
        # creating sync file
        msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(
            dsource_name, input_parameters.couchbase_host)
        helper_lib.write_file(rx_connection, msg, config.SYNC_FILE_NAME)

    resync_process.restart_couchbase()
    resync_process.node_init()
    resync_process.cluster_init()
    # Create the XDCR remote-cluster reference only if not already present.
    already_set_up_done, name_conflict = resync_process.check_duplicate_replication(
        resync_process.parameters.stg_cluster_name)
    if already_set_up_done:
        logger.info("No need to XDCR setup again")
    elif name_conflict:
        raise DuplicateClusterError("Already cluster is present")
    else:
        logger.info("First time XDCR set up")
        resync_process.xdcr_setup()
    # common steps for both XDCR & CB back up
    logger.debug("Finding source and staging bucket list")
    bucket_details_source = resync_process.source_bucket_list()
    bucket_details_staged = resync_process.bucket_list()
    config_setting = staged_source.parameters.config_settings_prov
    logger.debug(
        "Bucket names passed for configuration: {}".format(config_setting))
    bucket_configured_staged = []
    if len(config_setting) > 0:
        # Explicit bucket list supplied: mirror only the configured buckets.
        logger.debug("Getting bucket information from config")
        for config_bucket in config_setting:
            bucket_configured_staged.append(config_bucket["bucketName"])
            logger.debug(
                "Filtering bucket name with size only from above output")
            bkt_name_size = helper_lib.get_bucket_name_with_size(
                bucket_details_source, config_bucket["bucketName"])
            # Size is the second CSV field of "name,size".
            bkt_size_mb = helper_lib.get_bucket_size_in_MB(
                bucket_size,
                bkt_name_size.split(",")[1])

            if config_bucket["bucketName"] not in bucket_details_staged:
                resync_process.bucket_create(config_bucket["bucketName"],
                                             bkt_size_mb)
            else:
                # Recreate existing buckets so staging starts from a clean copy.
                logger.debug(
                    "Bucket {} already present in staged environment. Recreating bucket "
                    .format(config_bucket["bucketName"]))
                resync_process.bucket_remove(config_bucket["bucketName"])
                resync_process.bucket_create(config_bucket["bucketName"],
                                             bkt_size_mb)
            resync_process.xdcr_replicate(config_bucket["bucketName"],
                                          config_bucket["bucketName"])

        # Remove staged buckets that are not in the configured set.
        logger.debug("Finding buckets present at staged server")
        bucket_details_staged = resync_process.bucket_list()
        filter_bucket_list = helper_lib.filter_bucket_name_from_output(
            bucket_details_staged)
        extra_bucket = list(
            set(filter_bucket_list) - set(bucket_configured_staged))

        logger.debug("Extra bucket found to delete:{} ".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)
    else:
        # No explicit config: mirror every bucket found on the source.
        logger.debug("Finding buckets present at staged server with size")
        all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(
            bucket_details_source)
        logger.debug("Filtering bucket name with size only from above output")
        filter_source_bucket = helper_lib.filter_bucket_name_from_output(
            bucket_details_source)
        for items in all_bkt_list_with_size:
            if items:
                logger.debug("Running bucket operations for {}".format(items))
                bkt_name, bkt_size = items.split(',')

                bkt_size_mb = helper_lib.get_bucket_size_in_MB(
                    bucket_size, bkt_size)
                if bkt_name not in bucket_details_staged:
                    resync_process.bucket_create(bkt_name, bkt_size_mb)
                else:
                    logger.debug(
                        "Bucket {} already present in staged environment. Recreating bucket "
                        .format(bkt_name))
                    resync_process.bucket_remove(bkt_name)
                    resync_process.bucket_create(bkt_name, bkt_size_mb)
                resync_process.xdcr_replicate(bkt_name, bkt_name)

        # Remove staged buckets that no longer exist on the source.
        bucket_details_staged = resync_process.bucket_list()
        filter_staged_bucket = helper_lib.filter_bucket_name_from_output(
            bucket_details_staged)
        extra_bucket = list(
            set(filter_staged_bucket) - set(filter_source_bucket))
        logger.info("Extra bucket found to delete:{}".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)

    logger.debug("Finding staging_uuid & cluster_name on staging")
    staging_uuid, cluster_name_staging = resync_process.get_replication_uuid()
    bucket_details_staged = resync_process.bucket_list()
    logger.debug("Filtering bucket name from output")
    filter_bucket_list = helper_lib.filter_bucket_name_from_output(
        bucket_details_staged)
    # Block until every staged bucket has fully replicated.
    for bkt in filter_bucket_list:
        resync_process.monitor_bucket(bkt, staging_uuid)
Esempio n. 28
0
def start_staging_xdcr(staged_source, repository, source_config):
    """Enable an XDCR dSource: start Couchbase, ensure the XDCR
    remote-cluster reference exists, create per-bucket replications and
    write the sync-lock marker file.

    Fix: the sync-lock file was previously written only in the
    "no explicit bucket configuration" branch, so enabling a dSource
    with config_settings_prov set never created the lock. It is now
    written unconditionally, matching the other enable flow in this file.

    :raises DuplicateClusterError: a conflicting XDCR cluster name exists
    """
    start_staging = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())

    logger.debug("Enabling the D_SOURCE:{}".format(source_config.pretty_name))
    dsource_type = staged_source.parameters.d_source_type
    rx_connection = staged_source.staged_connection
    start_staging.start_couchbase()

    # Only create the XDCR remote-cluster reference if it does not exist.
    already_set_up_done, name_conflict = start_staging.check_duplicate_replication(
        start_staging.parameters.stg_cluster_name)
    if already_set_up_done:
        logger.info("No need to XDCR setup again")
    elif name_conflict:
        raise DuplicateClusterError("Already cluster is present")
    else:
        logger.info("First time XDCR set up")
        start_staging.xdcr_setup()

    config_setting = staged_source.parameters.config_settings_prov

    if len(config_setting) > 0:
        # Replicate only the explicitly configured buckets.
        for config_bucket in config_setting:
            logger.debug("Creating replication for {}".format(
                config_bucket["bucketName"]))
            start_staging.xdcr_replicate(config_bucket["bucketName"],
                                         config_bucket["bucketName"])
    else:
        # No explicit config: replicate every bucket found on the source.
        bucket_details_source = start_staging.source_bucket_list()
        all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(
            bucket_details_source)
        for items in all_bkt_list_with_size:
            bkt_name, bkt_size = items.split(',')
            logger.debug("Creating replication for {}".format(bkt_name))
            start_staging.xdcr_replicate(bkt_name, bkt_name)

    # Write the sync-lock marker regardless of which branch ran above.
    config_dir = start_staging.create_config_dir()
    msg = "dSource Creation / Snapsync for dSource {} is in progress".format(
        source_config.pretty_name)
    helper_lib.write_file(
        rx_connection, msg,
        config_dir + "/" + helper_lib.get_sync_lock_file_name(
            dsource_type, source_config.pretty_name))
    logger.debug("D_SOURCE:{} enabled".format(source_config.pretty_name))
Esempio n. 29
0
def vdb_configure(virtual_source, snapshot, repository):
    """Provision a VDB from a snapshot.

    Restores the 'parent' Couchbase configuration saved at snapshot time,
    restarts the server in provision mode, renames the cluster, and —
    for multi-node layouts — adds each configured extra node. Returns the
    source config object for the new VDB.

    :raises FailedToReadBucketDataFromSnapshot: snapshot bucket data empty
    """
    # try:

    logger.debug("VDB CONFIG START")

    provision_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source).
        set_repository(repository).set_snapshot(snapshot).build())

    # TODO:
    # fail if already has cluster ?

    # to make sure there is no config
    provision_process.delete_config()

    provision_process.restore_config(what='parent')

    # if bucket doesn't existing in target cluster
    # couchbase will delete directory while starting
    # so we have to rename it before start

    # snapshot.bucket_list is a JSON-serialized string here
    # (unlike the ":"-joined form produced by post_snapshot_xdcr above).
    bucket_list_and_size = json.loads(snapshot.bucket_list)

    if not bucket_list_and_size:
        raise FailedToReadBucketDataFromSnapshot("Snapshot Data is empty.")
    else:
        logger.debug(
            "snapshot bucket data is: {}".format(bucket_list_and_size))

    # for item in helper_lib.filter_bucket_name_from_output(bucket_list_and_size):
    #     logger.debug("Checking bucket: {}".format(item))
    #     bucket_name = item.split(',')[0]
    #     # rename folder
    #     provision_process.move_bucket(bucket_name, 'save')

    provision_process.restart_couchbase(provision=True)
    provision_process.rename_cluster()
    #provision_process.node_init()
    #provision_process.cluster_init()

    #_do_provision(provision_process, snapshot)
    #_cleanup(provision_process, snapshot)

    #_build_indexes(provision_process, snapshot)

    #     if self.__node_local:
    #         logger.debug("it will start on main envioronment")
    #         connection = self.config.connection
    #     else:
    #         logger.debug("it will start on an additional environment {}".format(str(self.__node_environment)))
    #         connection=make_nonprimary_connection(self.config.connection, self.__node_environment, self.__node_envuser)

    # Node numbering starts at 1 for the primary; extra nodes get 2, 3, ...
    nodeno = 1

    logger.debug("MAIN CONNECTION HOST: {}".format(
        provision_process.connection.environment.host.name))

    if provision_process.parameters.node_list is not None and len(
            provision_process.parameters.node_list) > 0:
        for node in provision_process.parameters.node_list:
            nodeno = nodeno + 1
            logger.debug("+++++++++++++++++++++++++++")
            logger.debug(node)
            logger.debug(nodeno)
            logger.debug("+++++++++++++++++++++++++++")
            # Each extra node runs over its own non-primary connection.
            addnode = CouchbaseOperation(
                Resource.ObjectBuilder.set_virtual_source(virtual_source).
                set_repository(repository).set_snapshot(snapshot).build(),
                make_nonprimary_connection(provision_process.connection,
                                           node['environment'],
                                           node['environmentUser']))
            logger.debug("ADDITIONAL CONNECTION HOST: {}".format(
                provision_process.connection.environment.host.name))
            addnode.addnode(nodeno, node)
            # TODO
            # FINISH HERE
            # addnode.delete_config()
            # addnode.stop_couchbase()

    src_cfg_obj = _source_config(virtual_source, repository, None, snapshot)

    return src_cfg_obj
Esempio n. 30
0
def vdb_reconfigure(virtual_source, repository, source_config, snapshot):
    """Reconfigure (refresh/rewind) a VDB.

    Stops Couchbase, restores the saved 'current' configuration on the
    primary and every additional node, restarts them, and in multi-node
    mode polls until all nodes report ACTIVE or the deadline expires.

    Fixes: the status comparison used the Python 2-only '<>' operator
    (a SyntaxError on Python 3); replaced with '!=', which is valid on
    both. The stale "1 minute" comment was corrected — the deadline is
    3660 seconds.

    :returns: the source config produced by _source_config()
    """
    logger.debug("In vdb_reconfigure...")
    provision_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_virtual_source(virtual_source).
        set_repository(repository).set_source_config(source_config).build())

    provision_process.stop_couchbase()

    if provision_process.parameters.node_list is not None and len(
            provision_process.parameters.node_list) > 0:
        multinode = True
        # Primary node plus every additional node.
        server_count = len(provision_process.parameters.node_list) + 1
    else:
        multinode = False

    nodeno = 1
    provision_process.restore_config(what='current', nodeno=nodeno)
    # In multi-node mode don't block on the primary start; readiness of
    # all nodes is polled together below.
    provision_process.start_couchbase(no_wait=multinode)

    if provision_process.parameters.node_list is not None and len(
            provision_process.parameters.node_list) > 0:
        for node in provision_process.parameters.node_list:
            nodeno = nodeno + 1
            logger.debug("+++++++++++++++++++++++++++")
            logger.debug(node)
            logger.debug(nodeno)
            logger.debug("+++++++++++++++++++++++++++")
            addnode = CouchbaseOperation(
                Resource.ObjectBuilder.set_virtual_source(
                    virtual_source).set_repository(
                        repository).set_source_config(source_config).build(),
                make_nonprimary_connection(provision_process.connection,
                                           node['environment'],
                                           node['environmentUser']))
            addnode.stop_couchbase()
            addnode.restore_config(what='current', nodeno=nodeno)
            addnode.start_couchbase(no_wait=multinode)

    logger.debug("reconfigure for multinode: {}".format(multinode))

    if multinode:

        active_servers = {}
        logger.debug("wait for nodes")
        logger.debug("server count: {} active servers: {}".format(
            server_count, sum(active_servers.values())))

        # Poll for up to 3660 seconds (~61 minutes).
        end_time = time.time() + 3660

        # Leave the loop when the deadline passes or every server reports
        # ACTIVE at least once (active_servers accumulates per node number).
        while time.time() < end_time and sum(
                active_servers.values()) != server_count:
            logger.debug("server count 2: {} active servers: {}".format(
                server_count, sum(active_servers.values())))
            nodeno = 1
            helper_lib.sleepForSecond(1)  # throttle polling to 1 Hz
            server_status = provision_process.status()  # primary node status
            logger.debug("server status {}".format(server_status))
            if server_status == Status.ACTIVE:
                active_servers[nodeno] = 1

            for node in provision_process.parameters.node_list:
                nodeno = nodeno + 1
                logger.debug("+++++++++++++++++++++++++++")
                logger.debug(node)
                logger.debug(nodeno)
                logger.debug("+++++++++++++++++++++++++++")
                addnode = CouchbaseOperation(
                    Resource.ObjectBuilder.set_virtual_source(virtual_source).
                    set_repository(repository).set_source_config(
                        source_config).build(),
                    make_nonprimary_connection(provision_process.connection,
                                               node['environment'],
                                               node['environmentUser']))
                server_status = addnode.status()  # additional node status
                logger.debug("server status {}".format(server_status))
                if server_status == Status.ACTIVE:
                    active_servers[nodeno] = 1

    return _source_config(virtual_source, repository, source_config, snapshot)