Example #1
def resync_xdcr(staged_source, repository, source_config, input_parameters):
    logger.debug("START resync_xdcr")
    dsource_type = input_parameters.d_source_type
    dsource_name = source_config.pretty_name
    bucket_size = staged_source.parameters.bucket_size
    rx_connection = staged_source.staged_connection
    resync_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())

    couchbase_host = input_parameters.couchbase_host

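    # Fail fast if another dSource is already syncing against this Couchbase host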
    linking.check_for_concurrent(resync_process, dsource_type, dsource_name,
                                 couchbase_host)

    linking.configure_cluster(resync_process)

    # common steps for both XDCR & CB backup

    bucket_details_source = resync_process.source_bucket_list()
    bucket_details_staged = resync_process.bucket_list()
    buckets_to_process = linking.buckets_precreation(resync_process,
                                                     bucket_details_source,
                                                     bucket_details_staged)

    # set up replication for all buckets
    resync_process.setup_replication()

    logger.debug("Finding staging_uuid & cluster_name on staging")
    staging_uuid = resync_process.get_replication_uuid()

    if staging_uuid is None:
        msg = "Can't find a replication UUID after setting it up"
        logger.debug(msg)
        raise UserError(msg)

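    # Wait for each staged bucket to finish its initial replication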
    for bkt in buckets_to_process:
        resync_process.monitor_bucket(bkt, staging_uuid)

    linking.build_indexes(resync_process)

    logger.info("Stopping Couchbase")
    resync_process.stop_couchbase()
    resync_process.save_config('parent')
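
Note: get_replication_uuid() is a method on the plugin's CouchbaseOperation
class, and its body is not shown here. As a rough sketch only, assuming the
staging node exposes Couchbase's standard REST API, a remote-cluster (XDCR)
UUID lookup could look like the following; the function name, host,
credentials, and port are placeholders, not values taken from the plugin:

import requests

def lookup_replication_uuid(host, user, password, port=8091):
    # Ask the staging node for its remote cluster references (XDCR targets).
    resp = requests.get(
        "http://{}:{}/pools/default/remoteClusters".format(host, port),
        auth=(user, password),
        timeout=30,
    )
    resp.raise_for_status()
    remotes = resp.json()  # list of remote-cluster reference dicts
    # Mirror the None check above: no references means no replication UUID.
    return remotes[0].get("uuid") if remotes else None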
Example #2
def resync_xdcr(staged_source, repository, source_config, input_parameters):
    dsource_type = input_parameters.d_source_type
    dsource_name = source_config.pretty_name
    bucket_size = staged_source.parameters.bucket_size
    rx_connection = staged_source.staged_connection
    resync_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())
    config_dir = resync_process.create_config_dir()
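    # Build a per-dSource lock-file path; its presence marks a sync in progress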
    config.SYNC_FILE_NAME = config_dir + "/" + helper_lib.get_sync_lock_file_name(
        dsource_type, dsource_name)

    if not verify_sync_lock_file_for_this_job(rx_connection,
                                              config.SYNC_FILE_NAME):
        config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
        logger.debug("Sync file is already created by other dSource")
        raise MultipleXDCRSyncError(
            "Sync file is already created by other dSource")
    else:
        # creating sync file
        msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(
            dsource_name, input_parameters.couchbase_host)
        helper_lib.write_file(rx_connection, msg, config.SYNC_FILE_NAME)

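    # Restart Couchbase and (re)initialize the node and cluster on staging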
    resync_process.restart_couchbase()
    resync_process.node_init()
    resync_process.cluster_init()
    already_set_up_done, name_conflict = resync_process.check_duplicate_replication(
        resync_process.parameters.stg_cluster_name)
    if already_set_up_done:
        logger.info("No need to set up XDCR again")
    elif name_conflict:
        raise DuplicateClusterError("Cluster is already present")
    else:
        logger.info("Setting up XDCR for the first time")
        resync_process.xdcr_setup()
    # common steps for both XDCR & CB backup
    logger.debug("Finding source and staging bucket list")
    bucket_details_source = resync_process.source_bucket_list()
    bucket_details_staged = resync_process.bucket_list()
    config_setting = staged_source.parameters.config_settings_prov
    logger.debug(
        "Bucket names passed for configuration: {}".format(config_setting))
    bucket_configured_staged = []
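    # If explicit bucket settings were provided, stage only those buckets;
    # otherwise mirror every bucket found on the source.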
    if len(config_setting) > 0:
        logger.debug("Getting bucket information from config")
        for config_bucket in config_setting:
            bucket_configured_staged.append(config_bucket["bucketName"])
            logger.debug(
                "Filtering bucket name with size only from above output")
            bkt_name_size = helper_lib.get_bucket_name_with_size(
                bucket_details_source, config_bucket["bucketName"])
            bkt_size_mb = helper_lib.get_bucket_size_in_MB(
                bucket_size,
                bkt_name_size.split(",")[1])

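            # Create the staged bucket, dropping any existing copy first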
            if config_bucket["bucketName"] not in bucket_details_staged:
                resync_process.bucket_create(config_bucket["bucketName"],
                                             bkt_size_mb)
            else:
                logger.debug(
                    "Bucket {} already present in staged environment. "
                    "Recreating bucket".format(config_bucket["bucketName"]))
                resync_process.bucket_remove(config_bucket["bucketName"])
                resync_process.bucket_create(config_bucket["bucketName"],
                                             bkt_size_mb)
            resync_process.xdcr_replicate(config_bucket["bucketName"],
                                          config_bucket["bucketName"])

        logger.debug("Finding buckets present at staged server")
        bucket_details_staged = resync_process.bucket_list()
        filter_bucket_list = helper_lib.filter_bucket_name_from_output(
            bucket_details_staged)
        extra_bucket = list(
            set(filter_bucket_list) - set(bucket_configured_staged))

        logger.debug("Extra bucket found to delete:{} ".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)
    else:
        logger.debug("Finding buckets present at staged server with size")
        all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(
            bucket_details_source)
        logger.debug("Filtering bucket name with size only from above output")
        filter_source_bucket = helper_lib.filter_bucket_name_from_output(
            bucket_details_source)
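        # No explicit settings: mirror every source bucket at its reported size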
        for item in all_bkt_list_with_size:
            if item:
                logger.debug("Running bucket operations for {}".format(item))
                bkt_name, bkt_size = item.split(',')

                bkt_size_mb = helper_lib.get_bucket_size_in_MB(
                    bucket_size, bkt_size)
                if bkt_name not in bucket_details_staged:
                    resync_process.bucket_create(bkt_name, bkt_size_mb)
                else:
                    logger.debug(
                        "Bucket {} already present in staged environment. "
                        "Recreating bucket".format(bkt_name))
                    resync_process.bucket_remove(bkt_name)
                    resync_process.bucket_create(bkt_name, bkt_size_mb)
                resync_process.xdcr_replicate(bkt_name, bkt_name)

        bucket_details_staged = resync_process.bucket_list()
        filter_staged_bucket = helper_lib.filter_bucket_name_from_output(
            bucket_details_staged)
        extra_bucket = list(
            set(filter_staged_bucket) - set(filter_source_bucket))
        logger.info("Extra bucket found to delete:{}".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)

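    # Fetch the replication UUID, then wait for every staged bucket to catch up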
    logger.debug("Finding staging_uuid & cluster_name on staging")
    staging_uuid, cluster_name_staging = resync_process.get_replication_uuid()
    bucket_details_staged = resync_process.bucket_list()
    logger.debug("Filtering bucket name from output")
    filter_bucket_list = helper_lib.filter_bucket_name_from_output(
        bucket_details_staged)
    for bkt in filter_bucket_list:
        resync_process.monitor_bucket(bkt, staging_uuid)
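
Note: helper_lib.get_bucket_size_in_MB() is called with the staged source's
bucket_size parameter and the size field parsed from the source bucket
listing. One plausible reading, assuming the listing reports RAM quotas in
bytes and bucket_size acts as an optional override in MB (a sketch, not the
plugin's actual helper):

def get_bucket_size_in_MB(bucket_size, bkt_size_bytes):
    # An explicit override configured on the staged source wins.
    if bucket_size:
        return int(bucket_size)
    # Otherwise scale the source-reported byte count down to megabytes.
    return int(bkt_size_bytes) // (1024 * 1024)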