Example #1
def start_staging_xdcr(staged_source, repository, source_config):
    start_staging = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())

    logger.debug("Enabling the D_SOURCE:{}".format(source_config.pretty_name))
    dsource_type = staged_source.parameters.d_source_type
    rx_connection = staged_source.staged_connection

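    # Reset the staging instance: stop Couchbase, drop the live config,
    # restore the previously saved config, and start the service again.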
    start_staging.stop_couchbase()
    start_staging.delete_config()
    # TODO error handling
    start_staging.restore_config(what='current')
    start_staging.start_couchbase()

    # already_set_up_done, name_conflict = start_staging.check_duplicate_replication(
    #     start_staging.parameters.stg_cluster_name)
    # if already_set_up_done:
    #     logger.info("No need to set up XDCR again")
    # elif name_conflict:
    #     raise DuplicateClusterError("Cluster is already present")
    # else:
    #     logger.info("First time XDCR set up")
    #     start_staging.xdcr_setup()

    start_staging.setup_replication()

    config_dir = start_staging.create_config_dir()
    msg = "dSource Creation / Snapsync for dSource {} is in progress".format(
        source_config.pretty_name)
    helper_lib.write_file(
        rx_connection, msg,
        config_dir + "/" + helper_lib.get_sync_lock_file_name(
            dsource_type, source_config.pretty_name))
    logger.debug("D_SOURCE:{} enabled".format(source_config.pretty_name))
Example #2
def _do_provision(provision_process, snapshot):
    bucket_list_and_size = snapshot.bucket_list

    if not bucket_list_and_size:
        raise FailedToReadBucketDataFromSnapshot("Snapshot Data is empty.")
    else:
        logger.debug(
            "snapshot bucket data is: {}".format(bucket_list_and_size))

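    # each colon-separated entry is a "name,size-in-bytes" pair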
    for item in bucket_list_and_size.split(':'):
        logger.debug("Creating bucket is: {}".format(item))
        # try:
        bucket_name = item.split(',')[0]
        bkt_size_mb = int(item.split(',')[1].strip()) // 1024 // 1024
        provision_process.bucket_create(bucket_name, bkt_size_mb)
        helper_lib.sleepForSecond(2)

    # getting config directory path
    directory = provision_process.get_config_directory()

    # Make the directory and change its permissions to 755.
    provision_process.make_directory(directory)
    # This file path stores the bucket information carried in the snapshot
    config_file_path = provision_process.get_config_file_path()

    content = "BUCKET_LIST=" + _find_bucket_name_from_snapshot(snapshot)

    # Write the bucket list to the .config file inside the .delphix folder
    helper_lib.write_file(provision_process.connection, content,
                          config_file_path)
Example #3
def start_staging_xdcr(staged_source, repository, source_config):
    start_staging = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())

    logger.debug("Enabling the D_SOURCE:{}".format(source_config.pretty_name))
    dsource_type = staged_source.parameters.d_source_type
    rx_connection = staged_source.staged_connection
    start_staging.start_couchbase()

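    # check_duplicate_replication reports whether this staging cluster is
    # already set up as an XDCR target and whether its name collides with
    # an existing cluster reference.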
    already_set_up_done, name_conflict = start_staging.check_duplicate_replication(
        start_staging.parameters.stg_cluster_name)
    if already_set_up_done:
        logger.info("No need to XDCR setup again")
    elif name_conflict:
        raise DuplicateClusterError("Cluster is already present")
    else:
        logger.info("First time XDCR set up")
        start_staging.xdcr_setup()

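    # Replicate either the explicitly configured buckets or, if none were
    # configured, every bucket found on the source cluster.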
    config_setting = staged_source.parameters.config_settings_prov

    if len(config_setting) > 0:
        for config_bucket in config_setting:
            logger.debug("Creating replication for {}".format(
                config_bucket["bucketName"]))
            start_staging.xdcr_replicate(config_bucket["bucketName"],
                                         config_bucket["bucketName"])
    else:
        bucket_details_source = start_staging.source_bucket_list()
        all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(
            bucket_details_source)
        for items in all_bkt_list_with_size:
            bkt_name, bkt_size = items.split(',')
            logger.debug("Creating replication for {}".format(bkt_name))
            start_staging.xdcr_replicate(bkt_name, bkt_name)

    config_dir = start_staging.create_config_dir()
    msg = "dSource Creation / Snapsync for dSource {} is in progress".format(
        source_config.pretty_name)
    helper_lib.write_file(
        rx_connection, msg,
        config_dir + "/" + helper_lib.get_sync_lock_file_name(
            dsource_type, source_config.pretty_name))
    logger.debug("D_SOURCE:{} enabled".format(source_config.pretty_name))
Example #4
def check_for_concurrent(couchbase_obj, dsource_type, dsource_name, couchbase_host):
    config_dir = couchbase_obj.create_config_dir()

    config.SYNC_FILE_NAME = config_dir + "/" + helper_lib.get_sync_lock_file_name(dsource_type, dsource_name)

    delphix_config_dir = couchbase_obj.get_config_directory()
    logger.debug("Check if we have config dir in Delphix storage")
    if not helper_lib.check_dir_present(couchbase_obj.connection, delphix_config_dir):
        logger.debug("make a Delphix storage dir {}".format(delphix_config_dir))
        couchbase_obj.make_directory(delphix_config_dir)

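    # If another dSource already owns the sync lock file, a sync is in
    # progress on this staging host and this job must not proceed.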
    if not verify_sync_lock_file_for_this_job(couchbase_obj.connection, config.SYNC_FILE_NAME):
        config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
        logger.debug("Sync file is already created by other dSource")
        raise MultipleXDCRSyncError("Sync file is already created by other dSource")
    else:
        # create the sync file
        msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(dsource_name, couchbase_host)
        helper_lib.write_file(couchbase_obj.connection, msg, config.SYNC_FILE_NAME)
Example #5
def pre_snapshot_xdcr(staged_source, repository, source_config,
                      input_parameters):
    logger.info("In Pre snapshot...")
    pre_snapshot_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())
    config.SNAP_SYNC_FILE_NAME = (pre_snapshot_process.create_config_dir() +
                                  "/" + db_commands.constants.LOCK_SNAPSYNC_OPERATION)
    # Don't worry about the sync.lck file, as it is never deleted, even in post-snapshot.
    if helper_lib.check_file_present(staged_source.staged_connection,
                                     config.SNAP_SYNC_FILE_NAME):
        config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
        raise MultipleSyncError()
    else:
        logger.debug("Creating lock file...")
        msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(
            source_config.pretty_name, input_parameters.couchbase_host)
        helper_lib.write_file(staged_source.staged_connection, msg,
                              config.SNAP_SYNC_FILE_NAME)
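    # Stop Couchbase so the snapshot captures consistent data files.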
    logger.info("Stopping Couchbase")
    pre_snapshot_process.stop_couchbase()
Example #6
def resync_xdcr(staged_source, repository, source_config, input_parameters):
    dsource_type = input_parameters.d_source_type
    dsource_name = source_config.pretty_name
    bucket_size = staged_source.parameters.bucket_size
    rx_connection = staged_source.staged_connection
    resync_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(
            repository).set_source_config(source_config).build())
    config_dir = resync_process.create_config_dir()
    config.SYNC_FILE_NAME = config_dir + "/" + helper_lib.get_sync_lock_file_name(
        dsource_type, dsource_name)

    if not verify_sync_lock_file_for_this_job(rx_connection,
                                              config.SYNC_FILE_NAME):
        config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
        logger.debug("Sync file is already created by other dSource")
        raise MultipleXDCRSyncError(
            "Sync file is already created by other dSource")
    else:
        # create the sync file
        msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(
            dsource_name, input_parameters.couchbase_host)
        helper_lib.write_file(rx_connection, msg, config.SYNC_FILE_NAME)

    resync_process.restart_couchbase()
    resync_process.node_init()
    resync_process.cluster_init()
    already_set_up_done, name_conflict = resync_process.check_duplicate_replication(
        resync_process.parameters.stg_cluster_name)
    if already_set_up_done:
        logger.info("No need to XDCR setup again")
    elif name_conflict:
        raise DuplicateClusterError("Cluster is already present")
    else:
        logger.info("First time XDCR set up")
        resync_process.xdcr_setup()
    # common steps for both XDCR & CB backup
    logger.debug("Finding source and staging bucket list")
    bucket_details_source = resync_process.source_bucket_list()
    bucket_details_staged = resync_process.bucket_list()
    config_setting = staged_source.parameters.config_settings_prov
    logger.debug(
        "Bucket names passed for configuration: {}".format(config_setting))
    bucket_configured_staged = []
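    # Mirror only the configured buckets when config settings are provided;
    # otherwise mirror every bucket present on the source.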
    if len(config_setting) > 0:
        logger.debug("Getting bucket information from config")
        for config_bucket in config_setting:
            bucket_configured_staged.append(config_bucket["bucketName"])
            logger.debug(
                "Filtering bucket name and size from the output above")
            bkt_name_size = helper_lib.get_bucket_name_with_size(
                bucket_details_source, config_bucket["bucketName"])
            bkt_size_mb = helper_lib.get_bucket_size_in_MB(
                bucket_size,
                bkt_name_size.split(",")[1])

            if config_bucket["bucketName"] not in bucket_details_staged:
                resync_process.bucket_create(config_bucket["bucketName"],
                                             bkt_size_mb)
            else:
                logger.debug(
                    "Bucket {} already present in staged environment. "
                    "Recreating it".format(config_bucket["bucketName"]))
                resync_process.bucket_remove(config_bucket["bucketName"])
                resync_process.bucket_create(config_bucket["bucketName"],
                                             bkt_size_mb)
            resync_process.xdcr_replicate(config_bucket["bucketName"],
                                          config_bucket["bucketName"])

        logger.debug("Finding buckets present at staged server")
        bucket_details_staged = resync_process.bucket_list()
        filter_bucket_list = helper_lib.filter_bucket_name_from_output(
            bucket_details_staged)
        extra_bucket = list(
            set(filter_bucket_list) - set(bucket_configured_staged))

        logger.debug("Extra bucket found to delete:{} ".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)
    else:
        logger.debug("Finding buckets present at staged server with size")
        all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(
            bucket_details_source)
        logger.debug("Filtering bucket name with size only from above output")
        filter_source_bucket = helper_lib.filter_bucket_name_from_output(
            bucket_details_source)
        for items in all_bkt_list_with_size:
            if items:
                logger.debug("Running bucket operations for {}".format(items))
                bkt_name, bkt_size = items.split(',')

                bkt_size_mb = helper_lib.get_bucket_size_in_MB(
                    bucket_size, bkt_size)
                if bkt_name not in bucket_details_staged:
                    resync_process.bucket_create(bkt_name, bkt_size_mb)
                else:
                    logger.debug(
                        "Bucket {} already present in staged environment. "
                        "Recreating it".format(bkt_name))
                    resync_process.bucket_remove(bkt_name)
                    resync_process.bucket_create(bkt_name, bkt_size_mb)
                resync_process.xdcr_replicate(bkt_name, bkt_name)

        bucket_details_staged = resync_process.bucket_list()
        filter_staged_bucket = helper_lib.filter_bucket_name_from_output(
            bucket_details_staged)
        extra_bucket = list(
            set(filter_staged_bucket) - set(filter_source_bucket))
        logger.info("Extra bucket found to delete:{}".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)

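    # Monitor each staged bucket's replication using the staging cluster's
    # replication UUID before finishing the resync.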
    logger.debug("Finding staging_uuid & cluster_name on staging")
    staging_uuid, cluster_name_staging = resync_process.get_replication_uuid()
    bucket_details_staged = resync_process.bucket_list()
    logger.debug("Filtering bucket name from output")
    filter_bucket_list = helper_lib.filter_bucket_name_from_output(
        bucket_details_staged)
    for bkt in filter_bucket_list:
        resync_process.monitor_bucket(bkt, staging_uuid)
Example #7
def resync_cbbkpmgr(staged_source, repository, source_config, input_parameters):
    dsource_type = input_parameters.d_source_type
    bucket_size = staged_source.parameters.bucket_size
    rx_connection = staged_source.staged_connection
    resync_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config(
            source_config).build())

    config_dir = resync_process.create_config_dir()
    config.SYNC_FILE_NAME = config_dir + "/" + get_sync_lock_file_name(dsource_type, source_config.pretty_name)
    src_bucket_info_filename = db_commands.constants.SRC_BUCKET_INFO_FILENAME
    src_bucket_info_filename = os.path.dirname(config_dir) + "/" + src_bucket_info_filename
    logger.debug("src_bucket_info_filename = {}".format(src_bucket_info_filename))

    if helper_lib.check_file_present(rx_connection, config.SYNC_FILE_NAME):
        logger.debug("Sync file is already created by other process")
        config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
        raise MultipleSyncError("Sync file is already created by other process")
    else:
        # create the sync file
        msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(source_config.pretty_name,
                                                                                            input_parameters.couchbase_host)
        helper_lib.write_file(rx_connection, msg, config.SYNC_FILE_NAME)

    resync_process.restart_couchbase()
    resync_process.node_init()
    resync_process.cluster_init()
    logger.debug("Finding source and staging bucket list")
    bucket_details_source = resync_process.source_bucket_list_offline(filename=src_bucket_info_filename)
    bucket_details_staged = resync_process.bucket_list()

    config_setting = staged_source.parameters.config_settings_prov
    logger.debug("Bucket names passed for configuration: {}".format(config_setting))

    bucket_configured_staged = []
    if len(config_setting) > 0:
        logger.debug("Getting bucket information from config")
        for config_bucket in config_setting:
            bucket_configured_staged.append(config_bucket["bucketName"])
            logger.debug("Filtering bucket name with size only from above output")
            bkt_name_size = helper_lib.get_bucket_name_with_size(bucket_details_source, config_bucket["bucketName"])
            bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_name_size.split(",")[1])

            if config_bucket["bucketName"] not in bucket_details_staged:
                resync_process.bucket_create(config_bucket["bucketName"], bkt_size_mb)
            else:
                logger.debug("Bucket {} already present in staged environment. Recreating bucket ".format(
                    config_bucket["bucketName"]))
                resync_process.bucket_remove(config_bucket["bucketName"])
                resync_process.bucket_create(config_bucket["bucketName"], bkt_size_mb)

        logger.debug("Finding buckets present at staged server")
        bucket_details_staged = resync_process.bucket_list()
        filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
        extra_bucket = list(set(filter_bucket_list) - set(bucket_configured_staged))

        logger.debug("Extra bucket found to delete:{} ".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)
    else:
        logger.debug("Finding buckets present at staged server with size")
        all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(bucket_details_source)
        logger.debug("Filtering bucket name with size only from above output")
        filter_source_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_source)
        for items in all_bkt_list_with_size:
            if items:
                logger.debug("Running bucket operations for {}".format(items))
                bkt_name, bkt_size = items.split(',')

                bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_size)
                if bkt_name not in bucket_details_staged:
                    resync_process.bucket_create(bkt_name, bkt_size_mb)
                else:
                    logger.debug(
                        "Bucket {} already present in staged environment. Recreating it".format(bkt_name))
                    resync_process.bucket_remove(bkt_name)
                    resync_process.bucket_create(bkt_name, bkt_size_mb)

        bucket_details_staged = resync_process.bucket_list()
        filter_staged_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
        extra_bucket = list(set(filter_staged_bucket) - set(filter_source_bucket))
        logger.info("Extra bucket found to delete:{}".format(extra_bucket))
        for bucket in extra_bucket:
            resync_process.bucket_remove(bucket)

    bucket_details_staged = resync_process.bucket_list()
    filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
    csv_bucket_list = ",".join(filter_bucket_list)
    logger.debug("Started CB backup manager")
    resync_process.cb_backup_full(csv_bucket_list)
Example #8
def pre_snapshot_cbbkpmgr(staged_source, repository, source_config, input_parameters):
    pre_snapshot_process = CouchbaseOperation(
        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config(
            source_config).build())
    bucket_size = input_parameters.bucket_size
    rx_connection = staged_source.staged_connection
    config_dir = pre_snapshot_process.create_config_dir()
    config.SNAP_SYNC_FILE_NAME = config_dir + "/" + db_commands.constants.LOCK_SNAPSYNC_OPERATION
    src_bucket_info_filename = db_commands.constants.SRC_BUCKET_INFO_FILENAME
    src_bucket_info_filename = os.path.dirname(config_dir) + "/" + src_bucket_info_filename

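    # A leftover snap-sync lock file means another SnapSync is still running
    # against this staging host.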
    if helper_lib.check_file_present(rx_connection, config.SNAP_SYNC_FILE_NAME):
        logger.debug("File path is already created {}".format(config.SNAP_SYNC_FILE_NAME))
        config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
        raise MultipleSnapSyncError("SnapSync file is already created by other process")
    else:
        logger.debug("Creating lock file...")
        msg = "dSource Creation / Snapsync for dSource {} is in progress. Same staging server {} cannot be used for other operations".format(
            source_config.pretty_name, input_parameters.couchbase_host)
        helper_lib.write_file(rx_connection, msg, config.SNAP_SYNC_FILE_NAME)
        logger.debug("Re-ingesting from latest backup...")
        pre_snapshot_process.start_couchbase()
        pre_snapshot_process.node_init()
        pre_snapshot_process.cluster_init()
        bucket_details_source = pre_snapshot_process.source_bucket_list_offline(
            filename=src_bucket_info_filename)
        bucket_details_staged = pre_snapshot_process.bucket_list()
        config_setting = staged_source.parameters.config_settings_prov
        logger.debug("Buckets name passed for configuration: {}".format(config_setting))
        bucket_configured_staged = []
        if len(config_setting) != 0:
            logger.debug("Inside config")
            for config_bucket in config_setting:
                logger.debug("Adding bucket names provided in config settings")
                bucket_configured_staged.append(config_bucket["bucketName"])
                bkt_name_size = helper_lib.get_bucket_name_with_size(bucket_details_source,
                                                                     config_bucket["bucketName"])
                bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_name_size.split(",")[1])

                if config_bucket["bucketName"] not in bucket_details_staged:
                    pre_snapshot_process.bucket_create(config_bucket["bucketName"], bkt_size_mb)
                else:
                    pre_snapshot_process.bucket_remove(config_bucket["bucketName"])
                    pre_snapshot_process.bucket_create(config_bucket["bucketName"], bkt_size_mb)
            bucket_details_staged = pre_snapshot_process.bucket_list()
            filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
            extra_bucket = list(set(filter_bucket_list) - set(bucket_configured_staged))
            logger.debug("Extra bucket found :{}".format(extra_bucket))
            for bucket in extra_bucket:
                logger.debug("Deleting bucket {}".format(bucket))
                pre_snapshot_process.bucket_remove(bucket)
        else:
            all_bkt_list_with_size = helper_lib.get_all_bucket_list_with_size(bucket_details_source)
            filter_source_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_source)
            logger.info("Creating the buckets")
            for items in all_bkt_list_with_size:
                if items:
                    bkt_name, bkt_size = items.split(',')
                    bkt_size_mb = get_bucket_size_in_MB(bucket_size, bkt_size)
                    if bkt_name not in bucket_details_staged:
                        pre_snapshot_process.bucket_create(bkt_name, bkt_size_mb)
                    else:
                        logger.info(
                            "Bucket {} already present in staged environment. Recreating it".format(
                                bkt_name))
                        pre_snapshot_process.bucket_remove(bkt_name)
                        pre_snapshot_process.bucket_create(bkt_name, bkt_size_mb)

            bucket_details_staged = pre_snapshot_process.bucket_list()
            filter_staged_bucket = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
            extra_bucket = list(set(filter_staged_bucket) - set(filter_source_bucket))
            logger.info("Extra bucket found :{}".format(extra_bucket))
            for bucket in extra_bucket:
                pre_snapshot_process.bucket_remove(bucket)

        bucket_details_staged = pre_snapshot_process.bucket_list()
        filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
        csv_bucket_list = ",".join(filter_bucket_list)
        pre_snapshot_process.cb_backup_full(csv_bucket_list)
        logger.info("Re-ingesting from latest backup complete.")

    logger.info("Stopping Couchbase")
    pre_snapshot_process.stop_couchbase()