Example #1
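Entry point of the batch replication script: it parses the command-line options, loads the configuration, collects the databases to replicate (an explicit db_list, or every database on the source when the list is empty), ensures the _replicator database and its index exist on the mediator, dispatches the replications, and finally reports the failure counts together with the batch id to reuse for incremental runs.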
def main(argv):
    # Relies on module-level state initialised elsewhere in the script:
    # the config dict, start_time, and the rm and mm helper modules.
    parse_ops(argv)
    init_config()

    logging.info('\n\n=============================================')
    logging.info('Start Time: {0}'.format(start_time))
    printWelcome()
    init_auth()
    logging.info('Configuration: \n{0}'.format(json.dumps(config, indent=4)))

    # locate all dbs for source
    print '\nReading dbs requested for replication...'
    dbs = config['db_list']
    if len(dbs) == 0:
        dbs = rm.get_dbs(config['source_url'], config['source_auth'])
    print 'Retrieved {0} dbs.  Beginning the replication process...'.format(
        len(dbs))

    # create the _replicator db on the mediator if it doesn't already exist
    rm.create_replicator(config['mediator_url'], config['mediator_auth'])

    # deploy the ddocs on the _replicator db
    running_repl_url = mm.create_repl_index(config['mediator_url'],
                                            config['mediator_auth'])

    # time to start posting replications
    batch_id = int(time.time())
    num_failed_repl, num_failed_ddocs = repl_dispatcher(
        dbs, running_repl_url, batch_id)

    # we're done replicating the list of databases
    print '\n============================================='
    print 'Processing complete.'
    print 'Start Time: {0}'.format(start_time)
    print 'End Time: {0}'.format(time.strftime('%c'))
    print 'Failed replications:  {0}'.format(num_failed_repl)
    print 'Failed filter ddocs:  {0}'.format(num_failed_ddocs)
    print '\nUse *{0}* for incremental replications built off this batch.\n'.format(
        batch_id)
    logging.info('\n=============================================')
    logging.info('Start Time: {0}'.format(start_time))
    logging.info('End Time: {0}'.format(time.strftime('%c')))
    logging.info('Failed replications:  {0}'.format(num_failed_repl))
    logging.info('Failed filter ddocs:  {0}'.format(num_failed_ddocs))
    logging.info(
        'Use {0} for incremental replications built off this batch.'.format(
            batch_id))
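
For reference, here is how such an entry point is commonly wired up. The __main__ guard below is an illustrative assumption rather than part of the original module, and it presumes the script's imports (sys, json, logging, time) and helpers (parse_ops, init_config, rm, mm) are in scope.

import sys

if __name__ == '__main__':
    # Whether the script name should be stripped here depends on
    # parse_ops(), which is not shown; sys.argv[1:] is the usual choice.
    main(sys.argv[1:])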
Example #4
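The dispatcher that main() calls. It walks the list of databases and POSTs one replication document per database to the mediator's _replicator database, throttling itself against the configured concurrency limit. For non-continuous runs it then waits for the outstanding replications to drain and removes any filter design documents it created, before returning the replication and filter-ddoc failure counts.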
def repl_dispatcher(dbs, running_repl_url, batch_id):
    # Like main(), this relies on the module-level config plus the rm, mm,
    # and fm helper modules.
    db_index = 0
    num_failed_repl = num_failed_ddocs = 0
    db_date = int(time.time())
    replications = []

    repl_options = {
        'continuous': config['continuous'],
        'worker_processes': config['worker_processes'],
        'worker_batch_size': config['worker_batch_size'],
        'http_connections': config['http_connections'],
        'connection_timeout': config['connection_timeout']
    }

    while db_index < len(dbs):
        try:
            # only spawn a new replication while we are below the concurrency
            # limit, so we don't overload the cluster; the check is skipped
            # entirely unless force_concurrency_limit is set
            if not config['force_concurrency_limit'] or mm.poll_replicator(
                    running_repl_url, config['mediator_auth'],
                    config['concurrency_limit']):

                source_db = target_db = dbs[db_index]

                # make the target db name unique if required
                if config['rename_dbs']:
                    target_db += '-{0}'.format(db_date)

                # attempt to create the target db with the new q value if desired
                if not config['use_default_q']:
                    rm.create_new_db(config['target_url'], target_db,
                                     config['target_auth'], config['new_q'])

                # build a replication doc
                repl_source = config['source_url'] + source_db
                repl_target = config['target_url'] + target_db
                doc = rm.create_repl_doc(repl_source, config['source_auth'],
                                         repl_target, config['target_auth'],
                                         config['mediator'], repl_options,
                                         batch_id, config['incremental_id'])

                # create a design document for filtering ddocs if desired
                if config['skip_ddocs']:
                    ddoc = fm.create_filter_func(
                        config['source_url'] + source_db,
                        config['source_auth'])
                    doc.update({
                        'filter':
                        '{0}/{1}'.format(ddoc['name'], ddoc['func'])
                    })

                # post the doc to the source db (i.e. the mediator)
                rm.post_repl_doc(config['mediator_url'], doc,
                                 config['mediator_auth'])
                replications.append(doc['_id'])

                # advance the index into the array of dbs
                db_index += 1
                print '[INITIATED] [{0}/{1}] Replication for {2} has been POSTed...'.format(
                    db_index, len(dbs), repl_source)

            else:
                # sleep for an arbitrary amount of time before polling again
                print 'Concurrent replication limit reached...waiting for replications to complete...'
                time.sleep(config['polling_delay'])

        # handle exceptions that may have happened
        except ReplError as re:
            logging.log(re.level, '{0}\n{1}'.format(re.msg,
                                                    json.dumps(re.r,
                                                               indent=4)))
            num_failed_repl += 1
            db_index += 1
        except FilterError as fe:
            logging.log(fe.level, '{0}\n{1}'.format(fe.msg,
                                                    json.dumps(fe.r,
                                                               indent=4)))
            num_failed_ddocs += 1
            db_index += 1
        except FatalError as xe:
            logging.log(xe.level, '{0}\nAborting script!!'.format(xe.msg))
            sys.exit()
        except Exception:
            print 'Unexpected Error!  View the log for details.'
            logging.error('Unexpected error occurred! Error: {0}'.format(
                sys.exc_info()))
            sys.exit()

    # wait for any remaining replications to finish if not continuous
    if not config['continuous']:
        print '\nWaiting for any remaining replications to complete.\n'
        while not mm.poll_replicator(running_repl_url, config['mediator_auth'],
                                     1):
            time.sleep(config['polling_delay'])

    # delete the filtering ddocs if they were created and if these replications are not continuous
    if config['skip_ddocs'] and not config['continuous']:
        print 'Deleting the ddocs used to filter the replications.\n'
        db_index = 0
        while db_index < len(dbs):
            source_db = dbs[db_index]
            try:
                fm.remove_filter_func(config['source_url'] + source_db,
                                      config['source_auth'])
                db_index += 1
            except FilterError as fe:
                logging.log(
                    fe.level, '{0}\n{1}'.format(fe.msg,
                                                json.dumps(fe.r, indent=4)))
                num_failed_ddocs += 1
                db_index += 1
            except Exception:
                print 'Unexpected Error!  View the log for details.'
                logging.error('Unexpected error occurred! Error: {0}'.format(
                    sys.exc_info()))
                sys.exit()

    # return the failure counts (failed replications, failed filter ddocs)
    return [num_failed_repl, num_failed_ddocs]
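
The dispatcher gates new work on mm.poll_replicator, whose implementation is not shown. Judging from the call sites, it returns True once the number of running replications reported at running_repl_url has dropped below the given limit. A minimal sketch of such a poll, assuming the URL is a view over active _replicator documents and using the requests library (the response shape here is an assumption):

import requests

def poll_replicator(running_repl_url, auth, limit):
    # Hypothetical sketch: count the replication docs the mediator still
    # reports as running and compare against the limit.  The real
    # mm.poll_replicator may differ.
    resp = requests.get(running_repl_url, auth=auth)
    resp.raise_for_status()
    running = len(resp.json().get('rows', []))
    return running < limit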