Example #1
def initiate_migration(from_config, to_config, remove_on_source=False, do_lock=True):
    """
    Start a migration operation from one archive config to another, optionally removing the data on the source and optionally holding a global lock for the duration.

    Expects the input configs to be already validated and normalized.

    :param from_config: validated/normalized config for the source archive
    :param to_config: validated/normalized config for the destination archive
    :param remove_on_source: if True, delete each document from the source after it is successfully migrated
    :param do_lock: if True, hold the global migration lock for the duration of the operation
    :return: None
    """

    logger.info('Initializing migration from {} to {}'.format(from_config, to_config))


    with migration_context(from_config, to_config, do_lock=do_lock) as context:
        with session_scope() as db:
            # Load metadata for all documents currently stored by the source driver
            to_migrate = [
                (record.userId, record.bucket, record.archiveId, record.content_url)
                for record in db.query(ArchiveMetadata).filter(
                    ArchiveMetadata.content_url.like(context.from_archive.primary_client.__uri_scheme__ + '://%')
                )
            ]

            task_record = ArchiveMigrationTask()
            task_record.archive_documents_to_migrate = len(to_migrate)
            task_record.archive_documents_migrated = 0
            task_record.migrate_from_driver = context.from_archive.primary_client.__config_name__
            task_record.migrate_to_driver = context.to_archive.primary_client.__config_name__
            task_record.state = 'running'
            task_record.started_at = datetime.datetime.utcnow()

            task_record.executor_id = get_threadbased_id()

            db.add(task_record)
            db.flush()
            task_id = task_record.id
            logger.info('Migration Task Id: {}'.format(task_id))

        logger.info('Entering main migration loop')
        logger.info('Migrating {} documents'.format(len(to_migrate)))
        counter = 0
        result_state = 'failed'

        try:
            for (userId, bucket, archiveId, content_url) in to_migrate:

                try:
                    # Use high-level archive operations to ensure compression etc are updated appropriately
                    data = context.from_archive.get(userId, bucket, archiveId)
                    context.to_archive.put(userId, bucket, archiveId, data)

                    if remove_on_source:
                        if context.from_archive.primary_client.__config_name__ != context.to_archive.primary_client.__config_name__:
                            logger.info('Deleting document on source after successful migration to destination. Src = {}'.format(content_url))
                            # Only delete after commit is complete
                            try:
                                context.from_archive.primary_client.delete_by_uri(content_url)
                            except Exception as e:
                                logger.exception('Error cleaning up old record with uri: {}. Aborting migration'.format(content_url))
                                raise
                        else:
                            logger.info('Skipping removal of documents on source because source and dest drivers are the same')
                    else:
                        logger.info('Skipping removal of document on source driver because configured to leave source data.')
                    counter += 1
                except Exception as e:
                    logger.exception('Error migrating content url: {} from {} to {}'.format(content_url, context.from_archive.primary_client.__config_name__, context.to_archive.primary_client.__config_name__))
            else:
                # for-else: runs only if the loop completed without an uncaught exception
                result_state = 'complete'

        finally:
            with session_scope() as db:
                db.add(task_record)
                db.refresh(task_record)
                task_record.last_state = task_record.state
                task_record.state = result_state
                task_record.ended_at = datetime.datetime.utcnow()
                task_record.archive_documents_migrated = counter
                logger.info('Migration result summary: {}'.format(json.dumps(task_record.to_json())))
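A minimal usage sketch for the example above (not from the source). It assumes the configs have already been validated and normalized; the dict shapes and driver names here are illustrative, not the library's actual schema.

src_config = {'storage_driver': {'name': 'db', 'config': {}}}  # hypothetical source config
dst_config = {'storage_driver': {'name': 's3', 'config': {}}}  # hypothetical destination config

# Copy every document to the destination driver, delete each source copy after a
# successful put, and hold the global migration lock for the whole run.
initiate_migration(src_config, dst_config, remove_on_source=True, do_lock=True)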
Example #2
def run_target_with_lease(account, lease_id, target, ttl=60, client_id=None, autorefresh=True, *args, **kwargs):
    """
    Run a handler within the context of a lease that is auto-refreshed as long as the handler runs.

    Uses a thread for the handler and a monitor to watch state and update the lease ttl.

    Leases are fairly slow to actuate, so use this mechanism for longer-running tasks where the lease duration is expected to exceed 10 seconds.

    :param account: account to use for the queue client; may be None for the system user
    :param lease_id: id of the lease to acquire and refresh
    :param target: callable to run in the handler thread
    :param ttl: lease ttl in seconds; the lease is refreshed once half the ttl has elapsed
    :param client_id: id to acquire the lease with; defaults to a thread-based id
    :param autorefresh: if True, refresh the lease while the handler thread is alive
    :param args: positional args passed through to target
    :param kwargs: keyword args passed through to target
    :return: None
    """
    handler_thread = threading.Thread(target=target, args=args, kwargs=kwargs)

    client = internal_client_for(SimpleQueueClient, account)

    # Ensure task lease exists for acquisition and create if not found
    lease_resp = client.describe_lease(lease_id)
    if not lease_resp:
        lease_resp = client.create_lease(lease_id)

    if not lease_resp:
        raise Exception('Cannot locate or create a lease with id {}'.format(lease_id))

    # Acquire the task lease and run the task
    lease = None
    try:
        my_id = get_threadbased_id() if client_id is None else client_id
        try:
            lease = client.acquire_lease(lease_id, client_id=my_id, ttl=ttl)
        except Exception as e:
            raise LeaseAcquisitionFailedError('Error during lease acquisition: {}'.format(e))

        # Raised outside the try/except above so it is not re-wrapped as an acquisition failure
        if not lease:
            raise LeaseUnavailableError('Another owner holds lease {}, and did not release within timeout {}'.format(lease_id, ttl))

        logger.debug('Got lease: {}'.format(lease))

        t = time.time()
        logger.debug('Starting target={} with lease={} and client_id={}'.format(target.__name__, lease_id, lease['held_by']))
        handler_thread.start()

        if autorefresh:
            # Run the task thread and monitor it, refreshing the task lease as needed
            while handler_thread.is_alive():
                # If we're halfway to the timeout, refresh to have a safe buffer
                if time.time() - t > (ttl / 2):
                    # Refresh the lease, retrying up to 3 times
                    for _ in range(3):
                        try:
                            resp = client.refresh_lease(lease_id=lease['id'], client_id=lease['held_by'], epoch=lease['epoch'], ttl=ttl)
                            logger.debug('Lease {} refreshed with response: {}'.format(lease_id, resp))
                            if resp:
                                lease = resp
                                t = time.time()
                                break
                        except Exception as e:
                            logger.exception('Error updating lease {}'.format(lease['id']))
                    else:
                        # for-else: reached only when no refresh attempt succeeded (no break)
                        logger.debug('Lease refresh failed after retries. Lease {} may be lost due to timeout'.format(lease_id))

                handler_thread.join(timeout=1)
        else:
            handler_thread.join()

        logger.debug('Target thread returned')
    except (LeaseAcquisitionFailedError, LeaseUnavailableError) as e:
        logger.debug('Could not acquire lease, but this may be normal: {}'.format(e))
        raise
    except Exception as e:
        logger.debug('Attempting to get lease {} failed: {}'.format(lease_id, e))
        raise
    finally:
        try:
            if lease:
                resp = client.release_lease(lease_id=lease['id'], client_id=lease['held_by'], epoch=lease['epoch'])
                logger.debug('Lease {} released with response: {}'.format(lease_id, resp))
            else:
                logger.debug('No lease found to release.')
        except Exception as e:
            logger.exception('Error releasing lease. Lease will expire on its own. Err: {}'.format(str(e)))
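A minimal usage sketch for run_target_with_lease (not from the source); the worker function and lease name are hypothetical, and extra keyword args are forwarded to the target via **kwargs.

def rebuild_index(batch_size=100):
    # Hypothetical long-running worker; stands in for any task body.
    logger.info('Rebuilding index in batches of {}'.format(batch_size))

try:
    # Run under the 'index_rebuild' lease as the system user (account=None),
    # refreshing the 60-second ttl while the worker thread is alive.
    run_target_with_lease(None, 'index_rebuild', rebuild_index, ttl=60, batch_size=100)
except (LeaseAcquisitionFailedError, LeaseUnavailableError):
    # Another worker holds the lease; skip this cycle and try again later.
    pass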