Example #1
async def run(loop):  # pylint: disable=too-many-locals
    """Run the main application loop for the service.

    This runs the main top level service functions for working with the Queue.
    """
    service = ServiceWorker(loop=loop, cb_handler=cb_subscription_handler)
    probe = Probes(components=[service], loop=loop)

    async def close():
        await service.close()
        my_loop = asyncio.get_running_loop()
        await asyncio.sleep(0.1, loop=my_loop)
        my_loop.stop()

    try:
        await probe.start()
        await service.connect()

        # register the signal handler
        for sig in ('SIGINT', 'SIGTERM'):
            loop.add_signal_handler(
                getattr(signal, sig),
                functools.partial(signal_handler, sig_loop=loop, task=close))

    except Exception as e:  # pylint: disable=broad-except
        # TODO tighten this error and decide when to bail on the infinite reconnect
        logger.error(e)
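The signal_handler bound with functools.partial above is not shown in these examples. A minimal sketch of what such a handler might look like, assuming it only needs to schedule the close() coroutine on the loop (the body is an assumption, not the project's actual handler):

def signal_handler(sig_loop=None, sig_nc=None, task=None):
    """Schedule the shutdown coroutine when an OS signal arrives.

    Illustrative sketch only -- the real handler is not part of these
    examples.  The event loop calls it with no arguments because everything
    it needs is pre-bound via functools.partial; sig_nc is accepted so the
    NATS examples can bind their client as well.
    """
    logger.info('Signal received, shutting down...')
    if task:
        # run close() so connections are drained cleanly before the loop stops
        sig_loop.create_task(task())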
Example #2
def process_filing(payment_token, flask_app):
    """Render the filings contained in the submission."""
    if not flask_app:
        raise QueueException('Flask App not available.')

    with flask_app.app_context():

        # Try to find the filing up to 5 times before putting the message back on the queue,
        # in case the payment token lands on the queue before it is assigned to a filing.
        counter = 1
        filing_submission = None
        while not filing_submission and counter <= 5:
            filing_submission = get_filing_by_payment_id(payment_token['paymentToken'].get('id'))
            counter += 1
            sleep(0.2)
        if not filing_submission:
            raise FilingException

        if filing_submission.status == Filing.Status.COMPLETED.value:
            logger.warning('Queue: Attempting to reprocess business.id=%s, filing.id=%s payment=%s',
                           filing_submission.business_id, filing_submission.id, payment_token)
            return

        if payment_token['paymentToken'].get('statusCode') == 'TRANSACTION_FAILED':
            # TODO - need to surface TRANSACTION_FAILED, but Filings manages its own status
            # filing_submission.status = Filing.Status.ERROR
            filing_submission.payment_completion_date = datetime.datetime.utcnow()
            db.session.add(filing_submission)
            db.session.commit()
            return

        legal_filings = filing_submission.legal_filings()
        # TODO: handle case where there are no legal_filings

        uow = versioning_manager.unit_of_work(db.session)
        transaction = uow.create_transaction(db.session)

        if payment_token['paymentToken'].get('statusCode') != 'TRANSACTION_FAILED':
            if payment_token['paymentToken'].get('statusCode') != Filing.Status.COMPLETED.value:
                logger.error('Unknown payment status given: %s', payment_token['paymentToken'].get('statusCode'))
                raise QueueException

            business = Business.find_by_internal_id(filing_submission.business_id)

            for filing in legal_filings:
                if filing.get('annualReport'):
                    annual_report.process(business, filing)
                if filing.get('changeOfAddress'):
                    change_of_address.process(business, filing)
                if filing.get('changeOfDirectors'):
                    change_of_directors.process(business, filing)

            filing_submission.transaction_id = transaction.id
            db.session.add(business)

        filing_submission.payment_completion_date = datetime.datetime.utcnow()
        db.session.add(filing_submission)
        db.session.commit()
        return
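For reference, process_filing() expects the payment token in the same shape the tester in Example #3 publishes. A minimal illustrative call (the id value is made up):

payment_token = {'paymentToken': {'id': 1234, 'statusCode': 'COMPLETED'}}
process_filing(payment_token, flask_app=FLASK_APP)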
Example #3
async def run(loop, token):  # pylint: disable=too-many-locals
    """Run the main application loop for the service.

    This runs the main top level service functions for working with the Queue.
    """
    # NATS client connections
    nc = NATS()
    sc = STAN()

    async def close():
        """Close the stream and nats connections."""
        await sc.close()
        await nc.close()

    # Connection and Queue configuration.
    def nats_connection_options():
        return {
            'servers': os.getenv('NATS_SERVERS', 'nats://127.0.0.1:4222').split(','),
            'io_loop': loop,
            'error_cb': error_cb,
            'name': os.getenv('NATS_CLIENT_NAME', 'entity.filing.tester')
        }

    def stan_connection_options():
        return {
            'cluster_id': os.getenv('NATS_CLUSTER_ID', 'test-cluster'),
            'client_id': str(random.SystemRandom().getrandbits(0x58)),
            'nats': nc
        }

    def subscription_options():
        return {
            'subject': os.getenv('NATS_SUBJECT', 'entity.filings'),
            'queue': os.getenv('NATS_QUEUE', 'filing-worker'),
            'durable_name': os.getenv('NATS_QUEUE', 'filing-worker') + '_durable'
        }

    try:
        # Connect to the NATS server, and then use that for the streaming connection.
        await nc.connect(**nats_connection_options())
        await sc.connect(**stan_connection_options())

        # register the signal handler
        for sig in ('SIGINT', 'SIGTERM'):
            loop.add_signal_handler(getattr(signal, sig),
                                    functools.partial(signal_handler, sig_loop=loop, sig_nc=nc, task=close)
                                    )

        payload = {'paymentToken': {'id': token, 'statusCode': 'COMPLETED'}}
        await sc.publish(subject=subscription_options().get('subject'),
                         payload=json.dumps(payload).encode('utf-8'))

    except Exception as e:  # pylint: disable=broad-except
        # TODO tighten this error and decide when to bail on the infinite reconnect
        logger.error(e)
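A plausible entry point for this tester, mirroring the pattern used in Example #8 (the token value is a placeholder, not a real payment id):

if __name__ == '__main__':
    event_loop = asyncio.get_event_loop()
    # publish a single COMPLETED payment token and exit
    event_loop.run_until_complete(run(event_loop, token=1234))
    event_loop.close()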
Example #4
async def conn_lost_cb(error):
    logger.info('Connection lost:%s', error)
    for i in range(0, 100):
        try:
            logger.info('Reconnecting, attempt=%i...', i)
            await self.connect()
        except Exception as e:  # pylint: disable=broad-except; catch all errors from client framework
            logger.error('Error %s', e, exc_info=True, stack_info=True)
            continue
        break
Example #5
async def cb_subscription_handler(msg: nats.aio.client.Msg):
    """Use Callback to process Queue Msg objects."""
    try:
        logger.info('Received raw message seq:%s, data=  %s', msg.sequence, msg.data.decode())
        payment_token = extract_payment_token(msg)
        logger.debug('Extracted payment token: %s', payment_token)
        process_filing(payment_token, FLASK_APP)
    except OperationalError as err:
        logger.error('Queue Blocked - Database Issue: %s', json.dumps(payment_token), exc_info=True)
        raise err  # We don't want to handle the error, as a DB down would drain the queue
    except (QueueException, Exception):  # pylint: disable=broad-except
        # Catch Exception so that any error is still caught and the message is removed from the queue
        capture_message('Queue Error:' + json.dumps(payment_token), level='error')
        logger.error('Queue Error: %s', json.dumps(payment_token), exc_info=True)
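The extract_payment_token() helper is referenced but not shown here. A minimal sketch, assuming the message payload is exactly the UTF-8 encoded JSON published by the tester in Example #3:

def extract_payment_token(msg: nats.aio.client.Msg) -> dict:
    """Return the payment token dict carried in the queue message.

    Sketch only: assumes msg.data holds the JSON document published by the
    tester, e.g. {"paymentToken": {"id": ..., "statusCode": ...}}.
    """
    return json.loads(msg.data.decode('utf-8'))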
Example #6
def process(business: Business, filing: Filing):
    """Render the annual_report onto the business model objects."""
    agm_date = filing['annualReport'].get('annualGeneralMeetingDate')
    ar_date = filing['annualReport'].get('annualReportDate')
    if agm_date and validations.annual_report.requires_agm(business):
        agm_date = datetime.date.fromisoformat(agm_date)
    if ar_date:
        ar_date = datetime.date.fromisoformat(ar_date)
    else:
        # should never get here (schema validation should prevent this from making it to the filer)
        logger.error(
            'No annualReportDate given in the annual report. Filing id: %s',
            filing.id)
    business.last_agm_date = agm_date
    business.last_ar_date = ar_date
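An illustrative filing fragment that would satisfy the lookups above (the key names come from the code; the dates are invented):

filing = {
    'annualReport': {
        'annualGeneralMeetingDate': '2019-04-15',  # hypothetical value
        'annualReportDate': '2019-04-15'           # hypothetical value
    }
}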
Example #7
def process(business: Business, filing: Filing):
    """Render the change_of_directors onto the business model objects."""
    new_directors = filing['changeOfDirectors'].get('directors')

    for new_director in new_directors:
        if 'appointed' in new_director['actions']:
            # create address
            address = create_address(new_director['deliveryAddress'],
                                     Address.DELIVERY)

            # add new director to the list
            business.directors.append(
                Director(
                    first_name=new_director['officer'].get('firstName', '').upper(),
                    middle_initial=new_director['officer'].get('middleInitial', '').upper(),
                    last_name=new_director['officer'].get('lastName', '').upper(),
                    title=new_director.get('title', '').upper(),
                    appointment_date=new_director.get('appointmentDate'),
                    cessation_date=new_director.get('cessationDate'),
                    delivery_address=address))

        if any([action != 'appointed' for action in new_director['actions']]):
            # get name of director in json for comparison *
            if 'nameChanged' not in new_director['actions']:
                new_director_name = (new_director['officer'].get('firstName')
                                     + new_director['officer'].get('middleInitial')
                                     + new_director['officer'].get('lastName'))
            else:
                new_director_name = (new_director['officer'].get('prevFirstName')
                                     + new_director['officer'].get('prevMiddleInitial')
                                     + new_director['officer'].get('prevLastName'))
            if not new_director_name:
                logger.error('Could not resolve director name from json %s.',
                             new_director)
                raise QueueException

            for director in business.directors:
                # get name of director in database for comparison *
                director_name = director.first_name + director.middle_initial + director.last_name
                if director_name.upper() == new_director_name.upper():
                    update_director(director, new_director)
                    break
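An illustrative changeOfDirectors fragment for an 'appointed' action (the key names mirror the lookups above; every value is invented and the address body is elided):

filing = {
    'changeOfDirectors': {
        'directors': [{
            'officer': {'firstName': 'Jane', 'middleInitial': '', 'lastName': 'Doe'},
            'title': '',
            'actions': ['appointed'],
            'appointmentDate': '2019-04-15',
            'cessationDate': None,
            'deliveryAddress': {}  # address JSON consumed by create_address(), not shown here
        }]
    }
}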
Example #8
async def run(loop):  # pylint: disable=too-many-locals
    """Run the main application loop for the service.

    This runs the main top level service functions for working with the Queue.
    """
    service = ServiceWorker(loop=loop, cb_handler=cb_subscription_handler)
    probe = Probes(components=[service], loop=loop)

    async def close():
        await service.close()
        my_loop = asyncio.get_running_loop()
        await asyncio.sleep(0.1, loop=my_loop)
        my_loop.stop()

    try:
        await probe.start()
        await service.connect()

        # register the signal handler
        for sig in ('SIGINT', 'SIGTERM'):
            loop.add_signal_handler(
                getattr(signal, sig),
                functools.partial(signal_handler, sig_loop=loop, task=close))

    except Exception as e:  # pylint: disable=broad-except
        # TODO tighten this error and decide when to bail on the infinite reconnect
        logger.error(e)


if __name__ == '__main__':
    try:
        event_loop = asyncio.get_event_loop()
        event_loop.run_until_complete(run(event_loop))
        event_loop.run_forever()
    except Exception as err:  # pylint: disable=broad-except; Catching all errors from the frameworks
        logger.error('problem in running the service: %s',
                     err,
                     stack_info=True,
                     exc_info=True)
    finally:
        event_loop.close()
Example #9
async def run(loop):  # pylint: disable=too-many-locals
    """Run the main application loop for the service.

    This runs the main top level service functions for working with the Queue.
    """
    # NATS client connections
    nc = NATS()
    sc = STAN()

    async def reconnected_cb():
        """Connect to the NATS services.

        This gets called when the client successfully connects, or reconnects.
        """
        logger.info('Connected to NATS at %s...', nc.connected_url.netloc)

    async def close():
        """Close the stream and nats connections."""
        await sc.close()
        await nc.close()

    # Connection and Queue configuration.
    def nats_connection_options():
        return {
            'servers': os.getenv('NATS_SERVERS', 'nats://127.0.0.1:4222').split(','),
            'io_loop': loop,
            'error_cb': error_cb,
            'closed_cb': closed_cb,
            'reconnected_cb': reconnected_cb,
            'name': os.getenv('NATS_CLIENT_NAME', 'entity.filing.worker')
        }

    def stan_connection_options():
        return {
            'cluster_id': os.getenv('NATS_CLUSTER_ID', 'test-cluster'),
            'client_id': str(random.SystemRandom().getrandbits(0x58)),
            'nats': nc
        }

    def subscription_options():
        return {
            'subject': os.getenv('NATS_SUBJECT', 'entity.filings'),
            'queue': os.getenv('NATS_QUEUE', 'filing-worker'),
            'durable_name': os.getenv('NATS_QUEUE', 'filing-worker') + '_durable',
            'cb': cb_subscription_handler
        }

    try:
        # Connect to the NATS server, and then use that for the streaming connection.
        await nc.connect(**nats_connection_options())
        await sc.connect(**stan_connection_options())

        # Attach the callback queue
        await sc.subscribe(**subscription_options())
        logger.info('Subscribed the callback: %s to the queue: %s.',
                    subscription_options().get('cb').__name__,
                    subscription_options().get('queue'))

        # register the signal handler
        for sig in ('SIGINT', 'SIGTERM'):
            loop.add_signal_handler(
                getattr(signal, sig),
                functools.partial(signal_handler,
                                  sig_loop=loop,
                                  sig_nc=nc,
                                  task=close))

    except Exception as e:  # pylint: disable=broad-except
        # TODO tighten this error and decide when to bail on the infinite reconnect
        logger.error(e)
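The error_cb and closed_cb callbacks referenced in nats_connection_options() are not shown in these examples. Minimal sketches, assuming they only log (the real callbacks may do more):

async def error_cb(e):
    """Log client errors raised by the NATS connection.  Sketch only."""
    logger.error('NATS error: %s', e)


async def closed_cb():
    """Log when the connection to NATS is fully closed.  Sketch only."""
    logger.info('Connection to NATS closed.')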