コード例 #1
0
    def setUp(self):
        """Wrap the app in a dummy-tracer middleware and build a test client."""
        flask_app = self.get_app()
        self.tracer = get_dummy_tracer()
        self.traced_app = TraceMiddleware(flask_app, self.tracer)

        # enable Flask testing mode and expose the WSGI test client
        flask_app.config['TESTING'] = True
        self.app = flask_app.test_client()
コード例 #2
0
 def test_double_instrumentation(self):
     """Flask must never be traced twice when `ddtrace-run` and
     `TraceMiddleware` are combined. The middleware local below is
     assigned on purpose: reproducing the bug requires the test scope
     to hold a strong reference."""
     middleware = TraceMiddleware(self.app, self.tracer)  # noqa: F841
     response = self.client.get("/child")
     assert response.status_code == 200
     spans = self.tracer.writer.pop()
     assert len(spans) == 2
コード例 #3
0
 def test_double_instrumentation(self):
     """Flask must never be traced twice when `ddtrace-run` and
     `TraceMiddleware` are combined. The middleware local below is
     assigned on purpose: the test scope must keep a strong
     reference or the problem cannot be reproduced."""
     middleware = TraceMiddleware(self.flask_app, self.tracer)  # noqa
     response = self.app.get('/child')
     eq_(response.status_code, 200)
     spans = self.tracer.writer.pop()
     eq_(len(spans), 2)
コード例 #4
0
def run_app():
    """Create the Flask app, optionally attach Datadog tracing, and serve it."""
    flask_app = create_app()

    # This is for DataDog (Do Not Delete)
    if USE_DATADOG:
        TraceMiddleware(flask_app, tracer, service="broker-dd", distributed_tracing=False)

    # serve on the configured host/port with a threaded dev server
    host = CONFIG_SERVICES['broker_api_host']
    port = CONFIG_SERVICES['broker_api_port']
    flask_app.run(threaded=True, host=host, port=port)
コード例 #5
0
    def setUp(self):
        """Build a traced app (distributed tracing on) plus a test client."""
        self.tracer = get_dummy_tracer()
        self.app = create_app()
        self.traced_app = TraceMiddleware(
            self.app, self.tracer, service="test.flask.service", distributed_tracing=True
        )

        # enable Flask testing mode and expose the WSGI test client
        self.app.config["TESTING"] = True
        self.client = self.app.test_client()
コード例 #6
0
def run_app():
    """Run the validator application and poll SQS forever.

    Builds the Flask app (traced with Datadog when USE_DATADOG is set), then
    loops receiving SQS messages and dispatching each to file generation or
    job validation based on its 'validation_type' message attribute.
    """
    app = create_app()

    # This is for DataDog (Do Not Delete)
    if USE_DATADOG:
        TraceMiddleware(app,
                        tracer,
                        service="broker-dd",
                        distributed_tracing=False)

    with app.app_context():
        current_app.debug = CONFIG_SERVICES['debug']
        local = CONFIG_BROKER['local']
        g.is_local = local
        current_app.config.from_object(__name__)

        # Future: Override config w/ environment variable, if set
        current_app.config.from_envvar('VALIDATOR_SETTINGS', silent=True)

        queue = sqs_queue()

        logger.info("Starting SQS polling")
        # NOTE(review): infinite loop by design — this process is the worker
        while True:
            # Grabs one (or more) messages from the queue
            messages = queue.receive_messages(WaitTimeSeconds=10,
                                              MessageAttributeNames=['All'])
            for message in messages:
                logger.info("Message received: %s", message.body)

                # Route on the user-defined 'validation_type' attribute:
                # 'generation' messages carry a file-generation id in the body
                msg_attr = message.message_attributes
                if msg_attr and msg_attr.get(
                        'validation_type',
                    {}).get('StringValue') == 'generation':
                    # Generating a file
                    validator_process_file_generation(message.body)
                else:
                    # Running validations (or generating a file from a Job)
                    a_agency_code = msg_attr.get(
                        'agency_code',
                        {}).get('StringValue') if msg_attr else None
                    validator_process_job(message.body, a_agency_code)

                # Delete from SQS once processed
                message.delete()

            # When you receive an empty response from the queue, wait a second before trying again
            if len(messages) == 0:
                time.sleep(1)
コード例 #7
0
 def test_double_instrumentation_config(self):
     """The most recently applied configuration must win, so users who
     combine `ddtrace-run` with an explicit `TraceMiddleware` see no
     breaking change."""
     TraceMiddleware(self.app, self.tracer, service="new-intake", distributed_tracing=False)
     assert self.app._service == "new-intake"
     assert self.app._use_distributed_tracing is False
     response = self.client.get("/child")
     assert response.status_code == 200
     spans = self.tracer.writer.pop()
     assert len(spans) == 2
コード例 #8
0
 def test_double_instrumentation_config(self):
     """The most recently applied configuration must win, so users who
     combine `ddtrace-run` with an explicit `TraceMiddleware` see no
     breaking change."""
     TraceMiddleware(self.flask_app, self.tracer, service='new-intake', distributed_tracing=False)
     eq_(self.flask_app._service, 'new-intake')
     ok_(self.flask_app._use_distributed_tracing is False)
     response = self.app.get('/child')
     eq_(response.status_code, 200)
     spans = self.tracer.writer.pop()
     eq_(len(spans), 2)
コード例 #9
0
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware

# Have flask use stdout as the logger
main_logger = logging.getLogger()
main_logger.setLevel(logging.DEBUG)
c = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c.setFormatter(formatter)
main_logger.addHandler(c)

# tracer uses its default agent host; uncomment to point at a remote agent
#tracer.configure(hostname='vagrant')
app = Flask(__name__)
# wrap the WSGI app so every request is traced under 'simple_service'
traced_app = TraceMiddleware(app, tracer, service='simple_service')


@tracer.wrap(name='api_entry')
@app.route('/')
def api_entry():
    """Entry endpoint: pads a traced slow call with 0.2s of sleep each side."""
    message = 'Entrypoint to the Application'
    time.sleep(0.2)
    slow_function()
    time.sleep(0.2)
    return message


@tracer.wrap(name='slow_function')
def slow_function():
    """Half-second sleep standing in for slow work; traced as its own span."""
    result = 'Slow function in Application'
    time.sleep(0.5)
    return result
コード例 #10
0
ファイル: api.py プロジェクト: NBParis/log-workshop-2
# Tracer configuration
tracer.configure(hostname='agent', priority_sampling=True)
tracer.set_tags({'env': 'workshop'})
patch(requests=True)

# enable distributed tracing for requests
# to send headers (globally)
config.requests['distributed_tracing'] = True

app = Flask('api')

# Only relax CORS in debug mode. Use .get() so a missing FLASK_DEBUG env
# var does not raise KeyError at import time (the original subscripted
# os.environ directly). NOTE(review): any non-empty string — including
# "0" — is still truthy here; confirm callers only set it when debugging.
if os.environ.get('FLASK_DEBUG'):
    CORS(app)

traced_app = TraceMiddleware(app, tracer, service='iot-frontend')


@app.route('/')
def homepage():
    """Serve the static SPA shell."""
    page = app.send_static_file('index.html')
    return page


@app.route('/service-worker.js')
def service_worker_js():
    """Serve the service worker from the static js/ folder."""
    worker_script = app.send_static_file('js/service-worker.js')
    return worker_script


@app.route('/status')
def system_status():
    status = requests.get('http://sensors:5002/sensors').json()
コード例 #11
0
from ddtrace.contrib.flask import TraceMiddleware
import logging
import sys

# Have flask use stdout as the logger
main_logger = logging.getLogger()
main_logger.setLevel(logging.DEBUG)
c = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c.setFormatter(formatter)
main_logger.addHandler(c)

app = Flask(__name__)

# Copypasta from the tutorial: trace every request, no distributed tracing
# NOTE(review): `Flask` and `tracer` are used but not imported in this view —
# confirm they are imported earlier in the file.
traced_app = TraceMiddleware(app, tracer, service="my-flask-app", distributed_tracing=False)

@app.route('/')
def api_entry():
    """Root endpoint."""
    message = 'Entrypoint to the Application'
    return message

@app.route('/api/apm')
def apm_endpoint():
    """APM demo endpoint."""
    message = 'Getting APM Started'
    return message

@app.route('/api/trace')
def trace_endpoint():
    """Trace demo endpoint."""
    message = 'Posting Traces'
    return message

if __name__ == '__main__':
    # bind on all interfaces
    # NOTE(review): port is passed as a str ('5050'); Flask typically
    # expects an int — confirm this runs on the deployed Flask version
    app.run(host='0.0.0.0', port='5050')
コード例 #12
0
import mocks

from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware
from flask import Flask
from flask import request
from mocks.mesos_slave import mock_mesos_slv
from mocks.mesos_slave import state, stats
from mocks.rabbitmq import mock_rmq
from mocks.redisdb import cluster_nodes

app = Flask(__name__)
# For Datadog APM
# http://pypi.datadoghq.com/trace/docs/#module-ddtrace.contrib.flask
traced_app = TraceMiddleware(app,
                             tracer,
                             service="mocker",
                             distributed_tracing=False)

# send werkzeug's request log to a file
# NOTE(review): `logging` is not imported in this view — confirm it is
# imported earlier in the file.
logger = logging.getLogger('werkzeug')
handler = logging.FileHandler('logs/access.log')
logger.addHandler(handler)


@app.route("/mesos_slave/state.json", methods=["GET"])
def mock_mesos_slave():
    """Return the mocked Mesos slave state payload."""
    # the mock_mesos_slv() variant is intentionally disabled
    #return mock_mesos_slv()
    payload = state()
    return payload


@app.route("/mesos_slave/metrics/snapshot", methods=["GET"])
def mock_mesos_slave_stats():
コード例 #13
0
ファイル: __init__.py プロジェクト: ursulahuang/gello
        if request is None or request.endpoint is None or \
           request.endpoint.startswith('api') or \
           request.endpoint.startswith('auth'):
            return

        if (('TRELLO_ORG_NAME' not in os.environ and 'JIRA_SERVER_ADDRESS' not in os.environ) \
            or 'GITHUB_ORG_LOGIN' not in os.environ) and \
           not request.endpoint.startswith('onboarding'):
            return redirect(url_for('onboarding.index'))
        elif ('TRELLO_ORG_NAME' in os.environ and 'GITHUB_ORG_LOGIN'
              in os.environ) and request.endpoint.startswith('onboarding'):
            return redirect(url_for('main.index'))


# Configure tracing if `APM_ENABLED` is `True`
if app.config.get('APM_ENABLED'):
    from ddtrace import tracer, patch
    from ddtrace.contrib.flask import TraceMiddleware

    # Required for Flask middleware:
    #   http://pypi.datadoghq.com/trace/docs/#module-ddtrace.contrib.flask
    import blinker as _

    # also trace Celery tasks and SQLAlchemy queries
    patch(celery=True)
    patch(sqlalchemy=True)

    traced_app = TraceMiddleware(app,
                                 tracer,
                                 service='gello',
                                 distributed_tracing=False)
コード例 #14
0
ファイル: thing.py プロジェクト: NBParis/log-workshop-2
from ddtrace.contrib.flask import TraceMiddleware
import random

# Tracer configuration
# NOTE(review): `tracer`, `patch`, and `config` are not imported in this
# view — confirm they are imported earlier in the file.
tracer.configure(hostname='agent')
tracer.set_tags({'env': 'workshop'})
patch(requests=True)

# enable distributed tracing for requests
# to send headers (globally)
config.requests['distributed_tracing'] = True

app = create_app()
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# trace every request under 'pumps-service', honoring incoming trace headers
traced_app = TraceMiddleware(app, tracer, service='pumps-service', distributed_tracing=True)

@app.route('/')
def hello():
    """Greeting endpoint for the pumps service."""
    payload = {'Hello from Oxygenation Pumps': 'world'}
    return Response(payload, mimetype='application/json')

@app.route('/devices', methods=['GET', 'POST'])
def status():
    if flask_request.method == 'GET':
        pumps = Pump.query.all()
        pump_obj = {'pump_count': len(pumps),
                    'status': []}
        app.logger.info(f"Available pumps {pumps}")
        for pump in pumps:
            pump_obj['status'].append(pump.serialize())
        return jsonify(pump_obj)
コード例 #15
0
import random

# module-level sensor registry
sensors = []

# Tracer configuration
# NOTE(review): `tracer`, `patch`, `config`, and `create_app` are not
# imported in this view — confirm they are imported earlier in the file.
tracer.configure(hostname='agent')
tracer.set_tags({'env': 'workshop'})
patch(requests=True)

# enable distributed tracing for requests
# to send headers (globally)
config.requests['distributed_tracing'] = True

app = create_app()
traced_app = TraceMiddleware(app,
                             tracer,
                             service='sensors-api',
                             distributed_tracing=True)


@app.route('/')
def hello():
    """Greeting endpoint for the sensors API."""
    payload = {'Hello from Sensors': 'world'}
    return Response(payload, mimetype='application/json')


@app.route('/sensors', methods=['GET', 'POST'])
def get_sensors():
    if flask_request.method == 'GET':
        sensors = Sensor.query.all()
        system_status = []
        for sensor in sensors:
コード例 #16
0
    unicode_view,
)


@app.errorhandler(TestError)
def handle_my_exception(e):
    """Map every TestError raised by a view to a 500 'error' response."""
    assert isinstance(e, TestError)
    body, status = 'error', 500
    return body, status


# add tracing to the app (we use a global app to help ensure multiple requests
# work)
service = "test.flask.service"
assert not writer.pop()  # should always be empty
traced_app = TraceMiddleware(app,
                             tracer,
                             service=service,
                             distributed_tracing=True)

# make the app testable
app.config['TESTING'] = True
# NOTE: `app` is rebound to the test client here, shadowing the Flask app
app = app.test_client()


class TestFlask(object):
    def setUp(self):
        """Drain any spans a previous test left in the writer."""
        writer.pop()

    def test_child(self):
        start = time.time()
        rv = app.get('/child')
コード例 #17
0
## Connecting MySQL
mydb = mysql.connector.connect(
    host=db_config.db_host,
    user=db_config.db_username,
    passwd=db_config.db_password,
    database=db_config.db_name
)
mycursor = mydb.cursor()

## Use a pin to specify metadata related to this connection
Pin.override(mydb, service='kikeyama_mysql')

## Flask
app = Flask(__name__)
#traced_app = TraceMiddleware(app, tracer, service="kikeyama_service", distributed_tracing=False)
traced_app = TraceMiddleware(app, tracer, service='kikeyama_service')

# Enable distributed tracing
ddtrace.config.flask['distributed_tracing_enabled'] = True

@app.route('/')
def api_entry():
    start_time = time.time()

    app.logger.info('getting root endpoint')
#    return 'Entrypoint to the Application'
    name = flask_request.args.get('name', str)
    tracer.set_tags({'name': name})
    mycursor.execute("SELECT Name, UUID, Number FROM kikeyama_table where name='%s'" % name)
    myresult = mycursor.fetchall()
    
コード例 #18
0
import os
from flask import Flask
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware

app = Flask(__name__)
app.config.from_object('indecision.default_settings')
app.config.from_envvar('INDECISION_SETTINGS')

# trace every request under the 'indecision' service
traced_app = TraceMiddleware(app,
                             tracer,
                             service="indecision",
                             distributed_tracing=False)

# outside debug mode, log warnings+ to a file rotated at midnight
if not app.debug:
    import logging
    from logging.handlers import TimedRotatingFileHandler
    # https://docs.python.org/3.6/library/logging.handlers.html#timedrotatingfilehandler
    file_handler = TimedRotatingFileHandler(
        os.path.join(app.config['LOG_DIR'], 'flask.log'), 'midnight')
    file_handler.setLevel(logging.WARNING)
    file_handler.setFormatter(
        logging.Formatter('<%(asctime)s> <%(levelname)s> %(message)s'))
    app.logger.addHandler(file_handler)

# imported for its side effects — presumably registers the routes; confirm
import indecision.views
コード例 #19
0
import random

# import tracing functions
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware

from flask import Flask, request, jsonify

app = Flask(__name__)

# trace the Flask application via the 'agent' host
tracer.configure(hostname='agent')
TraceMiddleware(app, tracer, service="taster")


@app.route("/taste")
def taste():
    """Score a beer, manually joining the caller's trace when Datadog
    headers are present on the request."""
    # continue the distributed trace started by the upstream service
    trace_id = request.headers.get("X-Datadog-Trace-Id")
    parent_id = request.headers.get("X-Datadog-Parent-Id")
    if trace_id and parent_id:
        current = tracer.current_span()
        current.trace_id = int(trace_id)
        current.parent_id = int(parent_id)

    # stout always scores a perfect 10; everything else is random
    requested_beer = request.args.get("beer")
    if requested_beer == 'stout':
        score = 10
    else:
        score = random.randint(1, 10)
    return jsonify(score=score)
コード例 #20
0
ファイル: test_flask.py プロジェクト: vartec/dd-trace-py
    u'üŋïĉóđē',
    unicode_view,
)


@app.errorhandler(TestError)
def handle_my_exception(e):
    """Map every TestError raised by a view to a 500 'error' response."""
    assert isinstance(e, TestError)
    body, status = 'error', 500
    return body, status


# add tracing to the app (we use a global app to help ensure multiple requests
# work)
service = "test.flask.service"
assert not writer.pop()  # should always be empty
traced_app = TraceMiddleware(app, tracer, service=service)

# make the app testable
app.config['TESTING'] = True
# NOTE: `app` is rebound to the test client here, shadowing the Flask app
app = app.test_client()


class TestFlask(object):

    def setUp(self):
        """Drain any spans a previous test left in the writer."""
        writer.pop()

    def test_child(self):
        start = time.time()
        rv = app.get('/child')
コード例 #21
0
from ddtrace import tracer, patch
from ddtrace.contrib.flask import TraceMiddleware

from bootstrap import create_app
from models import Thought

from time import sleep

# trace redis calls made by the cache below
patch(redis=True)
app = create_app()
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_HOST': 'redis'})
cache.init_app(app)

traced_app = TraceMiddleware(app,
                             tracer,
                             service='thinker-microservice',
                             distributed_tracing=True)

# Tracer configuration
tracer.configure(hostname='agent')


@tracer.wrap(name='think')
@cache.memoize(30)
def think(subject):
    tracer.current_span().set_tag('subject', subject)

    sleep(0.5)
    quote = Thought.query.filter_by(subject=subject).first()

    if quote is None:
コード例 #22
0
import requests

from flask import Flask, Response, jsonify
from flask import request as flask_request

from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware

from thoughts import thoughts
from time import sleep

# Tracer configuration: report to the 'agent' host
tracer.configure(hostname='agent')

app = Flask('api')
traced_app = TraceMiddleware(app, tracer, service='thinker-microservice')


@tracer.wrap(name='think')
def think(subject):
    """Tag the active span with the subject, simulate work, and return
    the canned thought for that subject."""
    current = tracer.current_span()
    current.set_tag('subject', subject)
    sleep(0.5)
    return thoughts[subject]


@app.route('/')
def think_microservice():
    """HTTP wrapper around think(); the subject comes from the query string."""
    subject = flask_request.args.get('subject')
    body = think(subject)
    return Response(body, mimetype='application/json')
コード例 #23
0
from flask import request, jsonify
from bootstrap import create_app

from models import Beer, Donut
from stats import DonutStats

# import tracing functions
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware

# initialize Flask application
app = create_app()

# trace the Flask application under the 'match-maker' service
TraceMiddleware(app, tracer, service="match-maker")


# some simple routes
@app.route('/ping')
def ping():
    """Health check: always returns the literal body "200 OK"."""
    status_message = "200 OK"
    return status_message


@app.route('/beers')
def beers():
    """
    List all beers
コード例 #24
0
from flask import Flask, Response, jsonify
from flask import request as flask_request

from ddtrace import tracer, patch, config
from ddtrace.contrib.flask import TraceMiddleware

# Tracer configuration: report to the 'agent' host and trace requests calls
tracer.configure(hostname='agent')
patch(requests=True)

# enable distributed tracing for requests
# to send headers (globally)
config.requests['distributed_tracing'] = True

app = Flask('api')
traced_app = TraceMiddleware(app, tracer, service='thinker-api')


@app.route('/')
def hello():
    """Greeting endpoint."""
    payload = {'hello': 'world'}
    return Response(payload, mimetype='application/json')


@app.route('/think/')
def think_handler():
    """Forward the requested subjects to the thinker service and relay
    its response body as JSON."""
    params = {
        'subject': flask_request.args.getlist('subject', str),
    }
    upstream = requests.get('http://thinker:5001/', params=params)
    return Response(upstream.text, mimetype='application/json')
コード例 #25
0
def run_app():
    """Run the validator application and poll SQS until told to exit.

    Builds the Flask app (traced with Datadog when USE_DATADOG is set),
    then loops handing each SQS message to an SQSWorkDispatcher, which
    routes it to file generation or job validation based on its message
    attributes. Polling stops when the dispatcher reports it is exiting.
    """
    app = create_app()

    # This is for DataDog (Do Not Delete)
    if USE_DATADOG:
        TraceMiddleware(app,
                        tracer,
                        service="broker-dd",
                        distributed_tracing=False)

    with app.app_context():
        current_app.debug = CONFIG_SERVICES['debug']
        local = CONFIG_BROKER['local']
        g.is_local = local
        current_app.config.from_object(__name__)

        # Future: Override config w/ environment variable, if set
        current_app.config.from_envvar('VALIDATOR_SETTINGS', silent=True)

        queue = sqs_queue()

        logger.info("Starting SQS polling")
        keep_polling = True
        while keep_polling:
            # With cleanup handling engaged, allowing retries
            dispatcher = SQSWorkDispatcher(queue)

            def choose_job_by_message_attributes(message):
                # Determine if this is a retry of this message, in which case job execution should know so it can
                # do cleanup before proceeding with the job
                q_msg_attr = message.attributes  # the non-user-defined (queue-defined) attributes on the message
                is_retry = False
                if q_msg_attr.get('ApproximateReceiveCount') is not None:
                    is_retry = int(
                        q_msg_attr.get('ApproximateReceiveCount')) > 1

                # Route on the user-defined 'validation_type' attribute
                msg_attr = message.message_attributes
                if msg_attr and msg_attr.get(
                        'validation_type',
                    {}).get('StringValue') == 'generation':
                    # Generating a file
                    job_signature = {
                        "_job": validator_process_file_generation,
                        "file_gen_id": message.body,
                        "is_retry": is_retry
                    }
                else:
                    # Running validations (or generating a file from a Job)
                    a_agency_code = msg_attr.get(
                        'agency_code',
                        {}).get('StringValue') if msg_attr else None
                    job_signature = {
                        "_job": validator_process_job,
                        "job_id": message.body,
                        "agency_code": a_agency_code,
                        "is_retry": is_retry
                    }
                return job_signature

            found_message = dispatcher.dispatch_by_message_attribute(
                choose_job_by_message_attributes)

            # When you receive an empty response from the queue, wait before trying again
            if not found_message:
                time.sleep(1)

            # If this process is exiting, don't poll for more work
            keep_polling = not dispatcher.is_exiting
コード例 #26
0
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware

# Have flask use stdout as the logger
main_logger = logging.getLogger()
main_logger.setLevel(logging.DEBUG)
c = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c.setFormatter(formatter)
main_logger.addHandler(c)

app = Flask(__name__)

# trace every request under 'MyWebbApp_V2'
traced_app = TraceMiddleware(app,
                             tracer,
                             service="MyWebbApp_V2",
                             distributed_tracing=False)


@app.route('/')
def api_entry():
    """Root endpoint."""
    message = 'Entrypoint to the Application'
    return message


@app.route('/api/apm')
def apm_endpoint():
    """APM demo endpoint."""
    message = 'Getting APM Started'
    return message


@app.route('/api/trace')
def trace_endpoint():
コード例 #27
0
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware

tracer.configure(hostname='127.0.0.1')  # configured for localhost as lazy

# Have flask use stdout as the logger
main_logger = logging.getLogger()
main_logger.setLevel(logging.DEBUG)
c = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c.setFormatter(formatter)
main_logger.addHandler(c)

app = Flask(__name__)
traced_app = TraceMiddleware(
    app, tracer, service='AndyTestApp')  ## added this with an app name


@app.route('/')
def api_entry():
    """Root endpoint."""
    message = 'Entrypoint to the Application'
    return message


@app.route('/api/apm')
def apm_endpoint():
    """APM demo endpoint."""
    message = 'Getting APM Started'
    return message


@app.route('/api/trace')
def trace_endpoint():
    """Trace demo endpoint."""
    message = 'Posting Traces'
    return message
コード例 #28
0
# blinker is required by the ddtrace Flask middleware
import blinker as _
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware

# Have flask use stdout as the logger
main_logger = logging.getLogger()
main_logger.setLevel(logging.DEBUG)
c = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c.setFormatter(formatter)
main_logger.addHandler(c)

app = Flask(__name__)
example_traced_app = TraceMiddleware(app,
                                     tracer,
                                     service="flask_example",
                                     distributed_tracing=False)


@app.route('/')
def api_entry():
    """Root endpoint."""
    message = 'Entrypoint to the Application'
    return message


@app.route('/api/apm')
def apm_endpoint():
    """APM demo endpoint."""
    message = 'Getting APM Started'
    return message


@app.route('/api/trace')
def trace_endpoint():
コード例 #29
0
def run_app():
    """Run the validator application: poll SQS and process each message.

    Each message's body is a job id; the job is either validated or used
    to generate a file, depending on its file/job type. Exceptions are
    logged and recorded against the job, and every message's visibility
    timeout is reset at the end of each loop so failed work can be retried
    immediately.
    """
    app = create_app()

    # This is for DataDog (Do Not Delete)
    if USE_DATADOG:
        TraceMiddleware(app,
                        tracer,
                        service="broker-dd",
                        distributed_tracing=False)

    with app.app_context():
        current_app.debug = CONFIG_SERVICES['debug']
        local = CONFIG_BROKER['local']
        g.is_local = local
        error_report_path = CONFIG_SERVICES['error_report_path']
        current_app.config.from_object(__name__)

        # Create connection to job tracker database
        sess = GlobalDB.db().session

        # Future: Override config w/ environment variable, if set
        current_app.config.from_envvar('VALIDATOR_SETTINGS', silent=True)

        queue = sqs_queue()
        messages = []

        logger.info("Starting SQS polling")
        while True:
            # Set current_message to None before every loop to ensure it's never set to the previous message
            current_message = None
            try:
                # Grabs one (or more) messages from the queue
                messages = queue.receive_messages(
                    WaitTimeSeconds=10, MessageAttributeNames=['All'])
                for message in messages:
                    logger.info("Message received: %s", message.body)

                    # Retrieve the job_id from the message body
                    current_message = message
                    g.job_id = message.body
                    mark_job_status(g.job_id, "ready")

                    # Get the job
                    job = sess.query(Job).filter_by(
                        job_id=g.job_id).one_or_none()
                    if job is None:
                        validation_error_type = ValidationError.jobError
                        write_file_error(g.job_id, None, validation_error_type)
                        raise ResponseException(
                            'Job ID {} not found in database'.format(g.job_id),
                            StatusCode.CLIENT_ERROR, None,
                            validation_error_type)

                    # We have two major functionalities in the Validator: validation and file generation
                    if (not job.file_type or job.file_type.letter_name
                            in ['A', 'B', 'C', 'FABS'] or job.job_type.name !=
                            'file_upload') and job.submission_id:
                        # Run validations
                        validation_manager = ValidationManager(
                            local, error_report_path)
                        validation_manager.validate_job(job.job_id)
                    else:
                        # Retrieve the agency code data from the message attributes
                        msg_attr = current_message.message_attributes
                        agency_code = msg_attr['agency_code']['StringValue'] if msg_attr and \
                            msg_attr.get('agency_code') else None
                        agency_type = msg_attr['agency_type']['StringValue'] if msg_attr and \
                            msg_attr.get('agency_type') else None

                        file_generation_manager = FileGenerationManager(
                            job, agency_code, agency_type, local)
                        file_generation_manager.generate_from_job()
                        sess.commit()
                        sess.refresh(job)

                    # Delete from SQS once processed
                    message.delete()

            except ResponseException as e:
                # Handle exceptions explicitly raised during validation.
                logger.error(traceback.format_exc())

                job = get_current_job()
                if job:
                    if job.filename is not None:
                        # Insert file-level error info to the database
                        write_file_error(job.job_id, job.filename, e.errorType,
                                         e.extraInfo)
                    if e.errorType != ValidationError.jobError:
                        # Job passed prerequisites for validation but an error happened somewhere: mark job as 'invalid'
                        mark_job_status(job.job_id, 'invalid')
                        if current_message:
                            # These error types mean the message itself is bad; drop it
                            if e.errorType in [
                                    ValidationError.rowCountError,
                                    ValidationError.headerError,
                                    ValidationError.fileTypeError
                            ]:
                                current_message.delete()
            except Exception as e:
                # Handle uncaught exceptions in validation process.
                logger.error(traceback.format_exc())

                # csv-specific errors get a different job status and response code
                if isinstance(e, ValueError) or isinstance(
                        e, csv.Error) or isinstance(e, UnicodeDecodeError):
                    job_status = 'invalid'
                else:
                    job_status = 'failed'
                job = get_current_job()
                if job:
                    if job.filename is not None:
                        error_type = ValidationError.unknownError
                        if isinstance(e, UnicodeDecodeError):
                            error_type = ValidationError.encodingError
                            # TODO Is this really the only case where the message should be deleted?
                            if current_message:
                                current_message.delete()
                        write_file_error(job.job_id, job.filename, error_type)
                    mark_job_status(job.job_id, job_status)
            finally:
                GlobalDB.close()
                # Set visibility to 0 so that another attempt can be made to process in SQS immediately,
                # instead of waiting for the timeout window to expire
                for message in messages:
                    try:
                        message.change_visibility(VisibilityTimeout=0)
                    except ClientError:
                        # Deleted messages will throw errors, which is fine because they are handled
                        pass
コード例 #30
0
logging.basicConfig(level=logging.DEBUG)

# create our little application :)
app = Flask(__name__)

# Load default config and override config from an environment variable
# NOTE(review): SECRET_KEY is a hard-coded development value — confirm it
# is overridden before any real deployment.
app.config.update(
    dict(
        DATABASE=os.path.join(app.root_path, 'flaskr.db'),
        DEBUG=True,
        SECRET_KEY='development key',
        USERNAME='******',
        PASSWORD='******',
    ))

# trace every request under the 'flaskr' service
traced_app = TraceMiddleware(app, tracer, service="flaskr")


def connect_db():
    """Open a SQLite connection to the configured database with
    dict-like row access."""
    connection = sqlite3.connect(app.config['DATABASE'])
    connection.row_factory = sqlite3.Row
    return connection


def init_db():
    """Create the schema by executing schema.sql against the app database."""
    connection = get_db()
    with app.open_resource('schema.sql', mode='r') as schema_file:
        script = schema_file.read()
        connection.cursor().executescript(script)
    connection.commit()