Example 1
def do_scheduled_tasks(settings):
    start_time = utc.now()
    db = make_session_cls(settings.db_url)()
    # write to checkins
    db.merge(Checkin(proc_name='timer', time=start_time))
    db.commit()
    # connect to RabbitMQ
    rabbit_conn = pika.BlockingConnection(pika.URLParameters(settings.rabbit_url))
    rabbit = rabbit_conn.channel()
    # publish a ping message for the dispatcher to consume
    rabbit.exchange_declare(exchange=settings.dispatcher_ping_exchange,
                            exchange_type='topic', durable=True)
    rabbit.basic_publish(
        exchange=settings.dispatcher_ping_exchange,
        routing_key='timer',
        body='timer'
    )

    declare_exchanges(rabbit)
    check_pipelines(settings, db, rabbit)
    db.commit()
    check_jobs(settings, db, rabbit)
    db.commit()
    cleanup_logs(settings, db)
    db.commit()
    run_time = utc.now() - start_time
    logger.info("Finished scheduled tasks.  Took %s seconds" %
           run_time.total_seconds())
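How this function gets run is outside the example, but it is presumably invoked on an interval by a timer process. A minimal driver loop is sketched below; run_timer_loop and its interval are hypothetical, while get_settings() and do_scheduled_tasks() come from the examples on this page.

import time

def run_timer_loop(interval_seconds=60):
    # Hypothetical driver, not from the source: re-read settings and run
    # the scheduled tasks on a fixed interval.
    while True:
        settings = get_settings()
        do_scheduled_tasks(settings)
        time.sleep(interval_seconds)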
Example 2
def main():
    settings = get_settings()

    rabbit_conn = pika.BlockingConnection(
        pika.URLParameters(settings.rabbit_url))
    rabbit = rabbit_conn.channel()
    mp.declare_exchanges(rabbit)
    queue_name = 'mettle_job_logs'
    rabbit.queue_declare(queue=queue_name, exclusive=False, durable=True)
    rabbit.queue_bind(exchange=mp.JOB_LOGS_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    logger.info('Bound exchange %s to queue %s' %
                (mp.JOB_LOGS_EXCHANGE, queue_name))

    Session = make_session_cls(settings.db_url)
    for method, properties, body in rabbit.consume(queue=queue_name):
        db = Session()
        data = json.loads(body)
        job_id = data['job_id']
        line_num = data['line_num']
        message = data['msg']
        try:
            db.add(
                JobLogLine(
                    job_id=job_id,
                    line_num=line_num,
                    message=message,
                ))
            db.commit()
            logger.info(message)
        except IntegrityError:
            # We probably got a duplicate log line, which can happen when
            # RabbitMQ redelivers a message.  Query the DB for a log line
            # matching job_id and line_num.  If it holds the same message,
            # just carry on.  If the message differs, log an error.
            db.rollback()
            existing_line = db.query(JobLogLine).filter_by(
                job_id=job_id, line_num=line_num).one()
            if existing_line.message != message:
                err = """Job {job_id}, log line {num} is stored as
                this:\n{old}\n\n but the queue has just produced a new message
                for the same line, with this value:\n{new}"""

                logger.error(
                    textwrap.dedent(err).format(
                        job_id=job_id,
                        num=line_num,
                        old=existing_line.message,
                        new=message,
                    ))

        rabbit.basic_ack(method.delivery_tag)
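For reference, the producer side would publish JSON bodies carrying the same three keys this consumer reads. The sketch below is hypothetical; only the message shape (job_id, line_num, msg) and mp.JOB_LOGS_EXCHANGE are taken from the example above, and the routing key is an assumption.

def publish_log_line(rabbit, job_id, line_num, msg):
    # Hypothetical producer sketch, mirroring the message shape the
    # consumer above expects; the routing key choice is an assumption.
    rabbit.basic_publish(
        exchange=mp.JOB_LOGS_EXCHANGE,
        routing_key='job.%s' % job_id,
        body=json.dumps({
            'job_id': job_id,
            'line_num': line_num,
            'msg': msg,
        }),
    )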
Example 3
def main():
    settings = get_settings()

    rabbit_conn = pika.BlockingConnection(
        pika.URLParameters(settings.rabbit_url))
    rabbit = rabbit_conn.channel()
    mp.declare_exchanges(rabbit)
    queue_name = 'mettle_dispatcher'

    rabbit.queue_declare(queue=queue_name, exclusive=False, durable=True)
    rabbit.queue_bind(exchange=mp.ANNOUNCE_SERVICE_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=mp.ACK_PIPELINE_RUN_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=mp.NACK_PIPELINE_RUN_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=mp.CLAIM_JOB_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=mp.END_JOB_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=settings.dispatcher_ping_exchange,
                      queue=queue_name,
                      routing_key='timer')

    Session = make_session_cls(settings.db_url)

    for method, properties, body in rabbit.consume(queue=queue_name):
        db = Session()
        if method.exchange == mp.ANNOUNCE_SERVICE_EXCHANGE:
            on_announce_service(settings, db, json.loads(body))
        elif method.exchange == mp.ACK_PIPELINE_RUN_EXCHANGE:
            on_pipeline_run_ack(settings, rabbit, db, json.loads(body))
        elif method.exchange == mp.NACK_PIPELINE_RUN_EXCHANGE:
            on_pipeline_run_nack(settings, rabbit, db, json.loads(body))
        elif method.exchange == mp.CLAIM_JOB_EXCHANGE:
            on_job_claim(settings, rabbit, db, json.loads(body),
                         properties.correlation_id)
        elif method.exchange == mp.END_JOB_EXCHANGE:
            on_job_end(settings, rabbit, db, json.loads(body))
        # ping from the timer process: record a dispatcher heartbeat checkin
        elif method.exchange == settings.dispatcher_ping_exchange:
            db.merge(Checkin(proc_name='dispatcher', time=utc.now()))
        db.commit()
        rabbit.basic_ack(method.delivery_tag)
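The if/elif chain routes each message by its source exchange. One equivalent, table-driven arrangement is sketched below; this is a refactoring suggestion, not code from the project, and it assumes the handler signatures shown above.

# Hypothetical refactor: map each exchange name to a handler with a
# uniform signature, so the routing chain becomes a dict lookup.
HANDLERS = {
    mp.ANNOUNCE_SERVICE_EXCHANGE:
        lambda s, r, db, data, props: on_announce_service(s, db, data),
    mp.ACK_PIPELINE_RUN_EXCHANGE:
        lambda s, r, db, data, props: on_pipeline_run_ack(s, r, db, data),
    mp.NACK_PIPELINE_RUN_EXCHANGE:
        lambda s, r, db, data, props: on_pipeline_run_nack(s, r, db, data),
    mp.CLAIM_JOB_EXCHANGE:
        lambda s, r, db, data, props: on_job_claim(s, r, db, data,
                                                   props.correlation_id),
    mp.END_JOB_EXCHANGE:
        lambda s, r, db, data, props: on_job_end(s, r, db, data),
}

# Inside the consume loop, the chain then collapses to:
#     handler = HANDLERS.get(method.exchange)
#     if handler:
#         handler(settings, rabbit, db, json.loads(body), properties)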
Example 4
def main():
    settings = get_settings()
    session = make_session_cls(settings.db_url)()

    services = {}
    for service_name, service_data in settings.stub_services.items():
        service = session.query(Service).filter_by(name=service_name).first()
        if not service:
            logger.info('Making service %s' % service_name)
            service = Service(
                name=service_name,
                updated_by='datastub',
                pipeline_names=service_data['pipeline_names'],
            )
            session.add(service)
        services[service.name] = service

    nl_lists = {}

    for nl_data in settings.stub_notification_lists:
        nl = session.query(NotificationList).filter_by(
            name=nl_data['name'],
        ).first()

        if not nl:
            logger.info('Making notification list %s' % nl_data['name'])
            nl = NotificationList(
                name=nl_data['name'],
                updated_by='datastub',
            )
            session.add(nl)
        nl.recipients = nl_data['recipients']
        nl_lists[nl.name] = nl

    pipelines = {}
    for pl_data in settings.stub_pipelines:
        pipeline = session.query(Pipeline).filter_by(
            name=pl_data['name'],
            service=services[pl_data['service']],
        ).first()

        if not pipeline:
            logger.info('Making pipeline %s' % pl_data['name'])
            pipeline = Pipeline(
                name=pl_data['name'],
                service=services[pl_data['service']],
                updated_by='datastub',
            )
        if 'crontab' in pl_data:
            pipeline.crontab = pl_data['crontab']
        elif 'chained_from' in pl_data:
            pipeline.chained_from = pipelines[pl_data['chained_from']]
        session.add(pipeline)
        pipeline.notification_list = nl_lists[pl_data['notification_list']]
        pipelines[pl_data['name']] = pipeline

    # Query all pipelines; any not present in the data stub should be
    # marked inactive.
    for pipeline in session.query(Pipeline):
        if pipeline.name not in pipelines:
            logger.info('Deactivating unstubbed pipeline "%s".' % pipeline.name)
            pipeline.active = False

    session.commit()

    logger.info('Sleeping')
    # Block indefinitely so the process stays alive after stubbing.
    sys.stdin.read()
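The stub settings this function reads are plain dict and list structures. The shape below is inferred from the keys accessed above; all names and values are made up for illustration.

# Hypothetical stub data matching the keys read by main() above.
stub_services = {
    'inventory': {'pipeline_names': ['extract', 'load']},
}
stub_notification_lists = [
    {'name': 'oncall', 'recipients': ['ops@example.com']},
]
stub_pipelines = [
    {'name': 'extract', 'service': 'inventory',
     'crontab': '0 * * * *', 'notification_list': 'oncall'},
    {'name': 'load', 'service': 'inventory',
     'chained_from': 'extract', 'notification_list': 'oncall'},
]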
Example 5
def get_db():
    # Return the session class bound to the configured database URL;
    # call the result to open a session.
    settings = get_settings()
    return make_session_cls(settings.db_url)
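Note that this returns the session class, not an open session; callers instantiate it, as the other examples do. A hypothetical usage sketch (the query target is only an illustration):

Session = get_db()
db = Session()
try:
    pipeline_count = db.query(Pipeline).count()
finally:
    db.close()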