Example #1
    def bind_pg_to_websocket(self, filter_id=None):
        self.db.execute('LISTEN todos_updates;')
        last_ping = utc.now()
        while True:
            # Handle sending keepalives on the socket.
            now = utc.now()
            elapsed = (now - last_ping).total_seconds()
            if elapsed > self.app.settings.websocket_ping_interval:
                self.ws.send_frame('', self.ws.OPCODE_PING)
                last_ping = now

            # Block on notifications from Postgres, with 5 sec timeout.
            if select.select([self.dbconn], [], [], 5) == ([], [], []):
                logger.debug("No messages for 5 seconds.")
            else:
                self.dbconn.poll()
                while self.dbconn.notifies:
                    notify = self.dbconn.notifies.pop()
                    payload = json.loads(notify.payload)
                    if filter_id is None or payload['id'] == filter_id:
                        # Handle payloads too big for a PG NOTIFY.
                        if 'error' in payload and 'id' in payload:
                            payload = self.get_todo(payload['id'])

                        self.ws.send(json.dumps(payload))
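For context, something on the Postgres side has to raise these notifications. A minimal producer sketch using plain psycopg2; the DSN and payload shape are illustrative assumptions, not from the source:

import json
import psycopg2

conn = psycopg2.connect('dbname=todos')  # illustrative DSN
conn.autocommit = True  # NOTIFY is only delivered once the transaction commits
with conn.cursor() as cur:
    # pg_notify() takes a channel name and a payload string; the listener
    # above runs json.loads() on the payload, so send JSON.
    cur.execute("SELECT pg_notify('todos_updates', %s)",
                (json.dumps({'id': 1, 'title': 'buy milk'}),))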
Example #2
    def bind_queue_to_websocket(self, exchange, routing_keys):
        settings = self.app.settings
        self.rabbit_conn = pika.BlockingConnection(pika.URLParameters(settings.rabbit_url))
        channel = self.rabbit_conn.channel()

        queue = channel.queue_declare(exclusive=True)
        queue_name = queue.method.queue

        logger.debug('Rabbit socket on %s/%s' % (exchange, routing_keys))
        channel.exchange_declare(exchange=exchange, type='topic', durable=True)

        for rk in routing_keys:
            channel.queue_bind(exchange=exchange,
                               queue=queue_name,
                               routing_key=rk)

        channel.basic_consume(self.on_rabbit_message, no_ack=True, queue=queue_name)

        last_ping = utc.now()
        while True:
            now = utc.now()
            elapsed = (now - last_ping).total_seconds()
            if elapsed > settings.websocket_ping_interval:
                self.ws.send_frame('', self.ws.OPCODE_PING)
                last_ping = now
            self.rabbit_conn.process_data_events()
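The on_rabbit_message callback registered above is not shown in the source. A plausible sketch, assuming message bodies are JSON text that can be relayed to the websocket as-is:

    def on_rabbit_message(self, channel, method, properties, body):
        # Old-style pika consumer callback; body arrives as bytes.
        self.ws.send(body.decode('utf-8'))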
Example #3
def do_scheduled_tasks(settings):
    start_time = utc.now()
    db = make_session_cls(settings.db_url)()
    # write to checkins
    db.merge(Checkin(proc_name='timer', time=start_time))
    db.commit()
    # connect to RabbitMQ
    rabbit_conn = pika.BlockingConnection(pika.URLParameters(settings.rabbit_url))
    rabbit = rabbit_conn.channel()
    # write message for dispatcher to be consumed
    rabbit.exchange_declare(exchange=settings.dispatcher_ping_exchange, type='topic', durable=True)
    rabbit.basic_publish(
        exchange=settings.dispatcher_ping_exchange,
        routing_key='timer',
        body='timer'
    )

    declare_exchanges(rabbit)
    check_pipelines(settings, db, rabbit)
    db.commit()
    check_jobs(settings, db, rabbit)
    db.commit()
    cleanup_logs(settings, db)
    db.commit()
    run_time = utc.now() - start_time
    logger.info("Finished scheduled tasks.  Took %s seconds" %
                run_time.total_seconds())
Example #4
def test_jwtsession_expire_days():
    class A(spa.Handler):
        def get(self):
            return spa.Response(self.request.environ['jwtsession']['foo'])

    routes = (
        ('/', 'a', A),
    )

    app = spa.App(routes)

    secret = 'foobar'

    app = JWTSessionMiddleware(app, secret_key=secret, expire_days=1)

    c = Client(app, spa.Response)
    tok = jwt.encode({
        'foo': 'bar',
        'iat': utc.now() - datetime.timedelta(days=1)
    }, secret)
    c.set_cookie('localhost', 'session', tok)
    with pytest.raises(KeyError):
        # The handler should fail to read 'foo' from the session, because it
        # will have been dropped when the middleware saw that its 'iat'
        # timestamp was too old.
        c.get('/')
Example #5
def read():
    """Reads the most recent update result, filtering results that are too old.

    Args:
        None.

    Returns:
        An update.result.Result based on the most recent update result file
        within the time threshold or None if none exist.
    """
    result_files = glob.glob(
        os.path.join(_RESULT_FILE_DIR, _UPDATE_RESULT_FILENAME_FORMAT % '*'))
    if not result_files:
        return None

    # Filenames start with a timestamp, so the last one lexicographically is the
    # most recently created file.
    most_recent_result_file = sorted(result_files)[-1]
    with open(most_recent_result_file) as result_file:
        most_recent_result = update.result.read(result_file)

    # Ignore the result if it's too old.
    delta = utc.now() - most_recent_result.timestamp
    if delta.total_seconds() > _RECENT_UPDATE_THRESHOLD_SECONDS:
        return None

    return most_recent_result
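The sort above only finds the newest file because the timestamp prefix is fixed-width with the most significant field first. _UPDATE_RESULT_FILENAME_FORMAT itself is not shown in the source; a hypothetical writer that would preserve that invariant:

import os
import utc  # the same utc helper used throughout these examples

def _result_file_path(result_dir):
    # A zero-padded, year-first timestamp ('%Y%m%dT%H%M%SZ') makes string
    # order match chronological order.  The name format is an assumption.
    return os.path.join(
        result_dir, 'update-result-%s.json' % utc.now().strftime('%Y%m%dT%H%M%SZ'))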
Example #6
    @classmethod
    def unserialize(cls, string, secret_key, algorithm='HS256', expire_days=None):
        """Load the secure cookie from a serialized string.

        :param string: the cookie value to unserialize.
        :param secret_key: the secret key used to serialize the cookie.
        :param algorithm: the JWT signing algorithm used to serialize the
                          cookie.
        :param expire_days: if given, reject tokens whose 'iat' claim is more
                            than this many days old.
        :return: a new :class:`JWTCookie`.
        """
        if isinstance(string, text_type):
            string = string.encode('utf-8', 'replace')
        if isinstance(secret_key, text_type):
            secret_key = secret_key.encode('utf-8', 'replace')

        items = jwt.decode(string, secret_key, algorithms=[algorithm])

        if expire_days:
            if 'iat' not in items:
                raise TokenTimestampError('No iat claim in token')

            issued_at = utc.fromtimestamp(items['iat'])
            time_passed = utc.now() - issued_at
            if time_passed > datetime.timedelta(days=expire_days):
                raise TokenTimestampError('Token is too old')

        return cls(items, secret_key, algorithm)
Example #7
    def __call__(self, environ, start_response):
        qs_params = {k: v[0] for k, v in
                     parse_qs(environ['QUERY_STRING']).items()}
        if self.qs_name not in qs_params:
            return self.app(environ, start_response)
        try:
            session_vals = jwt.decode(qs_params[self.qs_name], key=self.secret_key)
        except jwt.DecodeError:
            # silently drop malformed tokens
            return self.app(environ, start_response)

        if self.expire_days:
            if 'iat' not in session_vals:
                # We can't enforce token expiration if the token has no issued
                # at claim.  So ignore the token.
                return self.app(environ, start_response)

            issued_at = utc.fromtimestamp(session_vals['iat'])
            if (utc.now() - issued_at).days > self.expire_days:
                # Token has an issued at claim, but it's too old.  Ignore the
                # token.
                return self.app(environ, start_response)

        environ[self.wsgi_name].update(session_vals)
        return self.app(environ, start_response)
Example #8
def test_qs_middleware():
    class A(spa.Handler):
        def get(self):
            session = self.request.environ['jwtsession']
            assert 'foo' in session
            return spa.Response(session['foo'])

    routes = (
        ('/', 'a', A),
    )

    app = spa.App(routes)

    secret = 'foobar'

    app = JWTSessionParamMiddleware(app, secret_key=secret)
    app = JWTSessionMiddleware(app, secret_key=secret)

    c = Client(app, spa.Response)
    token = jwt.encode({'foo': 'bar', 'iat': utc.now()}, secret).decode('utf-8')
    url = '/?session_token=%s' % token
    resp = c.get(url)
    assert resp.data == b'bar'
    cookieparts = resp.headers['Set-Cookie'].split('; ')[0].split('=')
    assert cookieparts[0] == 'session'
    assert jwt.decode(cookieparts[1], secret)['foo'] == 'bar'
Example #9
    def get_targets(self, target_time):
        # The HawaiianPipeline is in no hurry.  If you call get_targets with a
        # target_time that's too recent, it will nack and make you wait.
        now = utc.now()
        wait_until = target_time + timedelta(days=4)

        if now < wait_until:
            raise mp.PipelineNack("What's the rush, man?", wait_until)

        return {
            "flour": [],
            "water": [],
            "yeast": [],
            "sugar": [],
            "salt": [],
            "olive oil": [],
            "mix": ["flour", "water", "yeast", "sugar", "salt", "olive oil"],
            "raise": ["mix"],
            "roll": ["raise"],
            "sauce": ["roll"],
            "cheese": ["sauce"],
            "ham": ["cheese"],
            "pineapple": ["cheese"],
            "bake": ["ham", "pineapple"],
            "box": ["bake"],
            "deliver": ["box"],
            "eat": ["deliver"]
        }
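The dict returned above encodes a dependency graph: each key is a target, and its value lists the targets that must finish first. A minimal sketch, with illustrative names, of how a scheduler could pick runnable targets from such a dict:

def ready_targets(targets, completed):
    # A target is ready when it has not run yet and all its dependencies have.
    return [t for t, deps in targets.items()
            if t not in completed and all(d in completed for d in deps)]

With completed=set() this yields the six raw ingredients; once they finish, "mix" becomes ready, and so on down to "eat".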
Example #10
def parse_date(request):
    if 'date' in request.args:
        try:
            date_string = request.args.get('date')
            # Deal with a URL that has a + sign replaced by a space
            dt = parser.parse(date_string.replace(' ', '+'))
            d = utc.to_utc(dt)
        except Exception as err:
            raise Exception("Invalid Date '{}' {}".format(date_string, err))
    else:
        d = utc.now()

    current_date = utc.now()
    # Note: d is always bound here, unlike date_string, which is unset when
    # the 'date' arg was omitted.
    if (d - current_date).total_seconds() > 86400.0:
        raise Exception("Date '{}' more than 24 hours in future.".format(d))

    return d
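The replace(' ', '+') above compensates for URL form decoding: a timezone offset like +12:00 arrives as a space, mangling the date. A quick illustration, assuming parser is dateutil.parser as the call style suggests:

from dateutil import parser

raw = '2021-06-01T12:00:00 12:00'  # was ...+12:00 before URL decoding
print(parser.parse(raw.replace(' ', '+')))  # 2021-06-01 12:00:00+12:00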
Example #11
    def _claim_job(self, target_time, target):
        """
        Make an RPC call to the dispatcher to claim self.job_id.

        If claiming the job_id is successful, the dispatcher will return '1',
        and this function will return True.

        If claiming the job is unsuccessful, probably because some other worker
        has already claimed it, the dispatcher will return '0', and this
        function will return False.
        """
        self.corr_id = str(uuid.uuid4())
        logger.info('Claiming job %s.' % self.job_id)
        self.chan.queue_declare(queue=self.queue, exclusive=True)

        start_time = utc.now()
        expire_time = self.get_expire_time(target_time, target, start_time)

        consumer_tag = self.chan.basic_consume(self._on_claim_response,
                                               no_ack=True,
                                               queue=self.queue)
        try:
            mp.claim_job(self.chan, self.job_id, self.queue,
                         start_time.isoformat(), expire_time.isoformat(),
                         self.corr_id)

            # Block while we wait for a response, as in the RabbitMQ RPC example
            # doc.
            wait_start = utc.now()
            while self._claim_response is None:
                self.conn.process_data_events()
                elapsed = (utc.now() - wait_start).total_seconds()
                if elapsed > self.assignment_wait_secs:
                    logger.warning('Timed out waiting for job grant %s.' %
                                   self.job_id)
                    return False

            granted = self._claim_response == u'1'
            if granted:
                logger.info('Claimed job %s.' % self.job_id)
            else:
                logger.info('Failed to claim job %s.' % self.job_id)
        finally:
            self.chan.basic_cancel(consumer_tag)
        return granted
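_on_claim_response is registered above but not shown in the source. A plausible sketch following the RabbitMQ RPC pattern the docstring references, accepting only the reply that matches our correlation id:

    def _on_claim_response(self, channel, method, properties, body):
        # Ignore stray replies meant for other requests.
        if properties.correlation_id == self.corr_id:
            self._claim_response = body.decode('utf-8')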
Example #12
def on_pipeline_run_nack(settings, rabbit, db, data):
    logger.info(
        "Pipeline run nack {service}:{pipeline}:{run_id}".format(**data))
    run = db.query(PipelineRun).filter_by(id=data['run_id']).one()

    if data['reannounce_time'] is None:
        # If reannounce_time is None, then give up on this pipeline run.
        rtime = None
        run.ack_time = utc.now()
        run.end_time = utc.now()
    else:
        rtime = isodate.parse_datetime(data['reannounce_time'])

    # Create a new nack record.
    db.add(
        PipelineRunNack(
            pipeline_run=run,
            message=data['message'],
            reannounce_time=rtime,
        ))
Example #13
def on_pipeline_run_ack(settings, rabbit, db, data):
    logger.info(
        "Pipeline run ack {service}:{pipeline}:{run_id}".format(**data))
    run = db.query(PipelineRun).filter_by(id=data['run_id']).one()

    if run.ack_time is None:
        run.ack_time = utc.now()
        run.targets = data['targets']
        run.target_parameters = data.get('target_parameters', {})

    if run.is_ended(db):
        if run.end_time is None:
            run.end_time = utc.now()
            if run.all_targets_succeeded(db):
                run.succeeded = True
    else:
        for target in run.get_ready_targets(db):
            job = run.make_job(db, target)
            if job:
                lock_and_announce_job(db, rabbit, job)
Example #14
def main():
    settings = get_settings()

    rabbit_conn = pika.BlockingConnection(
        pika.URLParameters(settings.rabbit_url))
    rabbit = rabbit_conn.channel()
    mp.declare_exchanges(rabbit)
    queue_name = 'mettle_dispatcher'

    rabbit.queue_declare(queue=queue_name, exclusive=False, durable=True)
    rabbit.queue_bind(exchange=mp.ANNOUNCE_SERVICE_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=mp.ACK_PIPELINE_RUN_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=mp.NACK_PIPELINE_RUN_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=mp.CLAIM_JOB_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=mp.END_JOB_EXCHANGE,
                      queue=queue_name,
                      routing_key='#')
    rabbit.queue_bind(exchange=settings.dispatcher_ping_exchange,
                      queue=queue_name,
                      routing_key='timer')

    Session = make_session_cls(settings.db_url)

    for method, properties, body in rabbit.consume(queue=queue_name):
        db = Session()
        if method.exchange == mp.ANNOUNCE_SERVICE_EXCHANGE:
            on_announce_service(settings, db, json.loads(body))
        elif method.exchange == mp.ACK_PIPELINE_RUN_EXCHANGE:
            on_pipeline_run_ack(settings, rabbit, db, json.loads(body))
        elif method.exchange == mp.NACK_PIPELINE_RUN_EXCHANGE:
            on_pipeline_run_nack(settings, rabbit, db, json.loads(body))
        elif method.exchange == mp.CLAIM_JOB_EXCHANGE:
            on_job_claim(settings, rabbit, db, json.loads(body),
                         properties.correlation_id)
        elif method.exchange == mp.END_JOB_EXCHANGE:
            on_job_end(settings, rabbit, db, json.loads(body))
        # get messages from process timer restart queue
        elif method.exchange == settings.dispatcher_ping_exchange:
            db.merge(Checkin(proc_name='dispatcher', time=utc.now()))
        db.commit()
        rabbit.basic_ack(method.delivery_tag)
Example #15
    def serialize(self, expires=None):
        """Serialize the secure cookie into a string.

        If expires is provided, the session will be automatically invalidated
        after expiration when you unserialize it. This provides better
        protection against session cookie theft.

        :param expires: an optional expiration date for the cookie (a
                        :class:`datetime.datetime` object)
        """
        if self.secret_key is None:
            raise RuntimeError('no secret key defined')
        if expires:
            self['exp'] = expires
        self['iat'] = utc.now()
        return jwt.encode(self, self.secret_key, self.algorithm)
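A hedged round-trip sketch combining serialize() above with unserialize() from Example #6; the constructor call follows the cls(items, secret_key, algorithm) form used in unserialize:

cookie = JWTCookie({'user': 'alice'}, 's3cret')
token = cookie.serialize()
# unserialize() raises TokenTimestampError if the embedded 'iat' claim is
# more than expire_days old.
restored = JWTCookie.unserialize(token, 's3cret', expire_days=7)
assert restored['user'] == 'alice'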
Example #16
    def __init__(self,
                 hash_val: str,
                 keywords,
                 file_path: str,
                 create_date: datetime,
                 edit_date: datetime,
                 file_size: int,
                 num_words: int,
                 parse_date: datetime = None):
        # A default of utc.now() in the signature would be evaluated once at
        # import time, not per call; resolve it at call time instead.
        self._hash = hash_val
        self._keywords = keywords
        self._file_path = file_path
        self._parse_date = parse_date if parse_date is not None else utc.now()
        self._create_date = create_date
        self._edit_date = edit_date
        self._file_size = file_size
        self._num_words = num_words
Example #17
def _get_latest_update_result():
    result_files = glob.glob(
        os.path.join(_RESULT_FILE_DIR, _UPDATE_RESULT_FILENAME_FORMAT % '*'))
    if not result_files:
        return None

    # Filenames start with a timestamp, so the last one lexicographically is the
    # most recently created file.
    most_recent_result_file = sorted(result_files)[-1]
    with open(most_recent_result_file) as result_file:
        most_recent_result = update_result.read(result_file)

    # Ignore the result if it's too old.
    delta = utc.now() - most_recent_result.timestamp
    if delta.total_seconds() > _RECENT_UPDATE_THRESHOLD_SECONDS:
        return None

    return most_recent_result
Example #18
def setup_sentry():
    instance = current_instance()
    instance.add_tag('Status', 'deploy')
    instance.add_tag('StatusDetail', 'setup_sentry')
    run('sudo apt-get install -y build-essential python python-dev python-pip '
         'python-virtualenv python-setuptools libevent-dev supervisor nginx '
         'postgresql postgresql-server-dev-all')
    run('sudo -u postgres createuser -D -S -R sentry')
    run('sudo -u postgres createdb -O sentry sentry')
    run('sudo -u postgres psql -c '
        '"alter user sentry with encrypted password \'sentry\'"')
    put('deploy/sentry.conf.py', '/home/ubuntu/sentry.conf.py')
    run('echo -e "SENTRY_KEY = \'`openssl rand -hex 8`\'" >> '
        '/home/ubuntu/sentry.conf.py')
    run('sudo -u ubuntu mkdir -p /home/ubuntu/sentry')
    run('virtualenv --distribute /home/ubuntu/sentry')
    with cd('/home/ubuntu/sentry'):
        ve = lambda cmd: virtualenv_run(cmd, env='/home/ubuntu/sentry')
        ve('pip install psycopg2 gevent sentry nydus')
        ve('sentry --config=/home/ubuntu/sentry.conf.py upgrade --noinput')
        ve('echo "from django.contrib.auth.models import User; '
           'User.objects.create_superuser(\'sentry\', '
           '\'[email protected]\', \'sentry\')" | '
           'sentry --config=/home/ubuntu/sentry.conf.py shell')
        ve('sentry --config=/home/ubuntu/sentry.conf.py '
           'repair --owner=sentry')
        put('deploy/sentry.supervisord.conf',
            '/home/ubuntu/sentry.supervisord.conf')
        ve('sudo cp /home/ubuntu/sentry.supervisord.conf '
           '/etc/supervisor/conf.d/sentry.conf')
        ve('sudo supervisorctl update')
    # Ensure the symlink exists so the unlink just below cannot fail.
    run('sudo touch /etc/nginx/sites-enabled/sentry')
    run('sudo unlink /etc/nginx/sites-enabled/sentry')
    put('deploy/sentry.nginx.conf', '/home/ubuntu/sentry.nginx.conf')
    run('sudo cp /home/ubuntu/sentry.nginx.conf '
        '/etc/nginx/sites-available/sentry.conf')
    run('sudo ln -s /etc/nginx/sites-available/sentry.conf '
        '/etc/nginx/sites-enabled/sentry.conf')
    run('sudo /etc/init.d/nginx restart')
    instance.add_tag('Status', 'work')
    instance.remove_tag('StatusDetail')
    instance.add_tag('DeployedAt', now().strftime('%Y-%m-%d %H:%M:%S'))
Example #19
def test_read_session_cookie():
    class A(spa.Handler):
        def get(self):
            return spa.Response(self.request.environ['jwtsession']['foo'])

    routes = (
        ('/', 'a', A),
    )

    app = spa.App(routes)

    secret = 'foobar'

    app = JWTSessionMiddleware(app, secret_key=secret)

    c = Client(app, spa.Response)
    c.set_cookie('localhost', 'session', jwt.encode({'foo': 'bar',
                                                     'iat': utc.now()}, secret))
    resp = c.get('/')
    assert resp.data == b'bar'
Example #20
    def post(self, notification_id):
        n = self.db.query(Notification).filter_by(id=notification_id).one()
        if n.acknowledged_by is None:
            # TODO: check data length first so we can't be DOSed with a huge
            # payload.

            data = json.loads(self.request.get_data())
            if data.get('acknowledged') == True:
                user = self.request.session['username']
                n.acknowledged_by = user
                n.acknowledged_time = utc.now()
                print(n)
                self.db.commit()
                return redirect('/api/notifications/{id}/'.format(
                    id=notification_id), code=303)
            else:
                return BadRequest('Must include acknowledged: true to '
                                  'acknowledge a notification.')
        else:
            return BadRequest('Notification already acknowledged.')
Example #21
    def tar(self, appname, appversion):
        """
        Given an app name and version to be used in the tarball name,
        create a tar.bz2 file with all of this folder's contents inside.

        Return a Build object with attributes for appname, appversion,
        time, and path.
        """
        name_tmpl = '%(app)s-%(version)s-%(time)s.tar.bz2'
        time = utc.now()
        name = name_tmpl % {
            'app': appname,
            'version': appversion,
            'time': time.strftime('%Y-%m-%dT%H-%M')
        }

        if not os.path.exists(TARBALL_HOME):
            os.mkdir(TARBALL_HOME)
        tarball = os.path.join(TARBALL_HOME, name)
        tar_params = {'filename': tarball, 'folder': self.folder}
        tar_result = run('tar -C %(folder)s -cjf %(filename)s .' % tar_params)
        tar_result.raise_for_status()
        return Build(appname, appversion, time, tarball)
Example #22
    def create_object_from_file(self, local_path):
        return Sp4Ephemerides(local_path, 1.5e6, name_list=["QZS-4", "QZS-3"])

    
class GPSCache(EphemerisFileCache):
    
    def __init__(self):
        EphemerisFileCache.__init__(self, "norad_gps")

    def get_url(self, utc_date):
        return "http://www.celestrak.com/NORAD/elements/gps-ops.txt"

    def create_object_from_file(self, local_path):
        return Sp4Ephemerides(local_path, 1.5e6)

class GalileoCache(EphemerisFileCache):
    
    def __init__(self):
        EphemerisFileCache.__init__(self, "norad_galileo")

    def get_url(self, utc_date):
        return "http://www.celestrak.com/NORAD/elements/galileo.txt"

    def create_object_from_file(self, local_path):
        return Sp4Ephemerides(local_path, 1.5e6)

    
if __name__ == "__main__":
    cache = NORADCache()
    print(cache.get_positions(utc.now()))
Example #23
def cleanup_logs(settings, db):
    logger.info("Cleaning up old logs.")
    cutoff_time = utc.now() - timedelta(days=settings.max_log_days)
    db.query(JobLogLine).filter(
        JobLogLine.received_time<cutoff_time
    ).delete(synchronize_session=False)
Example #24
    def next_run_time(self):
        if self.chained_from:
            return self.chained_from.next_run_time()
        schedule = croniter(self.crontab, utc.now())
        return schedule.get_next(datetime.datetime)
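croniter takes a crontab expression and a start time, and get_next() returns the first scheduled time after it. A standalone illustration (the cron expression is arbitrary):

import datetime
from croniter import croniter
import utc

schedule = croniter('0 6 * * *', utc.now())   # daily at 06:00
print(schedule.get_next(datetime.datetime))   # the next 06:00 after now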
Example #25
def run_pipelines(service_name, rabbit_url, pipelines, queue_name=None):
    while True:
        try:
            # 'pipelines' is expected to be a dict mapping pipeline names
            # (keys) to pipeline classes (values).

            queue_name = queue_name or mp.service_queue_name(service_name)
            rabbit = RabbitChannel(rabbit_url, service_name, pipelines,
                                   queue_name)

            for method, properties, body in rabbit.consume(queue=queue_name):
                data = json.loads(body.decode('utf-8'))
                pipeline_name = data['pipeline']
                pipeline_cls = pipelines[pipeline_name]
                target_time = isodate.parse_datetime(data['target_time'])
                run_id = data['run_id']

                if method.exchange == mp.ANNOUNCE_PIPELINE_RUN_EXCHANGE:
                    pipeline = pipeline_cls(rabbit.conn, rabbit, service_name,
                                            pipeline_name, run_id)
                    # If it's a pipeline run announcement, then call get_targets
                    # and publish result.
                    try:
                        targets = pipeline.get_targets(target_time)
                        target_params = pipeline.get_target_parameters(
                            target_time)
                        logger.info(
                            "Acking pipeline run %s:%s:%s" %
                            (service_name, data['pipeline'], data['run_id']))
                        mp.ack_pipeline_run(rabbit, service_name,
                                            data['pipeline'],
                                            data['target_time'], run_id,
                                            targets, target_params)
                    except PipelineNack as pn:
                        logger.info(
                            "Nacking pipeline run %s:%s:%s" %
                            (service_name, data['pipeline'], data['run_id']))
                        reannounce_time = None
                        if pn.reannounce_time:
                            reannounce_time = pn.reannounce_time.isoformat()
                        mp.nack_pipeline_run(rabbit, service_name,
                                             data['pipeline'], run_id,
                                             reannounce_time, pn.message)
                elif method.exchange == '' and method.routing_key == queue_name:
                    # Job message published directly to our queue, not going
                    # through an exchange.
                    job_id = data['job_id']
                    target = data['target']
                    pipeline = pipeline_cls(rabbit.conn, rabbit, service_name,
                                            pipeline_name, run_id, target,
                                            job_id)
                    # If it's a job announcement, then publish ack, run job,
                    # then publish completion.
                    # publish ack
                    claimed = pipeline._claim_job(target_time, data['target'])

                    if claimed:
                        # WOOO!  Actually do some work here.
                        succeeded = pipeline.make_target(
                            target_time, target, data['target_parameters'])

                        mp.end_job(rabbit, service_name, data['pipeline'],
                                   data['target_time'], data['target'], job_id,
                                   utc.now().isoformat(), succeeded)
                    else:
                        logger.info('Failed to claim job %s.' % job_id)
                rabbit.basic_ack(method.delivery_tag)

        except (pika.exceptions.AMQPError, AttributeError) as e:
            if isinstance(e, AttributeError) and (
                    "'NoneType' object has no attribute 'sendall'" in str(e)
                    or "'NoneType' object has no attribute 'send'" in str(e)):

                logger.exception('Unexpected RabbitMQ exception: %s.' % str(e))
                logger.info(
                    'Connection will be re-established in %s seconds!' %
                    SLEEP_INTERVAL_ON_RABBITMQ_EXCEPTION)
                time.sleep(SLEEP_INTERVAL_ON_RABBITMQ_EXCEPTION)
            else:
                raise
Example #26
    def validate_crontab(self, key, cronspec):
        if cronspec is not None:
            # If the cronspec is not parseable, croniter will raise an
            # exception here.
            croniter(cronspec, utc.now())
        return cronspec
Example #27
    def create_object_from_file(self, local_path):
        return Sp4Ephemerides(local_path, 1.5e6)


class GalileoCache(EphemerisFileCache):
    def __init__(self):
        EphemerisFileCache.__init__(self, "norad_galileo")

    def get_url(self, utc_date):
        return "http://www.celestrak.com/NORAD/elements/galileo.txt"

    def create_object_from_file(self, local_path):
        return Sp4Ephemerides(local_path, 1.5e6)


class BeidouCache(EphemerisFileCache):
    def __init__(self):
        EphemerisFileCache.__init__(self, "norad_beidou")

    def get_url(self, utc_date):
        return "https://www.celestrak.com/NORAD/elements/beidou.txt"

    def create_object_from_file(self, local_path):
        return Sp4Ephemerides(local_path, 1.5e6)


if __name__ == "__main__":
    cache = NORADCache()
    print(cache.get_positions(utc.now()))
Example #28
def check_jobs(settings, db, rabbit):
    logger.info("Checking jobs.")

    now = utc.now()
    expired_jobs = db.query(Job).filter(
        Job.end_time==None,
        Job.expires<now,
    )

    log_cutoff_time = now - timedelta(minutes=settings.job_log_lookback_minutes)

    for job in expired_jobs:
        recent_log_lines_count = db.query(JobLogLine).filter(
            JobLogLine.job==job,
            JobLogLine.received_time>log_cutoff_time
        ).count()
        if recent_log_lines_count:
            pipeline = job.pipeline_run.pipeline
            subj = "Job %s running past expire time." % job.target
            # Note: .format() runs before dedent(); start the string on its
            # own line so dedent() can strip the common indentation.
            msg = textwrap.dedent("""\
                Job {target_time} {target}, from pipeline {pipeline} has
                passed its expire time, but is still emitting log output.
                Will let it continue.  Please consider modifying the service
                to provide a more accurate expiration time.""".format(
                target_time=job.pipeline_run.target_time.isoformat(),
                target=job.target,
                pipeline=pipeline.name,
            ))
            notify_failed_run(db, job.pipeline_run, subj, msg)
        else:
            # This expired job is no longer doing stuff.  As far as we can tell.
            job.end_time = now

            # See if we have run out of retries
            pipeline = job.pipeline_run.pipeline
            attempts_count = db.query(Job).filter_by(
                pipeline_run=job.pipeline_run,
                target=job.target).count()

            if attempts_count < pipeline.retries:
                # Make a new job
                new_job = Job(
                    pipeline_run=job.pipeline_run,
                    target=job.target,
                    target_parameters=job.target_parameters
                )
                db.add(new_job)
            else:
                # No more retries.  Send a failure notification
                subj = "Job %s out of retries" % job.target
                msg = """Job {target_time} {target}, from pipeline {pipeline} has
                passed its expire time, has not recently emitted log messages, and
                has no retries remaining.  You should look into it.""".format(
                    target_time=job.pipeline_run.target_time.isoformat(),
                    target=job.target,
                    pipeline=pipeline.name,
                )
                notify_failed_run(db, job.pipeline_run, subj, msg)

    # Handle jobs that haven't been acked.  They should be announced.  Any
    # expired ones have already been cleaned up by the time we get here.
    new_jobs = db.query(Job).filter(
        Job.start_time==None,
    )

    db.commit()
    for job in new_jobs:
        lock_and_announce_job(db, rabbit, job)
Example #29
def check_pipelines(settings, db, rabbit):
    logger.info("Checking pipelines.")

    scheduled_pipelines = db.query(Pipeline).filter(
        Pipeline.active==True,
        Pipeline.crontab!=None,
        Pipeline.chained_from==None,
    )

    # Create any needed pipeline runs for pipelines with a crontab
    now = utc.now()
    start = now - timedelta(days=settings.lookback_days)
    for pipeline in scheduled_pipelines:
        for target_time in crontimes(pipeline.crontab, start):
            if target_time < now:
                ensure_pipeline_run(db, pipeline, target_time)
            else:
                break

    # Create any needed pipeline runs for pipelines that are chained to other
    # pipelines.
    chained_pipelines = db.query(Pipeline).filter(
        Pipeline.active==True,
    ).join(Pipeline.chained_from, aliased=True).filter(
        Pipeline.active==True,
    )

    for pipeline in chained_pipelines:
        # get successful parent runs within lookback_days
        parent_runs = db.query(PipelineRun).filter(
            PipelineRun.pipeline==pipeline.chained_from,
            PipelineRun.target_time>=now - timedelta(days=settings.lookback_days),
            PipelineRun.succeeded==True,
        )

        for pr in parent_runs:
            chained_runs = db.query(PipelineRun).filter(
                PipelineRun.pipeline==pipeline,
                PipelineRun.chained_from_id==pr.id,
            )
            if chained_runs.count() == 0:
                logger.info("Chaining run of pipeline %s from %s for "
                            "target time %s" % (pipeline.name, pr.pipeline.name,
                                                pr.target_time.isoformat()))
                db.add(PipelineRun(
                    pipeline=pipeline,
                    target_time=pr.target_time,
                    started_by='timer',
                    chained_from_id=pr.id,
                ))

    db.commit()

    # Now announce all unacked pipeline runs, whether created by this timer or
    # by someone manually, or by the dispatcher (using a trigger off some other
    # run)
    unacked_runs = db.query(PipelineRun).filter(
        PipelineRun.created_time>start,
        PipelineRun.ack_time==None,
        PipelineRun.end_time==None,
    )
    for run in unacked_runs:
        # If run has previously been nacked, and we haven't reached the
        # reannounce time, then don't announce yet.
        announce_time = run.get_announce_time()
        if announce_time is None or announce_time < now:
            lock_and_announce_run(db, rabbit, run)
        else:
            logger.info("Skipping announcement for run %s until %s" % (run.id,
                                                                       announce_time))

    # Finally, check for any acked runs without an end_time, and see if they're
    # actually finished.
    unended_runs = db.query(PipelineRun).filter(
        PipelineRun.created_time>start,
        PipelineRun.ack_time!=None,
        PipelineRun.end_time==None
    )
    for run in unended_runs:
        ready_targets = run.get_ready_targets(db)
        if ready_targets:
            for target in ready_targets:
                # This job will be announced later in the check_jobs function.
                run.make_job(db, target)
        elif run.is_ended(db):
            run.end_time = now
            if run.all_targets_succeeded(db):
                run.succeeded = True
            elif run.is_failed(db):
                # Job has ended with failure (attempts reached the maximum retries allowed).
                notify_failed_run(db, run)
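crontimes() is called above but not shown in the source. A plausible sketch built on croniter, which this codebase already uses, matching how the loop consumes it as an ascending stream of datetimes:

def crontimes(crontab, start):
    # Yield successive scheduled times after 'start', in order.
    schedule = croniter(crontab, start)
    while True:
        yield schedule.get_next(datetime.datetime)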
Example #30
import sun
import location
import sky_object
import utc
import angle
import numpy as np

class SunObject(sky_object.SkyObject):
    
    def __init__(self):
        sky_object.SkyObject.__init__(self, "Sun")

    def get_az_el(self, utc_date, lat, lon, alt):
        s = sun.Sun()
        loc = location.Location(lat, lon, alt=alt)
        ra, decl = s.radec(utc_date)
        _el, _az = loc.equatorial_to_horizontal(utc_date, ra, decl)
        el, az = np.round([_el.to_degrees(), _az.to_degrees()], decimals=6)
        ret = []
        ret.append({'name': 'sun', 'r': 1e10, 'el': el, 'az': az, 'jy': 10000.0})
        return ret

if __name__ == "__main__":
    sun_obj = SunObject()
    print(sun_obj.get_az_el(utc.now(), lat=angle.from_dms(-45.86391200),
                            lon=angle.from_dms(170.51348452), alt=46.5))