Example #1
def reg():

    jobs = map(lambda x: x.func, scheduler.get_jobs())

    if regulate not in jobs:
        scheduler.schedule(
            scheduled_time=datetime.utcnow(), # Time for first execution
            func=regulate,                    # Function to be queued
            interval=600,                     # Time before the function is called again, in seconds
            repeat=None,                      # Repeat this number of times (None means repeat forever)
            result_ttl=1200                   # How long to keep the results
        )
        return jsonify({"message" : "regulated"})
    return jsonify({ "message" : "no need to regulate" })
Example #2
def reindex():
    jobs = scheduler.get_jobs()

    for job in jobs:
        if job.func == reindex_services or job.description == "ioos_catalog.views.services.reindex()":
            scheduler.cancel(job)

    scheduler.schedule(
        scheduled_time=datetime.utcnow(),  # Time for first execution
        func=reindex_services,  # Function to be queued
        interval=21600,  # Time before the function is called again, in seconds
        repeat=None,  # Repeat this number of times (None means repeat forever)
        result_ttl=40000,  # How long to keep the results
        timeout=1200  # Default timeout of 180 seconds may not be enough
    )

    return jsonify({"message": "scheduled"})
Example #3
def regulate():
    with app.app_context():

        # Get services that have not been updated in two weeks and remove them.
        # The reindex job sets the 'updated' field.  The below logic should effectively remove
        # services that the reindex task has not seen in two weeks.
        two_weeks_ago = (datetime.utcnow() - timedelta(weeks=2)).replace(tzinfo=pytz.utc)
        deletes = [s for s in db.Service.find() if s.updated.replace(tzinfo=pytz.utc).astimezone(pytz.utc) < two_weeks_ago]
        for d in deletes:
            d.cancel_ping()
            d.cancel_harvest()
            # I don't think we want to delete these.
            # Let's make deletion a manual process.
            #d.delete()
            # TODO: Now delete the stats that were collected for this service.

        # Get the list of currently scheduled jobs
        jobs = scheduler.get_jobs()

        # Make sure a daily report job is running
        daily_email_jobs = [job for job in jobs if job.func == send_daily_report_email]
        if len(daily_email_jobs) > 1:
            # Cancel all but the first daily email job
            for j in daily_email_jobs[1:]:
                scheduler.cancel(j)
        elif len(daily_email_jobs) < 1:
            # Run today at 3am (7am UTC) if it is between midnight and 3am
            runat = datetime.utcnow().replace(hour=7, minute=0, second=0, microsecond=0)
            if datetime.utcnow() > runat:
                # Run tomorrow at 3am (7am UTC) because it is already past that time.
                runat = runat + timedelta(days=1)

            scheduler.schedule(
                scheduled_time=runat,           # Time for first execution
                func=send_daily_report_email,   # Function to be queued
                interval=86400,                 # Time before the function is called again, in seconds (86400 == 1 day)
                repeat=None,                    # Repeat this number of times (None means repeat forever)
                result_ttl=100000               # How long to keep the results, in seconds
            )

        # Make sure a service update job is running
        reindex_services_jobs = [job for job in jobs if job.func == reindex_services]
        if len(reindex_services_jobs) < 1:
            scheduler.schedule(
                scheduled_time=datetime.utcnow(),  # Time for first execution
                func=reindex_services,             # Function to be queued
                interval=21600,                    # Time before the function is called again, in seconds (21600 == 1/4 of a day)
                repeat=None,                       # Repeat this number of times (None means repeat forever)
                result_ttl=40000,                  # How long to keep the results, in seconds
                timeout=1200                       # Default timeout of 180 seconds may not be enough
            )

        # Make sure each service has a ping job
        stat_jobs = [unicode(job.args[0]) for job in jobs if job.func == ping_service_task]
        # Get services that don't have jobs
        need_ping = [s for s in db.Service.find() if unicode(s._id) not in stat_jobs]
        # Schedule the ones that do not
        for s in need_ping:
            s.schedule_ping(cancel=False)

        # Make sure each service has a harvest job
        harvest_jobs = [unicode(job.args[0]) for job in jobs if job.func == harvest]
        # Get services that don't have jobs
        need_harvest = [s for s in db.Service.find() if unicode(s._id) not in harvest_jobs]
        # Schedule the ones that do not
        for s in need_harvest:
            s.schedule_harvest(cancel=False)


    return "Regulated %s reindex jobs, %s ping jobs, %s harvest jobs, and deleted %s old services" % (len(reindex_services_jobs), len(need_ping), len(need_harvest), len(deletes))
Example #4
def jobs():
    jobs = []
    for job, dt in scheduler.get_jobs(with_times=True):
        jobs.append(serialize_job(job, dt))
    return jsonify({"jobs": jobs})
Example #5
def clear_jobs():
    for job in scheduler.get_jobs():
        scheduler.cancel(job)
    return jsonify({"status": "ok"})
Example #6
def show_service(service_id):
    now = datetime.utcnow()
    week_ago = now - timedelta(days=7)

    service = db.Service.find_one({'_id': service_id})

    stats = list(
        db.Stat.find({
            'service_id': service_id,
            'created': {
                '$lte': now,
                '$gte': week_ago
            }
        }).sort('created', DESCENDING))

    avg_response_time = sum(
        [x.response_time
         for x in stats if x.response_time]) / len(stats) if len(stats) else 0

    ping_data = {'good': [], 'bad': []}
    for i, x in enumerate(reversed(stats)):
        v = {'x': i, 'y': x.response_time or 250}
        if x.operational_status:
            ping_data['good'].append(v)
            ping_data['bad'].append({'x': i, 'y': 0})
        else:
            ping_data['bad'].append(v)
            ping_data['good'].append({'x': i, 'y': 0})

    # Organize datasets by type.  Include the UID and _id of each dataset in the output so we can link to them.
    datasets = db.Dataset.aggregate([{
        '$match': {
            'services.service_id': service_id
        }
    }, {
        '$group': {
            '_id': '$services.asset_type',
            'datasets': {
                '$push': {
                    'uid': '$uid',
                    '_id': '$_id'
                }
            }
        }
    }])

    harvests = {'next': None, 'last': None}
    pings = {'next': None, 'last': None}
    for job in scheduler.get_jobs(with_times=True):
        if job[0].id == service.harvest_job_id:
            harvests['last'] = job[0].ended_at
            harvests['next'] = job[1]
        elif job[0].id == service.ping_job_id:
            if len(stats) > 0:
                pings['last'] = stats[0].created
            pings['next'] = job[1]

    return render_template('show_service.html',
                           service=service,
                           stats=stats,
                           avg_response_time=avg_response_time,
                           ping_data=ping_data,
                           datasets=datasets,
                           harvests=harvests,
                           pings=pings)
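
get_jobs(with_times=True) yields (job, scheduled_for) tuples, which is why the loop above indexes job[0] and job[1]. The same loop written with tuple unpacking, purely as an illustration:

for job, scheduled_for in scheduler.get_jobs(with_times=True):
    if job.id == service.harvest_job_id:
        harvests['last'] = job.ended_at    # when the most recent run finished
        harvests['next'] = scheduled_for   # when rq-scheduler will next enqueue it
    elif job.id == service.ping_job_id:
        if len(stats) > 0:
            pings['last'] = stats[0].created
        pings['next'] = scheduled_for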
Example #7
def get_harvest_job_id(self):
    for job in scheduler.get_jobs():
        if job.func == harvest and unicode(job.args[0]) == unicode(self._id):
            return job.id
Example #8
def get_ping_job_id(self):
    for job in scheduler.get_jobs():
        if job.func == ping_service_task and unicode(job.args[0]) == unicode(self._id):
            return job.id
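
A short usage sketch tying these lookups back to the show_service view above, which reads service.harvest_job_id and service.ping_job_id; these methods could back those attributes, but how the project actually wires them up is an assumption:

# Hypothetical usage: resolve the currently scheduled job ids for one service document.
service = db.Service.find_one({'_id': service_id})  # service_id as in show_service above
harvest_job_id = service.get_harvest_job_id()       # None if no harvest job is scheduled
ping_job_id = service.get_ping_job_id()             # None if no ping job is scheduled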