Example #1
def actions(request):
    scheduler = get_scheduler()

    if request.method == 'POST' and request.POST.get('action', False):
        # Confirmation page for selected Action
        if request.POST.get('_selected_action', False):
            context_data = {
                'action': request.POST['action'],
                'job_ids': request.POST.getlist('_selected_action'),
            }
            return render(request, 'rq_scheduler/templates/confirm_action.html', context_data)

        # Performing the actual action
        elif request.POST.get('job_ids', False):
            job_ids = request.POST.getlist('job_ids')

            if request.POST['action'] == 'delete':
                for job_id in job_ids:
                    job = Job.fetch(job_id, connection=scheduler.connection)
                    job.cancel()
                messages.info(request, 'You have successfully deleted %s jobs!' % len(job_ids))
            elif request.POST['action'] == 'enqueue':
                for job_id in job_ids:
                    job = Job.fetch(job_id, connection=scheduler.connection)
                    scheduler.enqueue_job(job)
                messages.info(request, 'You have successfully enqueued %d jobs!' % len(job_ids))

    return redirect('rq_scheduler:jobs')
Example #2
 def test_empty_removes_jobs(self):
     """Emptying a queue deletes the associated job objects"""
     q = Queue('example')
     job = q.enqueue(say_hello)
     self.assertTrue(Job.exists(job.id))
     q.empty()
     self.assertFalse(Job.exists(job.id))
Example #3
    def test_create_empty_job(self):
        """Creation of new empty jobs."""
        job = Job()
        job.description = 'test job'

        # Jobs have a random UUID and a creation date
        self.assertIsNotNone(job.id)
        self.assertIsNotNone(job.created_at)
        self.assertEqual(str(job), "<Job %s: test job>" % job.id)

        # ...and nothing else
        self.assertIsNone(job.origin)
        self.assertIsNone(job.enqueued_at)
        self.assertIsNone(job.started_at)
        self.assertIsNone(job.ended_at)
        self.assertIsNone(job.result)
        self.assertIsNone(job.exc_info)

        with self.assertRaises(ValueError):
            job.func
        with self.assertRaises(ValueError):
            job.instance
        with self.assertRaises(ValueError):
            job.args
        with self.assertRaises(ValueError):
            job.kwargs
Example #4
 def status(self):
     if Job.exists(self.task_id, connection=redis_connection):
         job = Job.fetch(self.task_id, connection=redis_connection)
         job.refresh()
         return job.status
     else:
         return "unknown"
Example #5
    def test_worker_sets_job_status(self):
        """Ensure that worker correctly sets job status."""
        q = Queue()
        w = Worker([q])

        job = q.enqueue(say_hello)
        self.assertEqual(job.get_status(), Status.QUEUED)
        self.assertEqual(job.is_queued, True)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, False)

        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), Status.FINISHED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, True)
        self.assertEqual(job.is_failed, False)

        # Failed jobs should set status to "failed"
        job = q.enqueue(div_by_zero, args=(1,))
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), Status.FAILED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, True)
Example #6
 def test_cancel(self):
     """job.cancel() deletes itself & dependents mapping from Redis."""
     job = Job.create(func=say_hello)
     job2 = Job.create(func=say_hello, depends_on=job)
     job2.register_dependency()
     job.cancel()
     self.assertFalse(self.testconn.exists(job.key))
     self.assertFalse(self.testconn.exists(job.dependents_key))
Example #7
 def message(self):
     if self.task_result is not None and self.task_result != "":
         return self.task_result
     elif Job.exists(self.task_id, connection=redis_connection):
         job = Job.fetch(self.task_id, connection=redis_connection)
         return job.meta.get("message", None)
     else:
         return "unknown"
Example #8
 def status(self):
     if self.task_result is not None and self.task_result != "":
         return self.task_result
     elif Job.exists(self.task_id, connection=redis_connection):
         job = Job.fetch(self.task_id, connection=redis_connection)
         return job.status
     else:
         return "unknown"
Example #9
 def last_progress_update(self):
     if self.task_result is not None and self.task_result != "":
         return "run complete"
     elif Job.exists(self.task_id, connection=redis_connection):
         job = Job.fetch(self.task_id, connection=redis_connection)
         return job.meta.get("updated", None)
     else:
         return "unknown"
Example #10
 def job(cls, id):
     cls.connect()
     job = Job(id)
     try:
         job.refresh()
     except Exception:
         pass
     return serialize_job(job)
Example #11
 def progress(self):
     if self.task_result is not None and self.task_result != "":
         return 100
     elif Job.exists(self.task_id, connection=redis_connection):
         job = Job.fetch(self.task_id, connection=redis_connection)
         return job.meta.get("progress", 0)
     else:
         return "unknown"
Example #12
 def test_get_job_ttl(self):
     """Getting job TTL."""
     ttl = 1
     job = Job.create(func=fixtures.say_hello, ttl=ttl)
     job.save()
     self.assertEqual(job.get_ttl(), ttl)
     job = Job.create(func=fixtures.say_hello)
     job.save()
     self.assertEqual(job.get_ttl(), None)
Example #13
    def test_data_property_sets_job_properties(self):
        """Job tuple gets derived lazily from data property."""
        job = Job()
        job.data = dumps(('foo', None, (1, 2, 3), {'bar': 'qux'}))

        self.assertEqual(job.func_name, 'foo')
        self.assertEqual(job.instance, None)
        self.assertEqual(job.args, (1, 2, 3))
        self.assertEqual(job.kwargs, {'bar': 'qux'})
Example #14
    def test_requeue_sets_status_to_queued(self):
        """Requeueing a job should set its status back to QUEUED."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))
        get_failed_queue().requeue(job.id)

        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
Example #15
    def test_data_property_sets_job_properties(self):
        """Job tuple gets derived lazily from data property."""
        job = Job()
        job.data = dumps(("foo", None, (1, 2, 3), {"bar": "qux"}))

        self.assertEqual(job.func_name, "foo")
        self.assertEqual(job.instance, None)
        self.assertEqual(job.args, (1, 2, 3))
        self.assertEqual(job.kwargs, {"bar": "qux"})
Example #16
    def test_job_properties_set_data_property(self):
        """Data property gets derived from the job tuple."""
        job = Job()
        job.func_name = 'foo'
        fname, instance, args, kwargs = loads(job.data)

        self.assertEqual(fname, job.func_name)
        self.assertEqual(instance, None)
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {})
Example #17
def retrieve_market_scrape():
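  # Fetch the finished market-scrape job recorded in sched.scrapejobid from Redis, write its
  # results into the MappedMarketResult table, then delete the job's stored result.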
  row_count = MappedMarketResult.query.count()
  #check row count to clean up market if it's over 9000 (dbz?)
  if row_count >= 9000:
    clean_up_market()
  
  #retrieve results from redis queue
  if sched.scrapejobid is None:
    print('No scrape job found')
    return
  
  job_id = sched.scrapejobid
  try:
    currentjob = Job.fetch(job_id, connection=conn)
    print('scrape job found')
  except Exception:
    print('job not available')
    sched.scrapejobid = None
    return
  
  print('found job %s ' % currentjob)
  print('for job id %s ' % job_id)
    
  if currentjob is not None:
    if currentjob.result is not None:
      marketresults = currentjob.result
      print('found market results %s ' % marketresults)
  
      #delete existing market results
         
      #cur = MappedMarketResult.query.filter_by(g_spreadsheet_id=str(session['g_spreadsheet_id']), g_worksheet_id=str(session['g_worksheet_id'])) 
    
      #[db.session.delete(c) for c in cur]  
      #db.session.commit()
      #mapped market result has [itemid, name, cards, price, amount, title, vendor, coords, date]
      
      print('adding to db')
      vals = marketresults.values()
      #flattenedvals = [item for sublist in vals for item in sublist]
      daterun = datetime.now()
      for k in marketresults.keys():
        [db.session.add(MappedMarketResult(str(mr.itemid), str(mr.name), str(mr.cards), str(mr.price), str(mr.amount), str(mr.title), str(mr.vendor), str(mr.coords), str(daterun))) for mr in marketresults[k]]
     
      db.session.commit()
      print('added to db')
      print('removing job results')
      currentjob.delete()
      print('finished deleting job results')
      
      update_guild_treasure_with_market()
  else: 
    print('current job is not ready %s' % job_id)
Example #18
 def fetch(self):
     if self.job_id:
         job_id = str(self.job_id)
         if self._enqueued_job:
             self._enqueued_job.refresh()
         else:
             connection = get_connection(self.queue)
             if RqJob.exists(job_id, connection=connection):
                 self._enqueued_job = RqJob.fetch(
                     job_id, connection=connection)
         return self._enqueued_job
Example #19
    def test_requeueing_preserves_timeout(self):
        """Requeueing preserves job timeout."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.timeout = 200
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))
        get_failed_queue().requeue(job.id)

        job = Job.fetch(job.id)
        self.assertEquals(job.timeout, 200)
Example #20
    def test_custom_meta_is_persisted(self):
        """Additional meta data on jobs are stored persisted correctly."""
        job = Job.create(func=fixtures.say_hello, args=('Lionel',))
        job.meta['foo'] = 'bar'
        job.save()

        raw_data = self.testconn.hget(job.key, 'meta')
        self.assertEqual(loads(raw_data)['foo'], 'bar')

        job2 = Job.fetch(job.id)
        self.assertEqual(job2.meta['foo'], 'bar')
Example #21
    def test_result_ttl_is_persisted(self):
        """Ensure that job's result_ttl is set properly"""
        job = Job.create(func=say_hello, args=("Lionel",), result_ttl=10)
        job.save()
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, 10)

        job = Job.create(func=say_hello, args=("Lionel",))
        job.save()
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, None)
Example #22
    def test_custom_meta_is_persisted(self):
        """Additional meta data on jobs are stored persisted correctly."""
        job = Job.create(func=say_hello, args=("Lionel",))
        job.meta["foo"] = "bar"
        job.save()

        raw_data = self.testconn.hget(job.key, "meta")
        self.assertEqual(loads(raw_data)["foo"], "bar")

        job2 = Job.fetch(job.id)
        self.assertEqual(job2.meta["foo"], "bar")
Example #23
    def test_persistence_of_empty_jobs(self):  # noqa
        """Storing empty jobs."""
        job = Job()
        job.save()

        expected_date = strip_milliseconds(job.created_at)
        stored_date = self.testconn.hget(job.key, "created_at")
        self.assertEquals(times.to_universal(stored_date), expected_date)

        # ... and no other keys are stored
        self.assertItemsEqual(self.testconn.hkeys(job.key), ["created_at"])
Example #24
    def test_store_then_fetch(self):
        """Store, then fetch."""
        job = Job.create(some_calculation, 3, 4, z=2)
        job.save()

        job2 = Job.fetch(job.id)
        self.assertEquals(job.func, job2.func)
        self.assertEquals(job.args, job2.args)
        self.assertEquals(job.kwargs, job2.kwargs)

        # Mathematical equation
        self.assertEquals(job, job2)
Example #25
    def test_data_property_sets_job_properties(self):
        """Job tuple gets derived lazily from data property."""
        job = Job()
        def foo(a,b,c,bar=''):
            pass

        job.data = dumps((dumps(foo), None, (1, 2, 3), {'bar': 'qux'}))

        self.assertEquals(job.func_name, 'tests.test_job.foo')
        self.assertEquals(job.instance, None)
        self.assertEquals(job.args, (1, 2, 3))
        self.assertEquals(job.kwargs, {'bar': 'qux'})
Example #26
    def test_store_then_fetch(self):
        """Store, then fetch."""
        job = Job.create(func=fixtures.some_calculation, args=(3, 4), kwargs=dict(z=2))
        job.save()

        job2 = Job.fetch(job.id)
        self.assertEqual(job.func, job2.func)
        self.assertEqual(job.args, job2.args)
        self.assertEqual(job.kwargs, job2.kwargs)

        # Mathematical equation
        self.assertEqual(job, job2)
Example #27
 def test_get_result_ttl(self):
     """Getting job result TTL."""
     job_result_ttl = 1
     default_ttl = 2
     job = Job.create(func=fixtures.say_hello, result_ttl=job_result_ttl)
     job.save()
     self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), job_result_ttl)
     self.assertEqual(job.get_result_ttl(), job_result_ttl)
     job = Job.create(func=fixtures.say_hello)
     job.save()
     self.assertEqual(job.get_result_ttl(default_ttl=default_ttl), default_ttl)
     self.assertEqual(job.get_result_ttl(), None)
Example #28
    def test_job_properties_set_data_property(self):
        """Data property gets derived from the job tuple."""
        job = Job()
        def foo(a,b,c,bar=''):
            pass
        job.func = foo
        func, instance, args, kwargs = loads(job.data)
        fname = "tests.test_job."+loads(func).__name__

        self.assertEquals(fname, job.func_name)
        self.assertEquals(instance, None)
        self.assertEquals(args, ())
        self.assertEquals(kwargs, {})
Example #29
    def test_result_ttl_is_persisted(self):
        """Ensure that job's result_ttl is set properly"""
        job = Job.create(connection=self.testconn, func=fixtures.say_hello,
                         args=('Lionel',), result_ttl=10)
        job.save()
        self.conn.get_job(job.id)
        self.assertEqual(job.result_ttl, 10)

        job = Job.create(connection=self.testconn, func=fixtures.say_hello,
                         args=('Lionel',))
        job.save()
        self.conn.get_job(job.id)
        self.assertEqual(job.result_ttl, None)
Example #30
    def compute(self):
        """
        Add any metadata to this object from the model run output
        """
        try:
            self.set_trackline()
        except Exception:
            app.logger.warning("Could not process trackline results.  URL may be invalid?")

        if Job.exists(self.task_id, connection=redis_connection):
            job = Job.fetch(self.task_id, connection=redis_connection)
            self.task_result = str(job.meta.get("outcome", ""))

        self.save()
Example #31
 def test_fetching_can_fail(self):
     """Fetching fails for non-existing jobs."""
     with self.assertRaises(NoSuchJobError):
         Job.fetch('b4a44d44-da16-4620-90a6-798e8cd72ca0')
Example #32
 def test_unpickleable_result(self):
     """Unpickleable job result doesn't crash job.to_dict()"""
     job = Job.create(func=fixtures.say_hello, args=('Lionel', ))
     job._result = queue.Queue()
     data = job.to_dict()
     self.assertEqual(data['result'], 'Unpickleable return value')
Example #33
def get_results(job_key):
    job = Job.fetch(job_key, connection=conn)  # look up the job by its id to get its return value
    if job.is_finished:  # check whether the job has finished
        return str(job.result), 200
    else:
        return "Wait!", 202
Example #34
def delete_all_job_view(queue_name, state=None):
    for job_id in get_all_job_ids(queue_name, state):
        Job.fetch(job_id).delete()
    return dict(status='OK')
Example #35
def fetch(job_id):
    try:
        job = Job.fetch(job_id, connection=conn)
    except:
        return "No job", 404
    return jsonify(job.is_finished)
Example #36
    def test_dependents_key_for_should_return_prefixed_job_id(self):
        """test redis key to store job dependents hash under"""
        job_id = 'random'
        key = Job.dependents_key_for(job_id=job_id)

        assert key == Job.redis_job_namespace_prefix + job_id + ':dependents'
Example #37
def updateJobMeta(jobID):
    job = Job.fetch(jobID, connection=redis)
    job.meta['ts'] = time.time()
    print(job.meta['ts'])
    print(job.meta['output'])
Example #38
def update_bash_status(bash_id, job_id, logs, rq_connection):
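    # Record the RQ job id on the Bash row, verify through the minty API that the expected
    # visualization layer exists and is newer than the job's enqueue time, push the viz status to
    # the MINT data catalog on success, merge any exc_info from the job into the stored logs, and
    # remove temporary files under /tmp.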
    from app.models import get_db_session_instance
    from rq.job import Job
    import requests
    API_UPDATE_VIZSTATUS_TO_DC = 'http://api.mint-data-catalog.org/datasets/update_dataset_viz_status'
    API_CHECK_HAS_LAYER = 'http://minty.mintviz.org/minty/has_layer/'
    API_CHECK_MINT_CHART_LAYER = 'http://minty.mintviz.org/minty/chart/'

    def update_viz_status_to_dc(dataset_id, viz_config):
        payload = {
            'dataset_id': dataset_id,
            'viz_config_id': viz_config,
            '$set': {
                'visualized': True
            }
        }
        req = requests.post(API_UPDATE_VIZSTATUS_TO_DC,
                            data=json.dumps(payload))
        if req.status_code != 200:
            return 'error'

        response = req.json()
        #print(response)
        if not isinstance(response, dict):
            return 'error'

        if 'error' in response:
            print(response['error'])
            return 'error'

        return 'success'

    def utc_to_local(utc_dt):
        from datetime import timezone
        return utc_dt.replace(tzinfo=timezone.utc).astimezone(tz=None)

    def check_has_layer(uuid2, dataset_id, viz_config, viz_type, db_session):
        req = None
        if viz_type == 'mint-chart':
            req = requests.get(API_CHECK_MINT_CHART_LAYER + uuid2)
        else:
            req = requests.get(API_CHECK_HAS_LAYER + uuid2)

        if req.status_code != 200:
            return 'Failed request to check layer,\ncannot check if minty has layer or not.\nDataset_id: %s\nViz_config: %s' % (
                dataset_id, viz_config)

        response = req.json()
        # print(response)
        if not isinstance(response, dict):
            return 'has_layer return error.\nDataset_id: %s\nViz_config: %s' % (
                dataset_id, viz_config)

        from datetime import datetime, timedelta
        if viz_type == 'mint-chart':
            if response.get('status') != None and response['status'] == 404:
                return 'Viz_type : mint-chart, cannot find the layer with this uuid.\nDataset_id: %s\nViz_config: %s' % (
                    dataset_id, viz_config)
            elif (response.get('data')
                  == None) or (response.get('data') != None
                               and len(response['data']) == 0):
                return 'Viz_type : mint-chart, the layer with this uuid return the wrong data content.\nDataset_id: %s\nViz_config: %s' % (
                    dataset_id, viz_config)

            layer_modified_at = response.get('modified_at', None)
            if layer_modified_at:
                layer_modified_at = utc_to_local(
                    datetime.strptime(layer_modified_at, '%Y-%m-%d %H:%M:%S'))
            else:
                layer_modified_at = utc_to_local(datetime.utcnow())
        else:
            if response['has'] is False:
                return 'Failed in pipeline: not generate the layer.\nDataset_id: %s\nViz_config: %s' % (
                    dataset_id, viz_config)
            layer_modified_at = find_modified_at_by_md5vector(
                uuid2, db_session)

        from rq import get_current_job

        job = get_current_job()

        job_enqueued_at = utc_to_local(
            job.enqueued_at.replace(second=0,
                                    microsecond=0)) - timedelta(minutes=10)
        layer_modified_at = layer_modified_at

        time_comparision = "\njob_enqueued_at: %s\nlayer_modified_at: %s" % (
            datetime.strftime(job_enqueued_at, '%Y-%m-%d %H:%M:%S %f %z'),
            datetime.strftime(layer_modified_at, '%Y-%m-%d %H:%M:%S %f %z'))

        if job_enqueued_at > layer_modified_at:
            return "Failed to run the command, the job enqueued_at time is later than the layer updated." + time_comparision

        return 'success'

    def find_modified_at_by_md5vector(md5vector, db_session):
        from app.models import Layer
        from sqlalchemy.orm import load_only
        layer = db_session.query(Layer).filter_by(md5=md5vector).options(
            load_only('id', 'modified_at')).first()
        if layer:
            return layer.modified_at.astimezone(tz=None)
        else:
            return utc_to_local(datetime.utcnow())

    db_session = get_db_session_instance()
    bash = db_session.query(Bash).filter_by(id=bash_id).first()
    bash.rqids = job_id
    db_session.commit()

    update_to_dc = ''
    check_layer = check_has_layer(bash.md5vector, bash.dataset_id,
                                  bash.viz_config, bash.viz_type, db_session)

    if check_layer == 'success':
        check_layer = 'Layer check success.\nDataset_id: %s\nViz_config: %s' % (
            bash.dataset_id, bash.viz_config)
        bash.status = 'success'
        if update_viz_status_to_dc(bash.dataset_id,
                                   bash.viz_config) == 'success':
            update_to_dc = 'Update viz status to data catalog success.\nDataset_id: %s\nViz_config: %s' % (
                bash.dataset_id, bash.viz_config)
        else:
            update_to_dc = 'Error in updating viz status to data catalog.\nDataset_id: %s\nViz_config: %s' % (
                bash.dataset_id, bash.viz_config)
    else:
        bash.status = 'failed'

    _j = Job.fetch(job_id, connection=rq_connection)

    if _j.exc_info:
        logs['exc_info'] = str(
            _j.exc_info) + '\n\n' + check_layer + '\n\n' + update_to_dc
    else:
        logs['exc_info'] = check_layer + '\n\n' + update_to_dc

    bash.logs = json.dumps(logs)

    db_session.commit()
    path = bash.dir
    if path == '':
        path = bash.data_file_path
    else:
        path = path + '/'

    abspath = os.path.abspath(os.path.expanduser(path)).replace('*', '')

    if abspath.startswith('/tmp/') and len(abspath) > len('/tmp/'):
        if bash.dir:
            if os.path.isdir(abspath):
                import shutil
                shutil.rmtree(abspath, ignore_errors=True)
        else:
            if os.path.exists(abspath) and not os.path.isdir(abspath):
                os.remove(abspath)
        # subprocess.run(["bash", "rm", "-rf", path])

    return bash
Example #39
def get_device_message_response(generic_asset_name_groups, duration):
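    # For each requested UDI event, look up the scheduling job on the "scheduling" queue (falling
    # back to the sensor's most recent soc event), report why a failed or unfinished job has no
    # schedule yet, and otherwise return the consumption schedule stored by the "Seita" source.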

    unit = "MW"
    planning_horizon = min(
        duration, current_app.config.get("FLEXMEASURES_PLANNING_HORIZON"))

    if not has_assets():
        current_app.logger.info("User doesn't seem to have any assets.")

    value_groups = []
    new_event_groups = []
    for event_group in generic_asset_name_groups:
        for event in event_group:

            # Parse the entity address
            try:
                ea = parse_entity_address(event,
                                          entity_type="event",
                                          fm_scheme="fm0")
            except EntityAddressException as eae:
                return invalid_domain(str(eae))
            sensor_id = ea["asset_id"]
            event_id = ea["event_id"]
            event_type = ea["event_type"]

            # Look for the Sensor object
            sensor = Sensor.query.filter(Sensor.id == sensor_id).one_or_none()
            if sensor is None or not can_access_asset(sensor):
                current_app.logger.warning(
                    "Cannot identify sensor given the event %s." % event)
                return unrecognized_connection_group()
            if sensor.generic_asset.generic_asset_type.name not in (
                    "battery",
                    "one-way_evse",
                    "two-way_evse",
            ):
                return invalid_domain(
                    f"API version 1.3 only supports device messages for batteries and Electric Vehicle Supply Equipment (EVSE). "
                    f"Sensor ID:{sensor_id} does not belong to a battery or EVSE, but {p.a(sensor.generic_asset.generic_asset_type.description)}."
                )

            # Use the event_id to look up the schedule start
            if event_type not in ("soc", "soc-with-targets"):
                return unrecognized_event_type(event_type)
            connection = current_app.queues["scheduling"].connection
            try:  # First try the scheduling queue
                job = Job.fetch(event, connection=connection)
            except NoSuchJobError:  # Then try the most recent event_id (stored as a generic asset attribute)
                if event_id == sensor.generic_asset.get_attribute(
                        "soc_udi_event_id"):
                    schedule_start = datetime.fromisoformat(
                        sensor.generic_asset.get_attribute("soc_datetime"))
                    message = (
                        "Your UDI event is the most recent event for this device, but "
                    )
                else:
                    return unrecognized_event(event_id, event_type)
            else:
                if job.is_finished:
                    message = "A scheduling job has been processed based on your UDI event, but "
                elif job.is_failed:  # Try to inform the user on why the job failed
                    e = job.meta.get(
                        "exception",
                        Exception(
                            "The job does not state why it failed. "
                            "The worker may be missing an exception handler, "
                            "or its exception handler is not storing the exception as job meta data."
                        ),
                    )
                    return unknown_schedule(
                        f"Scheduling job failed with {type(e).__name__}: {e}")
                elif job.is_started:
                    return unknown_schedule("Scheduling job in progress.")
                elif job.is_queued:
                    return unknown_schedule(
                        "Scheduling job waiting to be processed.")
                elif job.is_deferred:
                    try:
                        preferred_job = job.dependency
                    except NoSuchJobError:
                        return unknown_schedule(
                            "Scheduling job waiting for unknown job to be processed."
                        )
                    return unknown_schedule(
                        f'Scheduling job waiting for {preferred_job.status} job "{preferred_job.id}" to be processed.'
                    )
                else:
                    return unknown_schedule(
                        "Scheduling job has an unknown status.")
                schedule_start = job.kwargs["start"]

            schedule_data_source_name = "Seita"
            scheduler_source = DataSource.query.filter_by(
                name="Seita", type="scheduling script").one_or_none()
            if scheduler_source is None:
                return unknown_schedule(
                    message +
                    f'no data is known from "{schedule_data_source_name}".')

            power_values = sensor.search_beliefs(
                event_starts_after=schedule_start,
                event_ends_before=schedule_start + planning_horizon,
                source=scheduler_source,
                most_recent_beliefs_only=True,
                one_deterministic_belief_per_event=True,
            )
            # For consumption schedules, positive values denote consumption. For the db, consumption is negative
            consumption_schedule = -simplify_index(power_values)["event_value"]
            if consumption_schedule.empty:
                return unknown_schedule(
                    message + "the schedule was not found in the database.")

            # Update the planning window
            resolution = sensor.event_resolution
            start = consumption_schedule.index[0]
            duration = min(duration,
                           consumption_schedule.index[-1] + resolution - start)
            consumption_schedule = consumption_schedule[start:start +
                                                        duration - resolution]
            value_groups.append(consumption_schedule.tolist())
            new_event_groups.append(event)

    response = groups_to_dict(new_event_groups,
                              value_groups,
                              generic_asset_type_name="event")
    response["start"] = isodate.datetime_isoformat(start)
    response["duration"] = isodate.duration_isoformat(duration)
    response["unit"] = unit

    d, s = request_processed()
    return dict(**response, **d), s
Example #40
#!/usr/bin/python
import rq
from rq.job import Job
from rq import registry
from redis import StrictRedis

REDIS_HOST = '172.17.0.1'
REDIS_PORT = '6379'

CON = StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
REG = registry.FinishedJobRegistry('default', connection=CON)
JOBS = REG.get_job_ids()

for job_num in JOBS:

    job = Job.fetch(job_num, connection=CON)
    start = job.started_at
    finish = job.ended_at
    duration = finish - start
    print "job number: ", job_num
    print "job function name: ", job.func_name
    print "job duration: ", duration.total_seconds()
    print "job status: ", job.status
    print "job result: ", job.result
Example #41
def getResults(job_key):
    job = Job.fetch(job_key, connection=conn)
    if job.is_finished:
        return str(job.result), 200
    else:
        return "", 202
Example #42
def cancel_all_queued_jobs(queues):
    for queue in queues:
        job_ids = get_queue(queue).get_job_ids()
        for job_id in job_ids:
            Job.fetch(job_id).cancel()
Example #43
def allocate_resources_and_queue(command,
                                 name=None,
                                 depends_on=None,
                                 num_gpus=0,
                                 blocking=False,
                                 redis_path='redis.sock'):
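    # Reserve GPUs before enqueueing: poll GPUtil for idle devices, exclude GPUs already claimed
    # in the meta of running 'gpu'/'blocking' jobs, and retry every 30 seconds until num_gpus can
    # be allocated; the chosen devices are exported via CUDA_VISIBLE_DEVICES for the command.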
    with Connection(Redis(unix_socket_path=redis_path)):
        _queue = Queue('blocking') if blocking else None
        allocated_gpus = ''
        meta = {}

        not_allocated = True

        if _queue is None:
            if num_gpus > 0:
                _queue = Queue('gpu')
            else:
                _queue = Queue('cpu')

        while not_allocated:
            if num_gpus > 0:
                available_gpus = GPUtil.getAvailable(order='first',
                                                     limit=1000,
                                                     maxLoad=0.05,
                                                     maxMemory=0.05,
                                                     includeNan=False,
                                                     excludeID=[],
                                                     excludeUUID=[])

                q_gpu = Queue('gpu')
                q_blocking = Queue('blocking')

                registry = q_gpu.started_job_registry
                blocking_registry = q_blocking.started_job_registry
                other_job_gpus = []
                for job_id in registry.get_job_ids(
                ) + blocking_registry.get_job_ids():
                    _job = Job.fetch(job_id)
                    if 'gpu' in _job.meta:
                        if isinstance(_job.meta['gpu'], list):
                            other_job_gpus.extend(_job.meta['gpu'])
                        else:
                            other_job_gpus.append(_job.meta['gpu'])

                available_gpus = [
                    a for a in available_gpus if a not in other_job_gpus
                ]

                if len(available_gpus) >= num_gpus:
                    available_gpus = available_gpus[:num_gpus]
                    allocated_gpus = ','.join(map(str, available_gpus))
                    meta = {'gpu': available_gpus}
                    not_allocated = False
                else:
                    logging.info(
                        'Unable to allocate resources...Trying again in 30 seconds.'
                    )
                    sleep(30)
            else:
                not_allocated = False

        command = f'export CUDA_VISIBLE_DEVICES={allocated_gpus} && {command}'
        _queue.enqueue(subprocess.check_call, [command],
                       shell=True,
                       job_id=name,
                       depends_on=depends_on,
                       meta=meta,
                       timeout=timeout,
                       result_ttl=timeout)
Example #44
def get_final_loaded_data(load_id):
    job = Job.fetch(load_id['id'], connection=conn)
    total_df = job.result
    return total_df
Example #45
 def test_enqueue_job_async_status_finished(self):
     queue = Queue(is_async=False)
     job = Job.create(func=fixtures.say_hello)
     job = queue.enqueue_job(job)
     self.assertEqual(job.result, 'Hi there, Stranger!')
     self.assertEqual(job.get_status(), JobStatus.FINISHED)
Example #46
    def __get_jobs_descriptions(self, jobs):
        """ Jobs (message) descriptions search method """

        jobs = Job.fetch_many(jobs, connection=self.redis_conn)
        return [job.description for job in jobs]
Example #47
    def test_job_delete_removes_itself_from_registries(self):
        """job.delete() should remove itself from job registries"""
        job = Job.create(func=fixtures.say_hello,
                         status=JobStatus.FAILED,
                         connection=self.testconn,
                         origin='default',
                         serializer=JSONSerializer)
        job.save()
        registry = FailedJobRegistry(connection=self.testconn,
                                     serializer=JSONSerializer)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello,
                         status=JobStatus.FINISHED,
                         connection=self.testconn,
                         origin='default',
                         serializer=JSONSerializer)
        job.save()

        registry = FinishedJobRegistry(connection=self.testconn,
                                       serializer=JSONSerializer)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello,
                         status=JobStatus.STARTED,
                         connection=self.testconn,
                         origin='default',
                         serializer=JSONSerializer)
        job.save()

        registry = StartedJobRegistry(connection=self.testconn,
                                      serializer=JSONSerializer)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello,
                         status=JobStatus.DEFERRED,
                         connection=self.testconn,
                         origin='default',
                         serializer=JSONSerializer)
        job.save()

        registry = DeferredJobRegistry(connection=self.testconn,
                                       serializer=JSONSerializer)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)

        job = Job.create(func=fixtures.say_hello,
                         status=JobStatus.SCHEDULED,
                         connection=self.testconn,
                         origin='default',
                         serializer=JSONSerializer)
        job.save()

        registry = ScheduledJobRegistry(connection=self.testconn,
                                        serializer=JSONSerializer)
        registry.add(job, 500)

        job.delete()
        self.assertFalse(job in registry)
Example #48
 def _add_exception(j: Job, e: Optional[BaseException]):
     j.meta.setdefault(RqExcMan.EXC_META_KEY, []).append(e)
     j.save()
Example #49
    def test_key_for_should_return_prefixed_job_id(self):
        """test redis key to store job hash under"""
        job_id = 'random'
        key = Job.key_for(job_id=job_id)

        assert key == (Job.redis_job_namespace_prefix + job_id).encode('utf-8')
Example #50
    def test_dependencies_key_should_have_prefixed_job_id(self):
        job_id = 'random'
        job = Job(id=job_id)
        expected_key = Job.redis_job_namespace_prefix + ":" + job_id + ':dependencies'

        assert job.dependencies_key == expected_key
Example #51
 def lemmatization_job(self):
     try:
         return Job.fetch(self.current_job, get_connection())
     except NoSuchJobError:
         pass
Example #52
def fetch(id):
    j = Job.fetch(str(id), connection=r)
    return j
Example #53
def assignment_info(assignment_id, login):
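    # Summarise a student's submission for the front end: the id and queue position of any pending
    # grading job, recorded due dates and commits, and commit messages read from the local repo.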
    student_id = db.getStudentID(login.lower())

    try:
        assignment = db.getAssignment(ASSIGNMENTS[assignment_id]["canvas_id"])
    except:
        return "Assignment not found!", 404

    # subject to change
    canvas_assignment_id, unlock_date, initial_due_date, revision_due_date = assignment

    if not datetime.date.today() >= datetime.datetime.strptime(
            unlock_date, "%Y-%m-%d").date():
        return "assignment not unlocked yet", 400

    submission = db.getSubmission(student_id,
                                  ASSIGNMENTS[assignment_id]["canvas_id"])

    if submission is None:
        submission = db.addSubmission(student_id,
                                      ASSIGNMENTS[assignment_id]["canvas_id"])

    # subject to change
    _, _, test_commit, job_id, initial_date, revision_date, initial_commit, revision_commit = submission
    test_comment = None
    initial_comment = None
    revision_comment = None
    job_position = None

    if job_id:
        try:
            job = Job.fetch(job_id, connection=conn)
        except:
            job = None
        if job:
            if job.is_finished: job_id = None
            else:
                jobs = q.get_jobs()
                if not job in jobs:
                    job_id = None
                else:
                    try:
                        job_position = jobs.index(job)
                    except ValueError:
                        job_position = 0
        else:
            job_id = None

    user_dir = "%s/%s" % (REPOS_DIR, login)
    tests_dir = "%s/%s/student-tests" % (REPOS_DIR, login)
    repo_dir = "%s/%s/submission" % (REPOS_DIR, login)

    if os.path.isdir(repo_dir):
        try:
            repo = git.Repo(repo_dir, search_parent_directories=True)
        except:
            pass
        else:
            if test_commit:
                try:
                    test_comment = repo.commit(test_commit).message.strip()
                except:
                    test_comment = None
            if initial_commit:
                try:
                    initial_comment = repo.commit(
                        initial_commit).message.strip()
                except:
                    initial_comment = None
            if revision_commit:
                try:
                    revision_comment = repo.commit(
                        revision_commit).message.strip()
                except:
                    revision_comment = None

    return jsonify({
        'job_id':
        job_id,
        'job_position':
        job_position,
        'file_exists':
        os.path.exists(f'{tests_dir}/{assignment_id}.html'),
        'test_commit':
        test_commit,
        'test_comment':
        test_comment,
        'initial_due_date':
        initial_due_date,
        'initial_date':
        initial_date,
        'initial_commit':
        initial_commit,
        'initial_comment':
        initial_comment,
        'revision_due_date':
        revision_due_date,
        'revision_date':
        revision_date,
        'revision_commit':
        revision_commit,
        'revision_comment':
        revision_comment,
    })
Example #54
def create_update_cache_job(queue, instance, keys, decrement=1):
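    # Chain cache-update jobs per instance: WATCH the key holding the last job id so the
    # read-modify-write below retries on concurrent changes (WatchError). Depending on the status
    # of that previous job, the new job is enqueued directly, deferred behind it, or skipped after
    # merging its parameters into the still-pending job.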
    queue.connection.sadd(queue.redis_queues_keys, queue.key)
    job_wrapper = JobWrapper.create(update_cache_job,
                                    instance=instance,
                                    keys=keys,
                                    decrement=decrement,
                                    connection=queue.connection,
                                    origin=queue.name,
                                    timeout=queue.DEFAULT_TIMEOUT)
    last_job_key = instance.get_last_job_key()

    with queue.connection.pipeline() as pipe:
        while True:
            try:
                pipe.watch(last_job_key)
                last_job_id = queue.connection.get(last_job_key)
                depends_on_wrapper = None
                if last_job_id is not None:
                    pipe.watch(Job.key_for(last_job_id),
                               JobWrapper.params_key_for(last_job_id))
                    depends_on_wrapper = JobWrapper(last_job_id, queue.connection)

                pipe.multi()

                depends_on_status = None
                if depends_on_wrapper is not None:
                    depends_on = depends_on_wrapper.job
                    depends_on_status = depends_on.get_status()

                if depends_on_status is None:
                    # enqueue without dependencies
                    pipe.set(last_job_key, job_wrapper.id)
                    job_wrapper.save_enqueued(pipe)
                    pipe.execute()
                    break

                if depends_on_status in [JobStatus.QUEUED,
                                         JobStatus.DEFERRED]:
                    new_job_params = \
                        depends_on_wrapper.merge_job_params(keys, decrement,
                                                            pipeline=pipe)
                    pipe.execute()
                    msg = 'SKIP %s (decrement=%s, job_status=%s, job_id=%s)'
                    msg = msg % (last_job_key, new_job_params[1],
                                 depends_on_status, last_job_id)
                    logger.debug(msg)
                    # skip this job
                    return None

                pipe.set(last_job_key, job_wrapper.id)

                if depends_on_status not in [JobStatus.FINISHED]:
                    # add job as a dependent
                    job = job_wrapper.save_deferred(last_job_id, pipe)
                    pipe.execute()
                    logger.debug('ADD AS DEPENDENT for %s (job_id=%s) OF %s' %
                                 (last_job_key, job.id, last_job_id))
                    return job

                job_wrapper.save_enqueued(pipe)
                pipe.execute()
                break
            except WatchError:
                logger.debug('RETRY after WatchError for %s' % last_job_key)
                continue
    logger.debug('ENQUEUE %s (job_id=%s)' % (last_job_key, job_wrapper.id))

    queue.push_job_id(job_wrapper.id)
Example #55
def test_assignment(assignment_id, login):
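    # Enqueue a grading run for the student's latest commit, unless the same commit was already
    # tested or a previous test job is still running; the new RQ job id is stored in the database.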
    # return "Server down for maintainence.", 500
    previous_failed = False

    student_id = db.getStudentID(login.lower())

    db.addSubmission(student_id,
                     ASSIGNMENTS[assignment_id]["canvas_id"])  # just in case
    job_id = db.getJob(student_id, ASSIGNMENTS[assignment_id]["canvas_id"])
    if job_id:
        try:
            job = Job.fetch(job_id, connection=conn)
        except:
            job = None
        if job:
            if job.is_failed:
                previous_failed = True
            elif not job.is_finished:
                return "Test already running.", 300

    user_dir = "%s/%s" % (REPOS_DIR, login)
    repo_dir = "%s/submission" % user_dir
    tests_dir = "%s/student-tests" % user_dir
    assignment_name = ASSIGNMENTS[assignment_id]["folder_name"]

    _commit = _get_last_commit(assignment_id, login)
    commit = _commit['commit_id']
    commit_comment = _commit['commit_comment']
    head_commit = _commit['head_commit']
    last_commit = db.getCommit(student_id,
                               ASSIGNMENTS[assignment_id]["canvas_id"])

    if not head_commit:
        return f"You haven't made any commits to your repository.", 404
    if not commit:
        return f"No files found in the {assignment_id} folder in your repository.", 404

    if not previous_failed and str(last_commit) == str(head_commit):
        return "You haven't pushed any commits since your last test.", 300
    elif not previous_failed and str(last_commit) == str(commit):
        return f"None of your commits since your last test have affected {assignment_name}.", 300
    db.setCommit(student_id, ASSIGNMENTS[assignment_id]["canvas_id"], commit)

    github_link = f'https://github.iu.edu/csci-b351-sp19/{login}-submission/tree/{commit}/{assignment_name}'
    job = q.enqueue_call(
        func=runTest,
        args=(assignment_id,
              f'{repo_dir}/{assignment_name}/' if assignment_name != "a2" else
              f'{repo_dir}/{assignment_name}/code',
              f'{tests_dir}/{assignment_name}.html', github_link,
              GRADING_TOOLS_DIR),
        result_ttl=86400,
        timeout=600)
    if job:
        job_id = job.get_id()
        if not job.is_finished: job_position = q.count
        else: job_position = None
    else:
        return 'Error starting test.', 500

    db.setJob(student_id, ASSIGNMENTS[assignment_id]["canvas_id"], job_id)

    return jsonify({
        'file_exists': False,
        'job_id': job_id,
        'job_position': job_position,
        'test_commit': commit,
        'test_comment': commit_comment
    }), 200
Example #56
 def test_persistence_of_empty_jobs(self):  # noqa
     """Storing empty jobs."""
     job = Job()
     with self.assertRaises(ValueError):
         job.save()
Example #57
    def run(self,
            cli_args,
            target_func,
            single_thread=False,
            gpu_id=None,
            queue_ttl=-1,
            result_ttl=-1,
            log_level=logging.INFO,
            job_timeout=86400,
            poll=False,
            poll_interval=5,
            requeue=False,
            max_attempts=3):
        """
        Run an Experiment using Redis Queue
        :param cli_args: Arguments that override trial default arguments
        :param target_func: Function (in its own module) to run for each Trial
        :param single_thread: If True, run trials in current thread
        :param gpu_id: GPU to use in single thread mode
        :param queue_ttl: Maximum time (in seconds) a Trial may stay in queue
        :param log_level: Logging level (for messages)
        :param result_ttl: Maximum time (in seconds) for any results to be stored in queue
        :param job_timeout: Maximum time (in seconds) a job may run before being terminated
        :param poll: If True, wait for jobs to complete
        :param poll_interval: The time (in seconds) to sleep between checks when polling
        :param requeue: If True, re-queue previously completed jobs
        :param max_attempts: Maximum number of attempts to re-run failed jobs
        :return:
        """
        logging.getLogger().setLevel(log_level)

        if requeue is False:
            trial_status = self.get_trial_status()
            finished = sum(1 for status in trial_status.values()
                           if status == JobStatus.FINISHED)
            remaining = len(self.trials) - finished
            if remaining == 0:
                logging.info("Experiment completed!")
                return

        func_target = target_func
        func_module = os.path.abspath(target_func.__module__)
        func_description = f"{func_module}|{func_target.__name__}"

        for trial in self.trials:
            job_id = self.make_job_id(self.name, trial.name)
            runtime_args = trial.deepcopy_and_modify_args(cli_args)

            # Don't do any redis job tracking if running locally
            if single_thread:
                logging.info("Running job %s locally", job_id)
                # noinspection PyBroadException
                try:
                    if torch.cuda.is_available() and gpu_id is not None:
                        with torch.cuda.device(gpu_id):
                            _ = target_func(runtime_args)
                    else:
                        _ = target_func(runtime_args)
                    logging.info("Completed job %s", job_id)
                except KeyboardInterrupt:
                    logging.error('Caught interrupt, exiting')
                    exit(0)
                except Exception:  # super broad, but maybe reasonable in this case...
                    logging.error("Job %s failed", job_id, exc_info=True)
                continue

            job_meta = {"attempts": 0, "max_attempts": max_attempts}

            # Don"t queued jobs that are queued or running
            # TODO slack message with collective job submit status
            try:
                job = Job.fetch(job_id, connection=self.redis_conn)
                status = job.get_status()
                attempts = job.meta.get("attempts", 0)
                if attempts > 0:
                    job_meta["attempts"] = attempts
                if requeue and (status == JobStatus.FINISHED):
                    # Remove old job if it was previously completed
                    logging.info("Removing completed job %s", job_id)
                    job.delete()
                if status in (JobStatus.QUEUED, JobStatus.DEFERRED,
                              JobStatus.STARTED):
                    logging.info("Experiment %s: trial %s already queued",
                                 self.name, trial.name)
                    self.redis_conn.hset(self.name, trial.name, status)
                    # TODO what about "zombie" jobs? Can they exist? Check TTL?
                    continue
                elif status == JobStatus.FAILED:
                    if attempts < job_meta["max_attempts"]:
                        self.failed_queue.requeue(job_id)
                        logging.info(
                            "Job %s has failed on %s of %s attempts, re-queued",
                            job_id, attempts, job_meta["max_attempts"])
                        continue
                    else:
                        if requeue:
                            job.meta["attempts"] = 0
                            job.save_meta()
                            self.failed_queue.requeue(job_id)
                            logging.info("Re-queued previously failed job %s",
                                         job_id)
                        else:
                            logging.error(
                                "Job %s failed %s of %s times, skipping",
                                job_id, attempts, job_meta["max_attempts"])
                        continue

            except NoSuchJobError:
                pass

            _ = self.job_queue.enqueue(func_description,
                                       runtime_args,
                                       ttl=queue_ttl,
                                       result_ttl=result_ttl,
                                       timeout=job_timeout,
                                       job_id=job_id,
                                       meta=job_meta)
            self.redis_conn.hset(self.name, trial.name, JobStatus.QUEUED)
            logging.info("Queued job %s", job_id)

        if poll and (single_thread is False):
            self.wait_for_jobs(poll_interval)
Example #58
def manage_run_results(job_id):
    return Job.fetch(job_id, connection=sjs.get_redis_conn()).result
Example #59
    def get_schedule(self, sensor: Sensor, job_id: str, duration: timedelta,
                     **kwargs):
        """Get a schedule from FlexMeasures.

        .. :quickref: Schedule; Download schedule from the platform

        **Optional fields**

        - "duration" (6 hours by default; can be increased to plan further into the future)

        **Example response**

        This message contains a schedule indicating to consume at various power
        rates from 10am UTC onwards for a duration of 45 minutes.

        .. sourcecode:: json

            {
                "values": [
                    2.15,
                    3,
                    2
                ],
                "start": "2015-06-02T10:00:00+00:00",
                "duration": "PT45M",
                "unit": "MW"
            }

        :reqheader Authorization: The authentication token
        :reqheader Content-Type: application/json
        :resheader Content-Type: application/json
        :status 200: PROCESSED
        :status 400: INVALID_TIMEZONE, INVALID_DOMAIN, INVALID_UNIT, UNKNOWN_SCHEDULE, UNRECOGNIZED_CONNECTION_GROUP
        :status 401: UNAUTHORIZED
        :status 403: INVALID_SENDER
        :status 405: INVALID_METHOD
        :status 422: UNPROCESSABLE_ENTITY
        """

        planning_horizon = min(  # type: ignore
            duration, current_app.config.get("FLEXMEASURES_PLANNING_HORIZON"))

        # Look up the scheduling job
        connection = current_app.queues["scheduling"].connection
        try:  # First try the scheduling queue
            job = Job.fetch(job_id, connection=connection)
        except NoSuchJobError:
            return unrecognized_event(job_id, "job")
        if job.is_finished:
            error_message = "A scheduling job has been processed with your job ID, but "
        elif job.is_failed:  # Try to inform the user on why the job failed
            e = job.meta.get(
                "exception",
                Exception(
                    "The job does not state why it failed. "
                    "The worker may be missing an exception handler, "
                    "or its exception handler is not storing the exception as job meta data."
                ),
            )
            return unknown_schedule(
                f"Scheduling job failed with {type(e).__name__}: {e}")
        elif job.is_started:
            return unknown_schedule("Scheduling job in progress.")
        elif job.is_queued:
            return unknown_schedule("Scheduling job waiting to be processed.")
        elif job.is_deferred:
            try:
                preferred_job = job.dependency
            except NoSuchJobError:
                return unknown_schedule(
                    "Scheduling job waiting for unknown job to be processed.")
            return unknown_schedule(
                f'Scheduling job waiting for {preferred_job.status} job "{preferred_job.id}" to be processed.'
            )
        else:
            return unknown_schedule("Scheduling job has an unknown status.")
        schedule_start = job.kwargs["start"]

        schedule_data_source_name = "Seita"
        scheduler_source = DataSource.query.filter_by(
            name="Seita", type="scheduling script").one_or_none()
        if scheduler_source is None:
            return unknown_schedule(
                error_message +
                f'no data is known from "{schedule_data_source_name}".')

        power_values = sensor.search_beliefs(
            event_starts_after=schedule_start,
            event_ends_before=schedule_start + planning_horizon,
            source=scheduler_source,
            most_recent_beliefs_only=True,
            one_deterministic_belief_per_event=True,
        )
        # For consumption schedules, positive values denote consumption. For the db, consumption is negative
        consumption_schedule = -simplify_index(power_values)["event_value"]
        if consumption_schedule.empty:
            return unknown_schedule(
                error_message + "the schedule was not found in the database.")

        # Update the planning window
        resolution = sensor.event_resolution
        start = consumption_schedule.index[0]
        duration = min(duration,
                       consumption_schedule.index[-1] + resolution - start)
        consumption_schedule = consumption_schedule[start:start + duration -
                                                    resolution]
        response = dict(
            values=consumption_schedule.tolist(),
            start=isodate.datetime_isoformat(start),
            duration=isodate.duration_isoformat(duration),
            unit=sensor.unit,
        )

        d, s = request_processed()
        return dict(**response, **d), s
Example #60
def check_status(id):
    try:
        job = Job.fetch(id, connection=redis)
        return job.get_status()
    except NoSuchJobError:
        return 'There was no such job'