Example #1
    def enqueue_job(self, job):
        """
        Move a scheduled job to a queue. In addition, it puts the job
        back into the scheduler if needed.
        """
        self.log.debug('Pushing {0} to {1}'.format(job.id, job.origin))

        interval = job.meta.get('interval', None)
        repeat = job.meta.get('repeat', None)

        # If job is a repeated job, decrement counter
        if repeat:
            job.meta['repeat'] = int(repeat) - 1
        job.enqueued_at = times.now()
        job.save()

        queue = self.get_queue_for_job(job)
        queue.push_job_id(job.id)
        self.connection.zrem(self.scheduled_jobs_key, job.id)

        if interval:
            # If this is a repeat job and counter has reached 0, don't repeat
            if repeat is not None:
                if job.meta['repeat'] == 0:
                    return
            self.connection._zadd(self.scheduled_jobs_key,
                                  times.to_unix(times.now()) + int(interval),
                                  job.id)
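A minimal usage sketch of the scheduler path above, assuming the rq-scheduler style API exercised by the tests further down this page (scheduler.schedule() storing interval and repeat in job.meta). say_hello is a placeholder task; in real use the function must live in a module the worker can import.

import times
from redis import Redis
from rq_scheduler import Scheduler

def say_hello():
    print('hello')

scheduler = Scheduler(connection=Redis())

# Run say_hello now, then every 10 seconds, three times in total.
job = scheduler.schedule(times.now(), say_hello, interval=10, repeat=3)

# Each scheduler pass calls enqueue_job(): the job id is pushed onto its queue,
# meta['repeat'] is decremented, and the job is re-added to the scheduled_jobs_key
# sorted set with a score of now + interval until the repeat counter reaches zero.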
Example #2
    def perform_job(self, job, queue, heartbeat_ttl=None):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.prepare_job_execution(job, heartbeat_ttl)

        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        try:
            job.started_at = times.now()

            # I have DISABLED the time limit!
            rv = job.perform()

            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            job._result = rv
            job._status = rq.job.JobStatus.FINISHED
            job.ended_at = times.now()

            #
            # Using the code from Worker.handle_job_success
            #
            with self.connection.pipeline() as pipeline:
                pipeline.watch(job.dependents_key)
                queue.enqueue_dependents(job, pipeline=pipeline)

                self.set_current_job_id(None, pipeline=pipeline)
                self.increment_successful_job_count(pipeline=pipeline)

                result_ttl = job.get_result_ttl(self.default_result_ttl)
                if result_ttl != 0:
                    job.save(pipeline=pipeline, include_meta=False)

                job.cleanup(result_ttl, pipeline=pipeline,
                            remove_from_queue=False)

                pipeline.execute()

        except:
            # Use the public setter here, to immediately update Redis
            job.status = rq.job.JobStatus.FAILED
            self.handle_exception(job, *sys.exc_info())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (rq.worker.yellow(rq.compat.text_type(rv)),))

        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')

        return True
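The result_ttl branches above follow rq's convention: 0 discards the result right away, a positive value keeps it for that many seconds, and a negative value keeps it forever. A hedged sketch of how a caller would pick that value when enqueuing, assuming plain rq's Queue.enqueue() with the result_ttl keyword; add() is a placeholder that must be importable by the worker.

from redis import Redis
from rq import Queue

def add(x, y):
    return x + y

q = Queue(connection=Redis())

q.enqueue(add, 2, 3, result_ttl=500)  # keep the return value for 500 seconds
q.enqueue(add, 2, 3, result_ttl=0)    # discard the result immediately
q.enqueue(add, 2, 3, result_ttl=-1)   # never expire; clean up the key manually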
Example #3
    def enqueue_job(self, job):
        """
        Move a scheduled job to a queue. In addition, it puts the job
        back into the scheduler if needed.
        """
        self.log.debug('Pushing {0} to {1}'.format(job.id, job.origin))

        interval = job.meta.get('interval', None)
        repeat = job.meta.get('repeat', None)

        # If job is a repeated job, decrement counter
        if repeat:
            job.meta['repeat'] = int(repeat) - 1
        job.enqueued_at = times.now()
        job.save()

        queue = self.get_queue_for_job(job)
        queue.push_job_id(job.id)
        self.connection.zrem(self.scheduled_jobs_key, job.id)

        if interval:
            # If this is a repeat job and counter has reached 0, don't repeat
            if repeat is not None:
                if job.meta['repeat'] == 0:
                    return
            self.connection._zadd(self.scheduled_jobs_key,
                                  times.to_unix(times.now()) + int(interval),
                                  job.id)
Example #4
    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        try:
            with death_penalty_after(job.timeout or Queue.DEFAULT_TIMEOUT):
                job.started_at = times.now()
                rv = job.perform()

            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            job._result = rv
            job._status = Status.FINISHED
            job.ended_at = times.now()

            keys = self.connection.hgetall(job._annotations)
            p = self.connection.pipeline()
            ingress = int(keys['ingress'])
            egress = int(keys['egress'])
            mean = float(keys['mean'])
            delta = job.ended_at - job.started_at
            delta = delta.seconds + (delta.microseconds / 1e6)
            mean = (mean + delta) / (max(1, ingress + egress))
            p.hincrby(job._annotations, 'ingress', amount=-1)
            p.hincrby(job._annotations, 'egress',  amount=1)
            p.hset(job._annotations, 'mean', mean)
            p.execute()

            result_ttl = job.get_ttl(self.default_result_ttl)
            pipeline = self.connection._pipeline()
            if result_ttl != 0:
                job.save(pipeline=pipeline)
            job.cleanup(result_ttl, pipeline=pipeline)
            pipeline.execute()

        except:
            # Use the public setter here, to immediately update Redis
            job.status = Status.FAILED
            self.handle_exception(job, *sys.exc_info())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(text_type(rv)),))

        if result_ttl == 0:
            self.log.info('Result discarded immediately.')
        elif result_ttl > 0:
            self.log.info('Result is kept for %d seconds.' % result_ttl)
        else:
            self.log.warning('Result will never expire, clean up result key manually.')

        return True
Example #5
def create_board(request):
    bingo_board = _get_user_bingo_board(request)
    game = bingo_board.game if bingo_board is not None else None
    user = request.user

    if bingo_board:
        Game.objects.filter(id=bingo_board.game.id).update(
            last_used=times.now())
        return redirect(reverse(bingo, kwargs={
            'board_id': bingo_board.board_id}))
    elif request.POST:
        create_form = CreateForm(
            request.POST,
            prefix="create",
            game=game)

        if create_form.is_valid():
            with transaction.atomic():
                ip = request.META['REMOTE_ADDR']
                password = create_form.cleaned_data.get('password')
                game_description = create_form.cleaned_data.get(
                    'description', '')
                game = get_game(
                    site=get_current_site(request),
                    description=game_description,
                    create=True)
                Game.objects.filter(id=game.id).update(
                    last_used=times.now())

                # if the user is logged in, associate the board with
                # the user, and ignore the password
                # (so no other user can claim the board)
                user = user if user.is_authenticated() else None
                if user:
                    password = None

                bingo_board = BingoBoard(
                    game=game, user=user, ip=ip, password=password)
                bingo_board.save()

                if USE_SSE:
                    _publish_num_users(game.site.id, game.num_users(),
                                       game.num_active_users())

                return redirect(reverse(bingo, kwargs={
                    'board_id': bingo_board.board_id}))
        else:
            reclaim_form = ReclaimForm(prefix="reclaim")
            return render(
                request,
                "bingo/reclaim_board.html",
                {
                    'reclaim_form': reclaim_form,
                    'create_form': create_form,
                }
            )
    else:
        return redirect(reverse(main))
Example #6
 def test_clean_rq(self):
     r = get_redis_connection()
     self.assertEqual(len(r.keys("rq:job:*")), 0)
     r.hmset("rq:job:abc", {"bar": "baz"})
     r.hmset("rq:job:def", {"created_at": times.format(times.now(), "UTC")})
     r.hmset("rq:job:123", {"created_at": times.format(times.now() - timedelta(days=10), "UTC")})
     self.assertEqual(len(r.keys("rq:job:*")), 3)
     call_command("clean_rq")
     self.assertEqual(len(r.keys("rq:job:*")), 2)
Example #7
 def test_clean_rq(self):
     r = redis.Redis(**settings.REDIS)
     self.assertEqual(len(r.keys('rq:job:*')), 0)
     r.hmset('rq:job:abc', {'bar': 'baz'})
     r.hmset('rq:job:def', {'created_at': times.format(times.now(), 'UTC')})
     r.hmset('rq:job:123', {
         'created_at': times.format(
             times.now() - timedelta(days=10), 'UTC')})
     self.assertEqual(len(r.keys('rq:job:*')), 3)
     call_command('clean_rq')
     self.assertEqual(len(r.keys('rq:job:*')), 2)
Example #8
 def test_clean_rq(self):
     r = get_redis_connection()
     self.assertEqual(len(r.keys('rq:job:*')), 0)
     r.hmset('rq:job:abc', {'bar': 'baz'})
     r.hmset('rq:job:def', {'created_at': times.format(times.now(), 'UTC')})
     r.hmset('rq:job:123', {
         'created_at': times.format(
             times.now() - timedelta(days=10), 'UTC')})
     self.assertEqual(len(r.keys('rq:job:*')), 3)
     call_command('clean_rq')
     self.assertEqual(len(r.keys('rq:job:*')), 2)
Example #9
def create_board(request):
    bingo_board = _get_user_bingo_board(request)
    game = bingo_board.game if bingo_board is not None else None
    user = request.user

    if bingo_board:
        Game.objects.filter(id=bingo_board.game.id).update(
            last_used=times.now())
        return redirect(
            reverse(bingo, kwargs={'board_id': bingo_board.board_id}))
    elif request.POST:
        create_form = CreateForm(request.POST, prefix="create", game=game)

        if create_form.is_valid():
            with transaction.atomic():
                ip = request.META['REMOTE_ADDR']
                password = create_form.cleaned_data.get('password')
                game_description = create_form.cleaned_data.get(
                    'description', '')
                game = get_game(site=get_current_site(request),
                                description=game_description,
                                create=True)
                Game.objects.filter(id=game.id).update(last_used=times.now())

                # if the user is logged in, associate the board with
                # the user, and ignore the password
                # (so no other user can claim the board)
                user = user if user.is_authenticated() else None
                if user:
                    password = None

                bingo_board = BingoBoard(game=game,
                                         user=user,
                                         ip=ip,
                                         password=password)
                bingo_board.save()

                if USE_SSE:
                    _publish_num_users(game.site.id, game.num_users(),
                                       game.num_active_users())

                return redirect(
                    reverse(bingo, kwargs={'board_id': bingo_board.board_id}))
        else:
            reclaim_form = ReclaimForm(prefix="reclaim")
            return render(request, "bingo/reclaim_board.html", {
                'reclaim_form': reclaim_form,
                'create_form': create_form,
            })
    else:
        return redirect(reverse(main))
Example #10
    def test_job_with_repeat(self):
        """
        Ensure jobs with a repeat attribute are put back in the scheduler
        the given (repeat) number of times
        """
        time_now = times.now()
        interval = 10
        # If job is repeated once, the job shouldn't be put back in the queue
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      repeat=1)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.schedule(time_now,
                                      say_hello,
                                      interval=interval,
                                      repeat=2)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        time_now = times.now()
        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, 1, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))

        # If job is repeated twice, it should only be put back in the queue once
        job = self.scheduler.enqueue_periodic(time_now, interval, 2, say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.scheduler.enqueue_job(job)
        self.assertNotIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
Example #11
    def test_job_with_intervals_get_rescheduled(self):
        """
        Ensure jobs with an interval attribute are put back in the scheduler
        """
        time_now = times.now()
        interval = 10
        job = self.scheduler.schedule(time_now, say_hello, interval=interval)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            times.to_unix(time_now) + interval)

        # Now the same thing using enqueue_periodic
        job = self.scheduler.enqueue_periodic(time_now, interval, None,
                                              say_hello)
        self.scheduler.enqueue_job(job)
        self.assertIn(
            job.id,
            self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1))
        self.assertEqual(
            self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
            times.to_unix(time_now) + interval)
Example #12
 def events_current(self):
     now = times.now()
     return self.events\
         .filter(Event.starts_at < now)\
         .filter(Event.ends_at > now)\
         .filter(Event.cancelled_at == None)\
         .order_by(Event.starts_at, Event.name)
Example #13
def date_cs(value):
    """Parses a Czech text to a date object."""
    value = clean_whitespace(value)
    match = re.search(r'(\d+)\s*\.?\s*(\w+)(\s+\d{2,4})?', value, re.U)
    if match:
        # day
        day = int(match.group(1))

        # month
        try:
            month = int(match.group(2))
        except ValueError:
            months = (
                u'leden', u'únor', u'březen', u'duben',
                u'květen', u'červen', u'červenec', u'srpen',
                u'září', u'říjen', u'listopad', u'prosinec',
                u'ledna', u'února', u'března', u'dubna',
                u'května', u'června', u'července', u'srpna',
                u'září', u'října', u'listopadu', u'prosince',
            )
            month = (months.index(match.group(2)) % 12) + 1

        # year (group 3 carries its leading whitespace, so strip before checking length)
        year_str = (match.group(3) or '').strip()
        if not year_str:
            year = times.now().year
        elif len(year_str) == 2:
            year = 2000 + int(year_str)
        else:
            year = int(year_str)

        return datetime.date(year, month, day)
    return None
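A few doctest-style calls showing what the parser above accepts (day number, Czech month name or month number, and an optional year); the exact inputs are illustrative.

date_cs(u'21. února 2013')  # -> datetime.date(2013, 2, 21)
date_cs(u'21. února')       # -> February 21 of the current year (times.now().year)
date_cs(u'nedatováno')      # -> None (no day/month pattern found)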
Example #14
 def test_periodic_jobs_sets_ttl(self):
     """
     Ensure periodic jobs set result_ttl to infinite.
     """
     job = self.scheduler.schedule(times.now(), say_hello, interval=5)
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual(job.result_ttl, -1)
Example #15
def reclaim_board(request):
    ip = request.META['REMOTE_ADDR']
    game = get_game(site=get_current_site(request), create=False)
    if game is not None:
        Game.objects.filter(id=game.id).update(last_used=times.now())
    bingo_board = _get_user_bingo_board(request)

    if not bingo_board is None:
        return redirect(
            reverse(bingo, kwargs={'board_id': bingo_board.board_id}))
    if request.POST:
        reclaim_form = ReclaimForm(request.POST, game=game, prefix="reclaim")
        if reclaim_form.is_valid():
            bingo_board = reclaim_form.cleaned_data['bingo_board']
            request.session['board_id'] = bingo_board.id
            bingo_board.ip = ip
            bingo_board.save()
            return redirect(
                reverse(bingo, kwargs={'board_id': bingo_board.board_id}))
    else:
        reclaim_form = ReclaimForm(prefix="reclaim")
    create_form = CreateForm(prefix="create", game=game)
    return render(request, "bingo/reclaim_board.html", {
        'reclaim_form': reclaim_form,
        'create_form': create_form,
    })
Example #16
    def save(self, pipeline=None):
        """Persists the current job instance to its corresponding Redis key."""
        key = self.key
        connection = pipeline if pipeline is not None else self.connection

        obj = {}
        obj['created_at'] = times.format(self.created_at or times.now(), 'UTC')

        if self.func_name is not None:
            obj['data'] = dumps(self.job_tuple)
        if self.origin is not None:
            obj['origin'] = self.origin
        if self.description is not None:
            obj['description'] = self.description
        if self.enqueued_at is not None:
            obj['enqueued_at'] = times.format(self.enqueued_at, 'UTC')
        if self.ended_at is not None:
            obj['ended_at'] = times.format(self.ended_at, 'UTC')
        if self._result is not None:
            obj['result'] = dumps(self._result)
        if self.exc_info is not None:
            obj['exc_info'] = self.exc_info
        if self.timeout is not None:
            obj['timeout'] = self.timeout
        if self.result_ttl is not None:
            obj['result_ttl'] = self.result_ttl
        if self._status is not None:
            obj['status'] = self._status
        if self.meta:
            obj['meta'] = dumps(self.meta)

        connection.hmset(key, obj)
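A short sketch of how the pipeline parameter above is typically used, assuming redis-py's pipeline() context manager and a job object like the one this method belongs to; the 500-second TTL is illustrative.

with job.connection.pipeline() as pipe:
    job.save(pipeline=pipe)    # the HMSET is queued on the pipeline, not sent yet
    pipe.expire(job.key, 500)  # e.g. apply a result TTL in the same batch
    pipe.execute()             # both commands go to Redis in one round trip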
Example #17
    def run(self):
        # delete old showtimes
        query = Showtime.objects.filter(starts_at__lt=times.now())
        count = query.count()
        query.delete()
        log.info('Cleanup: deleted %d old showtimes.', count)

        # delete redundant showtimes
        count = 0
        for showtime in Showtime.objects.all():
            with log.pass_on_exception():
                duplicates = Showtime.objects.filter(
                    id__ne=showtime.id,
                    cinema=showtime.cinema,
                    starts_at=showtime.starts_at,
                    film_scraped__title_main=showtime.film_scraped.title_main
                )
                count += duplicates.count()
                duplicates.delete()
        log.info('Cleanup: deleted %d redundant showtimes.', count)

        # delete redundant films
        for film in Film.objects.all():
            with log.pass_on_exception():
                if not film.showtimes.count():
                    log.info('Cleanup: deleting redundant film %s.', film)
                    film.delete()
Example #18
    def enqueue_job(self, job, set_meta_data=True):
        """Enqueues a job for delayed execution.

        If the `set_meta_data` argument is `True` (default), it will update
        the properties `origin` and `enqueued_at`.

        If Queue is instantiated with async=False, job is executed immediately.
        """
        # Add Queue key set
        self.connection.sadd(self.redis_queues_keys, self.key)

        if set_meta_data:
            job.origin = self.name
            job.enqueued_at = times.now()

        if job.timeout is None:
            job.timeout = self.DEFAULT_TIMEOUT
        job.save()

        if self._async:
            self.push_job_id(job.id)
        else:
            job.perform()
            job.save()
        return job
Example #19
    def run(self):
        # delete old showtimes
        query = Showtime.objects.filter(starts_at__lt=times.now())
        count = query.count()
        query.delete()
        log.info('Cleanup: deleted %d old showtimes.', count)

        # delete redundant showtimes
        count = 0
        for showtime in Showtime.objects.all():
            with log.pass_on_exception():
                duplicates = Showtime.objects.filter(
                    id__ne=showtime.id,
                    cinema=showtime.cinema,
                    starts_at=showtime.starts_at,
                    film_scraped__title_main=showtime.film_scraped.title_main)
                count += duplicates.count()
                duplicates.delete()
        log.info('Cleanup: deleted %d redundant showtimes.', count)

        # delete redundant films
        for film in Film.objects.all():
            with log.pass_on_exception():
                if not film.showtimes.count():
                    log.info('Cleanup: deleting redundant film %s.', film)
                    film.delete()
Example #20
def create_event():
    form = EventForm()

    if form.validate_on_submit():
        event = Event()
        with db.transaction as session:
            event.name = form.name.data
            event.venue = form.venue.data
            event.description = form.description.data
            event.user = current_user
            event.starts_at = times.to_universal(form.starts_at.data, current_user.timezone)
            session.add(event)
        with db.transaction:
            event.contacts_invited_ids_str = form.contacts_invited_ids_str.data
        send_email_invites(event)
        return redirect(url_for('facebook_event', id=event.id))

    else:
        # default starts_at
        td = datetime.timedelta(days=1)
        dt = times.to_local(times.now(), current_user.timezone) + td
        dt = datetime.datetime.combine(dt.date(), datetime.time(20, 00, 00))
        form.starts_at.data = dt

    return render_template('create_event.html', form=form)
Example #21
    def reload(self, settings_file=None):
        if settings_file is None:
            if hasattr(self,
                       'SETTINGS_FILE') and self.SETTINGS_FILE is not None:
                # First check if SETTINGS_FILE has been defined. If so, we'll reload from that file.
                settings_file = self.SETTINGS_FILE
            else:
                # Looks like we're just loading the 'empty' config.
                logger.info("Initializing empty configuration.")
                self.SETTINGS_FILE = None
                self._initialize({})
                return

        if path(settings_file).exists() and path(settings_file).isfile():
            self.SETTINGS_FILE = settings_file = path(settings_file).abspath()
            logger.console("Loading configuration from %s." %
                           path(settings_file).abspath())
            # Find the complete set of settings files based on inheritance
            all_configs = []
            config = {}
            try:
                while True:
                    with open(settings_file, mode='rb') as file:
                        temp_config = yaml.load(file)
                        logger.debug("Loaded %s file." % settings_file)
                    all_configs.append((temp_config, settings_file))
                    if 'SUPER' not in temp_config:
                        break
                    else:
                        new_settings = path(temp_config['SUPER'])
                        if not new_settings.isabs():
                            settings_file = (settings_file.dirname() /
                                             new_settings).abspath()
                        logger.debug("Going to next settings file... %s" %
                                     path(temp_config['SUPER']).abspath())
            except Exception as e:
                logger.exception(e.message)

            # load parent configs
            all_configs.reverse()
            for c in all_configs[:-1]:
                logger.debug("Loading parent configuration from %s." %
                             path(c[1]).abspath())
                update_additive(config, c[0])

            # load main config
            logger.debug("Finalizing configuration from %s." %
                         path(all_configs[-1][1]).abspath())
            update_additive(config, all_configs[-1][0])

            for param in self._required_params:
                if param not in config:
                    raise Exception(
                        "Required setting '%s' is missing from config file %s."
                        % (param, self.SETTINGS_FILE))
            self._initialize(config)
            self.SETTINGS_FILE_LOAD_TIME = times.now()
        else:
            raise SettingsFileNotFoundException("Settings file %s not found!" %
                                                settings_file)
Example #22
 def test_periodic_jobs_sets_ttl(self):
     """
     Ensure periodic jobs set result_ttl to infinite.
     """
     job = self.scheduler.schedule(times.now(), say_hello, interval=5)
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual(job.result_ttl, -1)
Example #23
    def enqueue_job(self, job, timeout=None, set_meta_data=True):
        """Enqueues a job for delayed execution.

        If the `set_meta_data` argument is `True` (default), it will update
        the properties `origin` and `enqueued_at`.

        If Queue is instantiated with async=False, job is executed immediately.
        """
        if set_meta_data:
            job.origin = self.name
            job.enqueued_at = times.now()

        job.timeout = timeout  # _timeout_in_seconds(timeout)
        job.save()

        if self._async:
            p = self.connection.pipeline()
            p.rpush(self.key, job.id)
            p.rpush(self.key + ":blk", True)
            p.incr(self.enqueues_count_key)
            _, _, num_enqueued_jobs = p.execute()

            job.num_enqueued_jobs = num_enqueued_jobs
        else:
            job.perform()
            job.save()
        return job
Example #24
 def events_current(self):
     now = times.now()
     return self.events\
         .filter(Event.starts_at < now)\
         .filter(Event.ends_at > now)\
         .filter(Event.cancelled_at == None)\
         .order_by(Event.starts_at, Event.name)
Example #25
 def count(cls, pk, dimensions='_', metrics=None, at=False):
     metrics = metrics or {}
     metrics = isinstance(metrics, list) and dict([(k, 1) for k in metrics
                                                   ]) or metrics
     try:
         r = cls.hail_driver()
         if not r:
             return 0
         set_number_name = 'hail_number'
         set_number = r.get(set_number_name) or 0
         if not set_number:
             set_number = r.set(set_number_name, 0)
         at = at or times.now()
         if isinstance(at, str):
             try:
                 at = datetime.datetime.fromtimestamp(float(at))
             except ValueError:
                 pass  # if at is not a float, ignore this
         if isinstance(at, datetime.datetime):
             at = at.ctime()
         if isinstance(pk, cls):
             pk = pk.getattr(cls.unique_key)
         hit_key = '%s_%s_%s_%s' % (cls.__name__, pk, at,
                                    random.randint(1, 1000))
         r.sadd('hail_%s' % (set_number), hit_key)
         r.set(hit_key,
               json.dumps((cls.__name__, pk, dimensions, metrics, at)))
     except Exception as e:
         return '%s' % e
     return 'OK'
Example #26
    def enqueue_job(self, job, set_meta_data=True):
        """Enqueues a job for delayed execution.

        If the `set_meta_data` argument is `True` (default), it will update
        the properties `origin` and `enqueued_at`.

        If Queue is instantiated with async=False, job is executed immediately.
        """
        # Add Queue key set
        self.connection.sadd(self.redis_queues_keys, self.key)

        if set_meta_data:
            job.origin = self.name
            job.enqueued_at = times.now()

        if job.timeout is None:
            job.timeout = self.DEFAULT_TIMEOUT
        job.save()

        if self._async:
            self.push_job_id(job.id)
        else:
            job.perform()
            job.save()
        return job
Example #27
def reclaim_board(request):
    ip = request.META['REMOTE_ADDR']
    game = get_game(site=get_current_site(request), create=False)
    if game is not None:
        Game.objects.filter(id=game.id).update(last_used=times.now())
    bingo_board = _get_user_bingo_board(request)

    if not bingo_board is None:
        return redirect(reverse(bingo, kwargs={
            'board_id': bingo_board.board_id}))
    if request.POST:
        reclaim_form = ReclaimForm(request.POST, game=game, prefix="reclaim")
        if reclaim_form.is_valid():
            bingo_board = reclaim_form.cleaned_data['bingo_board']
            request.session['board_id'] = bingo_board.id
            bingo_board.ip = ip
            bingo_board.save()
            return redirect(reverse(bingo, kwargs={'board_id':
                                                   bingo_board.board_id}))
    else:
        reclaim_form = ReclaimForm(prefix="reclaim")
    create_form = CreateForm(prefix="create", game=game)
    return render(request,
                  "bingo/reclaim_board.html", {
                      'reclaim_form': reclaim_form,
                      'create_form': create_form,
                  })
Example #28
    def status(self, request):
        """Shows current timer's status."""
        identity = self.bot.get_plugin('identity').get_identity_by_request(
            request)
        pomodoro = self.pomodoros.get(identity.id)

        if pomodoro is None:
            request.respond('There is no active pomodoro')
        else:
            now = times.now()
            worked = pomodoro['due'] - now
            mins = max(0, worked.seconds / 60)
            secs = max(0, worked.seconds % 60)

            if worked.days < 0:
                request.respond('Your pomodoro has almost finished.')
            else:
                if mins > 0:
                    request.respond(
                        '{0} minutes till the end of the current pomodoro.'.
                        format(mins))
                else:
                    request.respond(
                        '{0} seconds till the end of the current pomodoro.'.
                        format(secs))
Example #29
    def callback_perform_job(self, result):
        rv, job = result
        if isinstance(rv, defer.Deferred):
            rv = yield rv
        pickled_rv = dumps(rv)
        job._status = Status.FINISHED
        job.ended_at = times.now()

        if LOGGING_OK_JOBS:
            meta = ','.join([item for item in job.meta.values()])
            if rv is None:
                self.log.msg('[%s] Job OK. %s' % (meta, job))
            else:
                self.log.msg('[%s] Job OK. %s. result = %r' % (meta, job, rv))

        result_ttl = self.default_result_ttl if job.result_ttl is None else job.result_ttl
        if result_ttl == 0:
            yield job.delete()
            #self.log.msg('Result discarded immediately.')
        else:
            yield self.connection.hset(job.key, 'result', pickled_rv)
            yield self.connection.hset(job.key, 'ended_at',
                                       times.format(job.ended_at, 'UTC'))
            yield self.connection.hset(job.key, 'status', job._status)
            if result_ttl > 0:
                yield self.connection.expire(job.key, result_ttl)
Example #30
def update_user(login):
    g.db = core.get_db()
    now = times.now()

    stats_to_save = ('followers', 'following', 'disk_usage', 'public_repos')

    user = g.db.users.find_one({'login': login})

    gh = net.GitHub(token=user['gitorama']['token'])

    # update user's data
    new_user_data = gh.get('/user')
    user.update(new_user_data)
    user['gitorama']['update_at'] = now + app.config['USER_UPDATE_INTERVAL']
    g.db.users.save(user)

    # update users's repositories
    repositories = gh.get('/user/repos')

    for rep in repositories:
        rep_from_db = g.db.user_reps.find_one(
            {
                'owner.login': rep['owner']['login'],
                'name': rep['name']
            }) or {}
        rep_from_db.update(rep)
        g.db.user_reps.save(rep_from_db)

    today = datetime.datetime(now.year, now.month, now.day)
    key = dict(login=user['login'], date=today)
    stats = g.db.user_stats.find_one(key) or key

    stats.update((key, value) for key, value in user.iteritems()
                 if key in stats_to_save)
    g.db.user_stats.save(stats)
Example #31
def date_time_year(date, time, year=None, tz='Europe/Prague'):
    """Parses strings representing parts of datetime and combines them
    together. Resulting datetime is in UTC.
    """
    dt_string = u'{date} {time} {year}'.format(
        date=date,
        time=time,
        year=year or times.now().year,
    )
    possible_formats = (
        '%d. %m. %H:%M %Y',
        '%d. %m. %H.%M %Y',
    )
    dt = None
    for format in possible_formats:
        try:
            dt = datetime.datetime.strptime(dt_string, format)
        except ValueError:
            pass
        else:
            break
    if dt:
        return times.to_universal(dt, tz)
    else:
        raise ValueError(dt_string)
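Roughly how the helper above behaves with the 'Europe/Prague' default and the two supported formats; the values are illustrative.

date_time_year(u'21. 8.', u'18:30', year=2013)
# -> datetime.datetime(2013, 8, 21, 16, 30) in UTC (Prague is UTC+2 in August)

date_time_year(u'21. 8.', u'18.30', year=2013)  # dotted time parses as well
date_time_year(u'21. 8.', u'half past six')     # no format matches: ValueError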
Example #32
 def count(cls, pk, dimensions='_', metrics=None, at=False):
     metrics = metrics or {}
     metrics = isinstance(metrics, list) and dict([(k, 1) for k in metrics]) or metrics
     try:
         r = cls.hail_driver()
         if not r:
             return 0
         set_number_name = 'hail_number'
         set_number = r.get(set_number_name) or 0
         if not set_number:
             set_number = r.set(set_number_name, 0)
         at = at or times.now()
         if isinstance(at, str):
             try:
                 at = datetime.datetime.fromtimestamp(float(at))
             except ValueError:
                 pass  # if at is not a float, ignore this
         if isinstance(at, datetime.datetime):
             at = at.ctime()
         if isinstance(pk, cls):
             pk = pk.getattr(cls.unique_key)
         hit_key = '%s_%s_%s_%s' % (
                 cls.__name__, pk, at, random.randint(1, 1000))
         r.sadd('hail_%s' % (set_number), hit_key)
         r.set(hit_key, json.dumps(
             (cls.__name__, pk, dimensions, metrics, at)))
     except Exception as e:
         return '%s' % e
     return 'OK'
Example #33
    def get_jobs(self, until=None, with_times=False):
        """
        Returns a list of job instances that will be queued until the given time.
        If no 'until' argument is given, all jobs are returned. This function
        accepts datetime and timedelta instances as well as integers representing
        epoch values.
        If with_times is True, a list of tuples consisting of the job instance and
        its scheduled execution time is returned.
        """
        def epoch_to_datetime(epoch):
            return datetime.fromtimestamp(float(epoch))

        if until is None:
            until = "+inf"
        elif isinstance(until, datetime):
            until = times.to_unix(until)
        elif isinstance(until, timedelta):
            until = times.to_unix((times.now() + until))
        job_ids = self.connection.zrangebyscore(self.scheduled_jobs_key, 0,
                                                until, withscores=with_times,
                                                score_cast_func=epoch_to_datetime)
        if not with_times:
            job_ids = zip(job_ids, repeat(None))
        jobs = []
        for job_id, sched_time in job_ids:
            try:
                job = Job.fetch(job_id, connection=self.connection)
                if with_times:
                    jobs.append((job, sched_time))
                else:
                    jobs.append(job)
            except NoSuchJobError:
                # Delete jobs that aren't there from scheduler
                self.cancel(job_id)
        return jobs
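A couple of call patterns for get_jobs(), using the datetime/timedelta/epoch forms its docstring mentions; scheduler stands for an instance of the class above and the epoch value is illustrative.

from datetime import timedelta

scheduler.get_jobs()                          # every scheduled job
scheduler.get_jobs(until=timedelta(hours=1))  # jobs due within the next hour

# With with_times=True each entry is a (job, scheduled_datetime) pair.
for job, scheduled_at in scheduler.get_jobs(until=1500000000, with_times=True):
    print(job.id, scheduled_at)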
Example #34
    def enqueue_job(self,
                    job,
                    timeout=None,
                    set_meta_data=True,
                    timestamp=None):
        """Enqueues a job for delayed execution.

        When the `timeout` argument is given, it overrides the default
        timeout value of 180 seconds.  `timeout` may be either a string or
        an integer.

        If the `set_meta_data` argument is `True` (default), it will update
        the properties `origin` and `enqueued_at`.

        If Queue is instantiated with async=False, job is executed immediately.
        """
        if set_meta_data:
            job.origin = self.name
            job.enqueued_at = times.now()

        if timeout:
            job.timeout = timeout  # _timeout_in_seconds(timeout)
        else:
            job.timeout = 180  # default

        if not timestamp:
            timestamp = self._default_timestamp

        if self._async:
            job.save()
            self.push_job_id(job.id, timestamp)
        else:
            job.perform()
            job.save()
        return job
Example #35
    def get_days(cls, period, at=None, tzoffset=None):
        ats = False
        at = at or times.now()
        period = str(period)
        if '|' in period:
            period, tzoffset = period.split('|')
        if period == 'ytd':
            period = 'year'
            period = cls.get(period)
            start = convert(at, tzoffset).replace(month=1,
                    day=1,hour=0,minute=0,second=0, microsecond=0)
            ats = period.datetimes_strs(start=start, tzoffset=tzoffset)
        if period == 'mtd':
            period = 'thirty'
            period = cls.get(period)
            start = convert(at, tzoffset).replace(day=1, hour=0,
                    minute=0, second=0, microsecond=0)
            ats = period.datetimes_strs(start=start, tzoffset=tzoffset)
        if period == 'wtd':
            period = 'thirty'
            period = cls.get(period)
            start = convert(at, tzoffset).replace(hour=0, minute=0,
                    second=0, microsecond=0)
            start = start - timedelta(start.weekday() + 2)
            ats = period.datetimes_strs(start=start, tzoffset=tzoffset)
        if period in ['today', 'hours']:
            period = 'thirty'
            period = cls.get(period)
            start = convert(at, tzoffset).replace(hour=0, minute=0,
                    second=0, microsecond=0)
            ats = period.datetimes_strs(start=start, tzoffset=tzoffset)
        if period == 'yesterday':
            period = 'thirty'
            period = cls.get(period)
            end = convert(at, tzoffset).replace(hour=0, minute=0,
                    second=0, microsecond=0)
            start = end - timedelta(1)
            end = end - timedelta(seconds=1)
            ats = period.datetimes_strs(start=start, end=end, tzoffset=tzoffset)
        if period == 'seven':
            period = 'thirty'
            period = cls.get(period)
            start = convert(at, tzoffset).replace(hour=0, minute=0,
                    second=0, microsecond=0) - timedelta(7)
            ats = period.datetimes_strs(start=start, tzoffset=tzoffset)
        if '-' in str(period):
            start_s, end_s = period.split('-')
            period = 'thirty'
            period = cls.get(period)
            end = datetime.strptime(end_s, '%m/%d/%Y').replace(hour=0, minute=0,
                    second=0, microsecond=0)+timedelta(1)-timedelta(seconds=1)
            start = datetime.strptime(start_s, '%m/%d/%Y').replace(hour=0, minute=0, second=0, microsecond=0)
            ats = period.datetimes_strs(start=start, end=end, tzoffset=tzoffset)

        period = cls.get(period)
        if not ats and not at:
            ats = period.datetimes_strs(tzoffset=tzoffset)
        elif not ats:
            ats = [period.flatten_str(convert(at, tzoffset))]
        return period, list(ats), tzoffset
Example #36
    def handle_exception(self, job, *exc_info):
        """Walks the exception handler stack to delegate exception handling."""
        exc_string = ''.join(
                traceback.format_exception_only(*exc_info[:2]) +
                traceback.format_exception(*exc_info))
        self.log.error(exc_string)

        result_ttl =  self.default_result_ttl if job.result_ttl is None else job.result_ttl
        job.ended_at = times.now()
        job.exc_info = exc_string
        job._status = Status.FAILED
        p = self.connection.pipeline()
        p.hset(job.key, 'ended_at', job.ended_at)
        p.hset(job.key, 'exc_info', job.exc_info)
        p.hset(job.key, 'status', job._status)
        if result_ttl > 0:
            p.expire(job.key, result_ttl)
            self.log.debug('Result for job %s is kept for %d seconds.' % (job.id, result_ttl))
        else:
            self.log.warning('Result for job %s will never expire, clean up result key manually.' % (job.id, ))
        p.execute()

        for handler in reversed(self._exc_handlers):
            self.log.debug('Invoking exception handler %s' % (handler,))
            fallthrough = handler(job, *exc_info)

            # Only handlers with explicit return values should disable further
            # exc handling, so interpret a None return value as True.
            if fallthrough is None:
                fallthrough = True

            if not fallthrough:
                break
Example #37
    def dump(self):
        """Returns a serialization of the current job instance"""
        obj = {}
        obj['created_at'] = times.format(self.created_at or times.now(), 'UTC')

        if self.func_name is not None:
            obj['data'] = dumps(self.job_tuple)
        if self.origin is not None:
            obj['origin'] = self.origin
        if self.description is not None:
            obj['description'] = self.description
        if self.enqueued_at is not None:
            obj['enqueued_at'] = times.format(self.enqueued_at, 'UTC')
        if self.ended_at is not None:
            obj['ended_at'] = times.format(self.ended_at, 'UTC')
        if self._result is not None:
            obj['result'] = dumps(self._result)
        if self.exc_info is not None:
            obj['exc_info'] = self.exc_info
        if self.timeout is not None:
            obj['timeout'] = self.timeout
        if self.result_ttl is not None:
            obj['result_ttl'] = self.result_ttl
        if self._status is not None:
            obj['status'] = self._status
        if self._dependency_id is not None:
            obj['dependency_id'] = self._dependency_id
        if self.meta:
            obj['meta'] = dumps(self.meta)

        return obj
Example #38
    def dump(self):
        """Returns a serialization of the current job instance"""
        obj = {}
        obj['created_at'] = times.format(self.created_at or times.now(), 'UTC')

        if self.func_name is not None:
            obj['data'] = dumps(self.job_tuple)
        if self.origin is not None:
            obj['origin'] = self.origin
        if self.description is not None:
            obj['description'] = self.description
        if self.enqueued_at is not None:
            obj['enqueued_at'] = times.format(self.enqueued_at, 'UTC')
        if self.ended_at is not None:
            obj['ended_at'] = times.format(self.ended_at, 'UTC')
        if self._result is not None:
            obj['result'] = dumps(self._result)
        if self.exc_info is not None:
            obj['exc_info'] = self.exc_info
        if self.timeout is not None:
            obj['timeout'] = self.timeout
        if self.result_ttl is not None:
            obj['result_ttl'] = self.result_ttl
        if self._status is not None:
            obj['status'] = self._status
        if self._dependency_id is not None:
            obj['dependency_id'] = self._dependency_id
        if self.meta:
            obj['meta'] = dumps(self.meta)

        return obj
Example #39
def get_logins_to_build_digests_for(db):
    logins = set(u['_id'] for u in db.daily_digests.find(
        {
            '$or': [
                {
                    'update_at': {
                        '$exists': False
                    }
                },
                {
                    'update_at': {
                        '$lte': times.now()
                    }
                },
            ],
        }, {'_id': True}))

    all_daily_digest_logins = set(
        u['_id'] for u in db.daily_digests.find({}, {'_id': True}))
    all_known_logins = set(
        u['login']
        for u in db.users.find({'gitorama.token': {
            '$exists': True
        }}, {'login': True}))
    logins.update(all_known_logins - all_daily_digest_logins)
    return logins
Example #40
 def datetimes(self, start=False, end=False, tzoffset=None):
     from dateutil import rrule
     from util import datetimeIterator
     in_range = lambda dt: (not start or start <= dt) and (not end or end >=
                                                           dt)
     use_start = start or self.start(end)
     use_end = end or convert(times.now(), tzoffset)
     interval, length = self.getUnits()
     if interval >= 3600 * 24 * 30:
         rule = rrule.MONTHLY
         step = interval / (3600 * 24 * 30)
     elif interval >= 3600 * 24 * 7:
         rule = rrule.WEEKLY
         step = interval / (3600 * 24 * 7)
     elif interval >= 3600 * 24:
         rule = rrule.DAILY
         step = interval / (3600 * 24)
     elif interval >= 3600:
         rule = rrule.HOURLY
         step = interval / 3600
     elif interval >= 60:
         rule = rrule.MINUTELY
         step = interval / 60
     else:
         rule = rrule.SECONDLY
         step = interval
     dts = rrule.rrule(rule,
                       dtstart=use_start,
                       until=use_end,
                       interval=step)
     return dts
Example #41
 def today(cls, queryset):
     today = times.now().date()
     return (
         queryset.filter(starts_at__gt=today - timedelta(days=1))
                 .filter(starts_at__lt=today + timedelta(days=1))
                 .order_by('starts_at')
     )
Example #42
File: job.py  Project: hungld/rq
    def save(self):
        """Persists the current job instance to its corresponding Redis key."""
        key = self.key

        obj = {}
        obj['created_at'] = times.format(self.created_at or times.now(), 'UTC')

        if self.func_name is not None:
            obj['data'] = dumps(self.job_tuple)
        if self.origin is not None:
            obj['origin'] = self.origin
        if self.description is not None:
            obj['description'] = self.description
        if self.enqueued_at is not None:
            obj['enqueued_at'] = times.format(self.enqueued_at, 'UTC')
        if self.ended_at is not None:
            obj['ended_at'] = times.format(self.ended_at, 'UTC')
        if self._result is not None:
            obj['result'] = dumps(self._result)
        if self.exc_info is not None:
            obj['exc_info'] = self.exc_info
        if self.timeout is not None:
            obj['timeout'] = self.timeout
        if self.result_ttl is not None:
            obj['result_ttl'] = self.result_ttl
        if self._status is not None:
            obj['status'] = self._status
        if self.meta:
            obj['meta'] = dumps(self.meta)

        self.connection.hmset(key, obj)
Example #43
class Showtime(db.Document):

    meta = {'ordering': ['-starts_at']}

    cinema = db.ReferenceField(Cinema, dbref=False, required=True)
    film_paired = db.ReferenceField(Film, dbref=False)
    film_scraped = db.EmbeddedDocumentField(ScrapedFilm, required=True)
    starts_at = db.DateTimeField(required=True)
    tags = db.ListField(db.StringField())  # dubbing, 3D, etc.
    url_booking = db.URLField()
    price = db.DecimalField()
    prices = db.MapField(db.DecimalField())
    scraped_at = db.DateTimeField(required=True, default=lambda: times.now())

    @property
    def starts_at_day(self):
        return self.starts_at.date()

    @property
    def film(self):
        return self.film_paired or self.film_scraped

    @db.queryset_manager
    def upcoming(cls, queryset):
        now = times.now()
        week_later = now + timedelta(days=7)
        return (queryset.filter(starts_at__gte=now).filter(
            starts_at__lte=week_later).order_by('starts_at'))

    def clean(self):
        self.tags = tuple(frozenset(tag for tag in self.tags if tag))
Example #44
 def start(self, at):
     at = at or times.now()
     interval, length = self.getUnits()
     dt= (at - timedelta(seconds=length))
     if interval < 60:
         interval_seconds = interval
     else: interval_seconds = 60
     if interval < 3600:
         interval_minutes = (interval - interval_seconds)/60
     else: interval_minutes = 60
     if interval < 3600*24:
         interval_hours = (interval - interval_seconds -
                 (60*interval_minutes))/3600
     else:
         interval_hours = 24
     if interval_hours == 0: interval_hours = 1
     if interval_minutes == 0: interval_minutes = 1
     new_start = dt.replace(
         microsecond = 0,
         second = (dt.second - dt.second%interval_seconds),
         minute = (dt.minute - dt.minute%interval_minutes),
         hour = (dt.hour - dt.hour%interval_hours),)
     if interval >= (3600*24*30):
         new_start = new_start.replace(day=1)
     return new_start
Example #45
 def start(self, at):
     at = at or times.now()
     interval, length = self.getUnits()
     dt = (at - timedelta(seconds=length))
     if interval < 60:
         interval_seconds = interval
     else:
         interval_seconds = 60
     if interval < 3600:
         interval_minutes = (interval - interval_seconds) / 60
     else:
         interval_minutes = 60
     if interval < 3600 * 24:
         interval_hours = (interval - interval_seconds -
                           (60 * interval_minutes)) / 3600
     else:
         interval_hours = 24
     if interval_hours == 0: interval_hours = 1
     if interval_minutes == 0: interval_minutes = 1
     new_start = dt.replace(
         microsecond=0,
         second=(dt.second - dt.second % interval_seconds),
         minute=(dt.minute - dt.minute % interval_minutes),
         hour=(dt.hour - dt.hour % interval_hours),
     )
     if interval >= (3600 * 24 * 30):
         new_start = new_start.replace(day=1)
     return new_start
Example #46
 def datetimes(self, start=False, end=False, tzoffset=None):
     from dateutil import rrule
     from util import datetimeIterator
     in_range = lambda dt: (not start or start <= dt) and (
         not end or end >= dt)
     use_start = start or self.start(end)
     use_end = end or convert(times.now(), tzoffset)
     interval, length = self.getUnits()
     if interval >= 3600*24*30:
         rule = rrule.MONTHLY
         step = interval / (3600*24*30)
     elif interval >= 3600*24*7:
         rule = rrule.WEEKLY
         step = interval / (3600*24*7)
     elif interval >= 3600*24:
         rule = rrule.DAILY
         step = interval / (3600*24)
     elif interval >= 3600:
         rule = rrule.HOURLY
         step = interval / 3600
     elif interval >= 60:
         rule = rrule.MINUTELY
         step = interval / 60
     else:
         rule = rrule.SECONDLY
         step = interval
     dts = rrule.rrule(rule, dtstart=use_start, until=use_end, interval=step)
     return dts
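For intuition, the dateutil rule the method above builds behaves like this on its own (a standalone sketch with a one-day interval, independent of the surrounding class).

from datetime import datetime
from dateutil import rrule

dts = rrule.rrule(rrule.DAILY,
                  dtstart=datetime(2013, 1, 1),
                  until=datetime(2013, 1, 5),
                  interval=1)
list(dts)  # five datetimes: January 1 through January 5, 2013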
Example #47
def date_time_year(date, time, year=None, tz='Europe/Prague'):
    """Parses strings representing parts of datetime and combines them
    together. Resulting datetime is in UTC.
    """
    dt_string = u'{date} {time} {year}'.format(
        date=date,
        time=time,
        year=year or times.now().year,
    )
    possible_formats = (
        '%d. %m. %H:%M %Y',
        '%d. %m. %H.%M %Y',
    )
    dt = None
    for format in possible_formats:
        try:
            dt = datetime.datetime.strptime(dt_string, format)
        except ValueError:
            pass
        else:
            break
    if dt:
        return times.to_universal(dt, tz)
    else:
        raise ValueError(dt_string)
Example #48
File: job.py  Project: nzinfo/rq
    def save(self):
        """Persists the current job instance to its corresponding Redis key."""
        key = self.key

        obj = {}
        obj["created_at"] = times.format(self.created_at or times.now(), "UTC")

        if self.func_name is not None:
            obj["data"] = dumps(self.job_tuple)
        if self.origin is not None:
            obj["origin"] = self.origin
        if self.description is not None:
            obj["description"] = self.description
        if self.enqueued_at is not None:
            obj["enqueued_at"] = times.format(self.enqueued_at, "UTC")
        if self.ended_at is not None:
            obj["ended_at"] = times.format(self.ended_at, "UTC")
        if self._result is not None:
            obj["result"] = dumps(self._result)
        if self.exc_info is not None:
            obj["exc_info"] = self.exc_info
        if self.timeout is not None:
            obj["timeout"] = self.timeout
        if self.result_ttl is not None:
            obj["result_ttl"] = self.result_ttl
        if self._status is not None:
            obj["status"] = self._status
        if self.meta:
            obj["meta"] = dumps(self.meta)

        self.connection.hmset(key, obj)
Example #49
    def enqueue_job(self, job, timeout=None, set_meta_data=True):
        """Enqueues a job for delayed execution.

        When the `timeout` argument is given, it overrides the default
        timeout value of 180 seconds.  `timeout` may be either a string or
        an integer.

        If the `set_meta_data` argument is `True` (default), it will update
        the properties `origin` and `enqueued_at`.

        If Queue is instantiated with async=False, job is executed immediately.
        """
        if set_meta_data:
            job.origin = self.name
            job.enqueued_at = times.now()

        if timeout:
            job.timeout = timeout  # _timeout_in_seconds(timeout)
        else:
            job.timeout = 180  # default
        job.save()

        if self._async:
            self.push_job_id(job.id)
        else:
            job.perform()
            job.save()
        return job
Example #50
 def upcoming(cls, queryset):
     now = times.now() - timedelta(minutes=20)
     week_later = now + timedelta(days=cls.upcoming_days)
     return (
         queryset.filter(starts_at__gte=now)
                 .filter(starts_at__lte=week_later)
                 .order_by('starts_at')
     )
Example #51
 def get_jobs_to_queue(self, with_times=False):
     """
     Returns a list of job instances that should be queued
     (score lower than current timestamp).
     If with_times is True, a list of tuples consisting of the job instance and
     its scheduled execution time is returned.
     """
     return self.get_jobs(times.to_unix(times.now()), with_times=with_times)
Example #52
 def flatten(self, dtf=None):
     if not dtf:
         dtf = times.now()
     if type(dtf) in (str, unicode):
         dtf = self.parse_dt_str(dtf)
     dts = list(self.datetimes(end=dtf))
     flat = len(dts) and dts[-1] or False
     return flat
Example #53
 def test_interval_and_repeat_persisted_correctly(self):
     """
     Ensure that interval and repeat attributes get correctly saved in Redis.
     """
     job = self.scheduler.schedule(times.now(), say_hello, interval=10, repeat=11)
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual(job_from_queue.meta['interval'], 10)
     self.assertEqual(job_from_queue.meta['repeat'], 11)
Example #54
 def get_jobs_to_queue(self, with_times=False):
     """
     Returns a list of job instances that should be queued
     (score lower than current timestamp).
     If with_times is True, a list of tuples consisting of the job instance and
     its scheduled execution time is returned.
     """
     return self.get_jobs(times.to_unix(times.now()), with_times=with_times)
Example #55
    def now(self, request):
        """Outputs server time and user time."""

        now = times.now()
        user = request.get_user()
        tz = self._get_user_timezone(user)
        local = times.to_local(now, tz)

        request.respond(u'Server time: {}\nLocal time:{}'.format(now, local))
Example #56
    def pending_test(self):
        """Pending status"""
        file = self.post_tests_dir / 'published.md'
        post = Post(file)
        post.timestamp = times.now() + timedelta(days=1)

        self.assertTrue(post.is_pending)
        self.assertFalse(post.is_published)
        self.assertFalse(post.is_draft)
Example #57
 def flatten(self, dtf=None):
     if not dtf:
         dtf = times.now()
     if type(dtf) in (str, unicode):
         dtf = self.parse_dt_str(dtf)
     dts = list(self.datetimes(end=dtf))
     flat = len(dts) and dts[-1] or False
     return flat
Example #58
def datetimeIterator(from_date=None, to_date=None, use_utc=True, delta=timedelta(days=1)):
    if not from_date:
        if use_utc:
            from_date = times.now()
        else:
            from_date = datetime.now()
    while to_date is None or from_date <= to_date:
        yield from_date
        from_date = from_date + delta
    return
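A quick usage sketch for the iterator above; the dates are illustrative.

from datetime import datetime, timedelta

start = datetime(2013, 1, 1)
end = datetime(2013, 1, 3)

list(datetimeIterator(start, end))
# [datetime(2013, 1, 1, 0, 0), datetime(2013, 1, 2, 0, 0), datetime(2013, 1, 3, 0, 0)]

# With no to_date the generator never ends; pull values as needed.
gen = datetimeIterator(delta=timedelta(hours=6))  # starts from times.now(), i.e. UTC
next(gen)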