Example #1
def silence(request, incident_id):
    try:
        incident = Incident.objects.get(id=incident_id)
        silence_for = request.POST.get('silence_for')
        url = request.POST.get("url")
        if IncidentSilenced.objects.filter(incident=incident).count() < 1:
            silenced_incident = IncidentSilenced()
            silenced_incident.incident = incident
            silenced_incident.silenced_until = timezone.now(
                ) + timezone.timedelta(hours=int(silence_for))
            silenced_incident.silenced = True
            silenced_incident.save()
            event_log_message = "%s silenced incident %s for %s hours" % (
                request.user.username, incident.incident_key, silence_for)
            event_log = EventLog()
            event_log.incident_key = incident
            event_log.action = 'silence_incident'
            event_log.user = request.user
            event_log.service_key = incident.service_key
            event_log.data = event_log_message
            event_log.occurred_at = timezone.now()
            event_log.save()
            ScheduledNotification.remove_all_for_incident(incident)
            incident.event_type = Incident.ACKNOWLEDGE
            incident.save()
            unsilence_incident.apply_async(
                (incident_id,), eta=silenced_incident.silenced_until)
        return HttpResponseRedirect(url)
    except Incident.DoesNotExist:
        raise Http404
Example #2
def log_message(msg):
	log = "%s: %s\r\n" % (timezone.localtime(timezone.now()), msg)
	if not default_storage.exists(settings.ARP_IMPORT_LOG):
		log = "%s: Log Started\r\n%s" % (timezone.localtime(timezone.now()), log)
	log_file = default_storage.open(settings.ARP_IMPORT_LOG, mode="a")
	log_file.write(log)
	log_file.close()
Example #3
    def search(self, serie, numero, remitente, destinatario, sucursal, fecha):
        flat = serie or numero or remitente or destinatario or fecha
        tz = timezone.get_current_timezone()
        if flat:
            if fecha:
                "si se ingreso fecha"
                date = datetime.strptime(fecha, "%d/%m/%Y")
                end_date = timezone.make_aware(date, tz)
                start_date = end_date - timedelta(days=7)
            else:

                date = datetime.strptime("01/10/2015", "%d/%m/%Y")
                end_date = timezone.now()
                start_date = timezone.make_aware(date, tz)
        else:
            end_date = timezone.now()
            start_date = end_date - timedelta(days=7)

        busqueda = self.annotate(
            saldo=F('depositslip__total_amount')-F('amount')
        ).filter(
            depositslip__serie__icontains=serie,
            depositslip__number__icontains=numero,
            depositslip__sender__full_name__icontains=remitente,
            depositslip__addressee__full_name__icontains=destinatario,
            depositslip__state='2',
            depositslip__destination=sucursal,
            depositslip__created__range=(start_date, end_date)
        )
        return busqueda
Example #4
    def test_clean_grading_no_expire_failure(self):
        rule = {
            "if_completed_before": now(),
            "credit_percent": 100
        }
        expected_error_msg = "grading rules may not expire"
        with self.assertRaises(ValidationError) as cm:
            fre = models.FlowRuleException(
                flow_id=factories.DEFAULT_FLOW_ID,
                participation=self.participation,
                kind=constants.flow_rule_kind.grading,
                rule=rule,
                expiration=now()
            )

            fre.clean()
        self.assertIn(expected_error_msg, str(cm.exception))

        self.assertEqual(
            self.mock_get_course_repo.call_count, 0,
            "The expensive operation should be skipped in this case")
        self.assertEqual(
            self.mock_get_flow_desc.call_count, 0,
            "The expensive operation should be skipped in this case")
        self.assertEqual(self.mock_validate_session_grading_rule.call_count, 0,
                         "The expensive operation should be skipped in this case")
Example #5
    def setUp(self):
        super(GroupEventsOldestTest, self).setUp()
        self.login_as(user=self.user)

        project = self.create_project()
        min_ago = (timezone.now() - timedelta(minutes=1)).isoformat()[:19]
        two_min_ago = (timezone.now() - timedelta(minutes=2)).isoformat()[:19]

        self.event1 = self.store_event(
            data={
                'event_id': 'a' * 32,
                'environment': 'staging',
                'fingerprint': ['group_1'],
                'timestamp': two_min_ago
            },
            project_id=project.id,
        )

        self.event2 = self.store_event(
            data={
                'event_id': 'b' * 32,
                'environment': 'production',
                'fingerprint': ['group_1'],
                'timestamp': min_ago
            },
            project_id=project.id,
        )

        self.group = Group.objects.first()
Example #6
    def setUp(self):
        super(CheckEntries, self).setUp()
        self.user = factories.User()
        self.user2 = factories.User()
        self.superuser = factories.Superuser()
        self.project = factories.Project(
            type__enable_timetracking=True, status__enable_timetracking=True,
            point_person=self.user)

        self.default_data = {
            'user': self.user,
            'project': self.project,
            'seconds_paused': 0,
            'status': Entry.VERIFIED,
        }
        self.good_start = timezone.now() - relativedelta(days=0, hours=8)
        self.good_end = timezone.now() - relativedelta(days=0)
        self.bad_start = timezone.now() - relativedelta(days=1, hours=8)
        self.bad_end = timezone.now() - relativedelta(days=1)
        # Create users for the test
        self.user.first_name = 'first1'
        self.user.last_name = 'last1'
        self.user.save()
        self.user2.first_name = 'first2'
        self.user2.last_name = 'last2'
        self.user2.save()
        self.all_users = [self.user, self.user2, self.superuser]
        # Create a valid entry for all users on every day since 60 days ago
        self.make_entry_bulk(self.all_users, 60)
Example #7
    def test_go_live_page_will_be_published(self):
        # Connect a mock signal handler to page_published signal
        signal_fired = [False]
        signal_page = [None]
        def page_published_handler(sender, instance, **kwargs):
            signal_fired[0] = True
            signal_page[0] = instance
        page_published.connect(page_published_handler)


        page = SimplePage(
            title="Hello world!",
            slug="hello-world",
            live=False,
            go_live_at=timezone.now() - timedelta(days=1),
        )
        self.root_page.add_child(instance=page)

        page.save_revision(approved_go_live_at=timezone.now() - timedelta(days=1))

        p = Page.objects.get(slug='hello-world')
        self.assertFalse(p.live)
        self.assertTrue(PageRevision.objects.filter(page=p).exclude(approved_go_live_at__isnull=True).exists())

        management.call_command('publish_scheduled_pages')

        p = Page.objects.get(slug='hello-world')
        self.assertTrue(p.live)
        self.assertFalse(PageRevision.objects.filter(page=p).exclude(approved_go_live_at__isnull=True).exists())

        # Check that the page_published signal was fired
        self.assertTrue(signal_fired[0])
        self.assertEqual(signal_page[0], page)
        self.assertEqual(signal_page[0], signal_page[0].specific)
Example #8
    def test_handle_without_motivation_text(self):
        patient1 = Patient.objects.first()
        measurement1 = Measurement.objects.create(
            type='O',
            value=96.5,
            patient=patient1,
            time=timezone.now()
        )
        alarm1 = Alarm.objects.create(
            measurement=measurement1,
            time_created=timezone.now()
        )

        self.force_authenticate('helselise')
        response = self.client.post(
            '/alarms/' + str(alarm1.id) + '/handle/',
            {
                'alarm': {
                    'is_treated': True,
                    'treated_text': 'Ensom',
                    'search_tag': 'Sorg'
                },
                'motivation_text': {'text': ''}
            },
            'json'
        )

        self.assertEqual(response.status_code, 200)  # OK
        self.assertEqual(MotivationText.objects.count(), 0)
        self.assertEqual(response.data['alarm']['search_tag'], 'Sorg')
Example #9
    def test_list_only_untreated(self):
        self.force_authenticate('helselise')

        patient1 = Patient.objects.first()
        measurement1 = Measurement.objects.create(
            type='O',
            value=90.5,
            patient=patient1,
            time=timezone.now() - timedelta(days=1)
        )
        Alarm.objects.create(
            measurement=measurement1,
            time_created=timezone.now(),
            is_treated=True
        )

        patient2 = self.create_patient('karinordmann', 'Kari', 'Nordmann', '02105534875')
        measurement2 = Measurement.objects.create(
            type='P',
            value=63,
            patient=patient2,
            time=timezone.now()
        )
        Alarm.objects.create(
            measurement=measurement2,
            time_created=timezone.now(),
            is_treated=False
        )

        response = self.client.get('/alarms/?only_untreated=1')
        self.assertEqual(response.data['count'], 1)
        self.assertTrue('results' in response.data)
        alarms = response.data['results']
        self.assertEqual(alarms[0]['measurement']['type'], 'P')
Example #10
    def fetch_collection(cls, feeds, prefix_log):
        """Fetches a collection of Feed.

        Args:
            feeds: the collection of Feed to fetch
            prefix_log: a prefix to use in the log to know who called it

        Returns:
            The time elapsed in seconds.
        """
        start = timezone.now()
        log_desc = '%s - Fetching %s Feeds' % (prefix_log, feeds.count())

        logger.info('%s => start' % (log_desc,))

        for feed in feeds:
            try:
                feed.fetch()
            except Exception as err:
                traceback.print_exc()
                print(err)
                logger.error('%s - Fetching => [KO]\n%s' % (feed.log_desc, err))

        delta = timezone.now() - start
        logger.info('%s in %ss => end' % (log_desc, delta.total_seconds()))

        return delta
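
A minimal usage sketch for the classmethod above (illustrative only; the FeedFetcher class name and the Feed model with a fetch() method are assumptions taken from the docstring, not confirmed by the source):

def refresh_all_feeds():
    # Fetch every Feed and report how long the run took.
    feeds = Feed.objects.all()
    elapsed = FeedFetcher.fetch_collection(feeds, prefix_log='cron')
    # fetch_collection returns a timedelta, per its docstring.
    return elapsed.total_seconds()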
Example #11
    def test_list_filtered_by_patient(self):
        self.force_authenticate('helselise')

        patient1 = Patient.objects.first()
        measurement1 = Measurement.objects.create(
            type='O',
            value=90.5,
            patient=patient1,
            time=timezone.now()
        )
        Alarm.objects.create(
            measurement=measurement1,
            time_created=timezone.now()
        )

        patient2 = self.create_patient('karinordmann', 'Kari', 'Nordmann', '02105534879')
        measurement2 = Measurement.objects.create(
            type='O',
            value=96.5,
            patient=patient2,
            time=timezone.now()
        )
        Alarm.objects.create(
            measurement=measurement2,
            time_created=timezone.now()
        )

        response = self.client.get('/alarms/?patient_id=' + str(patient1.id))
        self.assertEqual(response.data['count'], 1)
        self.assertTrue('results' in response.data)
        alarms = response.data['results']
        alarm1 = alarms[0]
        self.assertTrue('treated_text' in alarm1)
        self.assertAlmostEqual(alarm1['measurement']['value'], 90.5)
        self.assertFalse('patient' in alarm1['measurement'])
Example #12
    def test_dry_run_flag(self):
        """
        Test that the dry-run flag sends no email and only logs the number of emails sent in each batch.
        """
        user = UserFactory.create()
        verification = self.create_and_submit(user)
        verification.status = 'approved'
        verification.expiry_date = now() - timedelta(days=1)
        verification.save()

        start_date = now() - timedelta(days=1)  # using default days
        count = 1

        with LogCapture(LOGGER_NAME) as logger:
            call_command('send_verification_expiry_email', '--dry-run')
            logger.check(
                (LOGGER_NAME,
                 'INFO',
                 u"For the date range {} - {}, total Software Secure Photo verification filtered are {}"
                 .format(start_date.date(), now().date(), count)
                 ),
                (LOGGER_NAME,
                 'INFO',
                 u"This was a dry run, no email was sent. For the actual run email would have been sent "
                 u"to {} learner(s)".format(count)
                 ))
        self.assertEqual(len(mail.outbox), 0)
Example #13
    def test_expiry_date_range(self):
        """
        Test that the verifications are filtered on the given range. Email is not sent for any verification with
        expiry date out of range
        """
        user = UserFactory.create()
        verification_in_range = self.create_and_submit(user)
        verification_in_range.status = 'approved'
        verification_in_range.expiry_date = now() - timedelta(days=1)
        verification_in_range.save()

        user = UserFactory.create()
        verification = self.create_and_submit(user)
        verification.status = 'approved'
        verification.expiry_date = now() - timedelta(days=5)
        verification.save()

        call_command('send_verification_expiry_email', '--days-range=2')

        # Check that only one email is sent
        self.assertEqual(len(mail.outbox), 1)

        # Verify that the email is not sent to the out of range verification
        expiry_email_date = SoftwareSecurePhotoVerification.objects.get(pk=verification.pk).expiry_email_date
        self.assertIsNone(expiry_email_date)
Example #14
 def response_change(self, request, obj):
     """ custom method that cacthes a new 'save and edit next' action 
         Remember that the type of 'obj' is the current model instance, so we can use it dynamically!
     """
     if "_save_and_investigation_completed" in request.POST:
     
         #updates the date of transmission
         if obj.data_fine_istruttoria is None:
             obj.stato_istruttoria = True
             obj.data_fine_istruttoria = timezone.now()
             
             #revoke(obj.email_task_id, terminate=True)
             
             obj.save()
             
         return self.response_post_save_change(request, obj)
     elif "_save_and_investigation_not_completed" in request.POST:
     
         if obj.data_inizio_istruttoria is None:                
             obj.stato_istruttoria = False
             obj.data_inizio_istruttoria = timezone.now()
             obj.save()
             
         return self.response_post_save_change(request, obj)
     elif "_save" in request.POST:
         return self.response_post_save_change(request, obj)
     else:
         return super(SegnalazioneAdmin, self).response_change(request, obj)
Example #15
  def is_active(self, value):
    from apps.grading.views.action import ActionMaker
    from apps.communication.views.email_class import Email
    from settings.people import operations_team

    if value and self.active_at and not self.deactive_at: #already active
      pass

    elif value: #activate
      self.active_at = timezone.now()
      self.deactive_at = None
      ActionMaker(action_type=ActionType.ADD_PRODUCT, product=self)
      Email('product/activated', self).sendTo([person.email for person in operations_team])

    elif not value: #deactivate
      self.deactive_at = timezone.now()
      #cancel orders of this product
      from apps.communication.views.order_events import cancelOrder
      for order in self.orders.all():
        if not order.is_shipped: #todo: and not order.is_cancelled
          cancelOrder(order)

      try:
        message = "R %d" % self.id
        message += "<br>%s" % self.seller.name
        Email(message=message).sendTo([person.email for person in operations_team])
      except Exception as e:
        ExceptionHandler(e, "in Product.is_active")

    #always
    self.in_holding = False
Example #16
def add_page(request, category_name_slug):

    try:
        cat = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        cat = None

    if request.method == 'POST':
        form = PageForm(request.POST)
        if form.is_valid():
            if cat:
                page = form.save(commit=False)
                page.category = cat
                page.views = 0
                page.first_visit = timezone.now()
                page.last_visit = timezone.now()
                page.save()
                # DONE BUG: must redirect here, otherwise creating the page again misbehaves
                # return redirect('rango.views.index')
                return redirect('/rango/index/')
        else:
            print(form.errors)
    else:
        form = PageForm()

    context_dict = {'form': form, 'category': cat}

    return render(request, 'rango/add_page.html', context_dict)
Example #17
    def test_root_url_shows_links_to_all_polls(self):
        # set up some polls
        poll1 = Poll(question='6 times 7', pub_date=timezone.now())
        poll1.save()
        poll2 = Poll(question='life, the universe and everything', pub_date=timezone.now())
        poll2.save()

        response = self.client.get('/')

        template_names_used = [t.name for t in response.templates]
        self.assertIn('home.html', template_names_used)

        # check we've passed the polls to the template
        polls_in_context = response.context['polls']
        self.assertEquals(list(polls_in_context), [poll1, poll2])

        # check the poll names appear on the page
        self.assertIn(poll1.question, response.content)
        self.assertIn(poll2.question, response.content)

        # check the page also contains the urls to individual polls pages
        poll1_url = reverse('polls.views.poll', args=[poll1.id,])
        self.assertIn(poll1_url, response.content)
        poll2_url = reverse('polls.views.poll', args=[poll2.id,])
        self.assertIn(poll2_url, response.content)
Example #18
    def dispatch(self, request, *args, **kwargs):
        from pretix.base.services.cart import error_messages

        err = None
        v = request.GET.get('voucher')

        if v:
            v = v.strip()
            try:
                self.voucher = Voucher.objects.get(code=v, event=request.event)
                if self.voucher.redeemed:
                    err = error_messages['voucher_redeemed']
                if self.voucher.valid_until is not None and self.voucher.valid_until < now():
                    err = error_messages['voucher_expired']
            except Voucher.DoesNotExist:
                err = error_messages['voucher_invalid']
        else:
            return redirect(eventreverse(request.event, 'presale:event.index'))

        if request.event.presale_start and now() < request.event.presale_start:
            err = error_messages['not_started']
        if request.event.presale_end and now() > request.event.presale_end:
            err = error_messages['ended']

        if err:
            messages.error(request, _(err))
            return redirect(eventreverse(request.event, 'presale:event.index'))

        return super().dispatch(request, *args, **kwargs)
Example #19
def obtainrefreshToken():
    print "startToken"
    token = EaseToken.objects.first()
    if token is not None:
        if timezone.now() < token.expires_in:
            print("not expired")
            return

    data = {'grant_type':'client_credentials','client_id':Ease_id,'client_secret':Ease_secret}
    url = EaseURL+"token"
    r = requests.post(url,json=data)
    if r.status_code==requests.codes.ok:
        response = r.json()
        token = EaseToken.objects.first()
        if token is not None:
            print "needs refresh"
            token.token = response["access_token"]
            token.expires_in = timegm(datetime.utcnow().utctimetuple()) + response["expires_in"]
            token.application = response["application"]
            token.save()
        else:
            print "needs a new one"
            token = response["access_token"]
            expire = timezone.now()+timedelta(seconds=response["expires_in"])
            application = response["application"]
            EaseToken.objects.create(token=token,expires_in=expire,application=application)

    else:
       #obtain fail
       pass
Example #20
 def setUp(self):
     self.admin_user = User.objects.create_superuser(
         '*****@*****.**', 'example')
     self.future_date = timezone.now().date() + timedelta(days=10)
     self.job_open = mommy.make(
         Job,
         review_status=Job.OPEN,
     )
     self.job_under_review = mommy.make(
         Job,
         review_status=Job.UNDER_REVIEW,
         reviewer=self.admin_user,
     )
     self.job_ready_to_publish = mommy.make(
         Job,
         review_status=Job.READY_TO_PUBLISH,
         reviewer=self.admin_user,
     )
     self.job_published = mommy.make(
         Job,
         review_status=Job.PUBLISHED,
         reviewer=self.admin_user,
         published_date=timezone.now(),
         expiration_date=self.future_date,
     )
     self.job_rejected = mommy.make(
         Job,
         review_status=Job.REJECTED,
         reviewer=self.admin_user,
     )
     self.client.login(username=self.admin_user.email, password='******')
Example #21
    def get_context_data(self, **kwargs):
        context = super(
            TicketedEventAdminListView, self
        ).get_context_data(**kwargs)

        queryset = TicketedEvent.objects.filter(
                date__gte=timezone.now()
            ).order_by('date')

        if self.request.method == 'POST':
            if "past" in self.request.POST:
                queryset = TicketedEvent.objects.filter(
                    date__lte=timezone.now()
                ).order_by('date')
                context['show_past'] = True
            elif "upcoming" in self.request.POST:
                queryset = queryset
                context['show_past'] = False

        if queryset.count() > 0:
            context['ticketed_events'] = True

        ticketed_event_formset = TicketedEventFormSet(
            data=self.request.POST if 'formset_submitted' in self.request.POST
            else None,
            queryset=queryset if 'formset_submitted' not in self.request.POST
            else None,
        )
        context['ticketed_event_formset'] = ticketed_event_formset
        context['sidenav_selection'] = 'ticketed_events'
        return context
Example #22
    def save(self):
        published = False
        post = super(AdminPostForm, self).save(commit=False)

        if post.pk is None or Post.objects.filter(pk=post.pk, published=None).count():
            if self.cleaned_data["state"] == Post.STATE_CHOICES[-1][0]:
                post.published = timezone.now()
                published = True

        post.teaser_html = self.cleaned_data["teaser"]
        post.content_html = self.cleaned_data["content"]
        post.updated = timezone.now()
        post.save()

        r = Revision()
        r.post = post
        r.title = post.title
        r.teaser = self.cleaned_data["teaser"]
        r.content = self.cleaned_data["content"]
        r.author = post.author
        r.updated = post.updated
        r.published = post.published
        r.save()

        if can_tweet() and self.cleaned_data["tweet"]:
            post.tweet()

        if published:
            post_published.send(sender=Post, post=post)

        return post
Example #23
    def render(self, **kwargs):
        if self.specific:
            specific_is_active = (
                self.specific.is_active
                and self.specific.active_from <= timezone.now()
                and (not self.specific.active_until or self.specific.active_until >= timezone.now())
            )

            if specific_is_active:
                banner = self.specific
                type = banner.type
            else:
                return ""
        else:
            try:
                banner = Banner.objects.active().filter(type=self.type).select_related("mediafile").order_by("?")[0]
                type = self.type
            except IndexError:
                return ""

        Banner.objects.filter(id=banner.id).update(embeds=F("embeds") + 1)

        return render_to_string(
            ["content/banner/%s.html" % type, "content/banner/default.html"],
            {"content": self, "banner": banner},
            context_instance=kwargs.get("context"),
        )
Example #24
 def test_press_on_with_dup_found_and_older_date(self):
     for dup_checker in self.dup_checkers:
         # Note that the next case occurs prior to the current one
         onwards = dup_checker.press_on(
             Opinion,
             now(),
             now() - timedelta(days=1),
             lookup_value=self.content_hash,
             lookup_by='sha1'
         )
         if dup_checker.full_crawl:
             self.assertTrue(
                 onwards,
                 'DupChecker says to %s during a full crawl.' % onwards)
         else:
             self.assertFalse(
                 onwards,
                 "DupChecker returned %s but there should be a duplicate in "
                 "the database. dup_count is %s, and dup_threshold is %s" %
                 (onwards, dup_checker.dup_count, dup_checker.dup_threshold)
             )
             self.assertTrue(
                 dup_checker.emulate_break,
                 "We should have hit a break but didn't."
             )
Example #25
    def test_encode_decode(self):
        # create a time that has a set millisecond
        now = timezone.now().replace(microsecond=1000)

        # our dictionary to encode
        source = dict(name="Date Test", age=10, now=now)

        # encode it
        encoded = dict_to_json(source)

        # now decode it back out
        decoded = json_to_dict(encoded)

        # should be the same as our source
        self.assertDictEqual(source, decoded)

        # test the same using our object mocking
        mock = dict_to_struct('Mock', json.loads(encoded), ['now'])
        self.assertEquals(mock.now, source['now'])

        # try it with a microsecond of 0 instead
        source['now'] = timezone.now().replace(microsecond=0)

        # encode it
        encoded = dict_to_json(source)

        # now decode it back out
        decoded = json_to_dict(encoded)

        # should be the same as our source
        self.assertDictEqual(source, decoded)

        # test the same using our object mocking
        mock = dict_to_struct('Mock', json.loads(encoded), ['now'])
        self.assertEquals(mock.now, source['now'])
Example #26
def motion_details(request, days=3):
    devices = Device.objects.all()

    minutes = 10

    start = timezone.now() - timezone.timedelta(days=days)
    motion_time = []
    motion_count = []
    delta = timezone.timedelta(minutes=minutes)

    while start < timezone.now():
        cnt = Log.objects.filter(log_type__exact='MO', status=True, created__range=[start, start+delta]).count()
        motion_time.append(start)
        motion_count.append(cnt)

        start += delta
#    motion_datestmp = last_motion
    motion_time = mark_safe([timezone.localtime(m).strftime('%Y-%m-%d %H:%M:%S') for m in motion_time].__str__())
    #motion_state = mark_safe([ int(m.status) for m in last_motions].__str__())
    #motion_time = mark_safe(motion_time.__str__())
    motion_count = mark_safe(motion_count.__str__())

    range_start = mark_safe(timezone.localtime(timezone.now()-timezone.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S'))
    range_end = mark_safe(timezone.localtime(timezone.now()).strftime('%Y-%m-%d %H:%M:%S'))

    return render_to_response(
        'motions.html',
        locals()
    )
Example #27
    def test_press_on_with_a_dup_found(self):
        for dup_checker in self.dup_checkers:
            onwards = dup_checker.press_on(
                Opinion,
                now(),
                now(),
                lookup_value=self.content_hash,
                lookup_by='sha1'
            )
            if dup_checker.full_crawl:
                self.assertTrue(
                    onwards,
                    'DupChecker returned %s during a full crawl.' % onwards
                )

            elif dup_checker.full_crawl is False:
                self.assertFalse(
                    onwards,
                    "DupChecker returned %s but there should be a duplicate in "
                    "the database. dup_count is %s, and dup_threshold is %s" %
                    (onwards, dup_checker.dup_count, dup_checker.dup_threshold)
                )
                self.assertTrue(
                    dup_checker.emulate_break,
                    "We should have hit a break but didn't."
                )
Example #28
def eventsJson(request):
    month_start = datetime.datetime(timezone.now().year, timezone.now().month, 1, 0)
    next_month = month_start+relativedelta(months=+1)
    month_end = next_month+relativedelta(seconds=-1)
    
    data = serializers.serialize("json", Event.objects.all())
    return HttpResponse(data, content_type="text/plain")
Example #29
    def test_clean_unknown_exception_rule(self):
        unknown_flow_rule_kind = "unknown_kind"
        rule = {
            "if_before": now()
        }
        fre = models.FlowRuleException(
            flow_id=factories.DEFAULT_FLOW_ID,
            participation=self.participation,
            kind=unknown_flow_rule_kind,
            rule=rule,
            expiration=now()
        )

        with self.assertRaises(ValidationError) as cm:
            fre.clean()
        expected_error_msg = "invalid exception rule kind"
        self.assertIn(expected_error_msg, str(cm.exception))

        for call in (self.mock_get_course_repo,
                     self.mock_get_flow_desc,
                     self.mock_validate_session_access_rule,
                     self.mock_validate_session_access_rule,
                     self.mock_validate_session_access_rule,
                     self.mock_validate_session_access_rule):
            self.assertEqual(
                call.call_count, 0,
                "The expensive operation should be skipped in this case")
Example #30
def env():
    o = Organizer.objects.create(name='Dummy', slug='dummy')
    event = Event.objects.create(
        organizer=o, name='Dummy', slug='dummy',
        date_from=now(), plugins='pretix.plugins.banktransfer'
    )
    user = User.objects.create_user('*****@*****.**', 'dummy')
    EventPermission.objects.create(user=user, event=event)
    o1 = Order.objects.create(
        code='1Z3AS', event=event,
        status=Order.STATUS_PENDING,
        datetime=now(), expires=now() + timedelta(days=10),
        total=23, payment_provider='banktransfer'
    )
    o2 = Order.objects.create(
        code='6789Z', event=event,
        status=Order.STATUS_CANCELED,
        datetime=now(), expires=now() + timedelta(days=10),
        total=23, payment_provider='banktransfer'
    )
    quota = Quota.objects.create(name="Test", size=2, event=event)
    item1 = Item.objects.create(event=event, name="Ticket", default_price=23)
    quota.items.add(item1)
    OrderPosition.objects.create(order=o1, item=item1, variation=None, price=23)
    return event, user, o1, o2
Example #31
def post_list(request):
	posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
	return render(request, 'blog/post_list.html', {'posts': posts})
Example #32
 def get_queryset(self):
     # __lte -> less than or equal to
     # __ -> used for SQL calls
     return Post.objects.filter(
         published_date__lte=timezone.now()).order_by("-published_date")
Example #33
	def get_queryset(self):
		"""
		Excludes any questions that aren't published yet.
		"""
		return Question.objects.filter(pub_date__lte=timezone.now())
Example #34
	def get_queryset(self):
		"""
		Return the last five published questions (not including those set to be
		published in the future).
		"""
		return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
Example #35
 def set_available(self, available):
     self.available = available
     self.update_available = timezone.now()
Example #36
 def set_reachable(self, reachable):
     self.reachable = reachable
     self.update_reachable = timezone.now()
Example #37
 def set_status(self, status):
     self.status = status
     self.update_status = timezone.now()
Example #38
 def getNowTime(request):
     info_time = timezone.now()
     return info_time
Example #39
 def get_queryset(self):
     return Question.objects.filter(
         pub_date__lte=timezone.now()).order_by('-pub_date')[:5]  # <------
Example #40
 def publish(self):
     self.published_date = timezone.now()
     self.save()
Example #41
def createExpenseItemsForEvents(request=None, datetimeTuple=None, rule=None, event=None):
    '''
    For each StaffMember-related Repeated Expense Rule, look for EventStaffMember
    instances in the designated time window that do not already have expenses associated
    with them.  For hourly rental expenses, then generate new expenses that are
    associated with this rule.  For non-hourly expenses, generate new expenses
    based on the non-overlapping intervals of days, weeks or months for which
    there is not already an ExpenseItem associated with the rule in question.
    '''

    # This is used repeatedly, so it is put at the top
    submissionUser = getattr(request, 'user', None)

    # Return the number of new expense items created
    generate_count = 0

    # First, construct the set of rules that need to be checked for affiliated events
    rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \
        Q(Q(staffmemberwageinfo__isnull=False) | Q(staffdefaultwage__isnull=False))
    if rule:
        rule_filters = rule_filters & Q(id=rule.id)
    rulesToCheck = RepeatedExpenseRule.objects.filter(
        rule_filters).distinct().order_by(
            '-staffmemberwageinfo__category', '-staffdefaultwage__category'
        )

    # These are the filters placed on Events that overlap the window in which
    # expenses are being generated.
    event_timefilters = Q()

    if datetimeTuple and len(datetimeTuple) == 2:
        timelist = list(datetimeTuple)
        timelist.sort()
        event_timefilters = event_timefilters & (
            Q(event__startTime__gte=timelist[0]) & Q(event__startTime__lte=timelist[1])
        )

    if event:
        event_timefilters = event_timefilters & Q(event__id=event.id)

    # Now, we loop through the set of rules that need to be applied, then loop
    # through the Events in the window in question that involved the staff
    # member indicated by the rule.
    for rule in rulesToCheck:
        staffMember = getattr(rule, 'staffMember', None)
        staffCategory = getattr(rule, 'category', None)

        # No need to continue if expenses are not to be generated
        if (
                (not staffMember and not staffCategory) or
                (
                    not staffMember and not
                    getConstant('financial__autoGenerateFromStaffCategoryDefaults')
                )
        ):
            continue

        # For construction of expense descriptions
        replacements = {
            'type': _('Staff'),
            'to': _('payment to'),
            'for': _('for'),
        }

        # This is the generic category for all Event staff, but it may be overridden below
        expense_category = getConstant('financial__otherStaffExpenseCat')

        if staffCategory:
            if staffMember:
                # This staff member in this category
                eventstaff_filter = Q(staffMember=staffMember) & Q(category=staffCategory)
            elif getConstant('financial__autoGenerateFromStaffCategoryDefaults'):
                # Any staff member who does not already have a rule specified this category
                eventstaff_filter = (
                    Q(category=staffCategory) &
                    ~Q(staffMember__expenserules__category=staffCategory)
                )
            replacements['type'] = staffCategory.name

            # For standard categories of staff, map the EventStaffCategory to
            # an ExpenseCategory using the stored constants.  Otherwise, the
            # ExpenseCategory is a generic one.
            if staffCategory == getConstant('general__eventStaffCategoryAssistant'):
                expense_category = getConstant('financial__assistantClassInstructionExpenseCat')
            elif staffCategory in [
                    getConstant('general__eventStaffCategoryInstructor'),
                    getConstant('general__eventStaffCategorySubstitute')
            ]:
                expense_category = getConstant('financial__classInstructionExpenseCat')

        else:
            # We don't want to generate duplicate expenses when there is both a category-limited
            # rule and a non-limited rule for the same person, so we have to construct the list
            # of categories that are to be excluded if no category is specified by this rule.
            coveredCategories = list(staffMember.expenserules.filter(
                category__isnull=False).values_list('category__id', flat=True))
            eventstaff_filter = Q(staffMember=staffMember) & ~Q(category__id__in=coveredCategories)

        if rule.advanceDays is not None:
            if rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
                event_timefilters = event_timefilters & Q(
                    event__endTime__lte=timezone.now() + timedelta(days=rule.advanceDays)
                )
            elif rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
                event_timefilters = event_timefilters & Q(
                    event__startTime__lte=timezone.now() + timedelta(days=rule.advanceDays)
                )
        if rule.priorDays is not None:
            if rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
                event_timefilters = event_timefilters & Q(
                    event__endTime__gte=timezone.now() - timedelta(days=rule.priorDays)
                )
            elif rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
                event_timefilters = event_timefilters & Q(
                    event__startTime__gte=timezone.now() - timedelta(days=rule.priorDays)
                )
        if rule.startDate:
            event_timefilters = event_timefilters & Q(event__startTime__gte=timezone.now().replace(
                year=rule.startDate.year, month=rule.startDate.month, day=rule.startDate.day,
                hour=0, minute=0, second=0, microsecond=0,
            ))
        if rule.endDate:
            event_timefilters = event_timefilters & Q(event__startTime__lte=timezone.now().replace(
                year=rule.endDate.year, month=rule.endDate.month, day=rule.endDate.day,
                hour=0, minute=0, second=0, microsecond=0,
            ))

        # Loop through EventStaffMembers for which there are not already
        # directly allocated expenses under this rule, and create new
        # ExpenseItems for them depending on whether the rule requires hourly
        # expenses or non-hourly ones to be generated.

        staffers = EventStaffMember.objects.filter(eventstaff_filter & event_timefilters).exclude(
            Q(event__expenseitem__expenseRule=rule)).distinct()

        if rule.applyRateRule == rule.RateRuleChoices.hourly:
            for staffer in staffers:
                # Hourly expenses are always generated without checking for
                # overlapping windows, because the periods over which hourly
                # expenses are defined are disjoint.  However, hourly expenses
                # are allocated directly to events, so we just need to create
                # expenses for any events that do not already have an Expense
                # Item generated under this rule.
                replacements['event'] = staffer.event.name
                replacements['name'] = staffer.staffMember.fullName
                replacements['dates'] = staffer.event.startTime.strftime('%Y-%m-%d')
                if (
                        staffer.event.startTime.strftime('%Y-%m-%d') !=
                        staffer.event.endTime.strftime('%Y-%m-%d')
                ):
                    replacements['dates'] += ' %s %s' % (
                        _('to'), staffer.event.endTime.strftime('%Y-%m-%d')
                    )

                # Find or create the TransactionParty associated with the staff member.
                staffer_party = TransactionParty.objects.get_or_create(
                    staffMember=staffer.staffMember,
                    defaults={
                        'name': staffer.staffMember.fullName,
                        'user': getattr(staffer.staffMember, 'userAccount', None)
                    }
                )[0]

                params = {
                    'event': staffer.event,
                    'category': expense_category,
                    'expenseRule': rule,
                    'description': '%(type)s %(to)s %(name)s %(for)s: %(event)s, %(dates)s' % \
                        replacements,
                    'submissionUser': submissionUser,
                    'hours': staffer.netHours,
                    'wageRate': rule.rentalRate,
                    'total': staffer.netHours * rule.rentalRate,
                    'accrualDate': staffer.event.startTime,
                    'payTo': staffer_party,
                }

                ExpenseItem.objects.create(**params)
                generate_count += 1
        else:
            # Non-hourly expenses are generated by constructing the time
            # intervals in which the occurrence occurs, and removing from that
            # interval any intervals in which an expense has already been
            # generated under this rule (so, for example, monthly rentals will
            # not show up multiple times). So, we just need to construct the set
            # of intervals for which to construct expenses.  We first need to
            # split the set of EventStaffMember objects by StaffMember (in case
            # this rule is not person-specific) and then run this procedure
            # separated by StaffMember.
            members = StaffMember.objects.filter(eventstaffmember__in=staffers)

            for member in members:
                events = [x.event for x in staffers.filter(staffMember=member)]

                # Find or create the TransactionParty associated with the staff member.
                staffer_party = TransactionParty.objects.get_or_create(
                    staffMember=member,
                    defaults={
                        'name': member.fullName,
                        'user': getattr(member, 'userAccount', None)
                    }
                )[0]

                intervals = [
                    (x.localStartTime, x.localEndTime) for x in
                    EventOccurrence.objects.filter(event__in=events)
                ]
                remaining_intervals = rule.getWindowsAndTotals(intervals)

                for startTime, endTime, total, description in remaining_intervals:
                    replacements['when'] = description
                    replacements['name'] = member.fullName

                    params = {
                        'category': expense_category,
                        'expenseRule': rule,
                        'periodStart': startTime,
                        'periodEnd': endTime,
                        'description': '%(type)s %(to)s %(name)s %(for)s %(when)s' % replacements,
                        'submissionUser': submissionUser,
                        'total': total,
                        'accrualDate': startTime,
                        'payTo': staffer_party,
                    }

                    ExpenseItem.objects.create(**params)
                    generate_count += 1
    rulesToCheck.update(lastRun=timezone.now())
    return generate_count
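
A hedged usage sketch for createExpenseItemsForEvents above: it accepts an optional (start, end) window and returns the number of ExpenseItems created, so a periodic task might call it roughly as follows (the wrapper function and the window length are assumptions, not part of the original snippet):

def generate_weekly_staff_expenses():
    # Generate staff expenses for events from the past week through the
    # next 30 days; the helper sorts the tuple itself before filtering.
    window = (timezone.now() - timedelta(days=7),
              timezone.now() + timedelta(days=30))
    return createExpenseItemsForEvents(datetimeTuple=window)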
Example #42
 def get_queryset(self):
     return Question.objects.filter(pub_date__lte=timezone.now())
Example #43
    def form_valid(self, form):

        riyosya = form.save(commit=False)
        y_start_day = self.request.POST['y_start_day']

        if y_start_day:
            riyosya.status = settings._RIYOSYA_STATUS_YOTEI
        else:
            # If no planned start-of-use date has been entered, treat the person as discharged for now.
            riyosya.status = settings._RIYOSYA_STATUS_TAISYO

        riyosya.created_by = self.request.user
        riyosya.created_at = timezone.now()
        riyosya.updated_by = self.request.user
        riyosya.updated_at = timezone.now()

        # set birthday
        gengou = self.request.POST['gengou']
        g_year = self.request.POST['g_year']
        month = self.request.POST['month']
        day = self.request.POST['day']
        riyosya.birthday = wareki_to_seireki(gengou, g_year, month, day)
        riyosya.save()

        # Create RiyosyaRiyouKikan (registered as planned)
        RiyosyaRiyouKikan(
            riyosya=riyosya,
            start_day=datetime.strptime(self.request.POST['y_start_day'], '%Y/%m/%d') if self.request.POST['y_start_day'] !='' else None,
            start_time=self.request.POST['y_start_time'] if self.request.POST['y_start_time'] !='' else None,
            start_kbn=self.request.POST['y_start_kbn'],
            start_status=settings._RIYOSYA_STATUS_YOTEI,
            last_day=datetime.strptime(self.request.POST['y_last_day'], '%Y/%m/%d') if self.request.POST['y_last_day'] !='' else None,
            last_time=self.request.POST['y_last_time'] if self.request.POST['y_last_time'] !='' else None,
            last_kbn=self.request.POST['y_last_kbn'],
            last_status=settings._RIYOSYA_STATUS_YOTEI,
            created_by=self.request.user,
            created_at=timezone.now(),
            updated_by=self.request.user,
            updated_at=timezone.now()
        ).save()

        # Create RiyosyaRenrakusaki
        RiyosyaRenrakusaki(
            riyosya=riyosya,
            name=self.request.POST['r_name_1'],
            furigana=self.request.POST['r_furigana_1'],
            zokugara=self.request.POST['r_zoku_1'],
            addr=self.request.POST['r_addr_1'],
            tel=self.request.POST['r_tel_1_1'],
            tel2=self.request.POST['r_tel_2_1'],
            primary_flg=True,
            created_by=self.request.user,
            created_at=timezone.now(),
            updated_by=self.request.user,
            updated_at=timezone.now()
        ).save()

        if self.request.POST['r_name_2']:
            RiyosyaRenrakusaki(
                riyosya=riyosya,
                name=self.request.POST['r_name_2'],
                furigana=self.request.POST['r_furigana_2'],
                zokugara=self.request.POST['r_zoku_2'],
                addr=self.request.POST['r_addr_2'],
                tel=self.request.POST['r_tel_1_2'],
                tel2=self.request.POST['r_tel_2_2'],
                primary_flg=False,
                created_by=self.request.user,
                created_at=timezone.now(),
                updated_by=self.request.user,
                updated_at=timezone.now()
            ).save()

        return redirect('riyosya_list')
Example #44
def createGenericExpenseItems(request=None, datetimeTuple=None, rule=None):
    '''
    Generic repeated expenses are created by just entering an
    expense at each exact point specified by the rule, without
    regard for whether events are scheduled in the specified
    window.
    '''

    # These are used repeatedly, so they are put at the top
    submissionUser = getattr(request, 'user', None)

    # Return the number of new expense items created
    generate_count = 0

    # First, construct the set of rules that need to be checked for affiliated events
    rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \
        Q(genericrepeatedexpense__isnull=False)
    if rule:
        rule_filters = rule_filters & Q(id=rule.id)
    rulesToCheck = RepeatedExpenseRule.objects.filter(rule_filters).distinct()

    # These are the filters placed on Events that overlap the window in which
    # expenses are being generated.
    if datetimeTuple and len(datetimeTuple) == 2:
        timelist = list(datetimeTuple)
        timelist.sort()
    else:
        timelist = None

    # Now, we loop through the set of rules that need to be applied, check for an
    # existing expense item at each point specified by the rule, and create a new
    # expense if one does not exist.
    for rule in rulesToCheck:

        limits = timelist or [ensure_timezone(datetime.min), ensure_timezone(datetime.max)]

        if rule.advanceDays:
            limits[1] = min(limits[1], timezone.now() + timedelta(days=rule.advanceDays))
        if rule.priorDays:
            limits[0] = max(limits[0], timezone.now() - timedelta(days=rule.priorDays))

        if rule.startDate:
            limits[0] = max(
                limits[0],
                timezone.now().replace(
                    year=rule.startDate.year, month=rule.startDate.month, day=rule.startDate.day,
                    hour=0, minute=0, second=0, microsecond=0,
                )
            )
        if rule.endDate:
            limits[1] = min(
                limits[1],
                timezone.now().replace(
                    year=rule.endDate.year, month=rule.endDate.month, day=rule.endDate.day,
                    hour=0, minute=0, second=0, microsecond=0,
                )
            )

        # Find the first start time greater than the lower bound time.
        if rule.applyRateRule == RepeatedExpenseRule.RateRuleChoices.hourly:
            this_time = limits[0].replace(minute=0, second=0, microsecond=0)
            if this_time < limits[0]:
                this_time += timedelta(hours=1)
        elif rule.applyRateRule == RepeatedExpenseRule.RateRuleChoices.daily:
            this_time = limits[0].replace(hour=rule.dayStarts, minute=0, second=0, microsecond=0)
            if this_time < limits[0]:
                this_time += timedelta(days=1)
        elif rule.applyRateRule == RepeatedExpenseRule.RateRuleChoices.weekly:
            offset = limits[0].weekday() - rule.weekStarts
            this_time = limits[0].replace(
                day=limits[0].day - offset, hour=rule.dayStarts, minute=0, second=0, microsecond=0
            )
            if this_time < limits[0]:
                this_time += timedelta(days=7)
        else:
            this_time = limits[0].replace(
                day=rule.monthStarts, hour=rule.dayStarts, minute=0, second=0, microsecond=0
            )
            if this_time < limits[0]:
                this_time += relativedelta(months=1)

        while this_time <= limits[1]:
            defaults_dict = {
                'category': rule.category,
                'description': rule.name,
                'submissionUser': submissionUser,
                'total': rule.rentalRate,
                'accrualDate': this_time,
                'payTo': rule.payTo,
            }
            item, created = ExpenseItem.objects.get_or_create(
                expenseRule=rule,
                periodStart=this_time,
                periodEnd=this_time,
                defaults=defaults_dict
            )
            if created:
                generate_count += 1
            if rule.applyRateRule == RepeatedExpenseRule.RateRuleChoices.hourly:
                this_time += timedelta(hours=1)
            elif rule.applyRateRule == RepeatedExpenseRule.RateRuleChoices.daily:
                this_time += timedelta(days=1)
            elif rule.applyRateRule == RepeatedExpenseRule.RateRuleChoices.weekly:
                this_time += timedelta(days=7)
            else:
                this_time += relativedelta(months=1)
    rulesToCheck.update(lastRun=timezone.now())
    return generate_count
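
The loop above advances this_time from the first rule boundary at or after the lower limit; the hourly alignment step can be shown in isolation (a standalone sketch, not code from the project):

from datetime import datetime, timedelta

def first_hourly_tick(lower_bound):
    # Return the first whole-hour boundary at or after lower_bound.
    tick = lower_bound.replace(minute=0, second=0, microsecond=0)
    if tick < lower_bound:
        tick += timedelta(hours=1)
    return tick

# first_hourly_tick(datetime(2020, 1, 1, 9, 15)) == datetime(2020, 1, 1, 10, 0)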
Example #45
 def get_queryset(self):
     ''' Return the last five published questions '''
     return Question.objects.filter(
         pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
Example #46
def createExpenseItemsForVenueRental(request=None, datetimeTuple=None, rule=None, event=None):
    '''
    For each Location or Room-related Repeated Expense Rule, look for Events
    in the designated time window that do not already have expenses associated
    with them.  For hourly rental expenses, then generate new expenses that are
    associated with this rule.  For non-hourly expenses, generate new expenses
    based on the non-overlapping intervals of days, weeks or months for which
    there is not already an ExpenseItem associated with the rule in question.
    '''

    # These are used repeatedly, so they are put at the top
    submissionUser = getattr(request, 'user', None)
    rental_category = getConstant('financial__venueRentalExpenseCat')

    # Return the number of new expense items created
    generate_count = 0

    # First, construct the set of rules that need to be checked for affiliated events
    rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \
        (Q(locationrentalinfo__isnull=False) | Q(roomrentalinfo__isnull=False))
    if rule:
        rule_filters = rule_filters & Q(id=rule.id)
    rulesToCheck = RepeatedExpenseRule.objects.filter(rule_filters).distinct()

    # These are the filters placed on Events that overlap the window in which
    # expenses are being generated.
    event_timefilters = Q()

    if datetimeTuple and len(datetimeTuple) == 2:
        timelist = list(datetimeTuple)
        timelist.sort()
        event_timefilters = event_timefilters & (
            Q(startTime__gte=timelist[0]) & Q(startTime__lte=timelist[1])
        )
    if event:
        event_timefilters = event_timefilters & Q(id=event.id)

    # Now, we loop through the set of rules that need to be applied, then loop through the
    # Events in the window in question that occurred at the location indicated by the rule.
    for rule in rulesToCheck:

        venue = (
            getattr(rule, 'location', None) if
            isinstance(rule, RoomRentalInfo) else
            getattr(rule, 'location', None)
        )
        loc = getattr(venue, 'location') if isinstance(venue, Room) else venue
        event_locfilter = Q(room=venue) if isinstance(venue, Room) else Q(location=venue)

        # Find or create the TransactionParty associated with the location.
        loc_party = TransactionParty.objects.get_or_create(
            location=loc, defaults={'name': loc.name}
        )[0]

        if rule.advanceDays:
            if rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
                event_timefilters = event_timefilters & \
                    Q(endTime__lte=timezone.now() + timedelta(days=rule.advanceDays))
            elif rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
                event_timefilters = event_timefilters & \
                    Q(startTime__lte=timezone.now() + timedelta(days=rule.advanceDays))
        if rule.priorDays:
            if rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
                event_timefilters = event_timefilters & \
                    Q(endTime__gte=timezone.now() - timedelta(days=rule.priorDays))
            elif rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
                event_timefilters = event_timefilters & \
                    Q(startTime__gte=timezone.now() - timedelta(days=rule.priorDays))
        if rule.startDate:
            event_timefilters = event_timefilters & Q(
                startTime__gte=timezone.now().replace(
                    year=rule.startDate.year, month=rule.startDate.month, day=rule.startDate.day,
                    hour=0, minute=0, second=0, microsecond=0,
                )
            )
        if rule.endDate:
            event_timefilters = event_timefilters & Q(
                startTime__lte=timezone.now().replace(
                    year=rule.endDate.year, month=rule.endDate.month, day=rule.endDate.day,
                    hour=0, minute=0, second=0, microsecond=0,
                )
            )

        # For construction of expense descriptions
        replacements = {
            'type': _('Event/Series venue rental'),
            'of': _('of'),
            'location': venue.name,
            'for': _('for'),
        }

        # Loop through Events for which there are not already directly allocated
        # expenses under this rule, and create new ExpenseItems for them depending
        # on whether the rule requires hourly expenses or non-hourly ones to
        # be generated.
        events = Event.objects.filter(event_locfilter & event_timefilters).exclude(
            Q(expenseitem__expenseRule=rule)).distinct()

        if rule.applyRateRule == rule.RateRuleChoices.hourly:
            for this_event in events:
                # Hourly expenses are always generated without checking for
                # overlapping windows, because the periods over which hourly expenses
                # are defined are disjoint.  However, hourly expenses are allocated
                # directly to events, so we just need to create expenses for any events
                # that do not already have an ExpenseItem generated under this rule.
                replacements['name'] = this_event.name
                replacements['dates'] = this_event.localStartTime.strftime('%Y-%m-%d')
                if (
                        this_event.localStartTime.strftime('%Y-%m-%d') !=
                        this_event.localEndTime.strftime('%Y-%m-%d')
                ):
                    replacements['dates'] += ' %s %s' % (
                        _('to'), this_event.localEndTime.strftime('%Y-%m-%d')
                    )

                ExpenseItem.objects.create(
                    event=this_event,
                    category=rental_category,
                    payTo=loc_party,
                    expenseRule=rule,
                    description='%(type)s %(of)s %(location)s %(for)s: %(name)s, %(dates)s' % \
                        replacements,
                    submissionUser=submissionUser,
                    total=this_event.duration * rule.rentalRate,
                    accrualDate=this_event.startTime,
                )
                generate_count += 1
        else:
            # Non-hourly expenses are generated by constructing the time
            # intervals in which the event occurrences take place, and removing
            # from that interval any intervals in which an expense has already
            # been generated under this rule (so that, for example, monthly
            # rentals will not show up multiple times).  So, we just need to
            # construct the set of intervals for which to construct expenses
            # (see the sketch after this example).
            intervals = [
                (x.localStartTime, x.localEndTime) for x in \
                    EventOccurrence.objects.filter(event__in=events)
            ]
            remaining_intervals = rule.getWindowsAndTotals(intervals)

            for startTime, endTime, total, description in remaining_intervals:
                replacements['when'] = description

                ExpenseItem.objects.create(
                    category=rental_category,
                    payTo=loc_party,
                    expenseRule=rule,
                    periodStart=startTime,
                    periodEnd=endTime,
                    description='%(type)s %(of)s %(location)s %(for)s %(when)s' % replacements,
                    submissionUser=submissionUser,
                    total=total,
                    accrualDate=startTime,
                )
                generate_count += 1
    rulesToCheck.update(lastRun=timezone.now())
    return generate_count
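
The non-hourly branch above delegates the hard part to rule.getWindowsAndTotals(), which carves the already-billed windows out of the occurrence intervals before producing one expense per remaining window. As a rough illustration of that idea only (the helper below is hypothetical and ignores the grouping into day/week/month windows and the totals that the real method computes), plain interval subtraction can be sketched as:

def subtract_billed_windows(candidates, billed):
    '''
    Illustrative sketch: yield the portions of each candidate (start, end)
    interval that are not covered by any already-billed (start, end) window.
    Works with datetimes or anything else comparable with < and <=.
    '''
    for start, end in candidates:
        pieces = [(start, end)]
        for b_start, b_end in billed:
            next_pieces = []
            for p_start, p_end in pieces:
                if b_end <= p_start or b_start >= p_end:
                    # No overlap with this billed window; keep the piece whole.
                    next_pieces.append((p_start, p_end))
                    continue
                if p_start < b_start:
                    # Keep the uncovered part to the left of the billed window.
                    next_pieces.append((p_start, b_start))
                if b_end < p_end:
                    # Keep the uncovered part to the right of the billed window.
                    next_pieces.append((b_end, p_end))
            pieces = next_pieces
        for piece in pieces:
            yield piece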
Example #47
0
    def get_user_stack_status(self, request_data, suffix=''):
        """
        Update the user stack status and return it.

        """
        settings = get_xblock_settings()
        course_id, student_id = self.get_block_ids()
        initialize = request_data.get("initialize", False)
        reset = request_data.get("reset", False)

        stack = Stack.objects.select_for_update().get(student_id=student_id,
                                                      course_id=course_id,
                                                      name=self.stack_name)

        def _launch_stack(reset=False):
            # Run
            result = self.launch_stack_task(
                settings, {
                    "stack_id": stack.id,
                    "reset": reset,
                    "learner_id": stack.learner.id
                })

            # Update stack
            stack.status = LAUNCH_PENDING
            stack.error_msg = ""
            stack.launch_task_id = result.id
            stack.launch_timestamp = timezone.now()

            logger.info("Fired async launch task [%s] for [%s]" %
                        (result.id, self.stack_name))

            return result

        def _check_result(result):
            if result and result.ready() and not result.successful():
                raise LaunchError(repr(result.result))

        if stack.status in DOWN_STATES or not stack.status:
            # Launch the stack if there's no known status, or if it's known to
            # be down.
            logger.info("Launching stack [%s] with previous status [%s]." %
                        (self.stack_name, stack.status))
            try:
                _check_result(_launch_stack(reset))
            except LaunchError as e:
                stack.status = LAUNCH_ERROR
                stack.error_msg = e.error_msg
        elif stack.status in UP_STATES:
            # The stack is up.  Reset it, if requested.
            if reset:
                logger.info("Resetting successfully launched stack [%s]." %
                            (self.stack_name))
                try:
                    _check_result(_launch_stack(reset))
                except LaunchError as e:
                    stack.status = LAUNCH_ERROR
                    stack.error_msg = e.error_msg

            else:
                logger.info("Successful launch detected for [%s], "
                            "with status [%s]" %
                            (self.stack_name, stack.status))
        elif stack.status == LAUNCH_PENDING:
            # The stack is pending launch.
            try:
                # Check if the Celery task hasn't blown up.
                task_id = stack.launch_task_id
                _check_result(self.launch_stack_task_result(task_id))
            except LaunchError as e:
                stack.status = LAUNCH_ERROR
                stack.error_msg = e.error_msg
            else:
                # Calculate time since launch
                time_since_launch = 0
                launch_timestamp = stack.launch_timestamp
                if launch_timestamp:
                    time_since_launch = (timezone.now() -
                                         launch_timestamp).seconds
                launch_timeout = self.get_launch_timeout(settings)

                # Check if the pending task hasn't timed out.
                if time_since_launch <= launch_timeout:
                    # The pending task still has some time to finish.
                    # Please wait.
                    logger.debug("Launch pending for [%s]" % (self.stack_name))

                elif initialize or reset:
                    # Timeout reached, but the user just entered the page or
                    # requested a reset.  Try launching the stack again.
                    if initialize:
                        logger.info("Launch timeout detected on initialize. "
                                    "Launching stack [%s]" % (self.stack_name))
                    else:
                        logger.info("Launch timeout detected on reset. "
                                    "Resetting stack [%s]" % (self.stack_name))
                    try:
                        _check_result(_launch_stack(reset))
                    except LaunchError as e:
                        stack.status = LAUNCH_ERROR
                        stack.error_msg = e.error_msg
                else:
                    # Timeout reached.  Consider the task a failure and let the
                    # user retry manually.
                    logger.error("Launch timeout reached for [%s] "
                                 "after %s seconds" %
                                 (self.stack_name, time_since_launch))
                    stack.status = LAUNCH_ERROR
                    stack.error_msg = "Timeout when launching stack."
        elif stack.status in PENDING_STATES:
            # The stack is otherwise pending.  Report and let the user retry
            # manually.
            logger.error("Detected pending stack [%s], "
                         "with status [%s]" % (self.stack_name, stack.status))
        elif initialize or reset:
            # Detected an unforeseen state, but the user just entered the page,
            # or requested a retry or reset, so start from scratch.
            if reset:
                logger.info("Resetting failed stack [%s]." % (self.stack_name))
            else:
                logger.info("Retrying previously failed stack [%s]." %
                            (self.stack_name))
            try:
                _check_result(_launch_stack(reset))
            except LaunchError as e:
                stack.status = LAUNCH_ERROR
                stack.error_msg = e.error_msg
        else:
            # Detected a failed stack.  Report the error and let the user retry
            # manually.
            logger.error("Failed stack [%s] detected with status [%s]." %
                         (self.stack_name, stack.status))

        # Reset the dead man's switch
        stack.suspend_timestamp = timezone.now()

        # Save changes to the database
        stack.save()

        return {
            "status": stack.status,
            "error_msg": stack.error_msg,
            "name": stack.name
        }
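
One detail worth noting about the handler above: Stack.objects.select_for_update() only locks the row when the query runs inside a transaction; in autocommit mode Django raises TransactionManagementError as soon as the queryset is evaluated. A minimal sketch of that requirement, reusing the same Stack model and lookup fields (the wrapper function itself is an assumption, not part of the xblock):

from django.db import transaction

def locked_stack_update(student_id, course_id, stack_name, mutate):
    # Sketch only: select_for_update() must run inside an atomic block.
    with transaction.atomic():
        stack = Stack.objects.select_for_update().get(
            student_id=student_id, course_id=course_id, name=stack_name)
        mutate(stack)  # e.g. set stack.status / stack.error_msg as above
        stack.save()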
Example #48
0
 def get_queryset(self):
     '''
     Exclude any questions that aren't published yet
     '''
     # pub_date__lte here plays the role of <= in SQL
     return Question.objects.filter(pub_date__lte=timezone.now())
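
A quick way to see what the pub_date__lte filter buys you is a test that creates a question with a future publication date and checks that it is excluded. This is a sketch in the spirit of the Django tutorial; the import path and the question_text field name are assumptions:

import datetime

from django.test import TestCase
from django.utils import timezone

from polls.models import Question  # app path assumed for illustration


class PublishedQuestionTests(TestCase):
    def test_future_question_is_excluded(self):
        # A question dated 30 days in the future is not yet published.
        future = timezone.now() + datetime.timedelta(days=30)
        Question.objects.create(question_text='Future?', pub_date=future)
        published = Question.objects.filter(pub_date__lte=timezone.now())
        self.assertEqual(published.count(), 0)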
Example #49
0
    def post(self, request):
        """
        Respond to POSTed username/password with token.
        """
        serializer = serializers.AuthCustomTokenSerializer(data=request.data)

        CONFIG = settings.REST_FRAMEWORK_TEMPORARY_TOKENS

        if (serializer.is_valid() or (
                'USE_AUTHENTICATION_BACKENDS'
                in CONFIG and CONFIG['USE_AUTHENTICATION_BACKENDS'])):

            user = None
            try:
                user = serializer.validated_data['user']
            except KeyError:
                if ('email' in request.data and
                        'username' in request.data and
                        'password' in request.data):

                    user = authenticate(
                        email=request.data['email'],
                        username=request.data['username'],
                        password=request.data['password']
                    )

                elif ('email' in request.data and
                        'password' in request.data):

                    user = authenticate(
                        email=request.data['email'],
                        password=request.data['password']
                    )

            token = None
            if user:
                token, _created = TemporaryToken.objects.get_or_create(
                    user=user
                )

            if token and token.expired:
                # If the token is expired, generate a new one.
                token.delete()
                expires = timezone.now() + timezone.timedelta(
                    minutes=CONFIG['MINUTES']
                )

                token = TemporaryToken.objects.create(
                    user=user, expires=expires)

            if token:
                data = {'token': token.key}
                return Response(data)
            else:
                error = _('Could not authenticate user')
                return Response(
                    {'error': error},
                    status=status.HTTP_400_BAD_REQUEST
                )

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
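
The branch that rotates expired tokens depends on a token.expired property that is not shown in this example. A minimal sketch of what such a model could look like (the expires field name is taken from the view above; the base class and everything else are assumptions):

from django.db import models
from django.utils import timezone
from rest_framework.authtoken.models import Token


class TemporaryToken(Token):
    '''Sketch only: a DRF token that stops being valid after `expires`.'''
    expires = models.DateTimeField()

    @property
    def expired(self):
        # The token counts as expired once the deadline has passed.
        return timezone.now() >= self.expires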
Example #50
0
    def keepalive(self, data, suffix=''):
        """
        Reset the dead man's switch.

        """
        self.update_stack({"suspend_timestamp": timezone.now()})
Example #51
0
def latestdates(date):
    if now() - date < timedelta(hours=24):
        return 'red'
    else:
        return 'normal'
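
Returning 'red' or 'normal' based on a date's age suggests this is meant as a template filter driving a CSS class. A sketch of the registration and usage, both assumed rather than shown in the example:

from datetime import timedelta

from django import template
from django.utils.timezone import now

register = template.Library()


@register.filter
def latestdates(date):
    # Dates from the last 24 hours are highlighted in red.
    if now() - date < timedelta(hours=24):
        return 'red'
    return 'normal'

In a template this would be used roughly as <span class="{{ entry.created|latestdates }}">{{ entry }}</span>, where entry.created is assumed to be a timezone-aware datetime.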
Example #52
0
def review_overdue(queryset):
    month_start = timezone.now().date().replace(day=1)
    queryset = filter_accross_subtypes(
        queryset, next_review_date__isnull=False, next_review_date__lt=month_start
    )
    return add_review_date_annotations(queryset).order_by("-next_review_date")
Example #53
0
 def calculate_scheduled_fire(self, contact):
     return self.calculate_scheduled_fire_for_value(contact.get_field_value(self.relative_to), timezone.now())
Example #54
0
import json

from django.db import transaction
from django.db.models import Max, CharField, Q, Value, Subquery
from django.db.models.functions import Cast, Concat
from django.http import JsonResponse, HttpResponseForbidden, HttpResponse, HttpResponseNotFound, StreamingHttpResponse, HttpResponseBadRequest
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.utils import timezone
import hashlib
from .loader import get_class_by_document_id
from .models import Change, ReplicationLog, ReplicationHistory
from django.conf import settings

start_time = int(timezone.now().timestamp() * 1000 * 1000)
server_uuid = hashlib.sha1(settings.SECRET_KEY.encode()).hexdigest()[:32]


@require_http_methods(["GET"])
@cache_control(must_revalidate=True)
def index(request):
    return JsonResponse({
        'couchdb': 'Welcome',
        'vendor': {
            'name': 'Django Sofa Sync Gateway',
            'version': '1.0',  # TODO: version from package
        },
        'version': 'Django Sofa Sync Gateway/1.0',
    })
Example #55
0
 def cached_total_cards(self):
     cache_age_limit = timezone.now() - datetime.timedelta(hours=self._cache_totals_days)
     if not self._cache_totals_last_update or self._cache_totals_last_update < cache_age_limit:
         self.force_cache_totals()
     return self._cache_total_cards
Example #56
0
 def is_firing_soon(self):
     return self.scheduled < timezone.now()
Example #57
0
def post_process_group(event, is_new, is_regression, is_sample, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    from sentry.models import GroupRuleStatus, Project

    project = Project.objects.get_from_cache(id=event.group.project_id)

    if settings.SENTRY_ENABLE_EXPLORE_CODE:
        record_affected_code.delay(event=event)

    if settings.SENTRY_ENABLE_EXPLORE_USERS:
        record_affected_user.delay(event=event)

    for plugin in plugins.for_project(project):
        plugin_post_process_group.apply_async(
            kwargs={
                'plugin_slug': plugin.slug,
                'event': event,
                'is_new': is_new,
                'is_regresion': is_regression,
                'is_sample': is_sample,
            },
            expires=300,
        )

    for rule in get_rules(project):
        match = rule.data.get('action_match', 'all')
        condition_list = rule.data.get('conditions', ())

        if not condition_list:
            continue

        # TODO(dcramer): this might not make sense for other rule actions
        # so we should find a way to abstract this into actions
        # TODO(dcramer): this isn't the most efficient query pattern for this
        rule_status, _ = GroupRuleStatus.objects.get_or_create(
            rule=rule,
            group=event.group,
            defaults={
                'project': project,
                'status': GroupRuleStatus.INACTIVE,
            },
        )

        state = EventState(
            is_new=is_new,
            is_regression=is_regression,
            is_sample=is_sample,
            rule_is_active=rule_status.status == GroupRuleStatus.ACTIVE,
            rule_last_active=rule_status.last_active,
        )

        condition_iter = (condition_matches(project, c, event, state, rule)
                          for c in condition_list)

        if match == 'all':
            passed = all(condition_iter)
        elif match == 'any':
            passed = any(condition_iter)
        elif match == 'none':
            passed = not any(condition_iter)
        else:
            rules_logger.error('Unsupported action_match %r for rule %d',
                               match, rule.id)
            continue

        if passed and rule_status.status == GroupRuleStatus.INACTIVE:
            # we only fire if we're able to say that the state has changed
            GroupRuleStatus.objects.filter(
                id=rule_status.id,
                status=GroupRuleStatus.INACTIVE,
            ).update(status=GroupRuleStatus.ACTIVE, last_active=timezone.now())
        elif not passed and rule_status.status == GroupRuleStatus.ACTIVE:
            # update the state to suggest this rule can fire again
            GroupRuleStatus.objects.filter(
                id=rule_status.id,
                status=GroupRuleStatus.ACTIVE,
            ).update(status=GroupRuleStatus.INACTIVE)

        if passed:
            execute_rule.apply_async(
                kwargs={
                    'rule_id': rule.id,
                    'event': event,
                    'state': state,
                },
                expires=120,
            )
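
The action_match handling above evaluates conditions through a generator, so all, any, and not any short-circuit as soon as the outcome is known. The matching semantics can be shown in isolation with plain Python (a standalone sketch, independent of Sentry's models):

def rule_passes(match, condition_results):
    '''Sketch: mirror the 'all' / 'any' / 'none' matching used above.

    condition_results can be any iterable of booleans; because it is
    consumed lazily, evaluation stops as soon as the outcome is known.
    '''
    results = iter(condition_results)
    if match == 'all':
        return all(results)
    if match == 'any':
        return any(results)
    if match == 'none':
        return not any(results)
    raise ValueError('Unsupported action_match %r' % match)


# 'any' stops at the first True, 'all' at the first False.
assert rule_passes('any', (x > 2 for x in [1, 3, 5]))
assert not rule_passes('all', (x > 2 for x in [1, 3, 5]))
assert not rule_passes('none', (x > 2 for x in [1, 3, 5]))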
Example #58
0
 def update_cache_member(self):
     self._cache_member_last_update = timezone.now()
     self._cache_member_name = self.member.name
     self._cache_member_japanese_name = self.member.japanese_name
     self._cache_member_image = self.member.image
Example #59
0
 def _now(self):
     return timezone.now()
 def get_queryset(self):
     # return Question.objects.order_by("-pubDate")[:5]
     return Question.objects.filter(pubDate__lte=timezone.now()).order_by("-pubDate")[:5]