def calculate_rrule_exclusions(start_date, end_date, exclusion_commands, settings):
    exclusion_ruleset = rruleset()

    exclusion_types = exclusion_commands.exclusionList

    for exclusion_type in exclusion_types:

        if hasattr(exclusion_type, "exclusionRange"):
            from_date, _ = convert_date_time(exclusion_type.exclusionRange.fromDateTime, settings)
            to_date, _ = convert_date_time(exclusion_type.exclusionRange.toDateTime, settings)
            exclusion_ruleset.rrule(rrule(freq=DAILY, dtstart=from_date, until=to_date))

        elif hasattr(exclusion_type, "exclusionDateTime"):
            real_date, _ = convert_date_time(exclusion_type.exclusionDateTime, settings)
            exclusion_ruleset.rrule(rrule(freq=DAILY, dtstart=real_date, until=real_date))

        elif hasattr(exclusion_type, "exclusionMacro"):
            macro_value = exclusion_type.exclusionMacro
            exclusion_rule = EXCLUSION_MAP[macro_value]['rule'](start=start_date, end=end_date)
            exclusion_ruleset.rrule(exclusion_rule)
        else:
            # the parser should have rejected anything else, so treat an
            # unrecognised exclusion type as an error
            raise UnknownExclusionTypeError

    matched_dates = list(exclusion_ruleset.between(after=start_date, before=end_date, inc=True))

    return len(matched_dates)
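
For reference, the counting above reduces to a plain dateutil idiom: put one rrule per exclusion into an rruleset and count the matches inside the reporting window. A minimal self-contained sketch (convert_date_time, EXCLUSION_MAP and the parsed exclusion objects are project-specific and not reproduced here):

from datetime import datetime
from dateutil.rrule import rrule, rruleset, DAILY

window_start = datetime(2023, 1, 1)
window_end = datetime(2023, 1, 31)

exclusions = rruleset()
# a whole excluded range, expanded one day at a time
exclusions.rrule(rrule(DAILY, dtstart=datetime(2023, 1, 9), until=datetime(2023, 1, 13)))
# a single excluded date (dtstart == until yields exactly one occurrence)
exclusions.rrule(rrule(DAILY, dtstart=datetime(2023, 1, 20), until=datetime(2023, 1, 20)))

excluded_days = exclusions.between(window_start, window_end, inc=True)
print(len(excluded_days))  # 6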
Example #2
def pay_periods_remaining(start_date, frequency):
    """Calculate the pay periods remaining from start date to the end of year.

    Args:
        start_date: the date to start calculation from.
        frequency: payment frequency (models.Frequency instance)
    Returns:
        the integer count of pay periods remaining.
    """
    if frequency == Frequency.DAILY:
        rule = rrule(DAILY, until=datetime(start_date.year, 12, 31),
                     dtstart=start_date)
    elif frequency == Frequency.WEEKLY:
        # pay each Friday
        rule = rrule(WEEKLY, until=datetime(start_date.year, 12, 31),
                     dtstart=start_date, byweekday=[FR])
    elif frequency == Frequency.SEMI_MONTHLY:
        # pay on 15th and last day of each month
        rule = rrule(MONTHLY, until=datetime(start_date.year, 12, 31),
                     dtstart=start_date, bymonthday=[15, -1])
    elif frequency == Frequency.MONTHLY:
        # pay on last day of each month
        rule = rrule(MONTHLY, until=datetime(start_date.year, 12, 31),
                     dtstart=start_date, bymonthday=[-1])
    elif frequency == Frequency.QUARTERLY:
        # pay on last day of each quarter
        rule = rrule(MONTHLY, bymonth=(3, 6, 9, 12),
                     bymonthday=[-1],
                     dtstart=start_date,
                     until=datetime(start_date.year, 12, 31))
    else:
        raise NotImplementedError
    return len(list(rule))
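
A hedged usage sketch of the semi-monthly branch in isolation (Frequency is a project enum not shown here); the rule simply counts 15th-and-last-day occurrences up to December 31:

from datetime import datetime
from dateutil.rrule import rrule, MONTHLY

start = datetime(2023, 11, 20)
remaining = list(rrule(MONTHLY, bymonthday=[15, -1], dtstart=start,
                       until=datetime(start.year, 12, 31)))
print([d.strftime('%Y-%m-%d') for d in remaining])
# ['2023-11-30', '2023-12-15', '2023-12-31'] -> 3 pay periods left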
Example #3
def recursive_fixed(request):
    campaign = get_object_or_404(Campaign, slug="taxes")
    if request.method == 'GET':
        form = CreateForm()
    else:
        form = CreateForm(request.POST)  # bind data from request.POST into the form
        if form.is_valid():
            start = form.cleaned_data['start']
            end = form.cleaned_data['end']
            choice = form.cleaned_data['choices']
            subject = form.cleaned_data['subject']
            content = form.cleaned_data['content']
            send_time = form.cleaned_data['time']
            option = form.cleaned_data['option']
            if choice == "monthly":
                for r in rrule.rrule(rrule.MONTHLY, bymonthday=(start.day, -1), bysetpos=1, dtstart=start, until=end):
                    send_date = r.replace(hour=send_time.hour, minute=send_time.minute, second=send_time.second)
                    email = FixedEmail.objects.create(send_date=send_date, subject=subject, content=content, option=option)
            elif choice == "yearly":
                for r in rrule.rrule(rrule.YEARLY, bysetpos=1, dtstart=start, until=end):
                    send_date = r.replace(hour=send_time.hour, minute=send_time.minute, second=send_time.second)
                    email = FixedEmail.objects.create(send_date=send_date, subject=subject, content=content, option=option)
            # return HttpResponse(x)
            return redirect('/admin/campaigns/fixedemail/')
    return render(request, 'campaigns/create.html', {
        'form': form, 'campaign': campaign,
    })
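
The bymonthday=(start.day, -1) plus bysetpos=1 combination in the monthly branch is the usual dateutil idiom for 'same day every month, clamped to the last day when the month is shorter'. A minimal sketch of just that rule, independent of the Django form handling above:

from datetime import datetime
from dateutil.rrule import rrule, MONTHLY

start = datetime(2023, 1, 31)
# For each month the candidate set is {day 31, last day}; bysetpos=1 keeps the
# earliest candidate, so months without a 31st fall back to their last day.
rule = rrule(MONTHLY, bymonthday=(start.day, -1), bysetpos=1,
             dtstart=start, until=datetime(2023, 4, 30))
print([d.strftime('%Y-%m-%d') for d in rule])
# ['2023-01-31', '2023-02-28', '2023-03-31', '2023-04-30']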
Example #4
    def test_timezone_serializing(self):
        """
        Serializing with timezones test
        """
        tzs = dateutil.tz.tzical("test_files/timezones.ics")
        pacific = tzs.get('US/Pacific')
        cal = base.Component('VCALENDAR')
        cal.setBehavior(icalendar.VCalendar2_0)
        ev = cal.add('vevent')
        ev.add('dtstart').value = datetime.datetime(2005, 10, 12, 9, tzinfo = pacific)
        evruleset = rruleset()
        evruleset.rrule(rrule(WEEKLY, interval=2, byweekday=[2,4], until=datetime.datetime(2005, 12, 15, 9)))
        evruleset.rrule(rrule(MONTHLY, bymonthday=[-1,-5]))
        evruleset.exdate(datetime.datetime(2005, 10, 14, 9, tzinfo = pacific))
        ev.rruleset = evruleset
        ev.add('duration').value = datetime.timedelta(hours=1)

        # breaking date?
        #self.assertEqual(
        #    cal.serialize().replace('\r\n', '\n'),
        #    """BEGIN:VCALENDAR\nVERSION:2.0\nPRODID:-//PYVOBJECT//NONSGML Version 1//EN\nBEGIN:VTIMEZONE\nTZID:US/Pacific\nBEGIN:STANDARD\nDTSTART:20001029T020000\nRRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10\nTZNAME:PST\nTZOFFSETFROM:-0700\nTZOFFSETTO:-0800\nEND:STANDARD\nBEGIN:DAYLIGHT\nDTSTART:20000402T020000\nRRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4\nTZNAME:PDT\nTZOFFSETFROM:-0800\nTZOFFSETTO:-0700\nEND:DAYLIGHT\nEND:VTIMEZONE\nBEGIN:VEVENT\nUID:20150108T142459Z - 64333@testing-worker-linux-12-2-29839-linux-6-46319\n358\nDTSTART;TZID=US/Pacific:20051012T090000\nDURATION:PT1H\nEXDATE;TZID=US/Pacific:20051014T090000\nRRULE:FREQ=WEEKLY;BYDAY=WE,FR;INTERVAL=2;UNTIL=20051215T090000\nRRULE:FREQ=MONTHLY;BYMONTHDAY=-5,-1\nEND:VEVENT\nEND:VCALENDAR\n"""
        #)

        apple = tzs.get('America/Montreal')
        ev.dtstart.value = datetime.datetime(2005, 10, 12, 9, tzinfo = apple)
def total_incomes(request):
    form = DateSelectForm(data=request.GET)
    if form.is_valid():
        incomes_start_date = form.cleaned_data.get('incomes_start_date', None)
        incomes_end_date = form.cleaned_data.get('incomes_end_date', None)

    monthly_incomes = {}
    reservation_orders = ReservationOrder.objects.filter(status=ReservationOrder.STATUS_PAID,
                                                         start_date__gte=incomes_start_date,
                                                         end_date__lte=incomes_end_date)

    for dt in rrule(MONTHLY, dtstart=incomes_start_date, until=incomes_end_date):
        for reservation_order in reservation_orders:
            if dt.date() < reservation_order.start_date and (
                        dt + relativedelta(months=1) ).date() > reservation_order.start_date:
                monthly_incomes.setdefault(dt.strftime('%Y-%m'), 0)
                monthly_incomes[dt.strftime('%Y-%m')] += reservation_order.price

    months = []
    for dt in rrule(MONTHLY, dtstart=incomes_start_date, until=incomes_end_date):
        months.append({
            'date': dt.strftime('%Y-%m'),
            'income': monthly_incomes.get(dt.strftime('%Y-%m'), 0),
        })

    context = {
        'months': months
    }

    return render_to_response('reports/report_total_income.html', context)
    def iter_start_time(start, end, repetition):
        from indico.modules.rb.models.reservations import RepeatFrequency

        repeat_frequency, repeat_interval = repetition

        if repeat_frequency == RepeatFrequency.NEVER:
            return [start]

        if repeat_frequency == RepeatFrequency.DAY:
            if repeat_interval == 1:
                return rrule.rrule(rrule.DAILY, dtstart=start, until=end)
            else:
                raise IndicoError(u'Unsupported interval')

        elif repeat_frequency == RepeatFrequency.WEEK:
            if 0 < repeat_interval < 4:
                return rrule.rrule(rrule.WEEKLY, dtstart=start, until=end, interval=repeat_interval)
            else:
                raise IndicoError(u'Unsupported interval')

        elif repeat_frequency == RepeatFrequency.MONTH:

            if repeat_interval == 1:
                position = int(ceil(start.day / 7.0))
                if position == 5:
                    # The fifth weekday of the month will always be the last one
                    position = -1
                return rrule.rrule(rrule.MONTHLY, dtstart=start, until=end, byweekday=start.weekday(),
                                   bysetpos=position)
            else:
                raise IndicoError(u'Unsupported interval {}'.format(repeat_interval))

        raise IndicoError(u'Unexpected frequency {}'.format(repeat_frequency))
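
A small sketch of the monthly branch on its own: bysetpos together with byweekday is how dateutil expresses 'the n-th such weekday of the month', and ceil(start.day / 7) recovers n from the start date (RepeatFrequency and IndicoError are Indico-specific and omitted):

from datetime import datetime
from math import ceil
from dateutil import rrule

start = datetime(2023, 1, 18)             # third Wednesday of January 2023
position = int(ceil(start.day / 7.0))     # 18 / 7 -> 3
monthly = rrule.rrule(rrule.MONTHLY, dtstart=start, until=datetime(2023, 4, 30),
                      byweekday=start.weekday(), bysetpos=position)
print([d.strftime('%Y-%m-%d') for d in monthly])
# third Wednesday of each month: 2023-01-18, 2023-02-15, 2023-03-15, 2023-04-19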
    def get_dates_from_weekly_setting(self, cr, uid, date_start, weight, weekdays, date_end=False, count=False, context=None):
        dates = []
        switch_date = {
            'monday':relativedelta.MO,
            'tuesday':relativedelta.TU,
            'wednesday':relativedelta.WE,
            'thursday':relativedelta.TH,
            'friday':relativedelta.FR,
            'saturday':relativedelta.SA,
            'sunday':relativedelta.SU
        }
        weekdays_todo = [switch_date.get(key) for key in weekdays if key in switch_date.keys()]

        if not context:
            context = self.pool.get('res.users').context_get(cr, uid, uid)

        date_start = fields.datetime.context_timestamp(cr, uid, datetime.strptime(date_start, '%Y-%m-%d %H:%M:%S'),context=context)
        if date_end:
            until = fields.datetime.context_timestamp(cr, uid, datetime.strptime(date_end, '%Y-%m-%d %H:%M:%S'),context=context)
            dates = rrule.rrule(rrule.WEEKLY, byweekday=weekdays_todo, interval=weight, dtstart=date_start, until=until)
        elif count:
            dates = rrule.rrule(rrule.WEEKLY, byweekday=weekdays_todo, interval=weight, dtstart=date_start, count=count)
        else:
            raise osv.except_osv(_('Error'), _('Missing parameter: date_end or count'))
        if dates:
            dates = list(dates)
        return dates
    def iter_start_time(start, end, repetition):
        from indico.modules.rb.models.reservations import RepeatFrequency

        repeat_frequency, repeat_interval = repetition

        if repeat_frequency == RepeatFrequency.NEVER:
            return [start]

        if repeat_frequency == RepeatFrequency.DAY:
            return rrule.rrule(rrule.DAILY, dtstart=start, until=end)

        elif repeat_frequency == RepeatFrequency.WEEK:
            if 0 < repeat_interval < 4:
                return rrule.rrule(rrule.WEEKLY, dtstart=start, until=end, interval=repeat_interval)
            else:
                raise IndicoError('Unsupported interval')

        elif repeat_frequency == RepeatFrequency.MONTH:
            if repeat_interval == 1:
                position = start.day // 7 + 1
                return rrule.rrule(rrule.MONTHLY, dtstart=start, until=end, byweekday=start.weekday(),
                                   bysetpos=position)
            else:
                raise IndicoError('Unsupported interval')

        elif repeat_frequency == RepeatFrequency.YEAR:
            raise IndicoError('Unsupported frequency')

        raise IndicoError('Unexpected frequency')
 def get_dates_from_weekdaymonthly_setting(self, cr, uid, date_start, weight, relative_position, weekday, date_end=False, count=False, context=None):
     dates = []
     switch = {
             'first':1,
             'second':2,
             'third':3,
             'fourth':4,
             'last':-1
     }
     switch_date = {
         'monday':relativedelta.MO,
         'tuesday':relativedelta.TU,
         'wednesday':relativedelta.WE,
         'thursday':relativedelta.TH,
         'friday':relativedelta.FR,
         'saturday':relativedelta.SA,
         'sunday':relativedelta.SU
     }
     if not context:
         context = self.pool.get('res.users').context_get(cr, uid, uid)
     date_start = fields.datetime.context_timestamp(cr, uid, datetime.strptime(date_start, '%Y-%m-%d %H:%M:%S'),context=context)
     if date_end:
         until = fields.datetime.context_timestamp(cr, uid, datetime.strptime(date_end, '%Y-%m-%d %H:%M:%S'),context=context)
         dates = rrule.rrule(rrule.MONTHLY, interval=weight, dtstart=date_start, until=until,
                                 byweekday=switch_date[weekday](switch[relative_position]))
     elif count:
         dates = rrule.rrule(rrule.MONTHLY, interval=weight, dtstart=date_start, count=count,
                                 byweekday=switch_date[weekday](switch[relative_position]))
     else:
         raise osv.except_osv(_('Error'), _('Missing parameter: date_end or count'))
     if dates:
         dates = list(dates)
     return dates
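
The switch_date[weekday](switch[relative_position]) expression builds a dateutil weekday with an ordinal, e.g. FR(-1) for 'the last Friday of the month'. A minimal sketch of the same idea without the OpenERP plumbing:

from datetime import datetime
from dateutil import rrule

# Last Friday of every month in the first quarter of 2023.
last_fridays = rrule.rrule(rrule.MONTHLY, interval=1,
                           dtstart=datetime(2023, 1, 1),
                           until=datetime(2023, 3, 31),
                           byweekday=rrule.FR(-1))
print([d.strftime('%Y-%m-%d') for d in last_fridays])
# ['2023-01-27', '2023-02-24', '2023-03-31']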
Example #10
 def __get_rrule(self):
     start_date = self.programme.start_date
     if self.schedule_board.start_date and start_date < self.schedule_board.start_date:
         start_date = self.schedule_board.start_date
     if self.programme.end_date:
         end_date = self.programme.end_date
         if self.schedule_board.end_date and end_date > self.schedule_board.end_date:
             end_date = self.schedule_board.end_date
         # Due to rrule we need to add 1 day
         end_date = end_date + datetime.timedelta(days=1)
         return rrule.rrule(
             rrule.WEEKLY, byweekday=[self.day],
             dtstart=datetime.datetime.combine(start_date, self.start_hour),
             until=end_date
         )
     else:
         end_date = self.schedule_board.end_date
         if end_date:
             # Due to rrule we need to add 1 day
             end_date = end_date + datetime.timedelta(days=1)
             return rrule.rrule(
                 rrule.WEEKLY, byweekday=[self.day],
                 dtstart=datetime.datetime.combine(start_date, self.start_hour),
                 until=end_date
             )
         else:
             return rrule.rrule(
                 rrule.WEEKLY, byweekday=[self.day],
                 dtstart=datetime.datetime.combine(start_date, self.start_hour)
             )
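
The '# Due to rrule we need to add 1 day' comments compensate for until being compared against full datetimes: dtstart carries start_hour, so the occurrence on the final day falls after midnight of that day and would otherwise be dropped. A small sketch of the effect, assuming plain dates and a 10:00 start time:

import datetime
from dateutil import rrule

start_date = datetime.date(2023, 1, 2)    # a Monday
end_date = datetime.date(2023, 1, 16)     # also a Monday
dtstart = datetime.datetime.combine(start_date, datetime.time(10, 0))

# until at midnight of the last day misses that day's 10:00 occurrence...
too_short = rrule.rrule(rrule.WEEKLY, byweekday=[0], dtstart=dtstart,
                        until=datetime.datetime.combine(end_date, datetime.time()))
# ...while pushing until one day forward keeps it.
full = rrule.rrule(rrule.WEEKLY, byweekday=[0], dtstart=dtstart,
                   until=datetime.datetime.combine(end_date + datetime.timedelta(days=1),
                                                   datetime.time()))
print(len(list(too_short)), len(list(full)))  # 2 3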
Example #11
def trends_json(request):
   if request.method == 'GET':
      GET = request.GET
      if GET.has_key('sDate') and GET.has_key('eDate'):
         eStats = EvernoteStatistics(request.user.profile)
         startDate = date.fromtimestamp(float(GET['sDate'])/1000)
         endDate = date.fromtimestamp(float(GET['eDate'])/1000)
         filt = eStats.create_date_filter(startDate, endDate)
         if GET.has_key('tag'):
            filt = eStats.create_guid_filter(GET['tag'],False,filt)    
         if GET.has_key('notebook'):
            filt = eStats.create_guid_filter(GET['notebook'],True,filt)    
         #if the time frame is across multiple years then use months
         formattedTrend = [["Date","Notes"]]
         if (endDate.year - startDate.year) > 1:
            dateTrends = eStats.get_date_trends(True,filt)
            for dt in rrule.rrule(rrule.MONTHLY, dtstart=startDate, 
                                                 until=endDate):
               formattedTrend.append([dt.strftime("%b \'%y"),
                                     dateTrends[dt.strftime("%b \'%y")]])
         else:
            dateTrends = eStats.get_date_trends(False,filt)
            for dt in rrule.rrule(rrule.DAILY, dtstart=startDate, 
                                               until=endDate):
               formattedTrend.append([dt.strftime("%d %b"),
                                     dateTrends[dt.strftime("%d %b")]])
         jsonText = json.dumps({'data': formattedTrend,
                                'title': "Total Notes"})
         return HttpResponse(jsonText,content_type='application/json')
Example #12
def workday_calendar(start=None, stop=None, excludes=None,
                     workday_calendar=None):
    '''
    Return a dateutil.rrule.rruleset object describing a work calendar.

    By default the returned rruleset object excludes weekends.

    Optional Arguments
    ------------------

    start
        Start date for calendar, default today.
    stop
        Stop date for calendar, default None.
    excludes
        An iterable of datetime.datetime objects to exclude in addition to
        weekends from the returned rruleset object. excludes is intended to
        provide support for both a holiday calendar and individual vacation
        schedules. It is recommended that excludes not be used for sick days,
        as schedule variance due to illness should affect your forecasting.
    workday_calendar
        A dateutil.rrule.rruleset object representing the base work calendar.
        This argument is useful when your normal schedule does not align with
        the default assumption that MO - FR are work-days.
    '''
    dates = workday_calendar or _default_ruleset()
    if start:
        dates.rrule(rrule.rrule(rrule.DAILY, dtstart=start))
    if stop:
        dates.rrule(rrule.rrule(rrule.DAILY, until=stop))
    if excludes:
        for exclude in excludes:
            dates.exdate(exclude)
    return dates
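
The _default_ruleset() helper is not shown in this example; presumably it returns a ruleset whose exrule drops Saturdays and Sundays, along the lines of this hypothetical stand-in:

import datetime
from dateutil import rrule

def _default_ruleset():
    # hypothetical stand-in: everything minus weekends
    dates = rrule.rruleset()
    dates.exrule(rrule.rrule(rrule.DAILY, byweekday=(rrule.SA, rrule.SU),
                             dtstart=datetime.datetime(2000, 1, 1)))
    return dates

cal = _default_ruleset()
cal.rrule(rrule.rrule(rrule.DAILY,
                      dtstart=datetime.datetime(2023, 1, 2),
                      until=datetime.datetime(2023, 1, 8)))
print(len(list(cal)))  # 5 workdays: Mon 2023-01-02 .. Fri 2023-01-06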
Example #13
            def obj_opening(self):
                if self.el['calendars'] == []:
                    # yes, sometimes it's a list
                    return NotAvailable

                if self.el['calendars'].get('everyday'):
                    rule = OpeningRule()
                    rule.dates = rrule.rrule(rrule.DAILY)
                    rule.times = [(time(0, 0), time(23, 59, 59))]
                    rule.is_open = True

                    res = OpeningHours()
                    res.rules = [rule]
                    return res

                rules = []
                for day, hours in self.el['calendars'].items():
                    rule = OpeningRule()
                    rule.is_open = True

                    day = parsedate(day)
                    rule.dates = rrule.rrule(rrule.DAILY, count=1, dtstart=day)
                    rule.times = [(parsetime(t[0]), parsetime(t[1])) for t in hours if t[0] != 'closed']
                    rule.is_open = True

                    if rule.times:
                        rules.append(rule)

                res = OpeningHours()
                res.rules = rules
                return res
Example #14
 def _gen_dtend_rrule(dtstarts, vevent):
     """Generate an rdate or rrule from a list of dates and add it to the vevent"""
     interval = Remind._interval(dtstarts)
     if interval > 0 and interval % 7 == 0:
         rset = rrule.rruleset()
         rset.rrule(rrule.rrule(freq=rrule.WEEKLY, interval=interval//7, count=len(dtstarts)))
         vevent.rruleset = rset
     elif interval > 1:
         rset = rrule.rruleset()
         rset.rrule(rrule.rrule(freq=rrule.DAILY, interval=interval, count=len(dtstarts)))
         vevent.rruleset = rset
     elif interval > 0:
         if isinstance(dtstarts[0], datetime):
             rset = rrule.rruleset()
             rset.rrule(rrule.rrule(freq=rrule.DAILY, count=len(dtstarts)))
             vevent.rruleset = rset
         else:
             vevent.add('dtend').value = dtstarts[-1] + timedelta(days=1)
     else:
         rset = rrule.rruleset()
         if isinstance(dtstarts[0], datetime):
             for dat in dtstarts:
                 rset.rdate(dat)
         else:
             for dat in dtstarts:
                 rset.rdate(datetime(dat.year, dat.month, dat.day))
         # temporary set dtstart to a different date, so it's not
         # removed from rset by python-vobject works around bug in
         # Android:
         # https://github.com/rfc2822/davdroid/issues/340
         vevent.dtstart.value = dtstarts[0] - timedelta(days=1)
         vevent.rruleset = rset
         vevent.dtstart.value = dtstarts[0]
         if not isinstance(dtstarts[0], datetime):
             vevent.add('dtend').value = dtstarts[0] + timedelta(days=1)
Example #15
def SeqDates(datestart,datestop,typetime,parse):
  
   '''
      Make a sequence of dates.
      Input: datestart, datestop, typetime, parse
      Output: sequence (list or tuple) of datetime objects
      
      Format: DAY MONTH YEAR HH:MM ex.: 08 07 2013 15:30
   '''
   
   from datetime import datetime
   from dateutil import rrule

   if typetime == "min":
     interv = rrule.MINUTELY
   elif typetime == "hour":
     interv = rrule.HOURLY
   elif typetime == "day":
     interv = rrule.DAILY
   
   if isinstance(datestart,str) and isinstance(datestop,str):
     d1 = datetime.strptime(datestart,"%d %m %Y %H:%M")
     d2 = datetime.strptime(datestop,"%d %m %Y %H:%M")
     return list(rrule.rrule(interv,interval=parse,dtstart=d1,until=d2))

   else:
     return tuple(rrule.rrule(interv,interval=parse,dtstart=datestart,until=datestop))
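
A short usage sketch, following the documented DAY MONTH YEAR HH:MM format:

dates = SeqDates("08 07 2013 15:30", "08 07 2013 18:30", "hour", 1)
print(dates)
# four datetimes on 2013-07-08: 15:30, 16:30, 17:30 and 18:30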
	def calculate_climatology( self ):
		begin = len( [ i for i in rrule( MONTHLY, dtstart=self.begin_subset_date, until=self.climatology_begin_date ) ] ) - 1
		end = len( [ i for i in rrule( MONTHLY, dtstart=self.begin_subset_date, until=self.climatology_end_date ) ] )
		data_sub = self.data[ begin:end, ... ]
		clim_arr = np.dstack([ np.mean( data_sub[ j, ... ], axis=0 ) for j in [ range( i-1, 360, 12 ) for i in range( 1, 12+1, 1 ) ] ])
		clim_arr = np.rollaxis( clim_arr, axis=-1 )
		return clim_arr
Example #17
def make_date_range_tuples(start, end, gap):
    """Make an iterable of date tuples for use in iterating forms

    For example, a form might allow start and end dates and you want to iterate
    it one week at a time starting on Jan 1 and ending on Feb 3:

    >>> make_date_range_tuples(date(2017, 1, 1), date(2017, 2, 3), 7)
    [(Jan 1, Jan 7), (Jan 8, Jan 14), (Jan 15, Jan 21), (Jan 22, Jan 28),
     (Jan 29, Feb 3)]

    :param start: date when the query should start.
    :param end: date when the query should end.
    :param gap: the number of days, inclusive, that a query should span at a
    time.

    :rtype list(tuple)
    :returns: list of start, end tuples
    """
    # We create a list of start dates and a list of end dates, then zip them
    # together. If end_dates is shorter than start_dates, fill the last value
    # with the original end date.
    start_dates = [d.date() for d in rrule(DAILY, interval=gap, dtstart=start,
                                           until=end)]
    end_start = start + datetime.timedelta(days=gap - 1)
    end_dates = [d.date() for d in rrule(DAILY, interval=gap, dtstart=end_start,
                                         until=end)]
    return list(zip_longest(start_dates, end_dates, fillvalue=end))
Example #18
def date_range(start, end, q):
    """Date  Range

    >>> from datetime import datetime
    >>> from django.utils.timezone import utc

    >>> s_date = datetime(2012, 7, 11, 0, 0, 0, 0).replace(tzinfo=utc)

    >>> e_date = datetime(2012, 7, 12, 23, 59, 59, 99999).replace(tzinfo=utc)

    >>> date_range(s_date, e_date, 2)
    [datetime.datetime(2012, 7, 11, 0, 0), datetime.datetime(2012, 7, 12, 0, 0)]

    """
    r = (end + timedelta(days=1) - start).days
    if int(q) <= 2:
        return list(rrule(DAILY,
               dtstart=parse(str(start)),
               until=parse(str(end))))
    if int(q) >= 3:
        return list(rrule(HOURLY, interval=1,
               dtstart=parse(str(start)),
               until=parse(str(end))))
    else:
        return [start + timedelta(days=i) for i in range(r)]
Example #19
    def get_area_graph_data(self, **kwargs):
        # TODO: save chosen product groups between sessions
        if 'group_ids' in kwargs:
            groups = ProductGroup.objects.by_user(self.request.user).filter(pk__in=kwargs['group_ids'])  # TODO if groups is empty - raise 404?
        else:
            groups = ProductGroup.objects.get_main_groups(self.request.user)
        # Column names
        col_names = []
        for group in groups:
            col_names.append(group.get_full_name())
        start_date = None
        if 'start_date' in kwargs:
            start_date = kwargs['start_date']
        else:
            # Starting date for the area graph = date of first purchase from groups
            try:
                start_date = Purchase.objects.filter(productGroup__in=groups).aggregate(Min('purchase_date')).values()[0].date()
            except:
                pass
        end_date = None
        if 'end_date' in kwargs:
            end_date = kwargs['end_date']
        else:    
            end_date = datetime.date.today()
#        if start_date is None:
#            start_date = end_date-datetime.timedelta(days=365*2)
#        start_date = start_date.replace(day=1)
        # Generate data for area graph (daily, monthly, yearly money spent within groups)
        context_data = []
        resolution = None
        if 'resolution' in kwargs:
            resolution = kwargs['resolution']
        else:
            resolution = 'm'
        if resolution == 'd':
            for dt in rrule.rrule(rrule.DAILY, dtstart=start_date, until=end_date):
                data_row = []
                data_row.append(','.join([str(dt.day), str(dt.month), str(dt.year)]))
                for group in groups:
                    data_row.append(group.get_money_spent_dt(year=dt.year, month=dt.month, day=dt.day))
                context_data.append(data_row)
        elif resolution == 'm':
            for dt in rrule.rrule(rrule.MONTHLY, dtstart=start_date, until=end_date):
                data_row = []
                data_row.append(','.join([str(dt.month), str(dt.year)]))
                for group in groups:
                    group.year = dt.year
                    group.month = dt.month
                    data_row.append(group.get_money_spent())
                context_data.append(data_row)
        else:  # resolution == 'y'
            for dt in rrule.rrule(rrule.YEARLY, dtstart=start_date, until=end_date):
                data_row = []
                data_row.append(str(dt.year))
                for group in groups:
                    data_row.append(group.get_money_spent_dt(year=dt.year))
                context_data.append(data_row)
        
        return [context_data, col_names]
Example #20
def exams_list(request):
    exams = Exam.objects.all().order_by('date_time')        # list of all exams, sorted by date and time
    groups = Group.objects.all()                            # list of all groups
    teachers = Teacher.objects.all()                        # list of all teachers
    # build the list of months of a single year for the months menu
    date1 = datetime(2016,1,1)
    date2 = datetime(2016,12,1)
    list_months = list(rrule(MONTHLY, dtstart = date1, until = date2))
    # build the list of years covering all exams
    date3 = exams[0].date_time            # first year, since exams is sorted by 'date_time'
    date4 = exams[len(exams)-1].date_time # last year
    list_years = list(rrule(YEARLY, dtstart = date3, until = date4+relativedelta(years=+1)))  # list of years for the years menu
    
    # Sort exams either by the teacher's last name or by the teacher's first name
    order_by = request.GET.get('order_by', '')
    if order_by == 'first_name':
        exams = exams.order_by("teaсher__first_name")
    elif order_by == 'last_name':
        exams = exams.order_by("teaсher__last_name")
    
    # Filter exams by the specified teacher
    teacher_id=request.GET.get('teacher_id','')
        # teachers.values_list('id',flat=True) builds the list of all existing 'id' values in the 'teachers' QuerySet
        # list(map(str, ***) converts that list of numbers into a list of strings
    if teacher_id in list(map(str, teachers.values_list('id',flat=True))):
        exams = exams.filter(teaсher__id = teacher_id)

    # Filter exams by the specified group
    group_id=request.GET.get('group_id','')
    if group_id in list(map(str, groups.values_list('id',flat=True))):
        exams = exams.filter(group__id = group_id)  

    # Filter exams by the specified year
    year_id=request.GET.get('year_id','')
    if year_id:
        exams = exams.filter(date_time__year = year_id)

    # Filter exams by the specified month
    month_id=request.GET.get('month_id','')
    if month_id:
        exams = exams.filter(date_time__month = month_id)
     
    paginator = Paginator(exams, 5) # Show 5 exams per page

    page = request.GET.get('page')
    try:
        exams = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        exams = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        exams = paginator.page(paginator.num_pages)

    return render(request,'students/exams_list.html',{'exams':exams,
                                                      'groups':groups,
                                                      'teachers':teachers,
                                                      'list_months':list_months,
                                                      'list_years':list_years})
Example #21
def generate_dates(start=None, end=None, by='week'):
    if by == 'month':
        start = get_month_start(start)
        return rrule.rrule(rrule.MONTHLY, dtstart=start, until=end)
    if by == 'week':
        start = get_week_start(start)
        return rrule.rrule(rrule.WEEKLY, dtstart=start, until=end, byweekday=0)
    if by == 'day':
        return rrule.rrule(rrule.DAILY, dtstart=start, until=end)
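
A quick usage sketch for the daily branch, which does not depend on the get_week_start / get_month_start helpers (assumed to snap a date back to the start of its week or month):

from datetime import datetime

days = generate_dates(start=datetime(2023, 5, 1), end=datetime(2023, 5, 3), by='day')
print([d.strftime('%Y-%m-%d') for d in days])
# ['2023-05-01', '2023-05-02', '2023-05-03']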
Example #22
 def get_rrule_object(self):
     if self.rule is not None:
         params, empty = self._event_params()
         frequency = self.rule.rrule_frequency()
         if not empty:
             return rrule.rrule(frequency, dtstart=self.start, **params)
         else:
             year = self.start.year - 1
             return rrule.rrule(frequency, dtstart=self.start, until=self.start.replace(year=year))
Example #23
def redis_interbargraph_set(r_serv, year, month, overwrite):
    """Create a Redis sorted set.

    :param r_serv: -- connexion to redis database
    :param year: -- (integer) The year to process
    :param month: -- (integer) The month to process
    :param overwrite: -- (bool) trigger the overwrite mode

    This function create inside redis the intersection of all days in
    a month two by two.
    Example:
    For a month of 31days it will create 30 sorted set between day and
    day+1 until the last day.
    The overwrite mode delete the intersets and re-create them.

    """
    a = date(year, month, 01)
    b = date(year, month, cal.monthrange(year, month)[1])

    if overwrite:
        r_serv.delete("InterSet")

        for dt in rrule(DAILY, dtstart = a, until = b - timedelta(1)):
            dayafter = dt+timedelta(1)

            r_serv.delete(str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")))

            r_serv.zinterstore(
                str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")),
                {str(dt.strftime("%Y%m%d")):1,
                str(dayafter.strftime("%Y%m%d")):-1})

            r_serv.zadd(
                "InterSet",
                1,
                str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")))
    else:
        for dt in rrule(DAILY, dtstart = a, until = b - timedelta(1)):
            dayafter = dt+timedelta(1)

            if r_serv.zcard(str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d"))) == 0:

                r_serv.zinterstore(
                    str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")),
                    {str(dt.strftime("%Y%m%d")):1,
                    str(dayafter.strftime("%Y%m%d")):-1})

                r_serv.zadd(
                    "InterSet",
                    1,
                    str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")))

                publisher.info(str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d"))+" Intersection Created")

            else:
                publisher.warning("Data already exist, operation aborted.")
Example #24
def vectorize_weekly(grid, crime_type=None, seasonal=False):
    '''
    Special vectorization for weekly data

    :param grid_size: size of the cell dimension for the grid
    :param crime_type: type of crime to be trained, None value will
        train all
    :param seasonal: implement seasonality or not
    :rtype: returns the resulting vector
    '''
    filters = {}
    if crime_type is not None:
        filters['primary_type'] = crime_type
    first_data = CriminalRecord.objects.first()
    last_data = CriminalRecord.objects.last()
    start = first_data.date
    dtstart = start + timedelta(days=7)
    end = last_data.date
    if seasonal:
        timesteps = {}
        for dt in rrule.rrule(rrule.WEEKLY, dtstart=dtstart, until=end):
            season_key = '-'.join(
                ["%02d" % getattr(start, k) for k in ['month', 'day']])
            try:
                season = timesteps[season_key]
            except KeyError:
                timesteps[season_key] = []
            vector = []
            for i in xrange(len(grid)):
                g = grid[i]
                filters['date__range'] = (start, dt)
                filters['date__lte'] = end
                filters['location__intersects'] = g
                crimes = CriminalRecord.objects.filter(**filters).count()
                has_crime = 1 if crimes > 0 else -1
                vector.append(has_crime)
            start = dt
            timesteps[season_key].append(vector)
        print timesteps
        vectors = []
        for k1 in sorted(timesteps):
            vectors.append(timesteps[k1])
    else:
        vectors = []
        for dt in rrule.rrule(rrule.WEEKLY, dtstart=dtstart, until=end):
            vector = []
            for i in xrange(len(grid)):
                g = grid[i]
                filters['date__range'] = (start, dt)
                filters['location__intersects'] = g
                crimes = CriminalRecord.objects.filter(**filters).count()
                has_crime = 1 if crimes > 0 else -1
                vector.append(has_crime)
            start = dt
            vectors.append(vector)
    return vectors
	def get_subset_data( self ):
		data = self.get_data( )
		dates = self.get_dates( )
		begin = dates[ 0 ]
		end = dates[ len( dates ) - 1 ]
		sub_begin = len( [ i for i in rrule( MONTHLY, dtstart=begin, until=self.begin_subset_date ) ] ) - 1
		sub_end = len( [ i for i in rrule( MONTHLY, dtstart=begin, until=self.end_subset_date ) ] )
		date_sub = dates[ sub_begin:sub_end ]
		data_sub = data[ sub_begin:sub_end, ... ]
		return date_sub, data_sub
Example #26
    def test_yearly_counters(self):
        start = date(2009, 1, 1)
        end = date(2010, 3, 1)
        self.create_entry_on_day(date(2009, 1, 1), 3)
        self.create_entry_on_day(date(2009, 1, 4), 1)
        self.create_entry_on_day(date(2010, 12, 15), 9)

        EXPECTED = {date(2009, 1, 1): 4, date(2010, 1, 1): 9}
        self.assertEqual(EXPECTED, Entry.cache.yearly_counts(created=rrule(YEARLY, dtstart=start, until=end)))
        self.assertEqual(EXPECTED, Entry.cache.yearly_counts(created=rrule(YEARLY, dtstart=start, until=end)))
Example #27
 def as_rrule(self):
     if self.frequency == WEEKDAYS:
         dw = []
         l = self.interval
         for i in range(7):
             if l & 1 == 1:
                 dw.append(weekdays[i])
             l >>= 1
         return rules.rrule(rules.DAILY, byweekday=dw, dtstart=self.start)
     else:
         return rules.rrule(frq_to_rrl[self.frequency], interval=self.interval, dtstart=self.start)
Example #28
def workday_count(alpha, omega):
    """
    Calculation of working days, taking weekends into account.

    """
    dates = rrule.rruleset()  # create an rrule.rruleset instance
    dates.rrule(rrule.rrule(DAILY, dtstart=alpha, until=omega))  # this set is INCLUSIVE of alpha and omega
    dates.exrule(rrule.rrule(DAILY,
                             byweekday=(rrule.SA, rrule.SU),
                             dtstart=alpha))  # here's where we exclude the weekend dates
    return len(list(dates))  # there's probably a faster way to handle this
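
A small usage sketch; both endpoints are counted when they fall on workdays:

from datetime import datetime

# Monday 2023-01-02 through Friday 2023-01-13: two full work weeks.
print(workday_count(datetime(2023, 1, 2), datetime(2023, 1, 13)))  # 10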
Example #29
def binned_density_map(
        point_table, grid_table, startdate, enddate, t_interval = 'WEEKLY', database='gdelt',
        user='******', create_pntcnt_table=True):
    if t_interval == 'DAILY':
        dates = rrule.rrule(rrule.DAILY, dtstart=startdate, until=enddate)
        pg_t_interval = 'DAY'
    elif t_interval == 'WEEKLY':
        dates = rrule.rrule(rrule.WEEKLY, dtstart=startdate, until=enddate)
        pg_t_interval = 'WEEK'
    elif t_interval == 'MONTHLY':
        dates = rrule.rrule(rrule.MONTHLY, dtstart=startdate, until=enddate)
        pg_t_interval = 'MONTH'
    elif t_interval == 'YEARLY':
        dates = rrule.rrule(rrule.YEARLY, dtstart=startdate, until=enddate)
        pg_t_interval = 'YEAR'
    else:
        print "t_interval must be DAILY, WEEKLY, MONTHLY, or YEARLY"
        sys.exit(1)

    t1 = datetime.datetime.now()

    ##initialize a copy of the grid table, to be filled later with the point-in-polygon count for each month
    #the spatial column must be named geom and be of type geometry, rather than geography, as the PostGIS
    #function ST_Contains only works with the geometry type.
    #to create a grid table from a shpfile, use the following PostGIS command:
    #   shp2pgsql -s 4326 -g geom -I <file.shp> public.<grid_table> | psql -d <database>
    #I do not know how much error this introduces

    ##the code below builds its SQL with string formatting rather than parameterized queries, which allows
    #for sql injection and is dangerous if another user is allowed to set the parameters.  So long as this
    #is only run locally, sql injection shouldn't be an issue
    if create_pntcnt_table:
        connection = None
        try:
            print "connecting to postgres"
            connection = psycopg2.connect(database = database, user = user)
            cursor = connection.cursor()

            print "Copying grid table %s to new table %s_pntcnt" %(grid_table, point_table)
            cursor.execute("CREATE TABLE %(point_table)s_pntcnt AS SELECT * FROM %(grid_table)s" %{
                'point_table': point_table, 'grid_table': grid_table })

            connection.commit()
            print "commit successful: created table %s_pntcnt \n" %(point_table)

        except psycopg2.DatabaseError, e:
            if connection:
                connection.rollback()

            print 'commit failed - Error %s' % e + "\n"
            sys.exit(1)

        finally:
    def get_dates_from_setting(self, cr, uid, id, context=None):
        recurrence = self.browse(cr, uid, id, context=context)
        dates = []
        periodicity = recurrence.recur_periodicity
        date_start = fields.datetime.context_timestamp(cr, uid, datetime.strptime(recurrence.date_start, '%Y-%m-%d %H:%M:%S'),context=context)
        date_end = fields.datetime.context_timestamp(cr, uid, datetime.strptime(recurrence.date_end, '%Y-%m-%d %H:%M:%S'),context=context) if recurrence.date_end else False

        switch_date = {
            'monday':relativedelta.MO,
            'tuesday':relativedelta.TU,
            'wednesday':relativedelta.WE,
            'thursday':relativedelta.TH,
            'friday':relativedelta.FR,
            'saturday':relativedelta.SA,
            'sunday':relativedelta.SU
        }
        if not periodicity:
            periodicity = 1
        if recurrence.recur_type == 'daily':
            dates = rrule.rrule(rrule.DAILY, interval=periodicity, dtstart=date_start, until=date_end)
            # if date_end != last_occurrence, we add it to the generated dates (meaning date_end > last_occurrence)
        elif recurrence.recur_type == 'weekly':
            #get weekdays to generate
            weekdays = [val for key,val in switch_date.items() if recurrence['recur_week_'+key]]
            dates = rrule.rrule(rrule.WEEKLY, byweekday=weekdays, interval=periodicity, dtstart=date_start, until=date_end)
            # if date_end != last_occurrence, we add it to the generated dates (meaning date_end > last_occurrence)
        elif recurrence.recur_type == 'monthly':
            #get nb of occurences to generate
            count = recurrence.recur_occurrence_nb
            if not count:
                count = 1
            switch = {
                'first':1,
                'second':2,
                'third':3,
                'fourth':4,
                'last':-1
                }
            if recurrence.recur_month_relative_day and recurrence.recur_month_relative_weight:
                dates = rrule.rrule(rrule.MONTHLY, interval=periodicity, dtstart=date_start, count=count,
                                    byweekday=switch_date[recurrence.recur_month_relative_day](switch[recurrence.recur_month_relative_weight]))

            elif recurrence.recur_month_absolute:
                dates = rrule.rrule(rrule.MONTHLY, bymonthday=recurrence.recur_month_absolute, interval=periodicity, dtstart=date_start, count=count)
            else:
                raise osv.except_osv(_('Error'), _('You must provide a complete setting for one of the monthly recurrence methods'))
        else:
            raise osv.except_osv(_('Error'), _('You must set an existing type of recurrence'))
        ret = list(dates)
        #remove date_start values from list if generated too by recurrence
        if ret[0] == date_start:
            ret.pop(0)
        return ret
    def test_create_and_get_recurring_series(self):
        url = '/api/pickup-date-series/'
        recurrence = rrule.rrule(
            freq=rrule.WEEKLY,
            byweekday=[0, 1]  # Monday and Tuesday
        )
        start_date = self.group.timezone.localize(datetime.now().replace(
            hour=20, minute=0))
        pickup_series_data = {
            'max_collectors': 5,
            'store': self.store.id,
            'rule': str(recurrence),
            'start_date': start_date
        }
        start_date = start_date.replace(second=0, microsecond=0)
        self.client.force_login(user=self.member)
        response = self.client.post(url, pickup_series_data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED,
                         response.data)
        series_id = response.data['id']
        self.assertEqual(parse(response.data['start_date']), start_date)
        del response.data['id']
        del response.data['start_date']
        expected_series_data = {
            'max_collectors': 5,
            'store': self.store.id,
            'rule': str(recurrence),
            'description': ''
        }
        self.assertEqual(response.data, expected_series_data)

        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for _ in response.data:
            self.assertEqual(parse(_['start_date']), start_date)
            del _['id']
            del _['start_date']
        self.assertEqual(response.data, [expected_series_data])

        response = self.client.get(url + str(series_id) + '/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(parse(response.data['start_date']), start_date)
        del response.data['id']
        del response.data['start_date']
        self.assertEqual(response.data, expected_series_data)

        url = '/api/pickup-dates/'
        created_pickup_dates = []
        # do recurrence calculation in local time to avoid daylight saving time problems
        tz = self.group.timezone
        dates_list = recurrence.replace(dtstart=timezone.now(
        ).astimezone(tz).replace(
            hour=20, minute=0, second=0, microsecond=0, tzinfo=None)).between(
                timezone.now().astimezone(tz).replace(tzinfo=None),
                timezone.now().astimezone(tz).replace(tzinfo=None) +
                relativedelta(weeks=4))
        dates_list = [tz.localize(d) for d in dates_list]

        response = self.get_results(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # verify date field
        for response_data_item, expected_date in zip(response.data,
                                                     dates_list):
            self.assertEqual(parse(response_data_item['date']), expected_date,
                             response_data_item['date'])

        # verify non-date fields, don't need parsing
        for _ in response.data:
            del _['id']
            del _['date']
        for _ in dates_list:
            created_pickup_dates.append({
                'max_collectors': 5,
                'series': series_id,
                'collector_ids': [],
                'store': self.store.id,
                'description': ''
            })
        self.assertEqual(response.data, created_pickup_dates)
from datetime import datetime
from dateutil.rrule import rrule, MONTHLY
from pyspark.sql import Row
import pyspark.sql.functions as F
#Get the list of invoices and start_dates
end_date ='09/01/2018'
country_list = spark.sql("select country from (select  country, count(*) from RO_Detail_repair where country<> '' group by country order by count(*) desc limit 100)").collect()
country_list = [i[0] for i in country_list]
def reduce_country(country):
  if country in country_list:
    return country
  else: return 'other'
select_invoice = "select substring(Accountid,0,2) operation,  ShipToCountry country, to_date(CAST(UNIX_TIMESTAMP(concat(month(to_date(CAST(UNIX_TIMESTAMP(InvoiceDate, 'MM/dd/yy') AS TIMESTAMP ))),'"+'-'+"', year(to_date(CAST(UNIX_TIMESTAMP(InvoiceDate, 'MM/dd/yy') AS TIMESTAMP )))), 'MM-yyyy') AS TIMESTAMP ))  as month, PartNumber, sum(ShipQuantity) ShipQuantity from (select InvoiceRecordId,  PartNumber, sum(ShipQuantity) ShipQuantity from Invoice_Line_Item where  ShipQuantity >0  group by InvoiceRecordId,  PartNumber) A, Invoice_Header B where B.InvoiceRecordId = A.InvoiceRecordId group by operation,country, month, PartNumber"
invoice_df = spark.sql(select_invoice)
end_date =datetime.strptime(end_date,'%m/%d/%Y')

all_time_df = invoice_df.rdd.flatMap(lambda row: [(row[0],row[1],row[2], x, row[3],row[4]) for x in [dt.date() for dt in rrule(MONTHLY, dtstart=row[2], until=end_date)]]).toDF(["operation", "country", "Month", "Date", "PartNumber", "ShipQuantity"])


all_time_df.registerTempTable("all_time_products")
total_dataset_df = spark.sql("select A.*, (datediff(Month, Date)/365) as Age, B.REPLACE_PN, B.FAIL_QTY from all_time_products A left outer join (select replace_pn, fail_qty,RCVD_DT,  substring(Cust_id,0,2) operation, country, part, to_date(CAST(UNIX_TIMESTAMP(concat(month(to_date(CAST(UNIX_TIMESTAMP(Sales, 'MM/dd/yy') AS TIMESTAMP ))),'"+'-'+"', year(to_date(CAST(UNIX_TIMESTAMP(Sales, 'MM/dd/yy') AS TIMESTAMP )))), 'MM-yyyy') AS TIMESTAMP ))  as sales_month from RO_Detail_Repair where replace_pn <> '') B on A.PartNumber = B.Part and A.operation = B.operation and A.country = B.country and A.month = b.sales_month and month(A.Date) = month(to_date(CAST(UNIX_TIMESTAMP(RCVD_DT, 'MM/dd/yy') AS TIMESTAMP ))) and year(A.Date) = year(to_date(CAST(UNIX_TIMESTAMP(RCVD_DT, 'MM/dd/yy') AS TIMESTAMP )))")
total_dataset_df = total_dataset_df.rdd.map(lambda row: (row[0],reduce_country(row[1]),row[2],row[3],row[4],row[5],row[6],row[7],row[8])).toDF(["operation", "country", "Month", "Date", "PartNumber", "ShipQuantity", "Age", "replace_pn", "fail_qty"], sampleRatio =0.2)
total_dataset_df.write.mode("overwrite").saveAsTable("total_dataset_v3")

# COMMAND ----------



# COMMAND ----------

# MAGIC %sql select distinct country from total_dataset_v2 where fail_qty >0
Example #33
 def __init__(self, freq, **kwargs):
     self._construct = kwargs.copy()
     self._construct["freq"] = freq
     self._rrule = rrule(**self._construct)
def generate_sdm_series(site, first_day, last_day):

    return [
        SiteDailyMetricsFactory(site=site, date_for=dt)
        for dt in rrule(DAILY, dtstart=first_day, until=last_day)
    ]
import sys
import numpy as np
import pandas as pd
import datetime as dt
from dateutil import rrule
import metpy.calc as mcalc
from metpy.units import units

main = '/home/vkvalappil/Data/masdar_station_data/'
scripts = main + '/scripts/'
date = str(sys.argv[1])

date_1 = dt.datetime.strptime(date, '%Y%m%d') + dt.timedelta(hours=0)
date_2 = dt.datetime.strptime(date, '%Y%m%d') + dt.timedelta(hours=23)
date_list = [
    x.strftime('%Y-%m-%d %H:%M')
    for x in rrule.rrule(rrule.HOURLY, dtstart=date_1, until=date_2)
]

metar_file = main + '/wyoming/' + date[0:6] + '/AbuDhabi_surf_' + date[
    0:8] + '.csv'
outFile = main + '/wyoming/' + date[0:6] + '/AbuDhabi_surf_mr' + date[
    0:8] + '.csv'

metar_data = pd.read_csv(metar_file)

metar_data = metar_data[[
    'STN', 'TIME', 'ALTM', 'TMP', 'DEW', 'RH', 'DIR', 'SPD', 'VIS'
]]
#metar_data=metar_data.drop('Unnamed: 9',axis=1)
metar_data = metar_data.drop(metar_data.index[0])
metar_data['TIME'] = date_list
    def create_concept_invoice_line(self, concept, invoice_id, end_date):
        concept_product = concept.concept_id.product_id
        account_id = concept_product.product_tmpl_id.property_account_income_id.id
        if not account_id:
            account_id = concept_product.categ_id.property_account_income_categ_id.id

        # filled with the date of the last invoice or,
        # failing that, the account's registration (start) date
        start_date = concept.last_invoice_date and \
            datetime.strptime(
             concept.last_invoice_date + " 00:00:00", "%Y-%m-%d %H:%M:%S") + \
            relativedelta(days=+1) or datetime.strptime(
             self.date_start + " 00:00:00", "%Y-%m-%d %H:%M:%S")
        # date on which the invoicing is being done
        end_date = datetime.strptime(
            end_date + " 23:59:59", "%Y-%m-%d %H:%M:%S")
        # deactivation date of the analytic account, or the invoicing date
        end_date = (
            self.date and datetime.strptime(
              self.date + " 23:59:59", "%Y-%m-%d %H:%M:%S") < end_date) and \
            datetime.strptime(self.date + " 23:59:59", "%Y-%m-%d %H:%M:%S") \
            or end_date

        except_months = concept._get_except_months()[concept.id]
        if end_date.month in except_months:
            return False

        rset = rruleset()
        if except_months:
            rset.exrule(rrule(DAILY,  dtstart=start_date, until=end_date,
                              bymonth=except_months))
        rset.rrule(rrule(DAILY, dtstart=start_date, until=end_date))
        months = list(set([(x.year, x.month) for x in list(rset)]))
        amount = 0.0
        if concept.freq == 'q':
            days = 90
        else:
            days = 30
        duration = 0
        for month in months:
            days_in_month = calendar.monthrange(month[0], month[1])[1]
            first_month_day = datetime.strptime(
                str(month[0]) + "-" + str(month[1]).zfill(2) +
                "-01", "%Y-%m-%d")
            last_month_day = datetime.strptime(
                str(month[0]) + "-" + str(month[1]).zfill(2) +
                "-" + str(days_in_month), "%Y-%m-%d")
            rset_month = rset.between(first_month_day, last_month_day,
                                      inc=True)
            month_days = len(list(rset_month))
            if month_days == days_in_month:
                duration += 30
            else:
                duration += month_days
        amount += (duration * concept.amount) / days

        if self.date and datetime.strptime(
                self.date + " 23:59:59", "%Y-%m-%d %H:%M:%S") <= end_date:
            self.close_analytic()

        if not amount and concept.amount:
            return False
        invoice_line = self.env['account.invoice.line'].create({
                'name': self._process_concept_name(concept, end_date),
                'origin': self.name,
                'invoice_id': invoice_id.id,
                'uom_id': concept_product.uom_id and
                concept_product.uom_id.id or False,
                'product_id': concept_product.id,
                'account_id':
                self.partner_id.property_account_position_id.map_account(
                    account_id),
                'price_unit': amount,
                'discount': 0.0,
                'quantity': 1.0,
                'invoice_line_tax_ids': [
                    (6, 0,
                     self.partner_id.property_account_position_id.map_tax(
                        concept_product.taxes_id).ids)],
                'account_analytic_id': self.id,
        })

        res = self._invoice_line_hook(concept, invoice_line, end_date)
        return res and invoice_line
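
The rruleset built above (an exrule over the excluded months plus a daily rrule over the billing window) is the generic dateutil way to count billable days while skipping whole months. A stripped-down sketch of just that part:

from datetime import datetime
from dateutil.rrule import rrule, rruleset, DAILY

start_date = datetime(2023, 6, 1)
end_date = datetime(2023, 9, 30)
except_months = [7, 8]            # e.g. no invoicing in July and August

rset = rruleset()
rset.exrule(rrule(DAILY, dtstart=start_date, until=end_date, bymonth=except_months))
rset.rrule(rrule(DAILY, dtstart=start_date, until=end_date))

billable_days = list(rset)
print(len(billable_days))         # 30 (June) + 30 (September) = 60
print(sorted(set((d.year, d.month) for d in billable_days)))  # [(2023, 6), (2023, 9)]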
Example #37
def get_institutional_deliveries_data_chart(domain,
                                            config,
                                            loc_level,
                                            show_test=False):
    month = datetime(*config['month'])
    three_before = datetime(*config['month']) - relativedelta(months=3)

    config['month__range'] = (three_before, month)
    del config['month']

    chart_data = AggCcsRecordMonthly.objects.filter(**config).values(
        'month', '%s_name' % loc_level).annotate(
            in_month=Sum('institutional_delivery_in_month'),
            eligible=Sum('delivered_in_month'),
        ).order_by('month')

    if not show_test:
        chart_data = apply_exclude(domain, chart_data)

    data = {
        'blue': OrderedDict(),
    }

    dates = [dt for dt in rrule(MONTHLY, dtstart=three_before, until=month)]

    for date in dates:
        miliseconds = int(date.strftime("%s")) * 1000
        data['blue'][miliseconds] = {'y': 0, 'all': 0, 'in_month': 0}

    best_worst = defaultdict(lambda: {'in_month': 0, 'all': 0})
    for row in chart_data:
        date = row['month']
        in_month = row['in_month']
        location = row['%s_name' % loc_level]
        valid = row['eligible']

        best_worst[location]['in_month'] = in_month
        best_worst[location]['all'] = (valid or 0)

        date_in_miliseconds = int(date.strftime("%s")) * 1000
        data_for_month = data['blue'][date_in_miliseconds]

        data_for_month['all'] += valid
        data_for_month['in_month'] += in_month
        data_for_month['y'] = data_for_month['in_month'] / float(
            data_for_month['all'] or 1)

    top_locations = sorted([
        dict(loc_name=key,
             percent=(value['in_month'] * 100) / float(value['all'] or 1))
        for key, value in six.iteritems(best_worst)
    ],
                           key=lambda x: x['percent'],
                           reverse=True)

    return {
        "chart_data": [{
            "values": [{
                'x': key,
                'y': value['y'],
                'all': value['all'],
                'in_month': value['in_month']
            } for key, value in six.iteritems(data['blue'])],
            "key":
            "% Institutional deliveries",
            "strokeWidth":
            2,
            "classed":
            "dashed",
            "color":
            ChartColors.BLUE
        }],
        "all_locations":
        top_locations,
        "top_five":
        top_locations[:5],
        "bottom_five":
        top_locations[-5:],
        "location_type":
        loc_level.title()
        if loc_level != LocationTypes.SUPERVISOR else 'Sector'
    }
Example #38
 url_list = list()
 for index, rows in car_msg_df_new.iterrows():
     car = rows
     # print("*"*100)
     # if int(car["salesdescid"]) in sid_list:
     if car["salesdescid"] in sid_list and car["salesdescid"] not in na_sid_list:
         # url_list = list()
         data_list = list()
         max_reg_year = int(car["max_reg_year"])
         max_reg_year_new = year_now if max_reg_year + 2 > year_now else max_reg_year + 2
         for year in range(int(car["min_reg_year"]), max_reg_year_new + 1):
             month_list = [1, 12] if month_now == 12 else [1, month_now + 1] if year == year_now else [1, 12]
             for month in month_list:
                 registerDate = str(year) + "-" + str(month)
                 end_date = datetime.strptime(registerDate, '%Y-%m')
                 car_age = rrule.rrule(rrule.MONTHLY, dtstart=end_date, until=start_date).count()
                 mile = "0.1" if car_age == 0 else str(round((2 / 12 * car_age), 2))
                 mile = mile.replace('.0', '')
                 for city, prov in city_dic.items():
                     meta = {
                         "brand": car["brandid"],
                         "series": car["familyid"],
                         "model": car["salesdescid"],
                         "registerDate": registerDate,
                         "city": city,
                         "prov": prov,
                         "mile": mile
                     }
                     shuffle(partnerId)
                     s = f"brand={meta['brand']}&city={meta['city']}&mileAge={mile}&model={meta['model']}&prov={meta['prov']}&registerDate={registerDate}&series={meta['series']}njB6TTeQvTnGN4To"
                     md = get_md5_value(s)
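
# The car_age value above comes from rrule(MONTHLY, ...).count(), which counts every
# monthly occurrence from the registration month up to the reference date
# (start_date in the snippet, defined outside this excerpt), including the first one.
# A self-contained sketch with illustrative dates:
from datetime import datetime
from dateutil import rrule

registered = datetime(2020, 3, 1)
reference = datetime(2020, 6, 1)
age_in_months = rrule.rrule(rrule.MONTHLY, dtstart=registered, until=reference).count()
# age_in_months == 4 (March, April, May and June are all counted)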
Example #39
0
    def add_occurrences(self, start_time, end_time, **rrule_params):
        '''
        Add one or more occurrences to the event using an API comparable to
        ``dateutil.rrule``.

        If ``rrule_params`` does not contain a ``freq``, one will be defaulted
        to ``rrule.DAILY``.

        Because ``rrule.rrule`` returns an iterator that can essentially be
        unbounded, we need to slightly alter the expected behavior here in order
        to enforce creation of a finite number of occurrences.

        If both ``count`` and ``until`` entries are missing from ``rrule_params``,
        only a single ``Occurrence`` instance will be created using the exact
        ``start_time`` and ``end_time`` values.

        Returns a list of primary keys for the occurrences created or matched.
        '''
        for key in rrule_params.keys():
            rrule_value = rrule_params[key]
            if isinstance(rrule_value, list):
                # because for whatever reason, the value could be a list
                rrule_value = rrule_value[0]
            try:
                rrule_value = getattr(rrule, rrule_value.upper(), rrule_value)
            except:
                pass

            if key.lower() != 'byday':
                rrule_params[key.lower()] = rrule_value
                if key.lower() != key:
                    del rrule_params[key]
            else:
                # some weekday values include numbers which aren't parsed by our
                # handy library by default, so we parse them out here
                if isinstance(rrule_value, str):
                    day_function = rrule_days[rrule_value[-2:]]
                    if len(rrule_value) > 2:
                        value = int(rrule_value[:-2])
                        rrule_value = day_function(value)
                    else:
                        rrule_value = day_function()
                rrule_params['byweekday'] = rrule_value
                del rrule_params[key]

        tz = timezone.get_default_timezone()
        occurrence_pks = []
        if 'count' not in rrule_params and 'until' not in rrule_params:
            occurrence, created = self.occurrence_set.get_or_create(
                start_time=start_time, end_time=end_time)
            occurrence_pks += [occurrence.pk]
        else:
            delta = end_time - start_time
            for ev in rrule.rrule(dtstart=start_time, **rrule_params):
                # recalculate and localize the date, because we may cross a DST boundary
                # apparently, rrule doesn't really care about DST
                newtime = datetime(ev.year, ev.month, ev.day, ev.hour,
                                   ev.minute, ev.second)
                newtime = tz.localize(newtime).astimezone(pytz.UTC)
                try:
                    occurrence, created = self.occurrence_set.get_or_create(
                        start_time=newtime, end_time=newtime + delta)
                except:
                    occurrence, created = self.occurrence_set.get_or_create(
                        start_time=ev, end_time=ev + delta)
                occurrence_pks += [occurrence.pk]

        return occurrence_pks
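
# A minimal, runnable illustration of the 'byday' normalisation above: a value such
# as '+2FR' ends up as rrule.FR(+2) under the 'byweekday' key, which with a monthly
# frequency selects the second Friday of each month. Dates and parameters here are
# illustrative only, not taken from the project.
from datetime import datetime
from dateutil import rrule

byweekday = rrule.FR(+2)   # what the byday branch produces for '+2FR'
occurrences = list(rrule.rrule(rrule.MONTHLY, dtstart=datetime(2021, 1, 1),
                               count=3, byweekday=byweekday))
# -> the second Friday of January, February and March 2021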
Example #40
0
 def service_length(self):
     if self.entrydate:
         return rrule.rrule(rrule.YEARLY,
                            dtstart=self.entrydate,
                            until=datetime.now()).count()
     return ""
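
# A quick check of the counting semantics used above: rrule(YEARLY, ...).count()
# includes dtstart itself, so the returned "service length" is one more than the
# number of completed anniversaries. Dates below are illustrative.
from datetime import datetime
from dateutil import rrule

entrydate = datetime(2015, 3, 1)
as_of = datetime(2020, 1, 1)        # stand-in for datetime.now()
print(rrule.rrule(rrule.YEARLY, dtstart=entrydate, until=as_of).count())  # 5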
Example #41
0
def index(request):
    if request.method == 'POST':
        logger.debug('got POST request from {}'.format(get_client_ip(request)))
        city = request.POST.get('town', '')
        start_date_str = request.POST.get('start_date', '')
        end_date_str = request.POST.get('end_date', '')
        logger.debug('city: {}, start date: {}, end date: {}'.format(
            city, start_date_str, end_date_str))

        msg = ''

        if city and start_date_str and end_date_str:
            city_code = city.split('|')[0]
            city_name = city.split('|')[1].split()[-1]
            get_dates_re = re.compile(r'\d+')
            start_dates = get_dates_re.findall(start_date_str)
            end_dates = get_dates_re.findall(end_date_str)

            start_date = validate_start_date(start_dates)
            end_date = validate_end_date(end_dates)
            logger.debug('parsed dates: {} - {}'.format(start_date, end_date))

            if start_date > end_date:
                start_date, end_date = end_date, start_date

            delta_m = relativedelta.relativedelta(end_date, start_date)
            logger.debug('{} month(s) date difference'.format(delta_m.months))

            weather_lst = []

            for dt in rrule.rrule(rrule.MONTHLY,
                                  dtstart=start_date,
                                  until=end_date):
                year = dt.year
                month = dt.month

                date_str = str(year) + '-' + str(month)
                weather_query = WeatherByMonth.objects.filter(
                    month=date_str, city_code=city_code)

                if weather_query.exists():
                    logger.debug(
                        'weather data for {} of {} found in database'.format(
                            city_name, date_str))
                    weather_str = weather_query.first().weather_str
                else:
                    logger.debug(
                        'weather data for {} of {} not in database'.format(
                            city_name, date_str))
                    weather_str = fetch_html(city=city_code,
                                             year=year,
                                             month=month)
                    logger.debug(
                        'got http response javascript, saving to database')
                    new_weather, created = WeatherByMonth.objects.get_or_create(
                        city_code=city_code, month=date_str)
                    new_weather.city_name = city_name
                    new_weather.weather_str = weather_str
                    new_weather.save()

                month_weather_data = parse_weather(weather_str)
                logger.debug('parsing data from string {}'.format(
                    'succeeded' if month_weather_data else 'failed'))
                while not month_weather_data:
                    logger.debug('retrying downloading html')
                    weather_str = fetch_html(city=city_code,
                                             year=year,
                                             month=month)
                    logger.debug(
                        'got http response javascript, saving to database')
                    new_weather, created = WeatherByMonth.objects.get_or_create(
                        city_code=city_code, month=date_str)
                    new_weather.city_name = city_name
                    new_weather.weather_str = weather_str
                    new_weather.save()

                    month_weather_data = parse_weather(weather_str)
                    logger.debug('parsing data from string {}'.format(
                        'succeeded' if month_weather_data else 'failed'))

                weather_lst += month_weather_data

            weather_data = [[
                '日期', '最高溫度', '最低溫度', '天氣', '風向', '風力', 'AQI', 'AQI水平'
            ]]
            header_names = [
                'ymd', 'bWendu', 'yWendu', 'tianqi', 'fengxiang', 'fengli',
                'aqi', 'aqiInfo'
            ]
            for weather in weather_lst:
                try:
                    d = datetime.strptime(weather.get(header_names[0], ''),
                                          '%Y-%m-%d')
                    logger.debug('got date from weather dict: {}'.format(d))
                    if start_date <= d <= end_date:
                        weather_data.append(dict2list(weather, header_names))
                except Exception as e:
                    logger.debug(
                        'failed to get date from weather dict, {}'.format(e))
                    pass
            logger.debug('got data for {} days'.format(len(weather_data)))

            filename = quote('{}{}-{}天氣'.format(city_name, start_date_str,
                                                end_date_str))
            logger.debug('generating excel file {}'.format(filename))

            return ExcelResponse(weather_data, output_filename=filename)
        else:
            msg = '信息不全'  # "incomplete information"

        context = {
            'message': msg,
        }

        return render(request, 'index.html', context=context)
    else:
        return render(request, 'index.html')
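
# The month loop above, in isolation: rrule(MONTHLY) steps from the (possibly swapped)
# start date in one-month increments, and each step becomes the 'YYYY-M' key used to
# look up cached WeatherByMonth rows. Dates are illustrative.
from datetime import datetime
from dateutil import rrule

start_date, end_date = datetime(2020, 1, 5), datetime(2020, 4, 5)
keys = ['{}-{}'.format(dt.year, dt.month)
        for dt in rrule.rrule(rrule.MONTHLY, dtstart=start_date, until=end_date)]
# keys == ['2020-1', '2020-2', '2020-3', '2020-4']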
Example #42
0
 def set(self, **kwargs):
     self._construct.update(kwargs)
     self._rrule = rrule(**self._construct)
Example #43
0
    def add_occurrences(self, **kwargs):
        '''
        Add one or more occurrences to the event using an API comparable to
        ``dateutil.rrule``.

        A ``freq`` entry is required in the keyword arguments and is passed
        through to ``rrule.rrule``, along with ``dtstart`` and the other
        recurrence parameters.

        Because ``rrule.rrule`` returns an iterator that can essentially be
        unbounded, we need to slightly alter the expected behavior here in order
        to enforce creation of a finite number of occurrences.

        If no ``until`` entry is given, one is derived from ``dtstart`` plus a
        frequency-dependent number of years (see below), so the rule is always
        finite.
        '''

        # freq
        # dtstart
        # interval
        # wkst
        # count
        # until
        # bysetpos
        # bymonth
        # bymonthday
        # byyearday
        # byweekno
        # byweekday
        # byeaster

        freq = int(kwargs.pop('freq'))
        all_day = kwargs.pop('all_day', False)
        duration = kwargs.pop('duration')
        start_time = kwargs.pop('start_time')
        end_time = start_time + duration
        if all_day:
            #duration = Occurrence.set_all_day_times(start_time)
            duration = timedelta(days=1)

        #if (kwargs.get('count') or kwargs.get('until')):
        #    kwargs.setdefault('freq', rrule.DAILY)
        #    #occurrences = []
        #    #for ev in rrule.rrule(**kwargs):
        #    #    occurrences.append(Occurrence(event=self, start_time=ev, duration=duration, notes=None, all_day=all_day))
        #    #self.occurrence_set.bulk_create(occurrences)
        if kwargs.get('until') is None:
            year = timedelta(365)
            if freq == rrule.YEARLY:
                kwargs['until'] = kwargs[
                    'dtstart'] + 25 * year  # Add 25 years in occurrence table
            elif freq == rrule.MONTHLY:
                kwargs['until'] = kwargs[
                    'dtstart'] + 25 * year  # Add 25 years in occurrence table
            elif freq == rrule.WEEKLY:
                kwargs['until'] = kwargs[
                    'dtstart'] + 15 * year  # Add 15 years in occurrence table
            elif freq == rrule.DAILY:
                kwargs['until'] = kwargs[
                    'dtstart'] + 10 * year  # Add 10 years in occurrence table
            #self.calculatedoccurrence_set.create(event=self, start_time=start_time, end_time=end_time, **kwargs)

        occurrences = [
            Occurrence(event=self,
                       start_time=occurrence,
                       end_time=end_time,
                       notes=None,
                       all_day=all_day,
                       multi_day=False)
            for occurrence in rrule.rrule(freq, **kwargs)
        ]
        self.occurrence_set.bulk_create(occurrences)
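
# Why the method above always ends up with an ``until``: an rrule with neither
# ``count`` nor ``until`` is unbounded, so materialising it into a list of
# occurrences would never finish. A bounded sketch using the same 15-"year" cap the
# weekly branch applies (the start date is illustrative):
from datetime import datetime, timedelta
from dateutil import rrule

year = timedelta(365)
dtstart = datetime(2022, 1, 1)
bounded = rrule.rrule(rrule.WEEKLY, dtstart=dtstart, until=dtstart + 15 * year)
print(bounded.count())   # 783 occurrences -- finite, so bulk_create can run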
Example #44
0
def api(request):
    # INNER JOIN:
    # 'SELECT name FROM tracker_ConfirmedCase'\
    # 'INNER JOIN tracker_State'\
    # 'ON tracker_State.id = tracker_ConfirmedCase.state_id'
    cases = []
    confirmed_by_age = {}
    healed_by_age = {}
    suspected_by_age = {}

    for c in ConfirmedCase.objects.select_related('state_id'):
        cases.append({
            'sex': c.sex,
            'age': c.age,
            'healed': c.healed,
            'state_id': c.state_id.id,
            'state_name': c.state_id.name,
            'state_latitude': c.state_id.latitude,
            'state_longitude': c.state_id.longitude,
            'status': 'healed' if c.healed else 'confirmed'
        })

    for c in SuspectedCase.objects.select_related('state_id'):
        cases.append({
            'sex': c.sex,
            'age': c.age,
            'state_id': c.state_id.id,
            'state_name': c.state_id.name,
            'state_latitude': c.state_id.latitude,
            'state_longitude': c.state_id.longitude,
            'status': 'suspected'
        })

    age_step = 5
    for i in range(0, 90, age_step):
        confirmed_by_age[
            f'{i} - {i + age_step}'] = ConfirmedCase.objects.filter(
                healed=False, age__gte=i, age__lt=i + age_step).count()

        healed_by_age[f'{i} - {i + age_step}'] = ConfirmedCase.objects.filter(
            healed=True, age__gte=i, age__lt=i + age_step).count()

        suspected_by_age[
            f'{i} - {i + age_step}'] = SuspectedCase.objects.filter(
                age__gte=i, age__lt=i + age_step).count()

    # https://github.com/jesusmartinoza/COVID-19-MX/issues/4
    weeks = list(
        rrule(WEEKLY,
              dtstart=datetime.date(2020, 2, 19),
              until=datetime.datetime.now()))
    cases_by_date = list()
    for i in range(1, len(weeks)):
        cc_trends, sc_trends, hc_trends = 0, 0, 0
        date = None
        for dt in rrule(DAILY, dtstart=weeks[i - 1], until=weeks[i]):
            date = dt
            cc_trends += ConfirmedCase.objects.filter(symptoms_date=dt).count()
            hc_trends += ConfirmedCase.objects.filter(symptoms_date=dt,
                                                      healed=True).count()
            sc_trends += SuspectedCase.objects.filter(symptoms_date=dt).count()

        cases_by_date.append({
            'date': date,
            'cases_confirmed': cc_trends,
            'cases_healed': hc_trends,
            'cases_suspected': sc_trends
        })

    cc_trends, sc_trends, hc_trends = 0, 0, 0
    for dt in rrule(DAILY, dtstart=weeks[-1], until=datetime.datetime.now()):
        date = dt
        cc_trends += ConfirmedCase.objects.filter(symptoms_date=dt).count()
        # also accumulate healed cases for the trailing partial week, mirroring the weekly loop above
        hc_trends += ConfirmedCase.objects.filter(symptoms_date=dt,
                                                  healed=True).count()
        sc_trends += SuspectedCase.objects.filter(symptoms_date=dt).count()

    cases_by_date.append({
        'date': date,
        'cases_confirmed': cc_trends,
        'cases_healed': hc_trends,
        'cases_suspected': sc_trends
    })
    country = Country.objects.get(name='Mexico')
    context = {
        'total_confirmed': ConfirmedCase.objects.filter(healed=False).count(),
        'total_healed': DailyReport.objects.get(country=country).recovered,
        'total_suspected': SuspectedCase.objects.all().count(),
        'cases': sorted(cases, key=lambda c: c['state_name']),
        'confirmed_by_age': confirmed_by_age,
        'healed_by_age': healed_by_age,
        'suspected_by_age': suspected_by_age,
        'cases_by_date': cases_by_date
    }

    return JsonResponse(context, safe=False)
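
# A compact sketch of the bucketing above: rrule(WEEKLY) produces the week
# boundaries, and rrule(DAILY) walks the days inside each pair of consecutive
# boundaries. Note that both boundary days are included, so consecutive buckets
# share a day -- the same overlap exists in the view above. Dates are illustrative.
import datetime
from dateutil.rrule import rrule, WEEKLY, DAILY

weeks = list(rrule(WEEKLY, dtstart=datetime.datetime(2020, 2, 19),
                   until=datetime.datetime(2020, 3, 11)))
for previous, current in zip(weeks, weeks[1:]):
    days = list(rrule(DAILY, dtstart=previous, until=current))
    print(previous.date(), '->', current.date(), len(days))   # 8 days per bucket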
Example #45
0
#!/usr/bin/python

import sys ;import os ; import numpy as np ; import pandas as pd ; import datetime as dt ; from dateutil import tz, rrule ; import pytz
import matplotlib.pyplot as plt; from pylab import savefig 

import tensorflow as tf  
from sklearn.metrics import explained_variance_score, mean_absolute_error,  median_absolute_error ,mean_squared_error
from sklearn.model_selection import train_test_split  


main='/home/vkvalappil/Data/oppModel' ; output=main+'/output/output/stat/' ; inp=main+'/output/output/'
date='2018050106'

date_1=dt.datetime.strptime(date,'%Y%m%d%H')+dt.timedelta(days=0)
date_2=dt.datetime.strptime(date,'%Y%m%d%H')+dt.timedelta(days=25)
date_list=[x.strftime('%Y%m%d%H') for x in rrule.rrule(rrule.DAILY,dtstart=date_1,until=date_2)]

bias_day1 = [] ; rmse_day1 = [] ; bias_day2 = [] ; rmse_day2 = [] ; bias_hour_day1=[] ; bias_hour_day2=[]
mod_hour_day1=[] ; obs_hour_day1=[] ; mod_hour_day2=[] ; obs_hour_day2=[] ;

for dte in date_list[:]:
    
    file_2=inp+'domain_2/surfaceLevel/hourly'+dte+'.csv'
    if (os.path.isfile(file_2)):

        mod_dom_2=pd.read_csv(file_2) ; mod_dom_2=mod_dom_2.iloc[72:144,:] ; 

        o_date_1=dte ; 
        o_date_2=(dt.datetime.strptime(dte,'%Y%m%d%H')+dt.timedelta(days=1)).strftime('%Y%m%d%H')
        o_date_3=(dt.datetime.strptime(dte,'%Y%m%d%H')+dt.timedelta(days=2)).strftime('%Y%m%d%H')
        o_date_4=(dt.datetime.strptime(dte,'%Y%m%d%H')+dt.timedelta(days=3)).strftime('%Y%m%d%H')
Example #46
0
def get_early_initiation_breastfeeding_chart(domain,
                                             config,
                                             loc_level,
                                             show_test=False):
    month = datetime(*config['month'])
    three_before = datetime(*config['month']) - relativedelta(months=3)

    config['month__range'] = (three_before, month)
    del config['month']

    chart_data = AggChildHealthMonthly.objects.filter(**config).values(
        'month', '%s_name' % loc_level).annotate(
            birth=Sum('bf_at_birth'),
            in_month=Sum('born_in_month'),
        ).order_by('month')

    if not show_test:
        chart_data = apply_exclude(domain, chart_data)

    data = {'blue': OrderedDict()}

    dates = [dt for dt in rrule(MONTHLY, dtstart=three_before, until=month)]

    for date in dates:
        miliseconds = int(date.strftime("%s")) * 1000
        data['blue'][miliseconds] = {'y': 0, 'all': 0, 'birth': 0}

    best_worst = {}
    for row in chart_data:
        date = row['month']
        in_month = row['in_month']
        location = row['%s_name' % loc_level]
        birth = row['birth']

        best_worst[location] = (birth or 0) * 100 / float(in_month or 1)

        date_in_miliseconds = int(date.strftime("%s")) * 1000
        data_for_month = data['blue'][date_in_miliseconds]

        data_for_month['all'] += in_month
        data_for_month['birth'] += birth
        data_for_month['y'] = data_for_month['birth'] / float(
            data_for_month['all'] or 1)

    top_locations = sorted([
        dict(loc_name=key, percent=val) for key, val in best_worst.iteritems()
    ],
                           key=lambda x: x['percent'],
                           reverse=True)

    return {
        "chart_data": [{
            "values": [{
                'x': key,
                'y': val['y'],
                'all': val['all'],
                'birth': val['birth']
            } for key, val in data['blue'].iteritems()],
            "key":
            "% Early Initiation of Breastfeeding",
            "strokeWidth":
            2,
            "classed":
            "dashed",
            "color":
            BLUE
        }],
        "all_locations":
        top_locations,
        "top_five":
        top_locations[:5],
        "bottom_five":
        top_locations[-5:],
        "location_type":
        loc_level.title()
        if loc_level != LocationTypes.SUPERVISOR else 'Sector'
    }
Example #47
0
    def __init__(self,
                 worker_list=["W1", "W2", "W3", "W4"],
                 shift=['morning', 'evening', 'night'],
                 emp_req=10,
                 timeline=14,
                 start_date=None,
                 end_date=None,
                 cmb_=None):
        if start_date is None:
            self.days = []
        else:
            from dateutil import rrule, parser
            date1 = start_date
            date2 = end_date

            dates = list(
                rrule.rrule(rrule.DAILY,
                            dtstart=parser.parse(date1),
                            until=parser.parse(date2)))
            holiday_dates = []
            for dd in dates:
                if dd.weekday() == 4:
                    holiday_dates.append(dd)

            dates = [
                str(dt).split()[0] for dt in dates if dt not in holiday_dates
            ]
            holiday_dates = [str(dt).split()[0] for dt in holiday_dates]
            print(dates)
            print("HL : ", holiday_dates)
            print("No of days : {}".format(len(dates)))
            print("Total : {}".format(emp_req))
            self.days = dates
        self.shifts = shift
        self.days_shifts = {day: self.shifts for day in self.days}
        self.workers = worker_list[:int(emp_req)]
        print(self.workers)
        self.model = ConcreteModel()
        # binary variables representing if a worker is scheduled somewhere
        self.model.works = Var(((worker, day, shift) for worker in self.workers
                                for day in self.days
                                for shift in self.days_shifts[day]),
                               within=Binary,
                               initialize=0)

        # binary variables representing if a worker is necessary
        self.model.needed = Var(self.workers, within=Binary, initialize=0)

        # binary variables representing if a worker worked on sunday but not on saturday (avoid if possible)
        self.model.no_pref = Var(self.workers, within=Binary, initialize=0)
        print("[INFO] Model Binaries --------------")
        # add objective function to the model. rule (pass function) or expr (pass expression directly)
        self.model.obj = Objective(rule=self.obj_rule, sense=minimize)

        self.model.constraints = ConstraintList(
        )  # Create a set of constraints
        print("[INFO] Model Constraint --------------")
        # Constraint: all shifts are assigned
        if cmb_ is None:
            if (emp_req // 2) > 4:
                for day in self.days:
                    for shift in self.days_shifts[day]:
                        if day in self.days[:] and shift in ['morning']:
                            # morning shifts get emp_req // 3 workers
                            self.model.constraints.add(  # to add a constraint to model.constraints set
                                (emp_req // 3) == sum(
                                    self.model.works[worker, day, shift]
                                    for worker in
                                    self.workers)  #--- dynamic ratio -hambira
                            )
                        elif day in self.days[:] and shift in ['evening']:
                            # evening shifts also get emp_req // 3 workers
                            self.model.constraints.add(  # to add a constraint to model.constraints set
                                (emp_req // 3) == sum(
                                    self.model.works[worker, day, shift]
                                    for worker in
                                    self.workers)  #--- dynamic ratio -hambira
                            )
                        else:
                            # night shifts get (emp_req // 3) - 1 workers
                            self.model.constraints.add(
                                (emp_req // 3) -
                                1 == sum(self.model.works[worker, day, shift]
                                         for worker in self.workers
                                         )  #--- dynamic ratio -hambira
                            )

            # elif (emp_req//2) >=2 and (emp_req//2) <= 4:
            #     for day in self.days:
            #         for shift in self.days_shifts[day]:
            #             if day in self.days[:-1] and shift in ['morning']:
            #                 # weekdays' and Saturdays' day shifts have exactly two workers
            #                 self.model.constraints.add(  # to add a constraint to model.constraints set
            #                     (emp_req//2)-1 == sum(self.model.works[worker, day, shift] for worker in self.workers) #--- dynamic ratio -hambira
            #                 )
            #             else:
            #                 # Sundays' and nights' shifts have exactly one worker
            #                 self.model.constraints.add(
            #                     (emp_req//2)-1 == sum(self.model.works[worker, day, shift] for worker in self.workers) #--- dynamic ratio -hambira
            #                 )

            # else:
            #     for day in self.days:
            #         for shift in self.days_shifts[day]:
            #             if day in self.days[:-1] and shift in ['morning']:
            #                 # weekdays' and Saturdays' day shifts have exactly two workers
            #                 self.model.constraints.add(  # to add a constraint to model.constraints set
            #                     emp_req-1 == sum(self.model.works[worker, day, shift] for worker in self.workers) #--- dynamic ratio -hambira
            #                 )
            #             else:
            #                 # Sundays' and nights' shifts have exactly one worker
            #                 self.model.constraints.add(
            #                     1 == sum(self.model.works[worker, day, shift] for worker in self.workers) #--- dynamic ratio -hambira
            #                 )
        else:
            ms, es, ns = int(cmb_[0]), int(cmb_[1]), int(cmb_[2])
            print("---->>>>> {} {} {}".format(ms, es, ns))
            print("---->>>>> {} {} {}".format(type(ms), es, ns))
            #---------------------------
            for day in self.days:
                for shift in self.days_shifts[day]:
                    if day in self.days[:] and shift in ['morning']:
                        # morning shifts get exactly ms workers, drawn from self.workers[:5]
                        self.model.constraints.add(  # to add a constraint to model.constraints set
                            int(ms) == sum(self.model.works[worker, day, shift]
                                           for worker in self.workers[:5]))
                    elif day in self.days[:] and shift in ['evening']:
                        # evening shifts get exactly es workers, drawn from self.workers[5:8]
                        self.model.constraints.add(  # to add a constraint to model.constraints set
                            int(es) == sum(
                                self.model.works[worker, day, shift]
                                for worker in
                                self.workers[5:8])  #--- dynamic ratio -hambira
                        )
                    else:
                        # night shifts get exactly ns workers, drawn from self.workers[8:]
                        self.model.constraints.add(
                            int(ns) == sum(self.model.works[worker, day,
                                                            "night"]
                                           for worker in self.workers[8:]
                                           )  #--- dynamic ratio -hambira
                        )

        # Constraint: no more than 40 hours worked
        if len(self.days) > 7 and emp_req > 7:  # use self.days: 'dates' is undefined when no start_date was given
            print("-------------->>>>>> Condition 1")
            for worker in self.workers:
                self.model.constraints.add((len(self.days) * 8) >= sum(
                    8 * self.model.works[worker, day, shift]
                    for day in self.days for shift in self.days_shifts[day]))
        else:
            print("-------------->>>>>>  Condition 3")

            for worker in self.workers:
                self.model.constraints.add((len(self.days) * 8) >= sum(
                    8 * self.model.works[worker, day, shift]
                    for day in self.days for shift in self.days_shifts[day]))
        # elif len(dates) > 7 and emp_req < 7:
        #     print("-------------->>>> Start- date ------- ")
        #     print("-------------->>>>>> Condiion 2")

        #     for worker in self.workers:
        #         self.model.constraints.add(
        #             88 >= sum(8 * self.model.works[worker, day, shift] for day in self.days for shift in self.days_shifts[day])
        #         )
        # else:
        #     print("-------------->>>>>>  Condiion 3")

        #     for worker in self.workers:
        #         self.model.constraints.add(
        #             (len(dates)*8) >= sum(8 * self.model.works[worker, day, shift] for day in self.days for shift in self.days_shifts[day])
        #         )

        # Constraint: rest between two shifts is of 12 hours (i.e., at least two shifts)
        for worker in self.workers:
            for j in range(len(self.days)):
                try:
                    # if working in morning, cannot work again that day
                    # self.model.constraints.add(
                    #     1 >= sum(self.model.works[worker, self.days[j], shift] for shift in self.days_shifts[self.days[j]]) #+ self.model.works[worker, self.days[(j + 1) % len(self.days)], 'morning']
                    # )
                    # if working in morning, cannot work again that day
                    self.model.constraints.add(
                        1 >= sum(self.model.works[worker, self.days[j], shift]
                                 for shift in ['morning', 'night']) +
                        self.model.works[worker, self.days[
                            (j + 1) % 7], 'evening'])
                    # if working in evening, until next evening (note that after sunday comes next monday)
                    self.model.constraints.add(
                        1 >= sum(self.model.works[worker, self.days[j], shift]
                                 for shift in ['evening', 'night']) +
                        self.model.works[worker, self.days[
                            (j + 1) % 7], 'morning'])
                    # # if working in night, until next night
                    self.model.constraints.add(
                        1 >= self.model.works[worker, self.days[j], 'night'] +
                        sum(self.model.works[worker, self.days[
                            (j + 1) % 7], shift]
                            for shift in ['morning', 'evening']))
                except Exception as e:
                    print("Exception : {}".format(e))
                # # if working in evening, until next evening (note that after Sunday comes next monday)
                # self.model.constraints.add(
                #     1 >= sum(self.model.works[worker, self.days[j], shift] for shift in ['evening','night']) +
                #     self.model.works[worker, self.days[(j + 1) % len(dates)], 'morning']
                # )
                # # if working in night, until next night
                # self.model.constraints.add(
                #     1 >= self.model.works[worker, self.days[j], 'night'] +
                #     sum(self.model.works[worker, self.days[(j + 1) % 7], shift] for shift in ['morning','evening'])
                # )

        # Constraint (def of model.needed)
        for worker in self.workers:
            self.model.constraints.add(
                10000 * self.model.needed[worker] >= sum(
                    self.model.works[worker, day, shift] for day in self.days
                    for shift in self.days_shifts[day])
            )  # if any model.works[worker, ·, ·] non-zero, model.needed[worker] must be one; else is zero to reduce the obj function
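
# A compact sketch of the date preparation done in the constructor above: build the
# daily range with dateutil, treat Fridays (weekday() == 4) as holidays and keep the
# remaining days as plain 'YYYY-MM-DD' strings. Dates are illustrative.
from dateutil import rrule, parser

dates = list(rrule.rrule(rrule.DAILY,
                         dtstart=parser.parse('2021-03-01'),
                         until=parser.parse('2021-03-07')))
workdays = [str(d).split()[0] for d in dates if d.weekday() != 4]
holidays = [str(d).split()[0] for d in dates if d.weekday() == 4]
# workdays -> six day strings, holidays -> ['2021-03-05'] (the Friday)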
Example #48
0
def metrics_per_url(url):
    """
    This fully trusts that a url rating contains all manipulations of the url and all related endpoints.
    So if an endpoint was in a previous rating, but not in this one, the endpoint died (or there were no relevant
    metrics for it to store in the url rating).

    todo: dead endpoints are not removed from url_ratings. mail.ameland.nl is dead at some point, but still stored
    as a rating. :( Or a new rating is not written somehow.

    The best choice is to have the smallest granularity for ratings: these are ratings on an endpoint per day.

    Url ratings are stored using deduplication. This saves several gigabytes of data.
    Url ratings on the test dataset = 22013 items. The average url rating is about 3 kilobyte. = 64 megabyte.
    Without deduplication it would be urls * days * 3kb. = 7000 * 400 * 3kb = 8 gigabyte.

    Thus this function applies duplication to "fill" periods between different urlratings. That is the way grafana
    wants to see the data. You can't do a fill in influx, as it doesn't know when a url/endpoint stops existing etc.
    """

    url_ratings = UrlRating.objects.all().filter(url=url).order_by(
        'when')  # earliest first (asc)

    if not url_ratings:
        return []

    # duplicate the url_ratings
    earliest_rating_date = url_ratings[0].when
    now = datetime.datetime.now(pytz.utc)
    metrics = []
    yesterdays_metrics = []
    yesterdays_relevant_rating = None

    for dt in rrule.rrule(rrule.DAILY, dtstart=earliest_rating_date,
                          until=now):
        todays_metrics = []

        # prevent insertion of duplicate points on different times. Also for the metric of today.
        dt = dt.replace(hour=23,
                        minute=59,
                        second=59,
                        microsecond=999999,
                        tzinfo=pytz.utc)

        # get the rating that is earlier than the next rating
        relevant_rating = todays_relevant_url_rating(dt, url_ratings)

        # if the relevant rating today is the same as yesterday's, then we can simply copy yesterday's metrics
        # and change the date. That saves a few database hits, making it 4x as fast on the test dataset.
        if relevant_rating == yesterdays_relevant_rating:
            # update yesterday's metrics to today's date.
            for metric in yesterdays_metrics:
                metric_copy = metric.copy()
                metric_copy["time"] = dt
                todays_metrics.append(metric_copy)
        else:
            yesterdays_relevant_rating = relevant_rating

            if 'endpoints' not in relevant_rating.calculation.keys():
                logger.debug(
                    "No endpoints in this calculation. Url died, became non-resolvable or its endpoints died. "
                    "No metrics needed anymore for this one.")
                return []

            for endpoint in relevant_rating.calculation['endpoints']:
                for rating in endpoint['ratings']:
                    for organization in relevant_rating.url.organization.all():

                        if 'low' not in rating.keys():
                            # When an url still is resolvable, but all endpoints themselves don't exist anymore.
                            # logger.info("No (low) rating in this endpoint. Is it a repeated finding? Those should "
                            #             "have been all gone by now. What went wrong? %s" % endpoint)
                            continue

                        todays_metrics.append({
                            "measurement": "url_rating",
                            # removed tld: most of it will be a country tld. But we use the country field for this.
                            "tags": {
                                "ip_version":
                                endpoint['ip_version'],  # 2
                                "port":
                                endpoint['port'],  # 10
                                "protocol":
                                endpoint['protocol'],  # 2
                                "scan_type":
                                rating['type'],  # 6
                                # "url": relevant_rating.url.url,  # 4000 lower cardinality.
                                "subdomain":
                                tldextract.extract(
                                    relevant_rating.url.url).subdomain,  # 500
                                "organization":
                                organization.name,  # 400
                                "organization_type":
                                organization.type.name,  # 2
                                "country":
                                organization.country.name,  # 1
                                "explanation":
                                rating['explanation'],  # 10
                            },
                            "time": dt,
                            # removed exists: you can do that with count on any field
                            # removed points, as they are not really telling anything
                            # todo: not the fields as an integer, default = float.
                            "fields": {
                                "low": rating['low'],
                                "medium": rating['medium'],
                                "high": rating['high'],
                            }
                        })

        metrics += todays_metrics
        yesterdays_metrics = todays_metrics

    return metrics
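
# A stripped-down sketch of the forward-fill idea described in the docstring: walk
# every day with rrule(DAILY) and reuse the most recent rating at or before that day.
# The ratings list below is a stand-in for UrlRating rows, not the project's data.
import datetime
from dateutil import rrule

ratings = [(datetime.datetime(2021, 1, 1), 'A'), (datetime.datetime(2021, 1, 4), 'B')]
filled = {}
for dt in rrule.rrule(rrule.DAILY, dtstart=ratings[0][0],
                      until=datetime.datetime(2021, 1, 6)):
    relevant = [value for when, value in ratings if when <= dt][-1]
    filled[dt.date()] = relevant
# filled: Jan 1-3 -> 'A', Jan 4-6 -> 'B'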
Example #49
0
    def get_xaf(self, options):
        def cust_sup_tp(partner_id):
            if partner_id.customer and partner_id.supplier:
                return 'B'
            if partner_id.customer:
                return 'C'
            if partner_id.supplier:
                return 'S'
            return 'O'

        def acc_tp(account_id):
            if account_id.user_type_id.type in ['income', 'expense']:
                return 'P'
            if account_id.user_type_id.type in ['asset', 'liability']:
                return 'B'
            return 'M'

        def jrn_tp(journal_id):
            if journal_id.type == 'bank':
                return 'B'
            if journal_id.type == 'cash':
                return 'C'
            if journal_id.type == 'situation':
                return 'O'
            if journal_id.type in ['sale', 'sale_refund']:
                return 'S'
            if journal_id.type in ['purchase', 'purchase_refund']:
                return 'P'
            return 'Z'

        def amnt_tp(move_line_id):
            return 'C' if move_line_id.credit else 'D'

        def compute_period_number(date_str):
            date = fields.Date.from_string(date_str)
            return date.strftime('%y%m')[1:]

        def change_date_time(record):
            return record.write_date.strftime('%Y-%m-%dT%H:%M:%S')

        company_id = self.env.user.company_id

        msgs = []

        if not company_id.vat:
            msgs.append(_('- VAT number'))
        if not company_id.country_id:
            msgs.append(_('- Country'))

        if msgs:
            msgs = [_('Some fields must be specified on the company:')] + msgs
            raise UserError('\n'.join(msgs))

        date_from = options['date']['date_from']
        date_to = options['date']['date_to']
        partner_ids = self.env['res.partner'].search([
            '|', ('customer', '=', True), ('supplier', '=', True), '|',
            ('company_id', '=', False), ('company_id', '=', company_id.id)
        ])
        account_ids = self.env['account.account'].search([('company_id', '=',
                                                           company_id.id)])
        tax_ids = self.env['account.tax'].search([('company_id', '=',
                                                   company_id.id)])
        journal_ids = self.env['account.journal'].search([('company_id', '=',
                                                           company_id.id)])
        # Retrieve periods values
        periods = []
        Period = namedtuple('Period', 'number name date_from date_to')
        for period in rrule(freq=MONTHLY,
                            bymonth=(),
                            dtstart=fields.Date.from_string(date_from),
                            until=fields.Date.from_string(date_to)):
            period_from = fields.Date.to_string(period.date())
            period_to = period.replace(
                day=calendar.monthrange(period.year, period.month)[1])
            period_to = fields.Date.to_string(period_to.date())
            periods.append(
                Period(number=compute_period_number(period_from),
                       name=period.strftime('%B') + ' ' + date_from[0:4],
                       date_from=period_from,
                       date_to=period_to))
        # Retrieve move lines values
        total_query = """
            SELECT COUNT(*), SUM(l.debit), SUM(l.credit)
            FROM account_move_line l, account_move m
            WHERE l.move_id = m.id
            AND l.date >= %s
            AND l.date <= %s
            AND l.company_id = %s
            AND m.state != 'draft'
        """
        self.env.cr.execute(total_query, (
            date_from,
            date_to,
            company_id.id,
        ))
        moves_count, moves_debit, moves_credit = self.env.cr.fetchall()[0]
        journal_x_moves = {}
        for journal in journal_ids:
            journal_x_moves[journal] = self.env['account.move'].search([
                ('date', '>=', date_from), ('date', '<=', date_to),
                ('state', '!=', 'draft'), ('journal_id', '=', journal.id)
            ])
        values = {
            'company_id': company_id,
            'partner_ids': partner_ids,
            'account_ids': account_ids,
            'journal_ids': journal_ids,
            'journal_x_moves': journal_x_moves,
            'compute_period_number': compute_period_number,
            'periods': periods,
            'tax_ids': tax_ids,
            'cust_sup_tp': cust_sup_tp,
            'acc_tp': acc_tp,
            'jrn_tp': jrn_tp,
            'amnt_tp': amnt_tp,
            'change_date_time': change_date_time,
            'fiscal_year': date_from[0:4],
            'date_from': date_from,
            'date_to': date_to,
            'date_created': fields.Date.context_today(self),
            'software_version': release.version,
            'moves_count': moves_count,
            'moves_debit': moves_debit,
            'moves_credit': moves_credit,
        }
        audit_content = self.env['ir.qweb'].render(
            'l10n_nl_reports.xaf_audit_file', values)
        with tools.file_open('l10n_nl_reports/data/xml_audit_file_3_2.xsd',
                             'rb') as xsd:
            _check_with_xsd(audit_content, xsd)
        return audit_content
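
# A minimal sketch of the period construction above: rrule(MONTHLY) yields the first
# day of each month in the window, and calendar.monthrange supplies the matching last
# day. Dates are illustrative.
import calendar
from datetime import datetime
from dateutil.rrule import rrule, MONTHLY

date_from, date_to = datetime(2019, 1, 1), datetime(2019, 3, 31)
periods = []
for period in rrule(MONTHLY, dtstart=date_from, until=date_to):
    last_day = calendar.monthrange(period.year, period.month)[1]
    periods.append((period.date(), period.date().replace(day=last_day)))
# periods -> [(Jan 1, Jan 31), (Feb 1, Feb 28), (Mar 1, Mar 31) of 2019]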
Example #50
0
 def weeks(self):
     weeks = rrule.rrule(rrule.WEEKLY, dtstart=self.start, until=self.stop)
     return weeks.count()
Example #51
0
def getting_date_range(start_date, end_date):
	date_range = []
	for date_input in rrule(DAILY, dtstart=start_date, until=end_date):
		date_range.append(date_input.strftime("%Y-%m-%d"))
	return date_range
Example #52
0
    def write(self, values):
        for contract in self:
            old_date_start = contract.date_start
            old_date_end = contract.date_end
            old_state = contract.state
            analytic_pool = self.env['employee.attendance.analytic']
            res = super(HrContract, self).write(values)

            if values.get('state') in ('open', 'pending', 'close') \
                    and old_state in ('draft', 'cancel'):
                contract.attach_attendance()
                return res
            elif values.get('state') == 'cancel':
                lines = analytic_pool.search([('contract_id', '=', contract.id)
                                              ])
                employee = contract.employee_id
                if lines:
                    contract.remove_from_attendance(lines, employee)
                    return res
            if values.get('resource_calendar_id') \
                    or 'rate_per_hour' in values.keys():
                lines = analytic_pool.search([('contract_id', '=', contract.id)
                                              ])
                if lines:
                    for line in lines:
                        date_from = str(line.name) + ' 00:00:00'

                        dates = list(
                            rrule.rrule(rrule.DAILY,
                                        dtstart=parser.parse(date_from),
                                        until=parser.parse(date_from)))
                        for date_line in dates:
                            analytic_pool.recalculate_line(
                                line_date=str(date_line),
                                employee_id=contract.employee_id)

            if values.get('date_end'):
                if old_date_end:
                    dates = calculate_days(old_date_end,
                                           values.get('date_end'))

                    for date_line in dates:
                        analytic_pool.recalculate_line(
                            line_date=str(date_line),
                            employee_id=contract.employee_id)
                else:
                    lines = analytic_pool.search([
                        ('contract_id', '=', contract.id),
                        ('attendance_date', '>', values.get('date_end'))
                    ])
                    if lines:
                        dates = list(
                            rrule.rrule(rrule.DAILY,
                                        dtstart=parser.parse(
                                            values.get('date_end')),
                                        until=parser.parse(lines[-1].name)))
                        for date_line in dates:
                            analytic_pool.recalculate_line(
                                line_date=str(date_line),
                                employee_id=contract.employee_id)
            elif 'date_end' in values.keys():
                line = analytic_pool.search([('contract_id', '=', contract.id),
                                             ('attendance_date', '=',
                                              old_date_end)])
                lines = analytic_pool.search([
                    ('sheet_id', '=', line.sheet_id.id),
                    ('attendance_date', '>', old_date_end)
                ])
                if lines:
                    dates = list(
                        rrule.rrule(rrule.DAILY,
                                    dtstart=parser.parse(old_date_end),
                                    until=parser.parse(lines[-1].name)))
                    for date_line in dates:
                        analytic_pool.recalculate_line(
                            line_date=str(date_line),
                            employee_id=contract.employee_id)
            if values.get('date_start'):

                dates = calculate_days(old_date_start,
                                       values.get('date_start'))
                for date_line in dates:
                    analytic_pool.recalculate_line(
                        line_date=str(date_line),
                        employee_id=contract.employee_id)
            return res
Example #53
0
def get_data(start_date, end_date, target, sleep_sec=2):
    s = time.time()
    start_date = datetime.datetime.strptime(start_date, '%Y/%m/%d')
    end_date = datetime.datetime.strptime(end_date, '%Y/%m/%d')
    date_list = []

    for dt in rrule(DAILY, dtstart=start_date, until=end_date):
        date_list.append(dt.date())

    url = "https://www.taifex.com.tw/chinese/3/7_12_3.asp"
    headers = {
        'user-agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
    }

    indexs = [
        '日期', '身分', '多方交易口數', '多方契約金額', '空方交易口數', '空方契約金額', '多空淨額口數',
        '多空淨額契約金額', '未平倉多方交易口數', '未平倉多方契約金額', '未平倉空方交易口數', '未平倉空方契約金額',
        '未平倉多空淨額口數', '未平倉多空淨額契約金額'
    ]
    df = pd.DataFrame(columns=indexs)

    for d in date_list:
        Y = str(d.timetuple().tm_year)
        M = str(d.timetuple().tm_mon)
        D = str(d.timetuple().tm_mday)
        datestart = "{}/{}/{}".format(Y, M, D)

        post_data = {
            'goday': '',
            'DATA_DATE_Y': Y,
            'DATA_DATE_M': M,
            'DATA_DATE_D': D,
            'syear': Y,
            'smonth': M,
            'sday': D,
            'datestart': datestart,
            'COMMODITY_ID': target
        }  # COMMODITY_ID selects the contract: an empty string '' means all products, TXF is TAIEX futures, EXF is electronic sector futures, ...

        time.sleep(sleep_sec)

        response = requests.post(url, headers=headers,
                                 data=post_data)  # this site serves the data via POST requests
        response.encoding = "utf-8"

        soup = BeautifulSoup(response.text, "lxml")  # parsed page HTML
        rows = soup.select(
            'table.table_f tr'
        )  # grab every <tr> row under the futures-contract table; table_f is the target table's CSS class name
        if rows == []:  # skip dates when the market was not open
            pass
        else:
            for row in range(3, 6):
                # each product has three trader categories (dealers, investment trusts, foreign investors), i.e. three <tr> rows; a product's second and third <tr> have two fewer <td> cells than its first
                if row == 3:
                    data_day = [
                        rows[row].contents[i * 2 - 1].get_text().replace(
                            ',', "").rstrip() for i in range(3, 16)
                    ]
                else:
                    data_day = [
                        rows[row].contents[i * 2 - 1].get_text().replace(
                            ',', "").rstrip() for i in range(1, 14)
                    ]
                data_day.insert(0, d)
                df = df.append(pd.Series(data_day, index=indexs),
                               ignore_index=True)

    print('get_data cost time:', time.time() - s)
    return df
Example #54
0
    def attendance_analysis(self, cr, uid, timesheet_id, context=None):
        attendance_obj = self.pool.get('hr.attendance')
        date_format, time_format = self._get_user_datetime_format(
            cr, uid, context)
        timesheet = self.browse(cr, uid, timesheet_id, context=context)
        employee_id = timesheet.employee_id.id
        start_date = timesheet.date_from
        end_date = timesheet.date_to
        previous_month_diff = self.get_previous_month_diff(
            cr, uid, employee_id, start_date, context)
        current_month_diff = previous_month_diff
        if not context:
            context = {}
        res = {'previous_month_diff': previous_month_diff, 'hours': []}

        # Done BY Addition IT Solutions: BEGIN
        # TS dates needed to find leaves during that period
        ctx = dict(context)
        ctx.update({'date_from': start_date, 'date_to': end_date})
        dates = list(
            rrule.rrule(rrule.DAILY,
                        dtstart=parser.parse(start_date),
                        until=parser.parse(end_date))
        )  # Removed datetime.utcnow to parse till end date
        # END

        total = {
            'worked_hours': 0.0,
            'duty_hours': 0.0,
            'diff': current_month_diff
        }

        #total = collections.OrderedDict()
        #total['duty_hours'] = 0.0
        #total['worked_hours'] = 0.0
        #total['diff'] = current_month_diff
        for date_line in dates:

            dh = self.calculate_duty_hours(cr,
                                           uid,
                                           employee_id,
                                           date_line,
                                           context=ctx)
            worked_hours = 0.0
            # Done BY Addition IT Solutions: BEGIN
            for att in timesheet.period_ids:
                if att.name == date_line.strftime('%Y-%m-%d'):
                    worked_hours = att.total_attendance
            # END

            diff = worked_hours - dh
            current_month_diff += diff
            if context.get('function_call', False):
                res['hours'].append({
                    _('Date'):
                    date_line.strftime(date_format),
                    _('Duty Hours'):
                    attendance_obj.float_time_convert(dh),
                    _('Worked Hours'):
                    attendance_obj.float_time_convert(worked_hours),
                    _('Difference'):
                    self.sign_float_time_convert(diff),
                    _('Running'):
                    self.sign_float_time_convert(current_month_diff)
                })
            else:
                res['hours'].append({
                    'name':
                    date_line.strftime(date_format),
                    'dh':
                    attendance_obj.float_time_convert(dh),
                    'worked_hours':
                    attendance_obj.float_time_convert(worked_hours),
                    'diff':
                    self.sign_float_time_convert(diff),
                    'running':
                    self.sign_float_time_convert(current_month_diff)
                })
            total['duty_hours'] += dh
            total['worked_hours'] += worked_hours
            total['diff'] += diff
            total['diff'] -= previous_month_diff
            res['total'] = total
        return res
Example #55
0
def months(start_month, start_year, end_month, end_year):
    start = datetime(start_year, start_month, 1)
    end = datetime(end_year, end_month, 1)
    return list(rrule(MONTHLY, dtstart=start, until=end))
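
# Usage sketch for the helper above (assuming the datetime/rrule/MONTHLY imports it
# relies on are in scope): both endpoints are included, because rrule emits dtstart
# itself and every first-of-month up to and including ``until``.
print(months(11, 2020, 2, 2021))
# -> first-of-month datetimes for Nov 2020, Dec 2020, Jan 2021 and Feb 2021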
Example #56
0
#from matplotlib.cm import get_cmap ; import cartopy.crs as crs
#from cartopy.feature import NaturalEarthFeature, LAND,OCEAN 

import matplotlib.pyplot as plt ; from mpl_toolkits.basemap import  maskoceans ; import matplotlib.colors as mcolors

main='/home/vkvalappil/Data/modelWRF/' ; scripts=main+'/scripts/' ; output=main+'ARW/output/'

date=str(sys.argv[1]) ;  fcs_leaddays=3 ; #provide date as argument, forecast start and end date defined




fcs_st_date=dt.datetime.strptime(date,'%Y%m%d%H') ; fcs_ed_date=(dt.datetime.strptime(date,'%Y%m%d%H')+dt.timedelta(days=fcs_leaddays));  

file_date_list=[x.strftime('%Y-%m-%d_%H:%M:%S') for x in rrule.rrule(rrule.HOURLY,dtstart=fcs_st_date,until=fcs_ed_date)] ;
tint=1 ; stim=0 ; etim=len(file_date_list)-1 ; #tmp.shape[0]-1 


###########################################################################################################

svp1=0.6112 ; svp2=17.67 ; svp3=29.65 ; svpt0=273.15 ; r_d=287. ; r_v=461.6 ; ep_2=r_d/r_v  ; ep_3=0.622

COEFLC = 144.7 ; COEFLP = 2.24 ; COEFFC = 327.8 ; COEFFP = 10.36 ; EXPLC  = 0.88
EXPLP  = 0.75 ; EXPFC  = 1. ; EXPFP  = 0.7776


###############################################################################################################

files=main+'ARW/wrf_output/'+date+'/wrfout_d02_' ; 
wrf_list=[nf.Dataset(files+ii) for ii in file_date_list[:]]
Example #57
0
def weeks_between(start_date, end_date):
    ''' This function calculates the weeks between a start date and end date.
    I used this for some plots to get an array of ints'''
    weeks = rrule.rrule(rrule.WEEKLY, dtstart=start_date, until=end_date)
    return weeks.count()
Example #58
0
 def get_rrule_object(self):
     if self.rule is not None:
         params = self.rule.get_params()
         frequency = 'rrule.%s' % self.rule.frequency
         return rrule.rrule(eval(frequency), dtstart=self.start, **params)
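
# The eval() above turns a stored frequency name such as 'WEEKLY' into the matching
# dateutil constant; a sketch of the same lookup done with getattr instead of eval
# (the frequency string and params below are illustrative):
from datetime import datetime
from dateutil import rrule

frequency = 'WEEKLY'
params = {'count': 3}
rule = rrule.rrule(getattr(rrule, frequency), dtstart=datetime(2021, 1, 4), **params)
print(list(rule))   # Jan 4, Jan 11 and Jan 18 2021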
Example #59
0
 def age(self):
     if self.birthday:
         return rrule.rrule(rrule.YEARLY,
                            dtstart=self.birthday,
                            until=datetime.now()).count()
     return ""
Example #60
0
def days_back_list(days_back):
    end_date = prev_day(datetime.datetime.now())
    start_date = days_from(end_date, abs(days_back) * -1)
    return [day for day in rrule(DAILY, dtstart=start_date, until=end_date)]