Example #1
    def ywd_to_date(year, week_label, day):
        # Sundays mess with strptime, make sure it is 0.
        if day == 7: day = 0

        datestruct = strptime("%d %d %d" % (year, week_label, day), "%Y %W %w")
        date = datetime(datestruct[0], datestruct[1], datestruct[2])

        isocal_week = int(date.isocalendar()[1])
        formatted_week = int(date.strftime("%W"))

        # The week number from "%W" is zero when the new year starts
        # mid-week and the weekday falls in January, which breaks the
        # comparison with isocalendar() below.
        #
        # here we make sure the week is not zero by asking for the
        # weeknumber from Monday the same week.
        if formatted_week == 0:
            before_monday = date.isocalendar()[2] - 1
            monday = date - timedelta(days=before_monday)
            formatted_week = int(monday.strftime("%W"))

        # for years beginning on a Monday, all is fine since the
        # "week label" (the one people use) and the calculated week
        # are the same.
        if isocal_week == formatted_week:
            return date.date()

        # for the rest, turn the clock backwards since we are 1 week
        # ahead of time.
        behave = timedelta(days=7)
        return (datetime(datestruct[0], datestruct[1], datestruct[2]) - behave).date()
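A minimal standalone check (not from the original source) of the "%W"-vs-ISO mismatch that ywd_to_date() works around:

from datetime import datetime

# "%Y %W %w" week 1, day 1 is the first Monday of 2015 (Jan 5), but
# isocalendar() already calls that day week 2, since ISO week 1 of 2015
# starts on Monday, 2014-12-29.
d = datetime.strptime("2015 1 1", "%Y %W %w")
print(d.strftime("%W"), d.isocalendar()[1])  # -> 01 2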
Example #2
    def getSamplesDegroupedByWeek(self, df, funds):
        '''
        Degrouped by calendar week
        :param df:
        :param funds:
        :return:
        '''
        samples_sets = []
        fund_samples = []
        indexes = []

        b_y = df.index[0].isocalendar()[0]
        b_w = df.index[0].isocalendar()[1]
        #print b_y, b_w
        for date in df.index:
            year = date.isocalendar()[0]
            week = date.isocalendar()[1]
            if (b_y == year) and (b_w == week):
                indexes.append(date)
            else:
                # df_sample = df.loc[df.index.isin(indexes)]
                samples_sets.append(df.loc[df.index.isin(indexes)])
                indexes = []
                b_y, b_w = year, week
                indexes.append(date)
        # flush the final week's rows (the original dropped the last group)
        if indexes:
            samples_sets.append(df.loc[df.index.isin(indexes)])
        return samples_sets
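For comparison, a pandas-native sketch of the same degrouping; it assumes pandas >= 1.1 (where DatetimeIndex.isocalendar() exists) and a DatetimeIndex, and is an illustration rather than the original code:

import pandas as pd

def samples_by_week(df):
    # isocalendar() on a DatetimeIndex returns a frame with 'year', 'week'
    # and 'day' columns aligned to df's index, so grouping on (year, week)
    # yields one sub-frame per calendar week.
    iso = df.index.isocalendar()
    return [group for _, group in df.groupby([iso.year, iso.week])]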
Example #3
def weekly_stats(dictionary):
    #Monday is first day of week
    #This is done poorly and needs to be rewritten
    count_dict = {}  #create keys (weeks)
    weekly_runs = {}  #dictionary to hold the actual runs
    for date in dictionary:
        week_number = date.isocalendar()[1]
        count_dict[str(date.year) + "-" + str(week_number)] = []  #create list
        weekly_runs[str(date.year) + "-" + str(week_number)] = {}  #create dict
    for date in dictionary:
        week_number = date.isocalendar()[1]
        count_dict[str(date.year) + "-" + str(week_number)].append(
            float(dictionary[date]
                  ['distance_miles']))  #list of distances for each week
        weekly_runs[str(date.year) + "-" +
                    str(week_number)][date] = dictionary[date]

    final_dict = {}
    for week in count_dict:

        final_dict[week] = {}
        final_dict[week]['run_dict'] = weekly_runs[
            week]  #add the actual runs to the final dictionary
        final_dict[week]['run_count'] = len(count_dict[week])
        final_dict[week]['miles_ran'] = sum(count_dict[week])
        week_name = week.split('-')
        final_dict[week]['year'] = str(week_name[0])
        final_dict[week]['week'] = str(week_name[1])
        week_datetime = datetime.datetime.strptime(week + '-1', "%Y-%W-%w")
        final_dict[week]['date_human'] = str(week_datetime.year) + "-" + str(
            week_datetime.month) + "-" + str(week_datetime.day)

    return final_dict
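A caveat worth noting (an observation, not part of the source): keys built from date.year plus the ISO week number mix two calendars, and the "%Y-%W-%w" round-trip uses the non-ISO week on top of that. Around New Year the two diverge:

import datetime

d = datetime.date(2019, 12, 30)
print(d.year, d.isocalendar()[1])  # -> 2019 1  (really ISO week 1 of 2020)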
Example #4
 def create_report(self, cr, uid, date, user_id, context = None):
     month = date.month
     year = str(date.year)
     quarter = 0
     if month <4:
         quarter = 1
     elif month <7:
         quarter = 2
     elif month < 10:
         quarter = 3
     else:
         quarter = 4
     
     vals = {
         'recruiter_id': user_id,
         'date': date,
         'open_deals': self.count_deal_open(cr, uid, date, user_id, context=context),
         'open_handover_deals': self.count_deal_open_hendover(cr, uid, date, user_id, context=context),
         #'meeting_deals': self.count_meeting_deal(cr, uid, date, user_id, context=context),
         'won_deals': self.count_deal_won(cr, uid, date, user_id, context=context),
         #'transfer_candidates': self.count_transfer_candidates(cr, uid, date, user_id, context=context),
         'lost_deals': self.count_deal_lost(cr, uid, date, user_id, context=context),
         'cease_deals': self.count_deal_cease(cr, uid, date, user_id, context=context),
         'closed_tasks': self.count_task_closed(cr, uid, date, user_id, context=context),
         'name': str(user_id)+'-'+str(date.isocalendar()[1]).zfill(2),
         'year': year,
         'month': year+'-M'+ str(month).zfill(2),
         'week_number': year+'-W'+str(date.isocalendar()[1]).zfill(2),
         'quarter': year+'-Q'+str(quarter),
     }
     return super(jp_report_recruitment, self).create(cr, uid, vals, context=context)
Example #5
    def get_two_week_sprint_name(self, timeStruct):
        """ timeStruct = Some time in the sprint, assuming the sprint starts on Monday"""

        weekDay = date.weekday(timeStruct)
        weekNumber = date.isocalendar(timeStruct)[1]

        if (weekNumber & 1) == 1:  # odd weekNumber
            sprintName = timeStruct.strftime("%y") + "W" + str(
                weekNumber).zfill(2)
            sprintStartDate = timeStruct + timedelta(days=-weekDay)
            sprintEndDate = timeStruct + timedelta(weeks=2, days=-weekDay)

        else:  # even weekNumber
            tempTimeStruct = timeStruct + timedelta(weeks=-1)
            year = date.isocalendar(tempTimeStruct)[0]
            sprintName = str(year)[-2] + str(year)[-1] + "W" + str(
                date.isocalendar(tempTimeStruct)[1]).zfill(
                    2)  # subtracting a week to make sprint name
            sprintStartDate = tempTimeStruct + timedelta(days=-weekDay)
            sprintEndDate = tempTimeStruct + timedelta(weeks=2, days=-weekDay)

        sprintStart = sprintStartDate.strftime(
            "%Y/%m/%d 12:00")  #'yyyy/mm/dd 12:00'
        sprintEnd = sprintEndDate.strftime(
            "%Y/%m/%d 12:00")  #'yyyy/mm/dd 12:00'

        return sprintName, sprintStart, sprintEnd
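A quick parity check for the naming scheme above (hypothetical dates, standard library only): an even ISO week is folded back onto the preceding odd week, so both weeks carry one sprint name.

from datetime import date

print(date(2021, 5, 12).isocalendar()[1])  # -> 19 (odd week: opens sprint "21W19")
print(date(2021, 5, 19).isocalendar()[1])  # -> 20 (even week: still sprint "21W19")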
Example #6
def weekly_stats(dictionary):
    #same as monthly stats but for each ISO week (Monday through Sunday)
    #point to find week that has highest miles or most runs
    count_dict = {}  #create keys (weeks)
    for date in dictionary:
        #day_of_month = datetime.datetime.now().day
        #week_number = ((date.day - 1) // 7)
        week_number = date.isocalendar()[1]
        count_dict[str(date.year) + "-" + str(week_number)] = []
    for date in dictionary:
        week_number = date.isocalendar()[1]
        count_dict[str(date.year) + "-" + str(week_number)].append(
            float(dictionary[date]['distance_miles']))

    final_dict = {}
    for week in count_dict:
        final_dict[week] = {}
        final_dict[week]['run_count'] = len(count_dict[week])
        final_dict[week]['miles_ran'] = sum(count_dict[week])
        week_name = week.split('-')
        final_dict[week]['year'] = str(week_name[0])
        final_dict[week]['week'] = str(week_name[1])
        week_datetime = datetime.datetime.strptime(week + '-1', "%Y-%W-%w")
        final_dict[week]['date_human'] = str(week_datetime.year) + "-" + str(
            week_datetime.month) + "-" + str(week_datetime.day)

    return final_dict
Example #7
    def queryset(self, request, queryset):
        """
        Returns the filtered queryset based on the value
        provided in the query string and retrievable via
        `self.value()`.
        """
        today = date.today()
        #isocalendar : Return a 3-tuple, (ISO year, ISO week number, ISO weekday); weekday runs Monday = 1 .. Sunday = 7
        isocal = date.isocalendar(today)
        curr_year = today.year
        curr_month = today.month
        week_beginning = today- timedelta(days=date.isocalendar(today)[2])
        week_end = today + timedelta(days=(6 - date.isocalendar(today)[2]))


        if self.value() == 'ended':
            return queryset.filter(end_date__lt=today)
        if self.value() == 'week':
            return queryset.filter(end_date__range=(week_beginning, week_end))
        if self.value() == 'month':
            return queryset.filter(end_date__month=curr_month)
        if self.value() == 'year':
            return queryset.filter(end_date__year=2016)
        if self.value() == 'nextyear':
            return queryset.filter(end_date__year=curr_year+1)
Example #8
 def create_report(self, cr, uid, date, user_id, context = None):
     month = date.month
     year = str(date.year)
     
     quarter = 0
     if month <4:
         quarter = 1
     elif month <7:
         quarter = 2
     elif month < 10:
         quarter = 3
     else:
         quarter = 4
     vals = {
         'user_id': user_id,
         'date': date-timedelta(days=1),
         'created_deals': self.count_deal_new(cr, uid, date, user_id, context=context),
         'invoice_deals': self.count_deal_invoice(cr, uid, date, user_id, context=context),
         'added_leads': self.count_lead_new(cr, uid, date, user_id, context=context),
         'closed_tasks': self.count_task_closed(cr, uid, date, user_id, context=context),
         'created_offers': self.count_offers_new(cr, uid, date, user_id, context=context),
         'created_contracts': self.count_contracts_new(cr, uid, date, user_id, context=context),
         'name': str(user_id)+'-'+str(date.isocalendar()[1]).zfill(2),
         'year': year,
         'month': year+'-M'+ str(month).zfill(2),
         'week_number': year+'-W'+str(date.isocalendar()[1]).zfill(2),
         'quarter': year+'-Q'+str(quarter),
     }
     return super(jp_report_sales, self).create(cr, uid, vals, context=context)
Example #9
def get_menu_by_day(day):
    week_number_of_given_day = date.isocalendar(day)[1]
    week_name_of_given_day = f'{date.isocalendar(day)[0]}W{date.isocalendar(day)[1]}'

    # detect invalid input
    current_week_number = date.isocalendar(date.today())[1]
    # if week of given day is in future
    if week_number_of_given_day > current_week_number:
        raise ObjectDoesNotExist

    query_response = table.query(
        KeyConditionExpression=Key('year_week').eq(week_name_of_given_day))

    # if no entry in db
    if query_response['Count'] == 0:
        # detect invalid input
        # if day is in the past and no records are found
        if week_number_of_given_day < current_week_number:
            raise ObjectDoesNotExist

        pdf_obj = BytesIO(scrap_pdf())
        menu = ReadFromMenu(pdf_obj)
        lunch_list = menu.get_lunch_list()
        dinner_list = menu.get_dinner_list()

        week = {'meals': {}}

        for i in range(7):
            week['meals'][str(i)] = {
                'lunch': lunch_list[i],
                'dinner': dinner_list[i],
            }

        meals_of_week = week['meals']

        # create item in ddb table
        table.put_item(Item={
            'year_week': week_name_of_given_day,
            'week': meals_of_week
        })

        upload_obj_to_pdf_bucket(pdf_obj, week_name_of_given_day + '.pdf')

        weekday_of_given_day = str(date.weekday(day))

        lunch_of_given_day = meals_of_week[weekday_of_given_day]['lunch']
        dinner_of_given_day = meals_of_week[weekday_of_given_day]['dinner']

        return lunch_of_given_day, dinner_of_given_day

    # object exist in db
    else:
        meals_of_given_week = query_response['Items'][0]['week']
        weekday_of_given_day = str(date.weekday(day))

        lunch_of_given_day = meals_of_given_week[weekday_of_given_day]['lunch']
        dinner_of_given_day = meals_of_given_week[weekday_of_given_day][
            'dinner']
        return lunch_of_given_day, dinner_of_given_day
Example #10
 def add_features(self):
     '''
     Adds new columns to the DataFrame which are needed for csv file output.
     '''
     # add columns needed for analysis
     self.df['pub_date'] = pd.to_datetime(self.df['pub_date'])
     self.df['pub_week'] = self.df.pub_date.map(lambda x: date.isocalendar(x)[1])
     self.df['pub_year'] = self.df.pub_date.map(lambda x: date.isocalendar(x)[0])
     self.df['pub_month'] = self.df.pub_date.map(lambda x: x.month)
     self.df['pub_week_date'] = \
         self.df.pub_date.map(lambda x : x.date() + tdel(0-x.date().weekday()))
                                                  # 0 -> Monday of the pub_week
     self.df['pub_week_date_str'] = \
         self.df.pub_date.map(lambda x : (x.date() + tdel(0-x.date().weekday()))
                                         .strftime("%Y-%m-%d"))
Example #11
def parse_week(url, date, canteen):
    url += '/{0}/{1:0>2}/'.format(*date.isocalendar())
    document = parse(urlopen(url).read())
    week_data = document.find('table', id='week-menu')
    if week_data is None:
        print('week not found')
        return
    weekDays = extractWeekDates(week_data.thead.find_all('th')[0].text)
    for category_tr in week_data.find_all('tr'):
        category = category_tr.find('th').text
        i = 0
        for day_td in category_tr.find_all('td'):
            for meal_data in day_td.find_all('p', 'dish'):
                if not meal_data.find('strong'):
                    continue
                name = extra_regex.sub('', meal_data.find('strong').text)
                name = strip_regex.sub(' ', name).strip()
                if len(name) > 250:
                    name = name[:245] + '...'
                notes = [span['title'] for span in meal_data.find_all('span', 'tooltip')]
                notes += [img['title'] for img in meal_data.find_all('img')]
                prices = price_regex.findall(meal_data.find('span', 'price').text)
                canteen.addMeal(weekDays[i], category, name,
                                list(set(notes)),
                                prices, ('student', 'employee', 'other')
                                )
            i += 1
Example #12
 def _to_tuple(self, date):
     '''Convert the given date to the tuple according 
        to what period type we want'''
     if self.granularity == 'days':
         return tuple(date.isoformat()[:10].split('-'))
     elif self.granularity == 'weeks':
         return date.isocalendar()[:2]
Example #13
def day_index_education(date, fy, fw, fd):
    r"""Return a tuple `(weekday, week)` where `weekday` is the day
    number of the date (0 for monday) and `week` is the number of the
    current week. Return `(-1, -1)` if the date is before the date of
    days `fd` of week number `fw` of year `fy`. The zero week is the
    week containing the first of september.

    EXEMPLES::

    >>> import datetime
    >>> d = datetime.datetime(2020, 9, 1)
    >>> d.isocalendar()
    (2020, 36, 2)
    >>> day_index_education(datetime.datetime(2021, 1, 1), 2020, 36, 2)
    (17, 4)
    >>> day_index_education(datetime.datetime(2020, 12, 31), 2020, 36, 2)
    (17, 3)
    """
    y, w, d = date.isocalendar()
    if y > fy:
        return (52 + w - fw - int(d == 0), d - 1 + (7 * int(d == 0)))
    if y == fy:
        if w >= fw:
            if d >= fd:
                return (w - fw - int(d == 0), d - 1 + (7 * int(d == 0)))
    return (-1, -1)
Example #14
def parse_week(url, date, canteen):
    url += '/{0}/{1:0>2}/'.format(*date.isocalendar())
    document = parse(urlopen(url).read())
    week_data = document.find('table', id='week-menu')
    if week_data is None:
        print('week not found')
        return
    weekDays = extractWeekDates(week_data.thead.find_all('th')[0].text)
    for category_tr in week_data.find_all('tr'):
        category = category_tr.find('th').text
        i = 0
        for day_td in category_tr.find_all('td'):
            for meal_data in day_td.find_all('p', 'dish'):
                if not meal_data.find('strong'):
                    continue
                name = extra_regex.sub('', meal_data.find('strong').text)
                name = strip_regex.sub(' ', name).strip()
                if len(name) > 250:
                    name = name[:245] + '...'
                notes = [
                    span['title']
                    for span in meal_data.find_all('span', 'tooltip')
                ]
                notes += [img['title'] for img in meal_data.find_all('img')]
                prices = price_regex.findall(
                    meal_data.find('span', 'price').text)
                canteen.addMeal(weekDays[i], category, name, list(set(notes)),
                                prices, ('student', 'employee', 'other'))
            i += 1
Example #15
    def get_actual(self):
        week_no = date.isocalendar(date.today())[1]
        is_even_week = bool((week_no + 1) % 2)

        items = self.select_related("soup", "meal", "dessert").filter(even_week=is_even_week)

        return dict([(one.day, one) for one in items])
Example #16
    def get_week_range_by_date(self, date):
        """Find the first/last day of the week for the given day.
        Assuming weeks start on Sunday and end on Saturday.

        Returns a tuple of ``(start_date, end_date)``.

        """
        # isocalendar calculates the year, week of the year, and day
        # of the week.
        # dow is Mon = 1, Sat = 6, Sun = 7
        year, week, dow = date.isocalendar()

        # Find the first day of the week.
        if dow == 7:
            # Since we want to start with Sunday, let's test for that
            # condition.
            start_date = date
        else:
            # Otherwise, subtract `dow` number days to get the first day
            start_date = date - timedelta(dow)

        # Now, add 6 for the last day of the week (i.e., count up to Saturday)
        end_date = start_date + timedelta(6)

        return (start_date, end_date)
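A standalone sketch mirroring the method above, handy for a quick check (hypothetical function name, standard library only):

from datetime import date, timedelta

def week_range_sunday(d):
    year, week, dow = d.isocalendar()
    start = d if dow == 7 else d - timedelta(dow)
    return start, start + timedelta(6)

print(week_range_sunday(date(2023, 6, 14)))  # a Wednesday
# -> (datetime.date(2023, 6, 11), datetime.date(2023, 6, 17)), i.e. Sunday..Saturday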
Example #17
def readSpringAheadFile():
    inData = {}
    global nTotalNumWeeks
    if len(springAheadFile) <= 0:
        return (inData)
    with open(springAheadFile, newline='') as fileIn:
        inDict = csv.DictReader(fileIn, delimiter=',', quotechar='"')
        for row in inDict:
            nColCnt = len(row.items()) - 1
            if nColCnt > nTotalNumWeeks:
                nTotalNumWeeks = nColCnt
            usr = row['User']
            date = datetime.strptime(row['Date'], '%m/%d/%Y')
            weekNum = date.isocalendar()[1]
            if usr in inData:
                dayHours = float(row['Hours'])
                if weekNum in inData[usr]:
                    inData[usr][weekNum] = float(
                        inData[usr][weekNum]) + dayHours
                else:
                    inData[usr][weekNum] = dayHours
            else:
                stats = {}
                stats['SpringAhead Rate'] = float(
                    row['Bill Rate'].split("$")[1])
                stats['Avg Run'] = float(40)
                stats["Hours"] = ""
                stats["Dollars"] = ""
                stats[weekNum] = float(row['Hours'])
                inData[usr] = stats

    return (inData)
Example #18
def week_of(dateStr):
    date = convert_to_date(dateStr)
    if date:
        iso_year, iso_week, iso_weekday = date.isocalendar()
        return iso_week
    else:
        return None
Example #19
def getweeknumber(day):
    """return iso week number for datetime.date object
    :param day: date
    :type day: datetime.date()
    :return: weeknumber
    :rtype: int
    """
    return date.isocalendar(day)[1]
Example #21
	def getCurrentLogfileName(self):
		# Filename: log_<year>_<week>_<day>_run<run count>.json
		date = self.application_data_.progress_[1]
		week = date.isocalendar()[1]
		year = date.year
		day = date.weekday()
		run_count = self.application_data_.run_count_
		return "log_" + str(year) + "_" + str(week) + "_" + str(day) + "_run" + str(run_count) + ".json"
Example #22
    def get_actual(self):
        week_no = date.isocalendar(date.today())[1]
        is_even_week = bool((week_no + 1) % 2)

        items = self.select_related('soup', 'meal', 'dessert').\
            filter(even_week=is_even_week)

        return dict([(one.day, one) for one in items])
Example #23
 def iso_year_for_date(date):
     """
         returns the iso year for the given date
         e.g. '2014-09-16' -> 2014
          the iso year can differ from the gregorian year
         e.g. '2012-01-01' -> 2011
     """
     return date.isocalendar()[0]
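Demonstrating the docstring's second example (standard library only):

from datetime import date

print(date(2012, 1, 1).isocalendar()[0])  # -> 2011 (2012-01-01 is ISO 2011-W52-7)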
Example #24
def update_weekly_posts(weekly_posts_list, date_str, start, end):
    # Takes the post's date and works out which week it was posted in,
    # to fill the corresponding week slot in the list
    date = convert_to_datetime(date_str)
    date = date.isocalendar()[1]
    date = date - start
    if date >= 0 and date <= abs(end - start):
        weekly_posts_list[date] += 1
    return weekly_posts_list
Example #25
    def get_cadence_fixversion_name(self, timeStruct):
        """ timeStruct = Some time in the sprint assuming sprint starts at monday"""

        weekDay = date.weekday(timeStruct)
        weekNumber = date.isocalendar(timeStruct)[1]

        if (weekNumber & 1) == 1:  # odd weekNumber
            timeStruct = timeStruct + timedelta(
                weeks=1
            )  # first week of the sprint: add one week to land on the biweekly deadline

        release_date = (timeStruct + timedelta(days=4 - weekDay)).strftime(
            "%Y-%m-%d"
        )  # setting the release date to friday in the release date week 'yyyy/mm/dd'
        fixversion_name = timeStruct.strftime("%y") + str(
            date.isocalendar(timeStruct)[1]).zfill(2)

        return fixversion_name, release_date
Example #26
def sacasemana(fecha):
	s = fecha
	args = time.strptime(s, "%Y-%m-%d")[:3]
	date = datetime.date(*args)
	weeknum = date.isocalendar()[1]

	return weeknum
Example #27
 def term_week_of_date(self, date):
   if not self.is_date_within_term(date):
     log_str = str(date) + ' outside term range, defaulting to '
     d = ensure_date(date)
     if d > self.end:
       log_str += 'last week'
       print log_str
       return LAST_WEEK
     if d < self.start:
       log_str += 'first week'
       print log_str
       return FIRST_WEEK
   return (date.isocalendar()[1] - self.start.isocalendar()[1])
Example #28
    def load(self, address):
        try:
            web_handle = urllib2.urlopen(address)
            web_text = web_handle.read()
     #       matches = sre.findall('\<td class="pl"\>(.*?)\&', web_text)
     #       matches = sre.findall('\>(.*?)\&nbsp;\<', web_text)
            date_match = sre.findall('(\d{1,2}\-\d{1,2}\-\d{2})', web_text)

            lines = sre.findall('\<td class="plleft"\>(.*?)\</td\>\</tr\>', \
                                web_text)
                
            if (date_match != []):
                date = date_match[1];
                date = datetime.strptime(date, "%m-%d-%y")
                date = date.isocalendar()
                
                for line in lines:
                    
                    artist = ""
                    song = ""
                    album = ""

                    matches = sre.findall('\<td class="pl"\>(.*?)\&nbsp', line)
                    tracker = 1
                    playlist = True
                    
                    for match in matches:
                        if tracker == 1:
                            artist = match
                            tracker = 2
                        elif tracker == 2:
                            song = match
                            tracker = 3
                        elif tracker == 3:
                            album = match
                            self.add_song(artist, song, album, date)
 
                            tracker = 4
                        elif tracker ==4:
                            tracker =1 
                        else:
                            print "Wtf this shouldn't happen."
            else:
                playlist = False
                pass
                print "No playlist checkpoint 1"

            return playlist
        
        except urllib2.HTTPError, e:
            print "Cannot retreieve URL: HTTP Error Code", e.code
Example #29
    def get_one_week_sprint_name(self, timeStruct):
        """ timeStruct = Some time in the sprint, assuming the sprint starts on Monday"""

        weekDay = date.weekday(timeStruct)
        weekNumber = date.isocalendar(timeStruct)[1]
        sprintName = timeStruct.strftime("%y") + "W" + str(weekNumber).zfill(2)
        sprintStartDate = timeStruct + timedelta(days=-weekDay)
        sprintEndDate = timeStruct + timedelta(weeks=1, days=-weekDay)
        sprintStart = sprintStartDate.strftime(
            "%Y/%m/%d 12:00")  #'yyyy/mm/dd 12:00'
        sprintEnd = sprintEndDate.strftime(
            "%Y/%m/%d 12:00")  #'yyyy/mm/dd 12:00'

        return sprintName, sprintStart, sprintEnd
Example #30
def _handle_iso_week_number(date, timestring, index_timestamp):
    date_iso = date.isocalendar()
    iso_week_str = "{Y:04d}{W:02d}".format(Y=date_iso[0], W=date_iso[1])
    greg_week_str = datetime.strftime(date, "%Y%W")

    # Edge case 1: ISO week number is bigger than Greg week number.
    # Ex: year 2014, all ISO week numbers were 1 more than in Greg.
    # Edge case 2: 2010-01-01 in ISO: 2009.W53, in Greg: 2010.W00
    # For Greg converting 2009.W53 gives 2010-01-04, converting back
    # to same timestring gives: 2010.W01.
    if (iso_week_str > greg_week_str or datetime.strftime(date, timestring) != index_timestamp):

        # Remove one week in this case
        date = date - timedelta(days=7)
    return date
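Edge case 2 from the comments above can be reproduced directly (standard library only):

from datetime import datetime

d = datetime(2010, 1, 1)
print(d.isocalendar()[:2])           # -> (2009, 53)
print(datetime.strftime(d, "%Y%W"))  # -> 201000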
Example #31
def weekly_traffic_counts(daily_counts, sigma=None):
	# Bin the short term counts by week 
	by_week = {}
	for date, count in daily_counts.iteritems():
		if date == 'AADT':
			continue
		week = date.isocalendar()[1]
		try:
			by_week[week][date] = count
		except KeyError:
			by_week[week] = {date: count}

	# TODO: Inefficient to go through again - fix 
	if sigma:
		return filter_weekly_traffic_counts(by_week, sigma)
	return by_week, None
Example #32
def week_range(date):
    # dow is Mon = 1, Sat = 6, Sun = 7
    year, week, dow = date.isocalendar()

    # Find the first day of the week.
    if dow == 7:
        # Since we want to start with Sunday, let's test for that condition.
        start_date = date
    else:
        # Otherwise, subtract `dow` number days to get the first day
        start_date = date - timedelta(dow)

    # Now, add 6 for the last day of the week (i.e., count up to Saturday)
    end_date = start_date + timedelta(6)

    return (start_date, end_date)
Example #33
    def articles_topic_week(self, outfile): #articles per topic per week
        '''
        INPUT DataFrame
        OUTPUT DataFrame

        creates DataFrame to be written to csv file which is used by web app
        '''
        #columns to be written in csv file
        col0 = ['topic_sorted','pub_week','pub_week_date','headline','web_url']
        col1 = ["headline"+str(i) for i in range(5)]
        col2 = ['url'+str(i) for i in range(5)]

        d = self.articles_week_dict()
        for topic in self.df['topic_sorted'].order().unique().tolist():
            for pub_week_date in self.df['pub_week_date'].order().unique().tolist():
                pub_week_date_str = pub_week_date.strftime("%Y-%m-%d")

                cond = "topic_sorted == " + str(topic) + \
                       " & pub_week_date_str == '" + pub_week_date_str + "'"
                dg = self.df.query(cond).sort(['weight'], ascending=[0])[col0]

                headlines = [unicodedata.normalize('NFKD', h).encode('ascii','ignore')
                             for h in dg['headline'].values.tolist()]
                urls = dg['web_url'].values.tolist()

                row = {}
                #if (len(urls) > 0):
                row['n_articles'] = len(urls)
                row['fraction'] = len(urls)/d[pub_week_date_str]
                row['pub_week_date'] = pub_week_date_str
                row['topic'] = topic
                row['pub_week'] = date.isocalendar(pub_week_date)[1]
                for i in range(len(col1)):
                    if i < len(urls):
                        row[col2[i]] = urls[i]
                        row[col1[i]] = headlines[i]
                    else:
                        row[col2[i]] = "x" # just a place holder
                        row[col1[i]] = "x"

                self.data.append(row)

        newdf = pd.DataFrame(self.data)
        # rearrange columns
        newdf = newdf[['topic','pub_week','n_articles','fraction','pub_week_date']+
                        col1+col2]
        newdf.to_csv(outfile, index=False)
Example #34
def month_calendar(date):
    '''
        Returns a month calendar list of weeks [with nested list of days] for the given date. 
    '''
    today_day = date.today().day
    today_month = date.today().month

    month = date.month
    year = date.year
    curr_week = date.isocalendar()[1]
    first_week = datetime(year, month, 1).isocalendar()[1]    
    cal = calendar.HTMLCalendar()
    
    return {'month_calendar': cal.monthdayscalendar(int(year), int(month)),
            'month_name': month_name(year, month),
            'year': year, 'month': month, 'curr_week': curr_week,
            'first_week': first_week, 'today_day': today_day, 'today_month': today_month}
Example #35
 def update_report(self, cr, uid, date, user_id, context = None):
     record_id = self.search(cr, uid, [('name','=',str(user_id)+'-'+str(date.isocalendar()[1]).zfill(2))])
     if record_id != []:
         record = self.browse(cr, uid, record_id)[0]
         vals = {
             'date': date,
             'created_deals': record.created_deals+self.count_deal_new(cr, uid, date, user_id, context=context),
             'invoice_deals': record.invoice_deals+self.count_deal_invoice(cr, uid, date, user_id, context=context),
             'added_leads': record.added_leads+self.count_lead_new(cr, uid, date, user_id, context=context),
             'closed_tasks': record.closed_tasks+self.count_task_closed(cr, uid, date, user_id, context=context),
             'created_offers': record.created_offers+self.count_offers_new(cr, uid, date, user_id, context=context),
             'created_contracts': record.created_contracts+self.count_contracts_new(cr, uid, date, user_id, context=context),
         }
         super(jp_report_sales, self).write(cr, uid, record_id, vals, context=context)
     else:
         self.create_report(cr, uid, date, user_id, context=context)
     return 
Example #36
def get_due_string(date: date) -> Tuple[str, int]:
    # Users with this role shall be punished by
    if g.user.Role == -1:
        return str(date), 0

    now = datetime.now().date()
    weeks_between = date.isocalendar()[1] - now.isocalendar()[1]
    if weeks_between == 0:  # Same ISO week.
        days_between = (date - now).days
        urgent_level = get_urgent_level(days_between)
        if -2 <= days_between <= 2:
            if days_between == -2:
                return "Vorgestern", urgent_level
            elif days_between == -1:
                return "Gestern", urgent_level
            elif days_between == 0:
                return "Heute", urgent_level
            elif days_between == 1:
                return "Morgen", urgent_level
            elif days_between == 2:
                return "Ãœbermorgen", urgent_level
        return "", urgent_level
    elif weeks_between <= 1:  # Due date is at most one week ahead (or already past).
        days_between = (date - now).days
        day_string = ""
        if weeks_between == 1:
            day_string = "Nächste Woche "
        if date.weekday() == 0:
            day_string += "Montag"
        elif date.weekday() == 1:
            day_string += "Dienstag"
        elif date.weekday() == 2:
            day_string += "Mittwoch"
        elif date.weekday() == 3:
            day_string += "Donnerstag"
        elif date.weekday() == 4:
            day_string += "Freitag"
        elif date.weekday() == 5:
            day_string += "Sammstag 😯"
        elif date.weekday() == 6:
            day_string += "Sonntag 😯"
        return day_string, get_urgent_level(days_between)
    else:
        return str(date), 0
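One caveat (an observation, not in the source): subtracting raw isocalendar() week numbers misbehaves across the year boundary, so weeks_between can go far negative for nearby dates:

from datetime import date

due, now = date(2022, 1, 3), date(2021, 12, 27)
print(due.isocalendar()[1] - now.isocalendar()[1])  # -> -51, though the dates are 7 days apart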
Example #37
def consecutiveWeeks(dates):
    dates = sorted(
        [datetime.strptime(date, "%Y-%m-%d").date() for date in dates])
    consecutive = 0

    if len(dates) > 0:
        week_numbers = [date.isocalendar()[1] for date in dates]
        week_numbers = list(np.unique(week_numbers))

        # uses index to decrement delta
        sequence = [e - i for i, e in enumerate(week_numbers)]
        # counts each value in list
        counter = Counter(sequence)
        # gets max key
        max_key = max(counter.items(), key=operator.itemgetter(1))[0]
        # gets max key value
        consecutive = counter[max_key]

    return consecutive
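A small usage sketch with made-up dates (assumes numpy as np, collections.Counter and operator are imported, as the function itself requires):

# ISO weeks 9, 10, 11 and 14: the longest consecutive streak is 3.
print(consecutiveWeeks(["2021-03-01", "2021-03-08", "2021-03-15", "2021-04-05"]))  # -> 3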
Example #38
 def update_report(self, cr, uid, date, user_id, context = None):
     record_id = self.search(cr, uid, [('name','=',str(user_id)+'-'+str(date.isocalendar()[1]).zfill(2))])
     if record_id != []:
         record = self.browse(cr, uid, record_id)[0]
         vals = {
             'date': date,
             'open_deals': record.open_deals+self.count_deal_open(cr, uid, date, user_id, context=context),
             'open_handover_deals': self.count_deal_open_hendover(cr, uid, date, user_id, context=context),
             #'meeting_deals': record.meeting_deals+self.count_meeting_deal(cr, uid, date, user_id, context=context),
             'won_deals': record.won_deals+self.count_deal_won(cr, uid, date, user_id, context=context),
             #'transfer_candidates': record.transfer_candidates+self.count_transfer_candidates(cr, uid, date, user_id, context=context),
             'lost_deals': record.lost_deals+self.count_deal_lost(cr, uid, date, user_id, context=context),
             'cease_deals': record.cease_deals+self.count_deal_cease(cr, uid, date, user_id, context=context),
             'closed_tasks': record.closed_tasks+self.count_task_closed(cr, uid, date, user_id, context=context),
         }
         super(jp_report_recruitment, self).write(cr, uid, record_id, vals, context=context)
     else:
         self.create_report(cr, uid, date, user_id, context=context)
     return 
Example #39
def week_range(date):
    """Find the first/last day of the week for the given day.
    Assuming weeks start on Sunday and end on Saturday.

    """
    # isocalendar calculates the year, week of the year, and day of the week.
    # dow is Mon = 1, Sat = 6, Sun = 7
    year, week, dow = date.isocalendar()

    # Find the first day of the week (dow == 7 means the date is already Sunday).
    if dow == 7:
        start_date = date
    else:
        # Otherwise, subtract `dow` number days to get the first day
        start_date = date - timedelta(dow)

    # Now, add 6 for the last day of the week (i.e., count up to Saturday)
    end_date = start_date + timedelta(6)

    return str(start_date) + ',' + str(end_date)
Example #40
def estimate_disaggregate(filename, short, year):
	name = short + str(year)
	# Get the traffic per day for each counter by year 
	traffic = traffic_counts(filename, year)
	# Get the long term counters 
	longterms = [location for location in traffic.keys() if not location.startswith(short)]
	# The short term counter 
	shortterm = traffic[name]
	# Bin the short term counts by week 
	short_weeks = {}
	for date, count in shortterm.iteritems():
		if date == 'AADT':
			continue
		week = date.isocalendar()[1]
		try:
			short_weeks[week][date] = count
		except KeyError:
			short_weeks[week] = {date: count}
	# Sample by week - get the estimated AADT for each day of the week 
	# and then take average (sdb * (aadt / db) = estimate)
	estimates = {}
	for week, days in short_weeks.iteritems():
		# Find estimates for each day of the week for each long-term counter
		daily_estimates = defaultdict(int)
		for day, count in days.iteritems():
			# Use each counter as a sample long-term counter 
			for longterm in longterms:
				db = traffic[longterm][day]
				aadt = traffic[longterm]['AADT']
				factor = aadt / float(db)
				estimate = count * factor
				daily_estimates[longterm] += estimate
		# Now find averages 
		for longterm, daily_total in daily_estimates.iteritems():
			average_estimate = daily_total / float(len(days))
			daily_estimates[longterm] = average_estimate 
		# Now save 
		estimates[week] = daily_estimates 
	# Get the actual aadt for this counter 
	actual = traffic[name]['AADT']
	return estimates, actual
Example #41
 def update_cache(user_id, date):
     date_tuple = date.isocalendar()
     year = date_tuple[0]
     week = date_tuple[1]
     delta = timedelta(days = date.weekday())
     begin_day = date.date() - delta;
     i = 0
     time = 0
     while i < 7:
         end_day = begin_day + timedelta(days = 1)
         checkins = Checkin.query.filter_by(user_id = user_id).filter(Checkin.time.between(begin_day, end_day)).order_by(Checkin.time)
         time += Checkin.get_work_time_for_checkins(checkins)
         begin_day = end_day
         i += 1
     cache = TimeCache.query.filter_by(user_id = user_id, year = year, week = week).first()
     if not cache:
         cache = TimeCache(user_id = user_id, week = week, year = year, time = time)
     else:
         cache.time = time;
     db.session.add(cache)
     return time
Example #42
    def _get_menu(self, location: str, date: date):
        pdf_menu_url = self.base_url.substitute(
            location=StudentenwerkPDFMenuParser.
            location_to_url_table[location],
            date=str(date.year)[2:] + str(date.isocalendar()[1]))
        resp = requests.get(pdf_menu_url)
        if not resp.ok:
            warn(
                f'Could not retrieve PDF menu from {pdf_menu_url}: [HTTP {resp.status_code}] {str(resp.content)}'
            )
        with tempfile.NamedTemporaryFile(suffix='.pdf') as tmp_pdf_file:
            tmp_pdf_file.write(resp.content)
            tmp_pdf_file.seek(0)

            tables = camelot.read_pdf(tmp_pdf_file.name)
            if len(tables) > 0:
                return tables[0].df

            warn(
                f'Could not parse PDF menu from {pdf_menu_url}: Table detection failed'
            )
            return None
Example #43
    def test_passage_export_filters(self, payload_version: PayloadVersion):
        date = datetime.fromisocalendar(2019, 11, 1)
        # create data for 3 cameras
        baker.make(
            'passage.PassageHourAggregation',
            camera_id=cycle(range(1, 4)),
            camera_naam=cycle(f'Camera: {i}' for i in range(1, 4)),
            date=date,
            year=date.year,
            week=date.isocalendar()[1],
            hour=1,
            _quantity=100,
        )
        api_version = to_api_version(payload_version)
        url = reverse(f'{api_version}:passage-export')
        response = self.client.get(url,
                                   dict(year=2019, week=12),
                                   HTTP_AUTHORIZATION='Token foo')
        assert response.status_code == 200
        lines = [x for x in response.streaming_content]
        assert len(lines) == 0

        response = self.client.get(url,
                                   dict(year=2019, week=11),
                                   HTTP_AUTHORIZATION='Token foo')
        assert response.status_code == 200
        lines = [x for x in response.streaming_content]

        # Expect the header and 3 lines
        assert len(lines) == 4

        response = self.client.get(url,
                                   dict(year=2019),
                                   HTTP_AUTHORIZATION='Token foo')
        assert response.status_code == 200
        lines = [x for x in response.streaming_content]

        # Expect the header and 3 lines
        assert len(lines) == 4
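Side note on the construction used above: datetime.fromisocalendar(year, week, day), available since Python 3.8, is the inverse of isocalendar():

from datetime import datetime

print(datetime.fromisocalendar(2019, 11, 1))  # -> 2019-03-11 00:00:00 (Monday of ISO week 11)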
Example #44
def calendar_json(request, year, month):
    year = int(year)
    month = int(month)

    date = datetime.strptime("%s-%s-%s" % (year, month, 1), "%Y-%m-%d")

    NOW = datetime.now()
    CURRENT_WEEK = date.isocalendar()[1]

    cal_month = calendar.monthcalendar(year, month)
    temp_cal = []

    #Build
    for week in cal_month:
        hours_count = 0
        for day in week:
            if int(day) > 0:
                day_date = datetime.strptime(
                    "%s-%s-%s" % (year, month, int(day)), "%Y-%m-%d")

                for reg in HourRegistration.objects.filter(
                        date=day_date, creator=Core.current_user()):
                    hours_count += float(reg.hours)

            if NOW == date:
                temp_cal.append((CURRENT_WEEK, day, "day today", hours_count))
            elif (day > 0):
                temp_cal.append((CURRENT_WEEK, day, "day", hours_count))
            else:
                temp_cal.append((CURRENT_WEEK, day, "", hours_count))

            hours_count = 0

        if CURRENT_WEEK == 52:
            CURRENT_WEEK = 0
        CURRENT_WEEK += 1

    return HttpResponse(JSONEncoder().encode(temp_cal),
                        mimetype='application/json')
Example #45
 def fake_snippet(self):
     """Creates a fake snippet for a random user from the given set."""
     user_email = random.choice(self.user_emails)
     user = self.users[user_email]
     dates = self.user_snippet_dates[user_email]
     date = None
     while True:
         date = iso_week_begin(self.faker.datetime.date())
         if date not in dates:
             break
     dates.add(date)
     (year, week, _) = date.isocalendar()
     tag_count = random.randint(0, min(3, len(self.words)))
     tag_texts = random.sample(self.words, tag_count)
     tags = [self.tags[text] for text in tag_texts]
     snippet = Snippet(
         user=user,
         text=self.faker.text.text(),
         year=year,
         week=week,
         tags=tags,
     )
     self.snippets.append(snippet)
Example #46
def calendar_json(request, year, month):
    year = int(year)
    month = int(month)

    date = datetime.strptime("%s-%s-%s" % (year, month, 1), "%Y-%m-%d")

    NOW = datetime.now()
    CURRENT_WEEK = date.isocalendar()[1]

    cal_month = calendar.monthcalendar(year, month)
    temp_cal = []

    #Build
    for week in cal_month:
        hours_count = 0
        for day in week:
            if int(day) > 0:
                day_date = datetime.strptime("%s-%s-%s" % (year, month, int(day)), "%Y-%m-%d")

                for reg in HourRegistration.objects.filter(date=day_date, creator=Core.current_user()):
                    hours_count += float(reg.hours)

            if NOW == date:
                temp_cal.append((CURRENT_WEEK, day, "day today", hours_count))
            elif(day > 0):
                temp_cal.append((CURRENT_WEEK, day, "day", hours_count))
            else:
                temp_cal.append((CURRENT_WEEK, day, "", hours_count))

            hours_count = 0

        if CURRENT_WEEK == 52:
            CURRENT_WEEK = 0
        CURRENT_WEEK += 1

    return HttpResponse(JSONEncoder().encode(temp_cal), mimetype='application/json')
Example #47
    def week(self, date):

        try:
            date = datetime.strptime(date, "%d.%m.%Y")
            year = date.year
            week = date.isocalendar()[1]
        except ValueError:
            return []

        activities = self.__repository._getAll()

        matching = filterBy(
            activities, lambda activity: year == datetime.strptime(
                activity.date, "%d.%m.%Y").year and week == datetime.strptime(
                    activity.date, "%d.%m.%Y").isocalendar()[1])
        shellSort(
            matching, lambda activity1, activity2:
            (datetime.strptime(activity1.date, "%d.%m.%Y") < datetime.strptime(
                activity2.date, "%d.%m.%Y")) or
            (activity1.date == activity2.date and datetime.strptime(
                activity1.time, '%H:%M') <= datetime.strptime(
                    activity2.time, '%H:%M')))

        return matching
Example #48
def _week_end_day(_date):
    weekday = date.isocalendar(_date)
    end_date = _date + timedelta(7-weekday[2])
    d = datetime(int(end_date.year),int(end_date.month),int(end_date.day),23,59,59,999)
    return d
Example #49
 def withdate(cls, date):
     """Return the week that contains the given datetime.date"""
     return cls(*(date.isocalendar()[:2]))
Example #50
 def key_func(x):
     d = date.isocalendar(x[0])
     return d[0], d[1]
Example #51
def _week_start_day(_date):
    weekday = date.isocalendar(_date)
    start_date = _date - timedelta(weekday[2]-1)
    d = datetime(int(start_date.year),int(start_date.month),int(start_date.day),0,0,0,0)
    return d
Example #52
def week_number(date):
    return date.isocalendar()[1]
Example #53
def get_year_and_week_number(date):
    return date.isocalendar()[:2]
Example #54
def counter(filename):
	lines = open(filename).read().splitlines()
	metadata = lines[0].split(',')
	locations = {}

	# metric counts will be stored here for each location 
	data = {}
	for year in years:
		for index, value in enumerate(metadata[3:]):
			if is_location(value):
				name = value + str(year)
				locations[index] = value 
				data[name] = {
					'weekend_traffic': 0,
					'weekday_traffic': 0,
					'num_weekdays': 0,
					'num_weekends': 0,
					'morning_traffic': 0,
					'midday_traffic': 0,
					'by_week': defaultdict(int)		# will collect counts for each week 
				}

	# go through each datapoint
	for line in lines[1:]:
		info = line.split(',')
		# skip any blank lines 
		if info[0] == '':
			continue 

		# day/month/year format
		date = datetime.strptime(info[0], '%d/%m/%Y')

		# only count in-season information
		if not is_in_season(date):
			continue 

		holiday = int(info[1])
		time = info[2]	
		week = date.isocalendar()[1]
		hour = int(time.split(':')[0])

		for i, aadt in enumerate(info[3:]): 
			# skip over in/out counts 
			if i not in locations:
				continue 
			# skip over blank entries for aadt
			if aadt == '':
				continue

			aadt = int(aadt) 
			name = locations[i] + str(date.year)

			# days start at 0:00, i.e., midnight  
			if time == '0:00' or time == '00:00':
				if is_weekday(date, holiday):
					data[name]['num_weekdays'] += 1
				else:
					data[name]['num_weekends'] += 1 

			# aggregate aadt counts 
			if is_weekday(date, holiday):
				data[name]['weekday_traffic'] += aadt 
			else:
				data[name]['weekend_traffic'] += aadt 

			# new week starts at weekday #0
			data[name]['by_week'][week] += aadt

			# increment morning/midday data if possible
			if is_morning(hour):
				data[name]['morning_traffic'] += aadt
			elif is_midday(hour):
				data[name]['midday_traffic'] += aadt

	# all data has been aggregated! now calculate indexes 
	no_data = []
	for location, counts in data.iteritems():
		num_days = float(counts['num_weekends'] + counts['num_weekdays'])

		if num_days == 0:
			print location, 'has no AADT'
			no_data.append(location)
			continue

		# WWI (ratio of average daily weekend traffic to average daily weekday traffic)
		avg_daily_weekend_traffic = counts['weekend_traffic'] / float(counts['num_weekends'])
		avg_daily_weekday_traffic = counts['weekday_traffic'] / float(counts['num_weekdays'])
		data[location]['WWI'] = avg_daily_weekend_traffic / avg_daily_weekday_traffic

		# AMI 1 (ratio of average morning to midday traffic)
		avg_morning_traffic = counts['morning_traffic'] / num_days
		avg_midday_traffic = counts['midday_traffic'] / num_days
		data[location]['AMI'] = avg_morning_traffic / avg_midday_traffic

		# PPI (avg of top 12 weeks / avg of following 20)
		week_vals = list(data[location]['by_week'].iteritems())
		week_vals.sort(key=lambda t: t[1], reverse=True)
 
		# print '====================', location, '================================'
		# for month_bin, count in week_vals:
		# 	print month_bin, '\t', count
		top_average = sum((t[1] for t in week_vals[:PPI_top])) / float(PPI_top)
		# average over the actual number of weeks in the slice (the original divided by one too many)
		bottom_average = sum((t[1] for t in week_vals[PPI_top:PPI_bottom])) / float(len(week_vals[PPI_top:PPI_bottom]))
		data[location]['PPI'] = top_average / bottom_average
		# print 'num weeks:', len(week_vals)
		# print 'sum of aadt for this year:', sum((t[1] for t in week_vals))
		# print 'average weekly aadt:', sum((t[1] for t in week_vals)) / float(len(week_vals))
		# print 'average weekly for top 12 months:', top_average
		# print 'average weekly for next 20 months:', bottom_average
		# print 'PPI:', data[location]['PPI']
		# print '=================================================================='

	# remove locations with no data
	for nd in no_data:
		del data[nd]

	return data 
Example #55
def weekly_view(request, year=None, week=None):
    """
    Current timesheet pulls the current week from the database. 
    if one doesn't exists, it creates in-mem object.
    If Post, saves it to the database
    """
    #TODO: have submit thru JSON
    year = year or date.today().year
    week = week or date.today().isocalendar()[1]
    
    if request.method == 'GET':
        user_projects = request.user.get_profile().projects
        start_week, end_week = determine_period(monday_of_week(int(week), int(year))) 
        extra_form = 1
        
        week_snapshot, timesheets = WeekSnapshot.objects.in_period(year, week, request.user)
        
        if not week_snapshot:
            week_snapshot = WeekSnapshot(user=request.user, year=year, week=week, start_week=start_week, end_week=end_week)
        else:
            extra_form = 0
            
        week_snapshot_form = WeekSnapshotForm(prefix="week_snapshot", instance=week_snapshot)
        
        TimesheetFormSet = modelformset_factory(Timesheet, can_delete=True, extra=extra_form, form=TimesheetForm)
        timesheet_form_set = TimesheetFormSet(queryset=timesheets)
        
        return 'timesheet', {'projects': user_projects, 'year': int(year), 'week': int(week), 'timesheet_form_set': timesheet_form_set, \
                             'week_snapshot': week_snapshot, 'week_snapshot_form': week_snapshot_form}
    else:
        is_draft = request.POST['is_draft'] == 'true'
        status = ApproverQueue.draft_status if is_draft else ApproverQueue.in_queue_status()
        
        week_snapshot = WeekSnapshot.objects.get_or_none(year=year, week=week, user=request.user)
        week_snapshot_form = WeekSnapshotForm(request.POST, prefix="week_snapshot", instance=week_snapshot)
        if week_snapshot_form.is_valid():
            week_snapshot = week_snapshot_form.save(request.user)            
        
        TimesheetFormSet = modelformset_factory(Timesheet, can_delete=True)    
        timesheet_form_set = TimesheetFormSet(request.POST)
        
        #Pull rulesets for weeksnapshot
        rulesets = RuleSet.objects.for_invoker_model(WeekSnapshot)
        
        timesheet_rulesets = rulesets.filter(content_type=ContentType.objects.get_for_model(Timesheet))
        week_rulesets = rulesets.filter(content_type=ContentType.objects.get_for_model(WeekSnapshot))
        #TODO: serve the errors through JSON errors
        if timesheet_form_set.is_valid():
            timesheets = timesheet_form_set.save()
            week_snapshot.timesheets.clear()
            #TODO: should batch all of the errors into one and send them back
            for timesheet in timesheets:
                if timesheet_rulesets:
                    validated_instance = GenericAspect.validate(timesheet_rulesets, timesheet)
                    if validated_instance.has_errors:
                        raise TypeError('ruleset errors encountered')
                week_snapshot.timesheets.add(timesheet)
            week_snapshot.save()
        else:
            raise TypeError('validation errors encountered')
        
        #check if we have validators
        if week_rulesets:
            validated_instance = GenericAspect.validate(week_rulesets, week_snapshot)
            if validated_instance.has_errors:
                raise TypeError('ruleset errors encountered')
        
        #add new status to the weeksnapshot
        post_status_update(week_snapshot, status)            
    
        #send signal since everything is done correctly
        workflow.signals.post_attach_queue_save_event.send(sender=WeekSnapshot, instance=week_snapshot, is_draft=is_draft)
            
        return 'home-r', {}
Example #56
	conn_db = psycopg2.connect(**config_db)
	conn_db.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
except:
	logit("Not able to connect:: {0!s} !!".format(config_db),sys.argv[1])
#	print "["+str(datetime.now())+"] : ["+sys.argv[1]+"] Not able to connect:: %s !!".format(config_db)
	sys.exit(1)

### Open db cursor
cur_db = conn_db.cursor(cursor_factory=psycopg2.extras.DictCursor)



for week in xrange(-weeks_to_deactivate,-1):
	relative_first_day_of_week=date.isoformat(datetime.datetime.today()+datetime.timedelta(weeks=week)-datetime.timedelta(days=date.weekday(datetime.datetime.today()+datetime.timedelta(weeks=week))))
	relative_next_first_day_of_week=date.isoformat(datetime.datetime.today()+datetime.timedelta(weeks=week)-datetime.timedelta(days=date.weekday(datetime.datetime.today()+datetime.timedelta(weeks=week))) + datetime.timedelta(days=+7))
	relative_week='{0:02d}'.format(date.isocalendar(datetime.datetime.today()+datetime.timedelta(weeks=week))[1])
	relative_year=date.isocalendar(datetime.datetime.today()+datetime.timedelta(weeks=week))[0]
	rulename='route_rule_bsms_in_p{0!s}w{1!s}'.format(relative_year,relative_week)
	if vc_debug : logit("Checking is the rule {1!s} for {0!s} week is ebabled..".format(week,rulename),sys.argv[1])
	sql_check_rule_to_deactivate=cur_db.mogrify(query_rule,{'week' : week,'enabled' : "D",'tablename': table_name_base,'rulename':rulename})
	if vc_sql_debug : logit("SQL: {0!s}".format(sql_check_rule_to_deactivate),sys.argv[1])
	cur_db=sql_execute(cur_db,sql_check_rule_to_deactivate,vc_sql_debug)
	rec_deactivate = cur_db.fetchone()
	
	if int(cur_db.rowcount) > 0:
		# the rule is currently active ('O')
		if rec_deactivate['active'] == 'O':
			logit("We have active rule {0!s} to deactivate".format(rec_deactivate['rulename']),sys.argv[1])
			#print "["+str(datetime.datetime.now())+"] : ["+sys.argv[1]+"] We have active rule {0!s} to deactivate".format(rec_deactivate['rulename'])
			sql_to_deactivate="alter table {0!s} disable rule {1!s};".format(table_name_base,rec_deactivate['rulename'])
			cur_db=sql_execute(cur_db,sql_to_deactivate,vc_sql_debug)
 def weekId(date, prefix=""):
     iso = date.isocalendar()
     return "%sweek-%d-%d" % (prefix, iso[0], iso[1])
Example #58
def get_week(file):
  date = __get_date(file)
  try:
    return str(date.isocalendar()[1]).split()[0]
  except:
    return str(date.iso_week[1]).split()[0]
Example #59
def build_backup_status(bck_path, bck_file):
  """ This is the main function of this script. Build the backup status and save it to the status yaml file.
  
  function build_backup_status(bck_path, bck_file)
  :param bck_path: The Maestro backup PATH, where backups are stored.
  :param bck_file: Yaml file which will store backup status information.
  :returns: Nothing
  :raises: Nothing
  """

  oLogging=logging.getLogger('backup-status')
  oLoggingOut=logging.getLogger('backup-status-output')

  run_datetime = date.today()
  run_date = date.isoformat(run_datetime)
  run_year,run_week,_ = date.isocalendar(run_datetime)

  bck_path=os.path.normpath(bck_path)

  
  ybck_status={'status': {
                          'week'       : '{0}-{1}'.format(run_year, run_week),
                          'date'       : run_date,
                          'filesystems': {},
                          'status'     : 0
                         },
               'backup': {},
               'services': {}}

  
  check_FS_stats(bck_path, ybck_status)

  if not os.path.isdir(bck_path):
     oLogging.error('"%s" was not a valid directory. aborted.', bck_path)
     sys.exit(2)

  oLogging.info('Using "%s" as {root_backup}', bck_path)

  reIgnored=re.compile('ignored', re.I)
  reSupportedName=re.compile('^\w+\.[\w.]+$', re.I)

  # Loop in subdirectories to build services backup status
  for root,dirnames,files in os.walk(bck_path):
     oLogging.debug("Walking in '%s'", root)
     rel_path=re.sub(bck_path+'/*','', root).split('/')
     bIgnored=False
     if rel_path[0] <> "" and reIgnored.search(rel_path[0]) <> None:
        # Ignore the root path, if 'ignored' is found in the root dir name.
        bIgnored=True
        oLogging.debug("'%s' contains 'ignored'. Ignored from analyze.", rel_path[0])
        for i in sorted(dirnames):
           dirnames.remove(i)

     elif rel_path[0] <> "" and reSupportedName.search(rel_path[0]) == None:
        # Ignore the root path, if the server part name does not contain at least 1 '.'
        # The server name (root dir of the path) should be built at least with *.*, like '<ServerName>.<Domain>'
        bIgnored=True
        oLogging.debug("'%s' is not like '<ServerName>.<Domain>'. Ignored from analyze.", rel_path[0])
        for i in sorted(dirnames):
           dirnames.remove(i)

     if not bIgnored and len(rel_path) == 3: # server/service/week level
        if 'bup_repo' not in dirnames or 'logs' not in dirnames:
           oLogging.warning('%s is not a valid backup directory. Ignored.', root)
           bIgnored=True

        # Cleaning all additional subdirectories to stop the walk recursive task.
        for i in sorted(dirnames):
           if bIgnored or i <> 'logs': # keep logs in walk process to build week history. (Next elif case)
              dirnames.remove(i)

        # Adding data
        Server=rel_path[0]
        Service=rel_path[1]
        Week=rel_path[2]
        if not bIgnored:

           # Check if the mount point is different, to rebuild FS statistics.
           mount_point=check_FS_stats(root, ybck_status)

           if not ybck_status['backup'].has_key(Server):
              ybck_status['backup'][Server]={}
           if not ybck_status['backup'][Server].has_key(Service):
              ybck_status['backup'][Server]={Service:{
                                                      'status': 0,
                                                      'message': 'No error/warning reported.',
                                                      'last': None,
                                                      'history': {}
                                                     }
                                            }
           ybck_status['backup'][Server][Service]['used']=path_size(root)
           ybck_status['backup'][Server][Service]['mount-point']=mount_point
           ybck_status['backup'][Server][Service]['path']=os.path.join(bck_path, Server, Service)

           if not ybck_status['services'].has_key(Service):
             ybck_status['services'][Service]=[Server]
           else:
             if Server not in ybck_status['services'][Service]:
                ybck_status['services'][Service].append(Server)
     elif not bIgnored and len(rel_path) == 4: # server/service/week/logs level - Build history and provide last status.
        Server=rel_path[0]
        Service=rel_path[1]
        Week=rel_path[2]
        
        yService=ybck_status['backup'][Server][Service]
        oLogging.debug('Week: %s - Looping on logs:', Week)
        for log_file in sorted(files):
            get_backupfile_status(yService, log_file, root, Week)

        if not yService.has_key('last') or yService['last'] == None:
           yService['status']=2
           yService['message']='"{0}/*" week(s) has no valid log to verify.'.format(os.path.join(bck_path, Server, Service))
        else:
           # Check status from latest backup date, compare to now.
           # If missing last backup = Warning
           # If missing 2 or more last backup = error
           # Possible fixes:
           # - Restore backup function - Fix backup run error.
           # - Move old service backup to /mnt/backup/disabled.

           # Get number of days of missing backup
           iDaysOld=(run_datetime-datetime.strptime(yService['last'],'%Y-%m-%d').date()).days
           if iDaysOld == 1:
              UpdateStatus(ybck_status['status'], 1,
                           "Warning! {0}: Missing previous day backup. Please review service status.".format(Service))
           elif iDaysOld >1:
              UpdateStatus(ybck_status['status'], 2,
                           "Error! {0}: Several backup days missing. Please review service status. If this service is obsolete, move '{0}' to '{1}'".format(Service, os.path.join(bck_path,'ignored')))

           # Report to the top status if errors/warnings are found in a service backup.
           if yService['status'] == 1: 
              UpdateStatus(ybck_status['status'], 1,("Warning! {0}: "+yService['message']).format(Service))
           if yService['status'] > 1: 
              UpdateStatus(ybck_status['status'], 2,("Error! {0}: "+yService['message']).format(Service))

  if not ybck_status['status'].has_key('message'):
     ybck_status['status']['message']='No error/warning reported.'

  if bck_file == '-':
     print yaml.dump(ybck_status)
  else:
     try:
        stream=open(os.path.join(bck_path, bck_file),'w')
     except IOError as oErr:
        oLogging.error('Unable to write \'%s\': %s. Fix and retry.', os.path.join(bck_path, bck_file), oErr.strerror)
     else:
        yaml.dump(ybck_status, stream)
        stream.close()
        oLogging.info('\'%s\' written.', os.path.join(bck_path, bck_file))
Example #60
trinidad = magellan.magellan()
cursor = trinidad.initdb()

if args.week:
    print "Retreiving analyzed weekly data for the past 10 weeks...\n"

    # get the previous iso week (to use as the end of the plot)
    today = date.today()
    week = (today.isocalendar())[1] - 1
    year = (today.isocalendar())[0]

    # here, retrieve all the data from the past 10 weeks
    if week <= 10:
        # get the number of weeks last year
        # Dec 28 is always in the last ISO week of its year (Dec 31 can already be week 1)
        lyweeks = date.isocalendar(date(year - 1, 12, 28))[1]
        command = (
            "SELECT year,week,homefrac,awayfrac,travelfrac from \
                  analysis_weekly where (YEAR=%i) OR (YEAR=%i AND WEEK > %i) \
                  ORDER BY year,week"
            % (year, year - 1, lyweeks - 10 + week)
        )
    else:
        # it's far enough into the year we can just grab the last 10
        command = (
            "SELECT year,week,homefrac,awayfrac,travelfrac from \
                  analysis_weekly where WEEK > %i AND YEAR=%i ORDER BY week"
            % (week - 10, year)
        )
    filen = "latest.png"
elif args.year: