def R7_parser(self):
    """Resolve the recurrence start date from self.gdate and self.kwargs.

    Optionally shifts by a month offset (kwargs["MONTHS"]) and then, when
    both "DAYINDEX" and "DAYNAME" are given, snaps the date to a specific
    weekday occurrence in that month.  DAYINDEX is offset by 2 in the
    original code (DAYINDEX - 2 == 0 selects Monday, ... == 6 Sunday) and
    DAYNAME is the occurrence count handed to the dateutil weekday
    constructor (e.g. 2 == second such weekday).
    """
    self.START_DATE = self.gdate
    if "MONTHS" in self.kwargs:
        self.START_DATE = delta(self.START_DATE, months=self.kwargs["MONTHS"])
    if "DAYINDEX" in self.kwargs and "DAYNAME" in self.kwargs:
        # Lookup table replaces the original seven copy-pasted if blocks;
        # index 0 == Monday, matching DAYINDEX - 2 in the original chain.
        weekday_classes = (MO, TU, WE, TH, FR, SA, SU)
        index = self.kwargs["DAYINDEX"] - 2
        if 0 <= index < len(weekday_classes):
            self.START_DATE = self.START_DATE + relativedelta(
                day=1, weekday=weekday_classes[index](self.kwargs["DAYNAME"]))
def set_next_relative_date(start_date, day_of_week, week_of_month, separation_count):
    """Return the next occurrence date for a monthly weekday rule.

    Parameters
    ----------
    start_date : date/datetime the offset is applied to.
    day_of_week : 1..7 == Monday..Sunday; any other value falls back to
        MO(1), matching the original default.
    week_of_month : occurrence within the month; -1 means "last", which is
        anchored at day=31 so dateutil searches backwards from month end.
    separation_count : number of months to advance before snapping to the
        requested weekday.
    """
    weekday_classes = {1: MO, 2: TU, 3: WE, 4: TH, 5: FR, 6: SA, 7: SU}
    if day_of_week in weekday_classes:
        arg = weekday_classes[day_of_week](week_of_month)
    else:
        # Original behaviour: unrecognised codes silently use MO(1).
        arg = MO(1)
    # "Last weekday of month" needs a month-end anchor; otherwise anchor at
    # the 1st and count forward.
    anchor_day = 31 if week_of_month == -1 else 1
    return start_date + relativedelta(
        day=anchor_day, months=+separation_count, weekday=arg)
def _get_start_of_week(self, span):
    """Return a relativedelta moving a date back to the start of *span*,
    honouring the current user's configured first day of the week."""
    # Look up the user's res.lang record to read its week_start setting.
    user_lang = self.env['res.lang'].search([('code', '=', self.env.user.lang)])
    # week_start is a string '1'..'7' (Monday..Sunday); map it to the
    # *previous* such weekday (the (-1) argument), i.e. the week boundary
    # on or before the anchor date.  Unknown values fall back to Monday.
    week_start_map = {'1': MO(-1), '2': TU(-1), '3': WE(-1), '4': TH(-1), '5': FR(-1), '6': SA(-1), '7': SU(-1)}
    week_start_delta = relativedelta(weekday=week_start_map.get(user_lang.week_start, MO(-1)))
    if span == 'week':
        return week_start_delta
    # For larger spans, first jump to the span start, then back to the week
    # boundary.  START_OF is presumably a module-level span -> relativedelta
    # mapping — TODO confirm against the rest of the module.
    return START_OF[span] + week_start_delta
def contract_rev_date(id, con_ed=0):
    """Compute the contract review date (the Wednesday reached by stepping
    back three Wednesdays from the contract end date).

    With a non-zero *id*, the end date is read from the `datestimes` table,
    the review date is written back to its `con_rev` column and committed.
    With id == 0, *con_ed* is parsed directly and the formatted review date
    ("%d-%m-%Y") is returned instead.
    """
    if id != 0:
        # Parameterised queries instead of %-string interpolation: the
        # original code was vulnerable to SQL injection via `id`/`dat`.
        c.execute("SELECT con_ed FROM datestimes WHERE id=%s;", (id,))
        data = parser.parse(c.fetchone()[0])
        print(data)
        two_weeks_before = data - relativedelta(weekday=WE(-3))
        dat = two_weeks_before.strftime("%d-%m-%Y")
        print("Review date is: ", two_weeks_before.strftime("%d-%m-%Y"))
        c2.execute("UPDATE datestimes SET con_rev=%s WHERE id=%s;", (dat, id))
        print("Contract review date has been updated")
        conn.commit()
    else:
        two_weeks_before = parser.parse(con_ed) - relativedelta(weekday=WE(-3))
        dat = two_weeks_before.strftime("%d-%m-%Y")
        return dat
def get_expiry(self):
    """Pick the current weekly and monthly NIFTY option expiry dates.

    Queries the exchange for known expiry dates, then prefers Thursday
    (falling back to Wednesday, e.g. when Thursday is a holiday) for both
    the weekly and the monthly (last-of-month) expiry.

    Returns a (weekly_exp, monthly_exp) tuple of '%d%b%Y' strings; either
    may be None if an error occurred (the exception is logged, not raised).
    """
    weekly_exp = None
    monthly_exp = None
    try:
        now = datetime.today()
        cmon = now.month
        xpry_resp = self.get_expiry_date(exchangeSegment=2, series='OPTIDX', symbol='NIFTY')
        if 'result' in xpry_resp:
            expiry_dates = xpry_resp['result']
        else:
            logger.error(f"UDF: Error getting Expiry dates. Reason:{xpry_resp['description']}")
            raise ex.XTSDataException('No response received: ')
        # Weekly expiry: next Thursday if listed, else next Wednesday.
        thu = (now + relativedelta(weekday=TH(1))).strftime('%d%b%Y')
        wed = (now + relativedelta(weekday=WE(1))).strftime('%d%b%Y')
        weekly_exp = thu if thu in expiry_dates else wed
        logger.info(f'UDF: {weekly_exp} is the week expiry')
        nxtmon = (now + relativedelta(weekday=TH(1))).month
        if (nxtmon != cmon):
            # Next Thursday is already in the next month: the monthly expiry
            # is that month's last Thursday.  Try the 5th Thursday first and
            # fall back to the 4th when the month has only four.
            month_last_thu_expiry = now + relativedelta(weekday=TH(5))
            mon_thu = (now + relativedelta(weekday=TH(5))).strftime('%d%b%Y')
            mon_wed = (now + relativedelta(weekday=WE(5))).strftime('%d%b%Y')
            if (month_last_thu_expiry.month!= nxtmon):
                mon_thu = (now + relativedelta(weekday=TH(4))).strftime('%d%b%Y')
                mon_wed = (now + relativedelta(weekday=WE(4))).strftime('%d%b%Y')
        else:
            # Still inside the current month: scan forward Thursday by
            # Thursday until we cross into next month, then step back.
            for i in range(1, 7):
                t = now + relativedelta(weekday=TH(i))
                if t.month != cmon:
                    # since t is exceeded we need last one which we can get by
                    # subtracting -2 since it is already a Thursday.
                    mon_thu = (t + relativedelta(weekday=TH(-2))).strftime('%d%b%Y')
                    mon_wed = (t + relativedelta(weekday=WE(-2))).strftime('%d%b%Y')
                    break
        monthly_exp = mon_thu if mon_thu in expiry_dates else mon_wed
        logger.info(f'UDF: {monthly_exp} is the month expiry')
    except Exception as e:
        logger.exception(f'Error in Expiry date function - {e}')
    return weekly_exp, monthly_exp
def execute(self):
    """Flag positions whose delivery month has arrived as delivered.

    On or after the third Wednesday of the current month, every row of the
    position frame whose 'delivery_month' equals the current YYYYMM gets
    its 'status' set to 'delivered'.  The position frame is returned.
    """
    roll_date = self.__current_date + relativedelta(day=1, weekday=WE(3))
    positions = self.__current_position_status
    if self.__current_date >= roll_date and not positions.empty:
        current_month = str(self.__current_date.strftime('%Y%m'))
        due_mask = positions['delivery_month'] == current_month
        positions.loc[due_mask, 'status'] = 'delivered'
    return positions
def FourWeekCreate():
    """Return the dates of the last four whole weeks, most recent first.

    Each week contributes its Monday..Sunday in order.  Monday is looked up
    one step further back (-2 - i vs -1 - i) because the job runs every
    Monday, so "last Monday" must skip today (see original comment).
    """
    today = date.today()
    weekday_classes = (MO, TU, WE, TH, FR, SA, SU)
    weekArr = []
    for i in range(4):
        for day_cls in weekday_classes:
            # Monday needs the extra step back; all other days use -1 - i.
            offset = -2 - i if day_cls is MO else -1 - i
            weekArr.append(today + relativedelta(weekday=day_cls(offset)))
    return weekArr
def replace_days(dictionary):
    """Deserialise a weekday expression like "MO(2)" stored under
    dictionary["weekday"] into the matching dateutil weekday object.

    The dictionary is mutated in place and also returned.  An unknown
    two-letter code is logged and the value left untouched, matching the
    original elif chain's behaviour.
    """
    weekday_classes = {"MO": MO, "TU": TU, "WE": WE, "TH": TH,
                       "FR": FR, "SA": SA, "SU": SU}
    weekday = dictionary.get("weekday", None)
    if weekday:
        week_expr = dictionary["weekday"]
        week_day = week_expr[:2]
        # Pull the integer out of the parentheses, e.g. "MO(2)" -> 2.
        week_int = int(week_expr.split("(")[1].split(")")[0])
        if week_day in weekday_classes:
            dictionary["weekday"] = weekday_classes[week_day](week_int)
        else:
            logger.error("Wrong day code")
    return dictionary
def parse(raw_date):
    """Parse a human-friendly date string into a (from, to) date range.

    Accepts 'today', 'yesterday', a lowercase weekday name (resolved to the
    most recent such day), an ISO week 'YYYY-wNN'/'YYYY-WNN' (returns that
    week's Monday and the following Monday), or a plain 'YYYY-MM-DD' value.
    All non-week inputs return (date, date + 1 day).
    """
    weekday = {
        'monday': MO(-1),
        'tuesday': TU(-1),
        'wednesday': WE(-1),
        'thursday': TH(-1),
        'friday': FR(-1),
        'saturday': SA(-1),
        'sunday': SU(-1),
    }
    if raw_date == 'today':
        date = datetime.now().date()
    elif raw_date == 'yesterday':
        date = datetime.now().date() - relativedelta(days=1)
    elif raw_date in weekday:
        date = datetime.now().date() + relativedelta(weekday=weekday[raw_date])
    elif re.match(r'\d{4}-[wW]\d{2}', raw_date) is not None:
        # Bug fix: the original regex admitted only lowercase 'w' while the
        # strptime format below requires a literal 'W', so this branch could
        # never parse successfully.  Accept both cases and normalise.
        fromDate = datetime.strptime(raw_date.upper() + '-1', "%Y-W%W-%w").date()
        toDate = fromDate + relativedelta(days=7)
        return fromDate, toDate
    else:
        date = datetime.strptime(raw_date, '%Y-%m-%d')
    toDate = date + relativedelta(days=1)
    return date, toDate
def _populate(self, year):
    """Populate German public holidays for *year*.

    Nationwide holidays are always added; province-specific ones are keyed
    on the province codes in self.prov (e.g. "BW", "BY", "SN").  Dates are
    stored by assigning into self (a date -> name mapping).
    """
    # German public holidays in their modern form start after reunification.
    if year <= 1989:
        return
    if year > 1990:
        self[date(year, JAN, 1)] = "Neujahr"
    if self.prov in ("BW", "BY", "BYP", "ST"):
        self[date(year, JAN, 6)] = "Heilige Drei Könige"
    # Easter-relative feasts: Good Friday (-2), Easter Monday (+1).
    self[easter(year) - rd(days=2)] = "Karfreitag"
    if self.prov == "BB":
        # will always be a Sunday and we have no "observed" rule so
        # this is pretty pointless but it's nonetheless an official
        # holiday by law
        self[easter(year)] = "Ostersonntag"
    self[easter(year) + rd(days=1)] = "Ostermontag"
    self[date(year, MAY, 1)] = "Erster Mai"
    if self.prov == "BE" and year == 2020:
        # One-off 2020 holiday in Berlin only.
        self[date(year, MAY, 8)] = (
            "75. Jahrestag der Befreiung vom Nationalsozialismus "
            "und der Beendigung des Zweiten Weltkriegs in Europa")
    # Ascension (+39), Whit Sunday/Monday (+49/+50), Corpus Christi (+60).
    self[easter(year) + rd(days=39)] = "Christi Himmelfahrt"
    if self.prov == "BB":
        # will always be a Sunday and we have no "observed" rule so
        # this is pretty pointless but it's nonetheless an official
        # holiday by law
        self[easter(year) + rd(days=49)] = "Pfingstsonntag"
    self[easter(year) + rd(days=50)] = "Pfingstmontag"
    if self.prov in ("BW", "BY", "BYP", "HE", "NW", "RP", "SL"):
        self[easter(year) + rd(days=60)] = "Fronleichnam"
    if self.prov in ("BY", "SL"):
        self[date(year, AUG, 15)] = "Mariä Himmelfahrt"
    self[date(year, OCT, 3)] = "Tag der Deutschen Einheit"
    if self.prov in ("BB", "MV", "SN", "ST", "TH"):
        self[date(year, OCT, 31)] = "Reformationstag"
    if self.prov in ("HB", "SH", "NI", "HH") and year >= 2018:
        self[date(year, OCT, 31)] = "Reformationstag"
    # in 2017 all states got the Reformationstag (500th anniversary of
    # Luther's thesis)
    if year == 2017:
        self[date(year, OCT, 31)] = "Reformationstag"
    if self.prov in ("BW", "BY", "BYP", "NW", "RP", "SL"):
        self[date(year, NOV, 1)] = "Allerheiligen"
    if year <= 1994 or self.prov == "SN":
        # can be calculated as "last wednesday before year-11-23" which is
        # why we need to go back two wednesdays if year-11-23 happens to be
        # a wednesday
        base_data = date(year, NOV, 23)
        weekday_delta = WE(-2) if base_data.weekday() == 2 else WE(-1)
        self[base_data + rd(weekday=weekday_delta)] = "Buß- und Bettag"
    if year >= 2019:
        if self.prov == "TH":
            self[date(year, SEP, 20)] = "Weltkindertag"
        if self.prov == "BE":
            self[date(year, MAR, 8)] = "Internationaler Frauentag"
    self[date(year, DEC, 25)] = "Erster Weihnachtstag"
    self[date(year, DEC, 26)] = "Zweiter Weihnachtstag"
async def status_report(message: types.Message, chat: Chat):
    """Build and send a work-status report document for the chat's project.

    /wsr produces a weekly report (last Wednesday 00:00 through the day
    before the next Wednesday); any other command produces a calendar-month
    report.  Todo items since the report start are rendered as emoji text
    and a week grid; the result is sent back as a document attachment.
    """
    if message.get_command() == '/wsr':
        # Weekly window: previous Wednesday at midnight up to (but not
        # including) the following Wednesday.
        start_dt = message.date + relativedelta(
            weekday=WE(-1), hour=0, minute=0, second=0, microsecond=0)
        end_dt = message.date + relativedelta(weekday=WE, hour=0, minute=0,
                                              second=0, microsecond=0) \
            - relativedelta(days=1)
    else:
        # Monthly window: first of this month through the last day of it.
        start_dt = message.date + relativedelta(
            day=1, hour=0, minute=0, second=0, microsecond=0)
        end_dt = message.date + relativedelta(months=1, day=1, hour=0,
                                              minute=0, second=0,
                                              microsecond=0, days=-1)
    grid = generate_grid(start_dt, end_dt)
    # Make grid cells mutable [marker, datetime] pairs so markers can be
    # recoloured below.
    grid = [[[i[0], i[1]] for i in week] for week in grid]
    async with OrmSession() as session:
        select_stmt = select(Project) \
            .where(
                Project.chat_id == chat.id
            ) \
            .order_by(Project.id)
        projects_result = await session.execute(select_stmt)
        project = projects_result.scalars().first()
        from_period = PeriodBucket.new(project.period_bucket_mode, start_dt)
        todo_lists = await project.get_since(session, from_period, with_log_messages=True)
        message_content = []
        for todo_list in todo_lists:
            bucket = PeriodBucket.get_by_key(todo_list.period_bucket_key)
            for todo_item in todo_list.items:
                message_content.append(
                    text(':spiral_calendar_pad:', str(bucket), ':pushpin:',
                         todo_item.text))
                for log_message in todo_item.notes:
                    message_content.append(
                        text(':paperclip:', log_message.text))
                message_content.append(text(''))
            if bucket.start():
                # Highlight the grid day that begins this bucket.
                for week in grid:
                    for i in week:
                        if i[1].date() == bucket.start().date():
                            i[0] = i[0].replace('white', 'purple')
        import io
        file = io.StringIO(emojize(text(*message_content, sep='\n')))
        # Mark "today" in the grid with a square instead of a circle.
        for week in grid:
            for i in week:
                if i[1].date() == datetime.now().date():
                    if 'white' in i[0] or 'black' in i[0]:
                        i[0] = i[0].replace('circle', 'large_square')
                    else:
                        i[0] = i[0].replace('circle', 'square')
        grid = [[i[0] for i in week] for week in grid]
        await message.answer_document(
            file,
            caption=emojize(
                text(text(
                    f'Отчет о проделанной работе с {start_dt.date()} по {end_dt.date()}'
                ), text(''), text('Пн Вт Ср Чт Пт Сб Вс'),
                    *[text(*week, sep='') for week in grid], sep='\n')))
import vk_api import datetime import random from datetime import date import time from dateutil.relativedelta import relativedelta, MO, WE, TH, TU, SU, FR, SA import requests import xml.etree.ElementTree as et from vk_api.longpoll import VkLongPoll, VkEventType now = datetime.datetime.now() today = date.today() monday = (today + relativedelta(weekday=MO(-1))).day tuesday = (today + relativedelta(weekday=TU(-1))).day wednesday = (today + relativedelta(weekday=WE(-1))).day thursday = (today + relativedelta(weekday=TH(-1))).day friday = (today + relativedelta(weekday=FR(-1))).day saturday = (today + relativedelta(weekday=SA(-1))).day mon = now.month moth = "" user_login = {} user_password = {} if mon == 1: moth = "Январь" if mon == 2: moth = "Феварль" if mon == 3: moth = "Март" if mon == 4: moth = "Апрель" if mon == 5:
def handle(self) -> date:
    """Return self.result shifted by the WE(self.number) weekday rule
    (dateutil Wednesday-occurrence offset)."""
    wednesday_offset = relativedelta(weekday=WE(self.number))
    return self.result + wednesday_offset
"""Print the most recent Wednesday (today included) as YYYY-MM-DD."""
from datetime import date
from dateutil.relativedelta import relativedelta, WE

# Output format for the printed date.  The original named this `format`,
# shadowing the builtin.
DATE_FORMAT = "%Y-%m-%d"

today = date.today()
# WE(-1) resolves to the most recent Wednesday (today, if today is one).
# The original misleadingly called this variable "sunday" and then rebound
# the imported name `date` to the formatted string.
last_wednesday = today + relativedelta(weekday=WE(-1))
print(last_wednesday.strftime(DATE_FORMAT))
def create_next_event(self):
    """Cron-style entry point: materialise concrete events from recurring
    event templates.

    For every active recurring template, the enabled weekdays (mo..su) are
    expanded `depth` weeks ahead of today; each resulting slot is copied
    from the template unless it already exists or the template's end rule
    (end date or occurrence count) has been reached, in which case the
    template is marked done.
    """
    current = fields.Date.today()
    current_date = datetime.strptime(current, '%Y-%m-%d')
    current_day = current_date.weekday()
    dpth = 1
    event_ids = self.env['event.event'].search([('is_template', '=', True), ('recurrency', '=', True), ('state', '!=', 'done')])
    for event in event_ids:
        if event.end_type == 'count':
            # Count one more generated occurrence for count-limited rules.
            event.counter += 1
        week_list = []
        # Collect the next occurrence of every enabled weekday, `dpth`
        # weeks out (TU(dpth) == the dpth-th next Tuesday, etc.).
        while dpth <= event.depth:
            if event.mo:
                event_date = current_date + relativedelta(weekday=MO(dpth))
                week_list.append(event_date)
            if event.tu:
                event_date = current_date + relativedelta(weekday=TU(dpth))
                week_list.append(event_date)
            if event.we:
                event_date = current_date + relativedelta(weekday=WE(dpth))
                week_list.append(event_date)
            if event.th:
                event_date = current_date + relativedelta(weekday=TH(dpth))
                week_list.append(event_date)
            if event.fr:
                event_date = current_date + relativedelta(weekday=FR(dpth))
                week_list.append(event_date)
            if event.sa:
                event_date = current_date + relativedelta(weekday=SA(dpth))
                week_list.append(event_date)
            if event.su:
                event_date = current_date + relativedelta(weekday=SU(dpth))
                week_list.append(event_date)
            # Times-of-day are taken from the template's begin/end stamps.
            event_tmpl_start_date = event.date_begin
            event_tmpl_start_time = datetime.strptime(
                event_tmpl_start_date, "%Y-%m-%d %H:%M:%S").time()
            event_tmpl_end_date = event.date_end
            event_tmpl_end_time = datetime.strptime(
                event_tmpl_end_date, "%Y-%m-%d %H:%M:%S").time()
            dpth += 1
        for event_day in week_list:
            event_start_date = datetime.combine(
                event_day, event_tmpl_start_time)
            event_end_date = datetime.combine(event_day, event_tmpl_end_time)
            date = event_start_date.strftime('%Y-%m-%d %H:%M:%S')
            final_date = event.final_date
            day = event_day.strftime('%Y-%m-%d')
            # Skip slots that were already materialised for this template.
            event_find = self.env['event.event'].search([
                ('template_id', '=', event.id),
                ('date_begin', '=', date)
            ])
            if event.end_type == 'end_date' and day > event.final_date:
                event.state = 'done'
                return True
            elif event.end_type == 'count' and event.counter > event.count:
                event.state = 'done'
                return True
            elif not event_find:
                new_event_id = event.copy(
                    default={
                        'is_template': False,
                        'date_begin': event_start_date,
                        'date_end': event_end_date,
                        'recurrency': False,
                        'template_id': event.id,
                    })
import pymongo import matplotlib.pyplot as plt import os from collections import namedtuple, defaultdict, deque from datetime import date from dateutil.relativedelta import relativedelta, MO, TU, WE, TH, FR, SA, SU from graphviz import Graph, Digraph from itertools import groupby, product from operator import itemgetter from utils import _sanitize_username, _sanitize_question # This is needed when obtaining date for last Monday, Tuesday, etc. WEEKDAYS = { 'Mon': MO(-1), 'Tue': TU(-1), 'Wed': WE(-1), 'Thu': TH(-1), 'Fri': FR(-1), 'Sat': SA(-1), 'Sun': SU(-1) } # Ordering months. MONTHS = { 'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7,
def main():
    """Compute sales volume and ROAS per product margin for the past week
    and append the result to BigQuery (consumed by a Data Studio chart)."""
    #MAIN IDEA:
    #Identify the sales volume and ROAS per margin and display as a bubble chart in data studio
    c = CurrencyConverter()
    print('------------Product Margins------------')
    #analytics dates to pull individually
    #NEED TO REWORK THIS TO HAVE LESS API REQUESTS
    today = date.today()
    mon = today + relativedelta(
        weekday=MO(-2)
    )  #last MON: -2 because it will run every monday so we need to look 2 mondays back
    tue = today + relativedelta(weekday=TU(-1))  #last TUE
    wed = today + relativedelta(weekday=WE(-1))  #last WED
    thu = today + relativedelta(weekday=TH(-1))  #last THU
    fri = today + relativedelta(weekday=FR(-1))  #last FRI
    sat = today + relativedelta(weekday=SA(-1))  #last SAT
    sun = today + relativedelta(weekday=SU(-1))  #last SUN
    weekArr = [mon, tue, wed, thu, fri, sat, sun]
    analyticsCSV = ""
    #pull for each day of the week - avoids data sampling this way but takes a bit longer
    for day in weekArr:
        analyticsCSV += analytics_pull.main(str(day))
        print("Pulled date: " + str(day))
    dfAnalytics = csv_parser.parseToCSV(analyticsCSV, "ANL")
    # Normalise/validate the analytics date column; rows with unparseable
    # dates are marked -1 and dropped.
    dfAnalytics['date'] = dfAnalytics.apply(
        lambda row: match_maker.CheckDateFormatAnalytics(row), axis=1)
    dfAnalytics = dfAnalytics[dfAnalytics.date != -1]
    dfAnalytics['date'] = dfAnalytics.apply(lambda row: formatDate(row), axis=1)
    print('-----Analytics Dataframe-----')
    #adwords
    adwordsCSV = adwords_pull.get_raw_report()
    dfAdwords = csv_parser.parseToCSV(adwordsCSV, "ADW")
    #REMOVE THE SPACE AFTER THE GID - WILL VARY DEPENDING ON CLIENTS
    GIDnew = []
    for dfAdwordsIndex, dfAdwordsRow in dfAdwords.iterrows():
        newGID = dfAdwordsRow['GID'].partition(" ")[0]
        GIDnew.append(newGID)
    dfAdwords = dfAdwords.drop(columns="GID")
    dfAdwords.loc[:, "GID"] = GIDnew
    print('-----Adwords Dataframe-----')
    # Aggregate ad cost per day and group id.
    sql = '''
    SELECT dfAdwords.Date, SUM(dfAdwords.Cost) AS Cost, dfAdwords.GID
    FROM dfAdwords
    GROUP BY dfAdwords.Date, dfAdwords.GID
    '''
    dfAdwords = pandasql.sqldf(sql, locals())
    print('-----Grouped Adwords Dataframe-----')
    #XML
    dfXML = process_xml.main(margins=True)
    dfXML['GID'] = dfXML['GID'].str.lower()
    dfAnalytics['productSku'] = dfAnalytics['productSku'].str.lower()
    print('-----Default XML Dataframe-----')
    sql = '''
    SELECT dfXML.Margin, dfXML.GID, dfXML.Brand
    FROM dfXML
    GROUP BY dfXML.Margin, dfXML.GID, dfXML.Brand
    '''
    dfXML = pandasql.sqldf(sql, locals())
    print('-----Grouped XML Dataframe-----')
    # Join margins onto analytics rows via SKU == GID.
    sql = '''
    SELECT dfXML.Margin, dfXML.Brand, dfAnalytics.date, dfAnalytics.itemQuantity,
           dfAnalytics.productSku, dfAnalytics.productName, dfAnalytics.itemRevenue
    FROM dfAnalytics
    INNER JOIN dfXML ON dfXML.GID = dfAnalytics.productSku
    '''
    dfXML = pandasql.sqldf(sql, locals())
    print('-----Joined XML Dataframe-----')
    print(dfXML)
    # Join ad cost per day/SKU onto the margin+analytics table.
    sql = '''
    SELECT dfXML.Margin, dfXML.Brand, dfXML.date, dfXML.itemQuantity,
           dfXML.productSku, dfXML.productName, dfXML.itemRevenue, dfAdwords.Cost
    FROM dfXML
    INNER JOIN dfAdwords ON dfXML.Date = dfAdwords.Date AND dfXML.productSku = dfAdwords.GID
    '''
    dfXML = pandasql.sqldf(sql, locals())
    print('-----Joined XML Dataframe-----')
    print(dfXML)
    # Convert micro-amount ad cost to the target currency and derive ROAS
    # (revenue / cost); cost <= 0 yields ROAS 0 to avoid division by zero.
    newCost = []
    newROAS = []
    for dfXMLIndex, dfXMLRow in dfXML.iterrows():
        newVal = c.convert(dfXMLRow['Cost'] / 1000000, 'USD', 'YOUR_CURRENCY')
        newCost.append(newVal)
        if float(newVal) <= 0:
            newROAS.append(0)
        else:
            newROAS.append(float(dfXMLRow['itemRevenue']) / float(newVal))
    dfXML.loc[:, "newCost"] = newCost
    dfXML.loc[:, "newROAS"] = newROAS
    print('-----Final XML Dataframe-----')
    sql = '''
    SELECT date, Margin,
           (SUM(CAST(itemRevenue as float64)) / sum(CAST(newCost as float64))) as ROAS,
           SUM(CAST(itemQuantity as int64)) as salesVolume
    FROM dfXML
    GROUP BY Margin, date
    '''
    dfXML = pandasql.sqldf(sql, locals())
    dfXML.to_gbq('DATASET_NAME.TABLE_NAME',
                 project_id='YOUR_PROJECT_ID',
                 chunksize=None,
                 if_exists='append',
                 table_schema=None,
                 location='LOCATION',
                 progress_bar=True,
                 credentials=google_auth.getCreds())
def main(filePath):
    """Link popular-products CSV data (at *filePath*) with the product XML
    feed, AdWords and Analytics reports for the past week, then append the
    'not sold', 'do sell' and raw popular-products tables to BigQuery."""
    c = CurrencyConverter()
    today = date.today()
    mon = today + relativedelta(weekday=MO(-2))  #last MON
    tue = today + relativedelta(weekday=TU(-1))  #last TUE
    wed = today + relativedelta(weekday=WE(-1))  #last WED
    thu = today + relativedelta(weekday=TH(-1))  #last THU
    fri = today + relativedelta(weekday=FR(-1))  #last FRI
    sat = today + relativedelta(weekday=SA(-1))  #last SAT
    sun = today + relativedelta(weekday=SU(-1))  #last SUN
    weekArr = [mon,tue,wed,thu,fri,sat,sun]
    print("Executing for dates: ")
    for day in weekArr:
        print(day)
        print(day.strftime("%m"))
    countryArr = {"US": "ACCOUNT NAME"}
    print('loading XML dataframe...')
    dfXML = process_xml.main()
    print('loading adwords csv...')
    adwordsCSV = adwords_pull.get_raw_report()
    print('parsing adwords into dataframe...')
    dfCSV = csv_parser.parseToCSV(adwordsCSV,"ADW")
    print('parsing analytics into dataframe...')
    analyticsCSV = ""
    #pull for each day of the week - avoids data sampling this way but takes a bit longer
    #THIS NEEDS TO BE REFACTORED IN THE FUTURE
    for day in weekArr:
        analyticsCSV += analytics_pull.main(str(day))
        print("Pulled date: " + str(day))
    dfAnalytics = csv_parser.parseToCSV(analyticsCSV,"ANL")
    dfAnalytics['date'] = dfAnalytics.apply(lambda row: CheckDateFormatAnalytics(row), axis=1)
    dfAnalytics = dfAnalytics[dfAnalytics.date != -1]
    #add month and Year to analytics
    dfAnalytics['monthNum'] = dfAnalytics.apply(lambda row: ApplyMonth(row,"ANL"), axis=1)
    dfAnalytics['yearNum'] = dfAnalytics.apply(lambda row: ApplyYear(row,"ANL"), axis=1)
    print("Done Processing APIs...")
    print("Processing Data...")
    # Lowercase all join keys so later SQL joins are case-insensitive.
    dfXML['ID'] = dfXML['ID'].str.lower()
    dfCSV['GID'] = dfCSV['GID'].str.lower()
    dfXML['GID'] = dfXML['GID'].str.lower()
    dfCSV['OfferID'] = dfCSV['OfferID'].str.lower()
    #FILTER US ONLY
    sql = f'''
    SELECT *
    FROM dfCSV
    WHERE dfCSV.Account = "{countryArr["US"]}"
    '''
    dfCSV = pandasql.sqldf(sql, locals())
    dfCSV['monthNum'] = dfCSV.apply(lambda row: ApplyMonth(row), axis=1)
    dfCSV['yearNum'] = dfCSV.apply(lambda row: ApplyYear(row), axis=1)
    #REMOVE THE SPACE AFTER THE GID
    GIDnew = []
    for dfCSVIndex, dfCSVRow in dfCSV.iterrows():
        newGID = dfCSVRow['GID'].partition(" ")[0]
        GIDnew.append(newGID)
    print(len(GIDnew))
    print(dfCSV.shape)
    dfCSV=dfCSV.drop(columns="GID")
    dfCSV.loc[:,"GID"] = GIDnew
    print("processing csv to xml matches for linking table")
    #take the XML feed and the adwords report and EXACT match the OfferIDs together
    #ATTACH GTINS TO PRODUCTS
    # GOAL IS TO MATCH GTIN ON EVERY GROUP ID FROM ADWORDS
    # BEST WAY TO DO IT IS GROUP ADWORDS DF BY GID TO MAKE THE FILE SMALLER
    sqlGroupAdwords = '''
    SELECT dfCSV.GID, SUM(dfCSV.Impressions) AS Impressions, SUM(dfCSV.Clicks) AS Clicks,
           SUM(dfCSV.Cost) AS Cost, SUM(dfCSV.ConversionValue) AS ConversionValue,
           dfCSV.monthNum as monthNum, dfCSV.yearNum as yearNum, dfCSV.Account as Account
    FROM dfCSV
    GROUP BY dfCSV.GID, dfCSV.Account, dfCSV.monthNum, dfCSV.yearNum
    '''
    # DROP UNUSED DF
    dfCSV.iloc[0:0]
    # WRITE NEW DF
    adwordsGrpDF = pandasql.sqldf(sqlGroupAdwords, locals())
    sql = '''
    SELECT dfXML.GTIN as GTIN, dfXML.ID as ID, dfXML.GID as GID, dfXML.IMG as IMG,
           dfXML.BRAND as BRAND, adwordsGrpDF.monthNum as monthNum,
           adwordsGrpDF.yearNum as yearNum, adwordsGrpDF.Impressions as Impressions,
           adwordsGrpDF.Clicks as Clicks, adwordsGrpDF.ConversionValue as ConversionValue,
           adwordsGrpDF.Cost as Cost
    FROM dfXML, adwordsGrpDF
    WHERE dfXML.GID = adwordsGrpDF.GID
    '''
    # DROP UNUSED DF
    adwordsGrpDF.iloc[0:0]
    dfXML.iloc[0:0]
    # WRITE NEW DF
    linkingTable = pandasql.sqldf(sql, locals())
    print("-------XML AND ADWORDS MATCHED ON OfferID VIA SQL-------")
    print("-------LINKS GTIN TO THE OFFER ID === THEREFORE GTIN TO THE Adwords Metrics-------")
    #At this point the linkingTable DF holds the adwords stats and the gtin of the products
    #popular products CSV from import into df
    #imported CSV in dataframe form
    print('parsing imported CSV into dataframe...')
    print('----------------impoted CSV dataframe----------------')
    dfICSV = process_csv.main(filePath)
    #match popular products with linkingTable
    dfICSV.rename(columns={'PopularityRank' : 'Pop'},inplace=True)
    #order by popularity
    dfICSV.sort_values(by=['Pop'])
    #TAKE THE FIRST 2000 RECORDS FROM THE POPULAR PRODUCTS CSV INTO A NEW DF - This can be any number between 0 and about 10k
    dfPopM = dfICSV[:2000]
    print("-----GET PRODUCTS THEY DONT SELL-----")
    sqlNotSold = '''
    SELECT Pop, ChangeFromLastWeek, Title, GTIN, Brand, PriceRangeStart, PriceRangeEnd, WeekNumber
    FROM dfPopM
    WHERE ProductInventoryStatus = "Not in inventory"
    ORDER BY Pop
    '''
    dfNotSold = pandasql.sqldf(sqlNotSold, locals())
    #match popular products with linkingTable (adwords stats)
    count = 0
    print("-----Processing matches-----")
    # GTIN prefixes are truncated differently on each side (-1 vs -3) before
    # comparing — presumably to strip check digits/suffixes; verify against
    # the feed formats.
    sql = '''
    SELECT linkingTable.GID, linkingTable.Impressions, linkingTable.Cost, linkingTable.Clicks,
           linkingTable.ConversionValue, linkingTable.IMG, linkingTable.BRAND,
           linkingTable.monthNum, linkingTable.yearNum, dfPopM.Pop
    FROM linkingTable, dfPopM
    WHERE SUBSTR(linkingTable.GTIN,0,LENGTH(linkingTable.GTIN) - 1) = SUBSTR(dfPopM.GTIN,0,LENGTH(dfPopM.GTIN) - 3)
    '''
    dfPopM.iloc[0:0]
    dfPopXmlMatch = pandasql.sqldf(sql, locals())
    sqlGroupMatchTable = '''
    SELECT dfPopXmlMatch.Pop, dfPopXmlMatch.GID
    FROM dfPopXmlMatch
    GROUP BY dfPopXmlMatch.Pop,dfPopXmlMatch.GID
    '''
    dfPopXmlMatch = pandasql.sqldf(sqlGroupMatchTable, locals())
    sqlMatchAll = '''
    SELECT linkingTable.GID, linkingTable.Impressions, linkingTable.Cost, linkingTable.Clicks,
           linkingTable.ConversionValue, linkingTable.IMG, linkingTable.BRAND,
           linkingTable.monthNum, linkingTable.yearNum, dfPopXmlMatch.Pop
    FROM linkingTable, dfPopXmlMatch
    WHERE dfPopXmlMatch.GID = linkingTable.GID
    GROUP BY linkingTable.GID, linkingTable.Impressions, linkingTable.Cost, linkingTable.Clicks,
             linkingTable.ConversionValue, linkingTable.IMG, linkingTable.monthNum,
             linkingTable.yearNum, dfPopXmlMatch.Pop
    '''
    linkingTable.iloc[0:0]
    dfPopXmlMatch = pandasql.sqldf(sqlMatchAll, locals())
    #group the results by date and IDNew while adding the metrics together
    sqlGroup = '''
    SELECT dfPopXmlMatch.GID as GID, SUM(dfPopXmlMatch.Impressions) as Impressions,
           SUM(dfPopXmlMatch.Cost) as Cost, SUM(dfPopXmlMatch.Clicks) as Clicks,
           SUM(dfPopXmlMatch.ConversionValue) as ConversionValue, dfPopXmlMatch.IMG as IMG,
           dfPopXmlMatch.BRAND as BRAND, dfPopXmlMatch.monthNum as monthNum,
           dfPopXmlMatch.yearNum as yearNum, dfPopXmlMatch.Pop
    FROM dfPopXmlMatch
    GROUP BY dfPopXmlMatch.GID, dfPopXmlMatch.IMG, dfPopXmlMatch.Pop, dfPopXmlMatch.monthNum , dfPopXmlMatch.yearNum
    '''
    dfPopXmlMatch = pandasql.sqldf(sqlGroup, locals())
    print('---------GROUPED THE ABOVE---------')
    sqlGroupAnalytics = '''
    SELECT dfAnalytics.productName as productName, dfAnalytics.productSku as productSku,
           dfAnalytics.country as country, dfAnalytics.monthNum as monthNum,
           dfAnalytics.yearNum as yearNum, SUM(dfAnalytics.itemRevenue) as itemRevenue,
           AVG(dfAnalytics.buyToDetailRate) as buyToDetailRate
    FROM dfAnalytics
    WHERE dfAnalytics.country = "United States"
    GROUP BY dfAnalytics.country, dfAnalytics.productSku, dfAnalytics.productName, dfAnalytics.monthNum , dfAnalytics.yearNum
    '''
    dfAnalytics = pandasql.sqldf(sqlGroupAnalytics, locals())
    # Join analytics revenue onto the matched adwords/popularity table by
    # SKU and month/year.
    sql = '''
    SELECT dfPopXmlMatch.GID as ID, dfPopXmlMatch.Impressions as Impressions,
           dfPopXmlMatch.Cost as Cost, dfPopXmlMatch.Clicks as Clicks,
           dfPopXmlMatch.ConversionValue as ConversionValue, dfPopXmlMatch.IMG as IMG,
           dfPopXmlMatch.BRAND as BRAND, dfPopXmlMatch.Pop as Pop,
           dfAnalytics.itemRevenue as itemRev, dfAnalytics.buyToDetailRate as buyToDetailRate,
           dfAnalytics.productName as productName, dfAnalytics.country as country,
           dfAnalytics.monthNum as monthNum, dfAnalytics.yearNum as yearNum
    FROM dfPopXmlMatch, dfAnalytics
    WHERE LOWER(dfPopXmlMatch.GID) = LOWER(dfAnalytics.productSku) AND dfAnalytics.monthNum = dfPopXmlMatch.monthNum AND dfAnalytics.yearNum = dfPopXmlMatch.yearNum
    '''
    dfPopXmlMatch.iloc[0:0]
    dfFinal = pandasql.sqldf(sql, locals())
    ######### currency converting start #########
    #COMMENT OUT THE BELOW IF YOU DONT NEED TO CONVERT CURRENCY - MORE EFFICIENT WAY IN product_sizes.py - make into method in future updates
    newCost = []
    newROAS = []
    for dfFinalIndex, dfFinalRow in dfFinal.iterrows():
        # Cost arrives in micro-units; convert and derive ROAS, guarding
        # against division by zero for non-positive cost.
        newVal = c.convert(dfFinalRow['Cost'] / 1000000, 'USD', 'YOUR_CURRENCY')
        newCost.append(newVal)
        if float(newVal) <= 0:
            newROAS.append(0)
        else:
            newROAS.append(float(dfFinalRow['itemRev']) / float(newVal))
    dfFinal.loc[:,"newCost"] = newCost
    dfFinal.loc[:,"newROAS"] = newROAS
    ######### currency converting end #########
    #add a new column to df and fill it with the above week number
    weekNo = int(mon.strftime("%W")) + 1
    dfFinal["WeekNumber"] = str(weekNo)  #if not string, data studio gives an error
    print("Processing Complete...")
    #Products they dont sell dfNotSold
    dfNotSold.to_gbq('DATASET_NAME.TABLE_NAME',
                     project_id='YOUR_PROJECT_ID',
                     chunksize=None,
                     if_exists='append',
                     table_schema=None,
                     location='LOCATION',
                     progress_bar=True,
                     credentials=google_auth.getCreds())
    #linked table with what they have sold dfFinal
    dfFinal.to_gbq('DATASET_NAME.TABLE_NAME',  #TABLE_NAME here is refered to as CLIENT_DO_SELL in the SQL files
                   project_id='YOUR_PROJECT_ID',
                   chunksize=None,
                   if_exists='append',
                   table_schema=None,
                   location='LOCATION',
                   progress_bar=True,
                   credentials=google_auth.getCreds())
    #the csv from google mc
    dfICSV = dfICSV[:2000]  #clip it if needed here
    dfICSV.to_gbq('DATASET_NAME.TABLE_NAME',
                  project_id='YOUR_PROJECT_ID',
                  chunksize=None,
                  if_exists='append',
                  table_schema=None,
                  location='LOCATION',
                  progress_bar=True,
                  credentials=google_auth.getCreds())
    dfFinal.iloc[0:0]
    dfICSV.iloc[0:0]
# Next Monday relative to today (MO(+1) == first Monday strictly after, or
# today if today is a Monday — confirm against dateutil semantics).
start_date_after = datetime.date.today() + relativedelta(weekday=MO(+1))
print("Next Monday: ", start_date_after.strftime("%d-%m-%Y"))
# contract start date (hard-coded; original interactive prompt kept below)
start_date = "2017-01-31"  # input("Start date: ")
# Round-trip through a formatted string to normalise, then back to datetime.
start_date = dateutil.parser.parse(start_date).strftime("%d-%m-%Y")
start_date = datetime.datetime.strptime(start_date, '%d-%m-%Y')
# contract end date: start + length, then snapped to a Friday.
contract_lenght = 365  # int(input("How long in days?: "))
contract_calculation = start_date + datetime.timedelta(days=contract_lenght) + relativedelta(weekday=FR)
contract_calculation.strftime("%d-%m-%Y")
print("end date on friday", contract_calculation)
# contract review date (two weeks before, on a Wednesday — WE(-3) steps back
# three Wednesday boundaries from the end date)
contract_review_date = contract_calculation - relativedelta(weekday=WE(-3))
print("Review date is: ", contract_review_date)
#### END CODE ####


def raise_frame(frame):
    """Bring the given Tk frame to the top of the stacking order."""
    frame.tkraise()


# Main window setup.
img_path = 'logo.jpg'
root = Tk()
root.title("WSW Centre")
root.geometry("900x600+100+100")
#top frame with logo
def progz():
    """Build and run the contract-date helper window: prints the computed
    Monday/contract dates to stdout and shows them in a small Tk form."""
    # start date on monday week before and week after the date
    td = datetime.date.today()
    print("Today: ", datetime.date.today())
    start_date_before = datetime.date.today() + relativedelta(weekday=MO(-1))
    print("Last Monday: ", start_date_before.strftime("%d-%m-%Y"))
    start_date_after = datetime.date.today() + relativedelta(weekday=MO(+1))
    print("Next Monday: ", start_date_after.strftime("%d-%m-%Y"))
    # contract start date (hard-coded; interactive prompt kept as comment)
    start_date = "2017-01-31"  # input("Start date: ")
    start_date = dateutil.parser.parse(start_date).strftime("%d-%m-%Y")
    start_date = datetime.datetime.strptime(start_date, '%d-%m-%Y')
    # contract end date: start + length in days, snapped to a Friday.
    contract_lenght = 365  # int(input("How long in days?: "))
    contract_calculation = start_date + datetime.timedelta(days=contract_lenght) + relativedelta(weekday=FR)
    contract_calculation.strftime("%d-%m-%Y")
    print("end date on friday", contract_calculation)
    # contract review date (two weeks before on wednesday)
    contract_review_date = contract_calculation - relativedelta(weekday=WE(-3))
    print("Review date is: ", contract_review_date)
    ##########
    root = Tk()
    root.geometry("650x300")
    # root.option_add("*Font", "helvetica 12")
    # Menu bar: File/Quit and Help/Help entries (no commands wired up).
    menu = Menu(root)
    root.config(menu=menu)
    file_menu = Menu(menu)
    file_menu.add_command(label="Quit")
    menu.add_cascade(label="File", menu=file_menu)
    help_menu = Menu(menu)
    help_menu.add_command(label="Help")
    menu.add_cascade(label="Help", menu=help_menu)
    ################################ END OF MENU ##############
    # NOTE(review): .grid() returns None, so these label/button variables are
    # always None — harmless here since they are never used again.
    label1 = Label(root, text=" ").grid(row=0, sticky='w')
    label2 = Label(root, text="Todays date:").grid(row=1, sticky='e', padx=10)
    label3 = Label(root, text="Monday before:").grid(row=1, column=3, sticky='e', padx=10)
    label4 = Label(root, text="Monday after: ").grid(row=2, column=3, sticky='e', padx=10)
    label6 = Label(root, text="Contract Start Date: ").grid(row=6, sticky='e', padx=10, pady=10)
    start_date_btn = Button(text="Add Date").grid(row=6, column=2, columnspan=3, sticky='w', pady=10)
    # Entry fields pre-filled with the computed dates.
    e_today = Entry(root)
    e_today.insert(END, td)
    e_mon_before = Entry(root)
    e_mon_before.insert(END, start_date_before)
    e_mon_after = Entry(root)
    e_mon_after.insert(END, start_date_after)
    e_start_date = Entry(root)
    e_today.grid(row=1, column=1)
    e_mon_before.grid(row=1, column=4)
    e_mon_after.grid(row=2, column=4)
    e_start_date.grid(row=6, column=1, pady=10)
    root.mainloop()
def _populate(self, year):
    """Fill the calendar mapping with New Zealand public holidays for *year*.

    Writes ``self[date] = holiday_name`` entries (``self`` is a dict-like
    holiday calendar).  Reads ``self.observed`` (whether weekend holidays
    get a Monday "(Observed)" entry) and ``self.prov`` (province code or
    name for the regional anniversary day).

    NOTE(review): JAN..DEC, WEEKEND, TUE/WED/THU, ``rd`` and ``easter``
    are presumably module-level constants/aliases defined elsewhere in
    this file — confirm before reuse.
    """
    # Legislative background for the observance rules below:
    # Bank Holidays Act 1873
    # The Employment of Females Act 1873
    # Factories Act 1894
    # Industrial Conciliation and Arbitration Act 1894
    # Labour Day Act 1899
    # Anzac Day Act 1920, 1949, 1956
    # New Zealand Day Act 1973
    # Waitangi Day Act 1960, 1976
    # Sovereign's Birthday Observance Act 1937, 1952
    # Holidays Act 1981, 2003
    if year < 1894:
        return

    # New Year's Day (and the day after); weekend dates get a fixed
    # Jan 3 / Jan 4 observed entry.
    name = "New Year's Day"
    jan1 = date(year, JAN, 1)
    self[jan1] = name
    if self.observed and jan1.weekday() in WEEKEND:
        self[date(year, JAN, 3)] = name + " (Observed)"
    name = "Day after New Year's Day"
    jan2 = date(year, JAN, 2)
    self[jan2] = name
    if self.observed and jan2.weekday() in WEEKEND:
        self[date(year, JAN, 4)] = name + " (Observed)"

    # Waitangi Day (called "New Zealand Day" 1974-1976); Mondayised
    # only from 2014 on.
    if year > 1973:
        name = "New Zealand Day"
        if year > 1976:
            name = "Waitangi Day"
        feb6 = date(year, FEB, 6)
        self[feb6] = name
        if self.observed and year >= 2014 and feb6.weekday() in WEEKEND:
            self[feb6 + rd(weekday=MO)] = name + " (Observed)"

    # Easter
    self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
    self[easter(year) + rd(weekday=MO)] = "Easter Monday"

    # Anzac Day; Mondayised only from 2014 on.
    if year > 1920:
        name = "Anzac Day"
        apr25 = date(year, APR, 25)
        self[apr25] = name
        if self.observed and year >= 2014 and apr25.weekday() in WEEKEND:
            self[apr25 + rd(weekday=MO)] = name + " (Observed)"

    # Sovereign's Birthday: the observed date moved with each monarch.
    if year >= 1952:
        name = "Queen's Birthday"
    elif year > 1901:
        name = "King's Birthday"
    if year == 1952:
        self[date(year, JUN, 2)] = name  # Elizabeth II
    elif year > 1937:
        self[date(year, JUN, 1) + rd(weekday=MO(+1))] = name  # EII & GVI
    elif year == 1937:
        self[date(year, JUN, 9)] = name  # George VI
    elif year == 1936:
        self[date(year, JUN, 23)] = name  # Edward VIII
    elif year > 1911:
        self[date(year, JUN, 3)] = name  # George V
    elif year > 1901:
        # http://paperspast.natlib.govt.nz/cgi-bin/paperspast?a=d&d=NZH19091110.2.67
        self[date(year, NOV, 9)] = name  # Edward VII

    # Labour Day: 4th Monday of October since 1910, previously the
    # 2nd Wednesday.
    name = "Labour Day"
    if year >= 1910:
        self[date(year, OCT, 1) + rd(weekday=MO(+4))] = name
    elif year > 1899:
        self[date(year, OCT, 1) + rd(weekday=WE(+2))] = name

    # Christmas Day / Boxing Day; weekend dates get fixed Dec 27 / 28
    # observed entries.
    name = "Christmas Day"
    dec25 = date(year, DEC, 25)
    self[dec25] = name
    if self.observed and dec25.weekday() in WEEKEND:
        self[date(year, DEC, 27)] = name + " (Observed)"
    name = "Boxing Day"
    dec26 = date(year, DEC, 26)
    self[dec26] = name
    if self.observed and dec26.weekday() in WEEKEND:
        self[date(year, DEC, 28)] = name + " (Observed)"

    # Province Anniversary Day.  The common pattern: anniversary dates
    # falling Tue-Thu are observed on the preceding Monday, otherwise on
    # the following (or same) Monday.
    if self.prov in ('NTL', 'Northland', 'AUK', 'Auckland'):
        if 1963 < year <= 1973 and self.prov in ('NTL', 'Northland'):
            name = "Waitangi Day"
            dt = date(year, FEB, 6)
        else:
            name = "Auckland Anniversary Day"
            dt = date(year, JAN, 29)
        if dt.weekday() in (TUE, WED, THU):
            self[dt + rd(weekday=MO(-1))] = name
        else:
            self[dt + rd(weekday=MO)] = name
    elif self.prov in ('TKI', 'Taranaki', 'New Plymouth'):
        name = "Taranaki Anniversary Day"
        self[date(year, MAR, 1) + rd(weekday=MO(+2))] = name
    elif self.prov in ('HKB', "Hawke's Bay"):
        # Friday before Labour Day.
        name = "Hawke's Bay Anniversary Day"
        labour_day = date(year, OCT, 1) + rd(weekday=MO(+4))
        self[labour_day + rd(weekday=FR(-1))] = name
    elif self.prov in ('WGN', 'Wellington'):
        name = "Wellington Anniversary Day"
        jan22 = date(year, JAN, 22)
        if jan22.weekday() in (TUE, WED, THU):
            self[jan22 + rd(weekday=MO(-1))] = name
        else:
            self[jan22 + rd(weekday=MO)] = name
    elif self.prov in ('MBH', 'Marlborough'):
        # Monday a week after Labour Day.
        name = "Marlborough Anniversary Day"
        labour_day = date(year, OCT, 1) + rd(weekday=MO(+4))
        self[labour_day + rd(weeks=1)] = name
    elif self.prov in ('NSN', 'Nelson'):
        name = "Nelson Anniversary Day"
        feb1 = date(year, FEB, 1)
        if feb1.weekday() in (TUE, WED, THU):
            self[feb1 + rd(weekday=MO(-1))] = name
        else:
            self[feb1 + rd(weekday=MO)] = name
    elif self.prov in ('CAN', 'Canterbury'):
        # Show Day: second Friday after the first Tuesday of November.
        name = "Canterbury Anniversary Day"
        showday = date(year, NOV, 1) + rd(weekday=TU) + \
            rd(weekday=FR(+2))
        self[showday] = name
    elif self.prov in ('STC', 'South Canterbury'):
        # Dominion Day: 4th Monday of September.
        name = "South Canterbury Anniversary Day"
        dominion_day = date(year, SEP, 1) + rd(weekday=MO(4))
        self[dominion_day] = name
    elif self.prov in ('WTL', 'Westland'):
        name = "Westland Anniversary Day"
        dec1 = date(year, DEC, 1)
        # Observance varies?!?!
        if year == 2005:  # special case?!?!
            self[date(year, DEC, 5)] = name
        elif dec1.weekday() in (TUE, WED, THU):
            self[dec1 + rd(weekday=MO(-1))] = name
        else:
            self[dec1 + rd(weekday=MO)] = name
    elif self.prov in ('OTA', 'Otago'):
        name = "Otago Anniversary Day"
        mar23 = date(year, MAR, 23)
        # there is no easily determined single day of local observance?!?!
        if mar23.weekday() in (TUE, WED, THU):
            dt = mar23 + rd(weekday=MO(-1))
        else:
            dt = mar23 + rd(weekday=MO)
        if dt == easter(year) + rd(weekday=MO):  # Avoid Easter Monday
            dt += rd(days=1)
        self[dt] = name
    elif self.prov in ('STL', 'Southland'):
        # Moved to Easter Tuesday from 2012 on.
        name = "Southland Anniversary Day"
        jan17 = date(year, JAN, 17)
        if year > 2011:
            self[easter(year) + rd(weekday=TU)] = name
        else:
            if jan17.weekday() in (TUE, WED, THU):
                self[jan17 + rd(weekday=MO(-1))] = name
            else:
                self[jan17 + rd(weekday=MO)] = name
    elif self.prov in ('CIT', 'Chatham Islands'):
        name = "Chatham Islands Anniversary Day"
        nov30 = date(year, NOV, 30)
        if nov30.weekday() in (TUE, WED, THU):
            self[nov30 + rd(weekday=MO(-1))] = name
        else:
            self[nov30 + rd(weekday=MO)] = name
def _populate(self, year):
    """Fill the calendar mapping with German public holidays for *year*.

    Nothing is added before 1990.  For 1990 itself only the holidays from
    German Unity Day (Oct 3, the reunification date) onward apply; from
    1991 on the full year is populated.  Reads ``self.prov`` for the
    state-specific holidays.
    """
    if year <= 1989:
        return

    prov = self.prov
    easter_sunday = easter(year)

    if year > 1990:
        self[date(year, JAN, 1)] = 'Neujahr'
        if prov in ('BW', 'BY', 'BYP', 'ST'):
            self[date(year, JAN, 6)] = 'Heilige Drei Könige'
        self[easter_sunday - rd(days=2)] = 'Karfreitag'
        if prov == "BB":
            # Easter Sunday always falls on a Sunday and there is no
            # "observed" shifting rule, so this entry is largely
            # symbolic — but it is an official holiday in Brandenburg.
            self[easter_sunday] = "Ostersonntag"
        self[easter_sunday + rd(days=1)] = 'Ostermontag'
        self[date(year, MAY, 1)] = 'Erster Mai'
        if prov == "BE" and year == 2020:
            # One-off Berlin holiday for the 75th anniversary of the end
            # of World War II in Europe.
            self[date(year, MAY, 8)] = \
                "75. Jahrestag der Befreiung vom Nationalsozialismus " \
                "und der Beendigung des Zweiten Weltkriegs in Europa"
        self[easter_sunday + rd(days=39)] = 'Christi Himmelfahrt'
        if prov == "BB":
            # Whit Sunday: same always-on-a-Sunday situation as above.
            self[easter_sunday + rd(days=49)] = "Pfingstsonntag"
        self[easter_sunday + rd(days=50)] = 'Pfingstmontag'
        if prov in ('BW', 'BY', 'BYP', 'HE', 'NW', 'RP', 'SL'):
            self[easter_sunday + rd(days=60)] = 'Fronleichnam'
        if prov in ('BY', 'SL'):
            self[date(year, AUG, 15)] = 'Mariä Himmelfahrt'

    self[date(year, OCT, 3)] = 'Tag der Deutschen Einheit'

    # Reformation Day: permanent in five eastern states, adopted by four
    # northern states from 2018, and nationwide in 2017 (500th
    # anniversary of Luther's theses).
    has_reformationstag = (
        prov in ('BB', 'MV', 'SN', 'ST', 'TH')
        or (prov in ('HB', 'SH', 'NI', 'HH') and year >= 2018)
        or year == 2017
    )
    if has_reformationstag:
        self[date(year, OCT, 31)] = 'Reformationstag'

    if prov in ('BW', 'BY', 'BYP', 'NW', 'RP', 'SL'):
        self[date(year, NOV, 1)] = 'Allerheiligen'

    # Buß- und Bettag: the last Wednesday before Nov 23.  When Nov 23 is
    # itself a Wednesday we must step back two Wednesdays, otherwise one.
    if year <= 1994 or prov == 'SN':
        anchor = date(year, NOV, 23)
        step_back = WE(-2) if anchor.weekday() == 2 else WE(-1)
        self[anchor + rd(weekday=step_back)] = 'Buß- und Bettag'

    if year >= 2019:
        if prov == 'TH':
            self[date(year, SEP, 20)] = 'Weltkindertag'
        if prov == 'BE':
            self[date(year, MAR, 8)] = 'Internationaler Frauentag'

    self[date(year, DEC, 25)] = 'Erster Weihnachtstag'
    self[date(year, DEC, 26)] = 'Zweiter Weihnachtstag'
def _populate(self, year):
    """Fill the calendar mapping with Jamaican public holidays for *year*.

    When ``self.observed`` is set, fixed-date holidays landing on a
    Sunday (and Independence Day landing on either weekend day) are
    shifted to the following Monday.
    """
    easter_sunday = easter(year)

    # --- fixed-date and nth-weekday holidays ---------------------------

    # New Year's Day: following Monday when it falls on a Sunday.
    holiday = date(year, JAN, 1)
    if self.observed and holiday.weekday() == SUN:
        self[holiday + rd(weekday=MO(+1))] = "New Year's Day" + " (Observed)"
    else:
        self[holiday] = "New Year's Day"

    self[date(year, FEB, 14)] = "Valentine's Day"

    # Mother's Day: second Sunday of May.
    self[date(year, MAY, 1) + rd(weekday=SU(+2))] = "Mother's Day"

    # Labour Day: following Monday when it falls on a Sunday.
    holiday = date(year, MAY, 23)
    if self.observed and holiday.weekday() == SUN:
        self[holiday + rd(weekday=MO)] = "Labour Day" + " (Observed)"
    else:
        self[holiday] = "Labour Day"

    # Father's Day: third Sunday of June.
    self[date(year, JUN, 1) + rd(weekday=SU(+3))] = "Father's Day"

    # Emancipation Day: following Monday when it falls on a Sunday.
    holiday = date(year, AUG, 1)
    if self.observed and holiday.weekday() == SUN:
        self[holiday + rd(weekday=MO)] = "Emancipation Day" + " (Observed)"
    else:
        self[holiday] = "Emancipation Day"

    # Independence Day: shifted from either weekend day.  NOTE(review):
    # unlike the holidays above, no "(Observed)" suffix is appended —
    # preserved as-is; confirm whether that is intentional.
    holiday = date(year, AUG, 6)
    if self.observed and holiday.weekday() in WEEKEND:
        self[holiday + rd(weekday=MO)] = "Independence Day"
    else:
        self[holiday] = "Independence Day"

    # National Heroes Day: third Monday of October.
    self[date(year, OCT, 1) + rd(weekday=MO(+3))] = "National Heroes Day"

    self[date(year, DEC, 25)] = "Christmas day"
    self[date(year, DEC, 26)] = "Boxing day"

    # New Year's Eve is deliberately not registered:
    # self[date(year, DEC, 31)] = "New Year Eve"

    # --- Easter-based holidays -----------------------------------------

    # Ash Wednesday: 46 days before Easter Sunday (back 40 days to a
    # Tuesday, then to the previous Wednesday).
    self[easter_sunday + rd(days=-40, weekday=WE(-1))] = "Ash Wednesday"
    self[easter_sunday + rd(weekday=FR(-1))] = "Good Friday"
    self[easter_sunday] = "Easter"
    self[easter_sunday + rd(weekday=MO(+1))] = "Easter Monday"
# This file contains all the functions that the workflow # uses for specialised dates. from math import floor from arrow.arrow import datetime, timedelta # The DAY_MAP is specific to relative delta from date_format_mappings import DATE_MAPPINGS, TIME_MAPPINGS, DATE_TIME_MAPPINGS from dateutil.relativedelta import relativedelta, MO, TU, WE, TH, FR, SA, SU from dateutil.rrule import rrule, YEARLY, DAILY DAY_MAP = { "mon": relativedelta(days=+1, weekday=MO(+1)), "tue": relativedelta(days=+1, weekday=TU(+1)), "wed": relativedelta(days=+1, weekday=WE(+1)), "thu": relativedelta(days=+1, weekday=TH(+1)), "fri": relativedelta(days=+1, weekday=FR(+1)), "sat": relativedelta(days=+1, weekday=SA(+1)), "sun": relativedelta(days=+1, weekday=SU(+1)), "prev mon": relativedelta(days=-1, weekday=MO(-1)), "prev tue": relativedelta(days=-1, weekday=TU(-1)), "prev wed": relativedelta(days=-1, weekday=WE(-1)), "prev thu": relativedelta(days=-1, weekday=TH(-1)), "prev fri": relativedelta(days=-1, weekday=FR(-1)), "prev sat": relativedelta(days=-1, weekday=SA(-1)), "prev sun": relativedelta(days=-1, weekday=SU(-1)), } DAYS_OF_WEEK_ABBREVIATIONS = { "mon": "monday", "tue": "tuesday", "wed": "wednesday",
async def weekly_status_report(session, project, now):
    """Build and send the weekly status report for *project* to its chat.

    Loads the project's item lists (with items and notes) from the
    current period bucket onward, renders them as emojized text plus a
    weekday calendar grid, and sends the result as a document via the
    module-level ``bot``.

    :param session: async SQLAlchemy session used for the select.
    :param project: project record; ``settings``, ``id``,
        ``period_bucket_mode`` and ``chat_id`` are read.
    :param now: current datetime — presumably timezone-consistent with
        ``datetime.now()`` used below; confirm against the scheduler.
    """
    # NOTE(review): `config` is only used for logging here.
    config = project.settings['weekly_status_report']
    # NOTE(review): 'project%s' is missing a space before the id in the
    # log output — left unchanged (runtime string).
    logging.info('Calling at %s weekly_status_report for project%s %s',
                 now, project.id, config)
    # Reporting window: most recent Wednesday 00:00 through the day
    # before the following Wednesday (a Wed..Tue span).  NOTE(review):
    # when `now` is itself a Wednesday, WE(-1)/WE both resolve to today,
    # making end_dt fall before start_dt — presumably the job never runs
    # on a Wednesday; confirm.
    start_dt = now + relativedelta(
        weekday=WE(-1), hour=0, minute=0, second=0, microsecond=0)
    end_dt = now + relativedelta(
        weekday=WE, hour=0, minute=0, second=0,
        microsecond=0) - relativedelta(days=1)
    grid = generate_grid(start_dt, end_dt)
    # Re-wrap grid cells as mutable [marker, date] lists so the emoji
    # markers can be edited in place below.
    grid = [[[i[0], i[1]] for i in week] for week in grid]
    bucket = PeriodBucket.new(project.period_bucket_mode, start_dt.date())
    # Item lists from the current bucket onward, with items and their
    # notes eagerly loaded.
    select_stmt = select(ItemsList) \
        .options(
            selectinload(ItemsList.items)
            .selectinload(Item.notes)
        ) \
        .where(
            ItemsList.project_id == project.id,
            ItemsList.period_bucket_key >= bucket.key(),
        ) \
        .order_by(ItemsList.period_bucket_key)
    project_daily_todo_lists = await session.execute(select_stmt)
    message_content = []
    for todo_list in project_daily_todo_lists.scalars():
        # `bucket` is deliberately rebound to the list's own bucket;
        # it is reused right after the inner loop.
        bucket = PeriodBucket.get_by_key(todo_list.period_bucket_key)
        for todo_item in todo_list.items:
            message_content.append(
                text(':spiral_calendar_pad:', str(bucket), ':pushpin:',
                     todo_item.text))
            for log_message in todo_item.notes:
                message_content.append(text(':paperclip:', log_message.text))
            message_content.append(text(''))
        # Mark days that have a reported bucket: white -> purple.
        if bucket.start():
            for week in grid:
                for i in week:
                    if i[1].date() == bucket.start().date():
                        i[0] = i[0].replace('white', 'purple')
    import io
    file = io.StringIO(emojize(text(*message_content, sep='\n')))
    # Highlight today's cell: circles become squares (large for the
    # plain white/black markers, small for the colored ones).
    for week in grid:
        for i in week:
            if i[1].date() == datetime.now().date():
                if 'white' in i[0] or 'black' in i[0]:
                    i[0] = i[0].replace('circle', 'large_square')
                else:
                    i[0] = i[0].replace('circle', 'square')
    # Drop the dates, keeping only the emoji markers for rendering.
    grid = [[i[0] for i in week] for week in grid]
    await bot.send_document(
        project.chat_id, file,
        caption=emojize(
            text(text(
                f'Отчет о проделанной работе с {start_dt.date()} по {end_dt.date()}'
            ),
                text(''),
                text('Пн Вт Ср Чт Пт Сб Вс'),
                *[text(*week, sep='') for week in grid],
                sep='\n')))