def add_cal_events(event_list):
    """Create a Google Calendar event for each meeting in event_list.

    Skips meetings that already exist on the calendar, meetings with no
    'name' field, and names with no known calendar id.

    :param event_list: iterable of meeting dicts with keys 'name',
        'mentor', 'time', 'day', 'room_num', 'room_name'.
    """
    cal_api = utils.google_calendar_login()
    for meeting in event_list:
        # If meeting already exists, don't recreate it.
        if check_for_cal_event(cal_api, meeting):
            continue
        # Get info for the event.
        name = meeting.get('name')
        if not name:
            continue
        name = utils.process_name(name)
        # .get() avoids a KeyError for names without a calendar mapping
        # (check_for_cal_event guards the same lookup with a falsy check).
        cal_id = dr.calendar_id_dir.get(name)
        if not cal_id:
            continue
        start_time = meeting['time']
        time_range = utils.make_time_range(start_time).replace(' ', '')
        meeting_text = 'Meeting with ' + meeting['mentor'] + ' in Room ' + \
            meeting['room_num'] + ' (' + meeting['room_name'] + ') on ' + \
            meeting['day'] + ' ' + time_range
        # Create the event on the appropriate calendar via quickAdd.
        created_event = cal_api.events().quickAdd(
            calendarId=cal_id, text=meeting_text).execute()
        print('created event: ' + created_event['summary'])
def get_namespace(self, resource, context):
    """Return the template namespace with the display filename."""
    raw = resource.get_value('filename')
    # Fall back to the resource title when no filename is stored;
    # otherwise use the display part produced by process_name.
    display = process_name(raw)[1] if raw else resource.get_title()
    return {'filename': display}
def check_for_cal_event(cal_api, meeting, return_event_id=False):
    """Check whether a calendar event already exists for `meeting`.

    :param cal_api: authenticated Google Calendar API client.
    :param meeting: dict with at least 'name', 'day' (containing "M/DD")
        and 'time' (e.g. "9:00 AM") keys.
    :param return_event_id: when True return the matching event id (or
        None); when False return a boolean.

    Bug fix: the original early exits did
    `return False if return_event_id else None`, i.e. the polarity was
    inverted relative to the final return; the "not found" value now
    always matches the requested return type.
    """
    not_found = None if return_event_id else False

    name = meeting.get('name')
    if not name:
        return not_found
    name = utils.process_name(name)
    # .get() so names without a calendar mapping don't raise KeyError.
    cal_id = dr.calendar_id_dir.get(name)
    if not cal_id:
        return not_found

    # Build a comparison timestamp "YYYY-MM-DDTHH:MM" from the meeting's
    # day/time fields, assuming the current year.
    today = datetime.date.today()
    year = str(today.year)
    m = re.search(r'([0-9]{1,2})/([0-9]{2})', meeting['day'])
    if not m:
        return not_found
    month = m.group(1)
    day = m.group(2)
    if len(month) == 1:
        month = '0' + month
    start_time = meeting['time']
    start_time = start_time[:start_time.find(' ')]
    if len(start_time) == 4:  # e.g. "9:00" -> "09:00"
        start_time = '0' + start_time
    comparison_time = year + '-' + month + '-' + day + 'T' + start_time

    events = cal_api.events().list(calendarId=cal_id,
                                   q='Meeting with').execute()
    # NOTE(review): only the start time is compared; despite the query
    # string, the mentor name in the summary is not checked.
    for event in events['items']:
        if comparison_time in event['start']['dateTime']:
            return event['id'] if return_event_id else True
    return not_found
def get_namespace(self, resource, context):
    """Build the view namespace: filename, archive listing, extract widget."""
    filename = resource.get_value('filename') or resource.get_title()
    # One line per archive entry, using the display title from process_name.
    lines = []
    for entry in resource.get_value('data').get_contents():
        lines.append(process_name(entry)[1] + u'\n')
    contents = ''.join(lines)
    # Extract archive: only users allowed to edit the resource may extract.
    extract = context.root.is_allowed_to_edit(context.user, resource)
    widget = PathSelectorWidget('target', value='..').render() if extract else None
    # Ok
    return {'filename': filename, 'contents': contents,
            'extract': extract, 'widget': widget}
def delete_cal_events(event_list):
    """Delete the calendar event matching each meeting in event_list.

    Meetings with no matching event, no 'name' field, or no known
    calendar id are skipped.
    """
    cal_api = utils.google_calendar_login()
    for meeting in event_list:
        event_id = check_for_cal_event(cal_api, meeting,
                                       return_event_id=True)
        if not event_id:
            continue
        name = meeting.get('name')
        if not name:
            continue
        name = utils.process_name(name)
        # .get() avoids a KeyError for names without a calendar mapping.
        cal_id = dr.calendar_id_dir.get(name)
        if not cal_id:
            continue
        # Delete event. Bug fix: the Calendar API returns an empty body
        # on delete, so the original `deleted_event['summary']` crashed;
        # report using the meeting data instead.
        cal_api.events().delete(calendarId=cal_id,
                                eventId=event_id).execute()
        print('deleted event: Meeting with ' + meeting['mentor'] +
              ' on ' + meeting['day'])
def send_deleted_msgs(msg_dicts, server):
    """Email each affected team/associate about a removed meeting.

    For every deleted event, sends a cancellation notice plus the
    recipient's full updated schedule for that day.

    :param msg_dicts: iterable of event dicts ('name', 'mentor', 'day',
        'time', 'room_num', 'room_name').
    :param server: an SMTP-like object exposing send_message().
    """
    for event in msg_dicts:
        clean_name = utils.process_name(event['name'])
        # Get full schedule for the day for that team or associate.
        full_schedule = daily_notice.main(team=clean_name,
                                          specific_day=event['day'],
                                          send_emails=False)
        if clean_name == 'not_found':
            address_name = event['name']
        else:
            address_name = dr.names[clean_name]
        msg = 'Hello ' + address_name + ',\n\n' + \
            'You have been REMOVED FROM the following meeting with ' + event['mentor'] + ':\n\n' + \
            event['day'] + ' - ' + event['time'] + '\n' + \
            'Room ' + event['room_num'] + ' (' + event['room_name'] + ')\n\n\n\n' + \
            'Your full updated schedule for ' + event['day'] + ' is as follows:'
        event_list = bulk_event_formatter(full_schedule[clean_name])
        msg += ''.join(event_list) if event_list else 'No meetings!'
        msg += '\n\nPlease check the main schedule if this is in error.\n\n' + \
            '- Scheduling Bot'
        # Normalize the recipients to a list so single and multiple
        # addresses share one send path (the original duplicated the
        # whole message-building/sending branch).
        to_addresses = dr.update_email_list[clean_name]
        if not isinstance(to_addresses, list):
            to_addresses = [to_addresses]
        for addr in to_addresses:
            message = MIMEText(msg)
            message['From'] = '*****@*****.**'
            message['To'] = addr
            message['Subject'] = 'Cancelled mentor meeting at ' + \
                event['time'] + ' on ' + event['day']
            server.send_message(message)
            print('Sent deleted email - ' + address_name + ' with ' +
                  event['mentor'] + ' at ' + event['time'])
def get_namespace(self, resource, context):
    """Assemble the namespace for rendering the archive view."""
    filename = resource.get_value('filename') or resource.get_title()
    entries = resource.get_value('data').get_contents()
    # Join the per-entry display titles, one per line.
    contents = ''.join([process_name(x)[1] + u'\n' for x in entries])
    # Extract archive is permitted only for users who can edit.
    can_extract = context.root.is_allowed_to_edit(context.user, resource)
    if can_extract:
        widget = PathSelectorWidget('target', value='..').render()
    else:
        widget = None
    # Ok
    return {
        'filename': filename,
        'contents': contents,
        'extract': can_extract,
        'widget': widget,
    }
def extract_archive(self, handler, default_language, filter=None,
                    postproc=None, update=False):
    """Extract every file in the archive `handler` into this folder.

    :param handler: archive handler exposing get_contents() (entry
        paths) and get_file() (entry bodies).
    :param default_language: language used when the filename encodes none.
    :param filter: optional callable (path, mimetype, body) -> body;
        returning None skips the entry. (Shadows the builtin name.)
    :param postproc: optional callable applied to each written file.
    :param update: when False, an already-existing target resource is an
        error; when True it is overwritten.

    NOTE(review): Python 2 code (`raise E, msg` syntax) — left as-is.
    """
    change_resource = self.database.change_resource
    for path_str in handler.get_contents():
        # 1. Skip folders
        path = Path(path_str)
        if path.endswith_slash:
            continue
        # Skip the owner file (garbage produced by microsoft)
        filename = path[-1]
        if filename.startswith('~$'):
            continue
        # 2. Create parent folders if needed
        folder = self
        for name in path[:-1]:
            name, title = process_name(name)
            subfolder = folder.get_resource(name, soft=True)
            if subfolder is None:
                folder = folder.make_resource(name, Folder)
                folder.set_value('title', title, default_language)
            elif not isinstance(subfolder, Folder):
                # A non-folder resource already occupies this path segment.
                raise RuntimeError, MSG_NAME_CLASH
            else:
                folder = subfolder
        # 3. Find out the resource name and title, the file mimetype and
        # language
        mimetype = guess_mimetype(filename, 'application/octet-stream')
        name, extension, language = FileName.decode(filename)
        name, title = process_name(name)
        language = language or default_language
        # Keep the filename extension (except in webpages)
        if mimetype not in ('application/xhtml+xml', 'text/html'):
            name = FileName.encode((name, extension, None))
        # 4. The body
        body = handler.get_file(path_str)
        if filter:
            body = filter(path_str, mimetype, body)
            if body is None:
                continue
        # 5. Update or make file
        file = folder.get_resource(name, soft=True)
        if file:
            if update is False:
                msg = 'unexpected resource at {path}'
                raise RuntimeError, msg.format(path=path_str)
            if mimetype == 'text/html':
                body = tidy_html(body)
                file_handler = file.get_handler(language)
            else:
                file_handler = file.get_handler()
            old_body = file.handler.to_str()
            file_handler.load_state_from_string(body)
            if postproc:
                postproc(file)
            # FIXME Comparing the bytes does not work for XML, so we use
            # this weak heuristic
            if len(old_body) != len(file.handler.to_str()):
                change_resource(file)
        else:
            # Case 1: the resource does not exist
            file = folder._make_file(name, filename, mimetype, body,
                                     language)
            file.set_value('title', title, language=language)
            if postproc:
                postproc(file)
def extract_archive(self, handler, default_language, filter=None,
                    postproc=None, update=False):
    """Extract every file in the archive `handler` into this folder,
    sanitizing entry paths with checkid() first.

    :param handler: archive handler exposing get_contents()/get_file().
    :param default_language: language used when the filename encodes none.
    :param filter: optional callable (path, mimetype, body) -> body;
        returning None skips the entry. (Shadows the builtin name.)
    :param postproc: optional callable applied to each written file.
    :param update: when False, an already-existing target resource is an
        error; when True it is overwritten.

    NOTE(review): Python 2 code (`raise E, msg` syntax) — left as-is.
    """
    change_resource = self.database.change_resource
    for path_str in handler.get_contents():
        # 1. Skip folders
        # Sanitize each path segment: empty segments and segments that
        # checkid() rejects fall back to the generic name 'file'.
        clean_path = "/".join([
            checkid(x) or 'file' if x else 'file'
            for x in path_str.split("/")])
        path = Path(clean_path)
        if path.endswith_slash:
            continue
        # Skip the owner file (garbage produced by microsoft)
        filename = path[-1]
        if filename.startswith('~$'):
            continue
        # 2. Create parent folders if needed
        folder = self
        for name in path[:-1]:
            name, title = process_name(name)
            subfolder = folder.get_resource(name, soft=True)
            if subfolder is None:
                folder = folder.make_resource(name, Folder)
                folder.set_value('title', title, default_language)
            elif not isinstance(subfolder, Folder):
                # A non-folder resource already occupies this path segment.
                raise RuntimeError, MSG_NAME_CLASH
            else:
                folder = subfolder
        # 3. Find out the resource name and title, the file mimetype and
        # language
        mimetype = guess_mimetype(filename, 'application/octet-stream')
        name, extension, language = FileName.decode(filename)
        name, title = process_name(name)
        language = language or default_language
        # Keep the filename extension (except in webpages)
        if mimetype not in ('application/xhtml+xml', 'text/html'):
            name = FileName.encode((name, extension, None))
        # 4. The body
        body = handler.get_file(path_str)
        if filter:
            body = filter(path_str, mimetype, body)
            if body is None:
                continue
        # 5. Update or make file
        file = folder.get_resource(name, soft=True)
        if file:
            if update is False:
                msg = 'unexpected resource at {path}'
                raise RuntimeError, msg.format(path=path_str)
            if mimetype == 'text/html':
                body = tidy_html(body)
                file_handler = file.get_handler(language)
            else:
                file_handler = file.get_handler()
            old_body = file.handler.to_str()
            file_handler.load_state_from_string(body)
            if postproc:
                postproc(file)
            # FIXME Comparing the bytes does not work for XML, so we use
            # this weak heuristic
            if len(old_body) != len(file.handler.to_str()):
                change_resource(file)
        else:
            # Case 1: the resource does not exist
            file = folder._make_file(name, filename, mimetype, body,
                                     language)
            file.set_value('title', title, language=language)
            if postproc:
                postproc(file)
def pre_process(file, persistent=False):
    """ To pre-process the data for further operation(s).

    :param file: Path to a csv file.
    :param persistent: A boolean variable indicating whether to make the
        pre-processed data persistent locally.
    """
    print('start pre-processing...')
    # Work on a fresh frame loaded from disk.
    df = pd.read_csv(file)

    # Cleanse invalid sample(s): duplicates are identified by the
    # race-date / race-id / horse-id key triple.
    print('cleansing invalid sample...')
    df = cleanse_sample(df, keys=['rdate', 'rid', 'hid'], indices=[])

    # Eliminate useless features according to the rules below.
    print('eliminating useless features...')
    drop_rules = [
        # useless features
        'horsenum', 'rfinishm', 'runpos', 'windist', 'win', 'place',
        '(rm|p|m|d)\d+',
        # features containing too many NANs
        'ratechg', 'horseweightchg', 'besttime', 'age', 'priority',
        'lastsix', 'runpos', 'datediff',
        # features which are difficult to process
        'gear', 'pricemoney'
    ]
    df = cleanse_feature(df, rules=drop_rules)

    # Fill NaN value(s) with fixed constants.
    # NOTE(review): the float constants look like precomputed column
    # means — confirm against the training data.
    print('filling nans...')
    nan_columns = ['bardraw', 'finishm', 'exweight', 'horseweight',
                   'win_t5', 'place_t5']
    nan_methods = [('constant', 4), ('constant', 1e5),
                   ('constant', 122.61638888121101),
                   ('constant', 1106.368874062333),
                   ('constant', 26.101661368452852),
                   ('constant', 6.14878956518161)]
    df = fill_nan(df, columns=nan_columns, methods=nan_methods)

    # Replace invalid (zero) value(s) with fallback constants.
    print('replacing invalid values...')
    df = replace_invalid(
        df,
        columns=['bardraw', 'finishm', 'exweight', 'horseweight'],
        values=[(0, 14), (0, 1e5), (0, 122.61638888121101),
                (0, 1106.368874062333)])

    # Convert 'finishm' into 'velocity'.
    print('generating velocity...')
    df['velocity'] = 1e4 * df['distance'] / df['finishm']

    # Apply target encoding on the categorical feature groups.
    print('processing class...')
    df = process_class(df)
    print('processing jname and tname...')
    df = process_name(df)
    print('processing venue and course...')
    df = process_course(df)
    print('processing track and going...')
    df = process_going(df)

    # Conduct local persistence when requested.
    if persistent:
        # Set index before saving.
        df.set_index('index', inplace=True)
        print('saving result...')
        df.to_csv(file.replace('.csv', '_modified.csv'))
    return df
def main():
    """Diff the live schedule sheets against the locally cached CSVs.

    For each day in the lookahead window, compares the current Google
    Sheet to the last-saved CSV, collects added/removed meetings, emails
    the affected people, updates the calendar, and rewrites the cache.

    :raises IndexError: when today's date is not in vrs.sheet_options.
    """
    # Make Google API object
    sheets_api = utils.google_sheets_login()

    # Set variables
    spreadsheet_id = vrs.spreadsheet_id
    room_mapping = vrs.room_mapping
    sheet_options = vrs.sheet_options
    full_range = vrs.full_range
    lookahead = vrs.lookahead_days

    # Determine which days to check for: find index of today's date.
    today_date = utils.get_today(skip_weekends=True)
    day_index = -1
    for idx, date in enumerate(sheet_options):
        if today_date in date:
            day_index = idx
            break
    if day_index == -1:
        raise IndexError("Today's date not found within sheet_options."
                         "If Mentor Madness has ended, please shut off this"
                         "update script.")

    # Set the days to check, without worrying about going past the
    # program end.
    sheet_names = [sheet_options[day_index]]
    idx = 1
    while lookahead > 0:
        try:
            sheet_names.append(sheet_options[day_index + idx])
            idx += 1
            lookahead -= 1
        except IndexError:
            break

    # Holding variables for adding and deleting messages.
    adding_msgs = []
    deleting_msgs = []

    for day in sheet_names:
        # String formatting for API query and file saving.
        sheet_query = day + '!' + full_range
        csv_name = utils.day_to_filename(day)

        # Make request for sheet.
        sheet = sheets_api.spreadsheets().values().get(
            spreadsheetId=spreadsheet_id, range=sheet_query).execute()
        new_sheet = sheet['values']
        # Pad short rows out to the full expected width.
        for idx, new_sheet_row in enumerate(new_sheet):
            if len(new_sheet_row) < vrs.row_length:
                new_sheet[idx].extend([''] * (vrs.row_length -
                                              len(new_sheet_row)))

        # Load old sheet. Bug fix: the original never closed either file
        # handle (and the write handle was left unflushed); `with`
        # guarantees both are closed.
        with open(csv_name, 'r') as old_file:
            reader = csv.reader(old_file)
            row_counter = 0
            for old_row in reader:
                new_row = new_sheet[row_counter]
                timeslot = new_row[0]
                # Make rows the same length if they are not.
                if len(old_row) < len(new_row):
                    old_row.extend([''] * (len(new_row) - len(old_row)))
                elif len(old_row) > len(new_row):
                    new_row.extend([''] * (len(old_row) - len(new_row)))
                # Iterate over rooms.
                for room_num in range(1, len(room_mapping) + 1):
                    # Get descriptive variables of room.
                    room_dict = room_mapping[room_num]
                    room_name = room_dict['name']
                    mentor_name = new_row[room_dict['mentor_col']]
                    for col_num in room_dict['check_range']:
                        old_name = old_row[col_num]
                        new_name = new_row[col_num]
                        if new_name == old_name:
                            continue
                        new_event_dict = {
                            'time': timeslot,
                            'name': new_name,
                            'mentor': mentor_name,
                            'room_num': str(room_num),
                            'room_name': room_name,
                            'day': day
                        }
                        old_event_dict = {
                            'time': timeslot,
                            'name': old_name,
                            'mentor': mentor_name,
                            'room_num': str(room_num),
                            'room_name': room_name,
                            'day': day
                        }
                        if new_name and old_name:
                            # Someone was changed; only report when the
                            # processed names actually differ.
                            if utils.process_name(new_name) != \
                                    utils.process_name(old_name):
                                deleting_msgs.append(old_event_dict)
                                adding_msgs.append(new_event_dict)
                        elif old_name:
                            # Someone was deleted.
                            deleting_msgs.append(old_event_dict)
                        elif new_name:
                            # Someone was added.
                            adding_msgs.append(new_event_dict)
                row_counter += 1

        # Save the sheet (truncate and rewrite the local cache).
        with open(csv_name, 'w') as new_file:
            csv.writer(new_file).writerows(new_sheet)

    email_sender.send_update_mail(adding_msgs, deleting_msgs)
    gcal_scheduler.add_cal_events(adding_msgs)
    gcal_scheduler.delete_cal_events(deleting_msgs)
def main(team=None, send_today=False, specific_day=None, send_emails=True,
         create_calendar_events=False):
    """Build the per-team meeting schedule for a day.

    :param team: None for every team (keys of dr.empty_name_dict), or a
        single team name / list of team names.
    :param send_today: use today's date instead of the next business day.
    :param specific_day: overrides the computed day when given.
    :param send_emails: when True send the daily mail and return None;
        when False return the schedule dict instead.
    :param create_calendar_events: also push the events to Google Calendar.
    :return: None, or a dict mapping team name -> list of event dicts.
    """
    # Make Google API object
    sheets_api = utils.google_sheets_login()

    # Set variables
    spreadsheet_id = vrs.spreadsheet_id
    room_mapping = vrs.room_mapping
    full_range = vrs.full_range
    sheet_options = vrs.sheet_options

    # Determine which day to send for.
    # The default is the following business day.
    if send_today:
        match_day = utils.get_today(skip_weekends=True)
    else:
        match_day = utils.get_next_day()
    if specific_day:
        match_day = specific_day

    # Pick out the appropriate sheet names from the list.
    sheet_names = [x for x in sheet_options if match_day in x]

    if team is None:
        # Bug fix: the original aliased dr.empty_name_dict and appended
        # into it, mutating the shared module-level dict on every call
        # (this function runs repeatedly from the email senders). Build
        # a fresh dict of empty lists keyed the same way instead.
        name_dict = {key: [] for key in dr.empty_name_dict}
    else:
        name_dict = {}
        if isinstance(team, list):
            for t in team:
                name_dict[t] = []
        else:
            name_dict[team] = []

    for day in sheet_names:
        # String formatting for API query and file saving.
        sheet_query = day + '!' + full_range
        # Make request for sheet.
        sheet = sheets_api.spreadsheets().values().get(
            spreadsheetId=spreadsheet_id, range=sheet_query).execute()
        new_sheet = sheet['values'][1:]  # get rid of the header row
        # Pad short rows out to the full expected width.
        for idx, new_sheet_row in enumerate(new_sheet):
            if len(new_sheet_row) < vrs.row_length:
                new_sheet[idx].extend([''] * (vrs.row_length -
                                              len(new_sheet_row)))

        # Add a spacer dict to separate days.
        spacer_dict = {
            'time': None,
            'mentor': None,
            'room_num': None,
            'room_name': None,
            'day': day
        }
        for key in name_dict:
            name_dict[key].append(spacer_dict)

        for row in new_sheet:
            timeslot = row[0]
            # Iterate over rooms.
            for room_num in range(1, len(room_mapping) + 1):
                # Get descriptive variables of room.
                room_dict = room_mapping[room_num]
                room_name = room_dict['name']
                mentor_name = row[room_dict['mentor_col']]
                for col_num in room_dict['check_range']:
                    name = utils.process_name(row[col_num])
                    if name and name != 'not_found' and name in name_dict:
                        name_dict[name].append({
                            'time': timeslot,
                            'mentor': mentor_name,
                            'name': name,
                            'room_num': str(room_num),
                            'room_name': room_name,
                            'day': day
                        })
        print('Got info for ' + day)

    try:
        if create_calendar_events:
            for name, event_list in name_dict.items():
                gcal_scheduler.add_cal_events(event_list)
    except Exception:
        traceback.print_exc()

    if send_emails:
        try:
            email_sender.send_daily_mail(name_dict)
        except Exception:
            traceback.print_exc()
        return None
    else:
        return name_dict
def main(specific_day=None):
    """Collect each mentor's meetings for the given day(s) and hand the
    result to the mentor-schedule builders.

    :param specific_day: None (use the next day), a single sheet name,
        or a list of sheet names.
    """
    # Make Google API object
    sheets_api = utils.google_sheets_login()

    # Set variables
    spreadsheet_id = vrs.spreadsheet_id
    room_mapping = vrs.room_mapping
    full_range = vrs.full_range

    # Normalize the requested day(s) into a list of sheet names.
    if not specific_day:
        sheet_names = [utils.get_next_day()]
    elif isinstance(specific_day, list):
        sheet_names = specific_day
    elif isinstance(specific_day, str):
        sheet_names = [specific_day]
    else:
        sheet_names = []

    mentor_dict = {}
    for day in sheet_names:
        # String formatting for API query and file saving.
        sheet_query = day + '!' + full_range
        # Make request for sheet.
        sheet = sheets_api.spreadsheets().values().get(
            spreadsheetId=spreadsheet_id, range=sheet_query).execute()
        rows = sheet['values'][1:]  # get rid of the header row
        # Make sure each row is the correct length.
        for idx, row in enumerate(rows):
            missing = vrs.row_length - len(row)
            if missing > 0:
                rows[idx].extend([''] * missing)

        for row in rows:
            timeslot = row[0]
            # Walk every room for this timeslot.
            for room_num in range(1, len(room_mapping) + 1):
                room_dict = room_mapping[room_num]
                mentor_name = row[room_dict['mentor_col']]
                if not mentor_name:
                    continue
                # Register the mentor on first appearance for the day.
                mentor_dict.setdefault(mentor_name, [])
                teamname_idx = room_dict['check_range'][0]
                teamname = utils.process_name(row[teamname_idx])
                if teamname:
                    mentor_dict[mentor_name].append({
                        'time': timeslot,
                        'mentor': mentor_name,
                        'company': teamname,
                        'room_num': str(room_num),
                        'room_name': room_dict['name'],
                        'day': day
                    })

    email_sender.make_daily_mentor_schedules(mentor_dict)
    email_sender.make_mentor_packet_schedules(mentor_dict)
def main():
    """Send the weekly schedule email for the current program week."""
    # Make Google API object
    sheets_api = utils.google_sheets_login()

    # Set variables
    spreadsheet_id = vrs.spreadsheet_id
    room_mapping = vrs.room_mapping
    full_range = vrs.full_range
    # Bug fix: the original aliased dr.empty_name_dict and appended into
    # it, polluting the shared module-level dict across runs; build a
    # fresh dict of empty lists keyed the same way instead.
    name_dict = {key: [] for key in dr.empty_name_dict}

    # Determine which days (week) to check for, by day of month.
    today = time.localtime()
    day = today.tm_mday
    if day > 24:
        sheet_names = vrs.week3
    elif day > 17:
        sheet_names = vrs.week2
    else:
        sheet_names = vrs.week1

    for day in sheet_names:
        # String formatting for API query and file saving.
        sheet_query = day + '!' + full_range
        # Make request for sheet.
        sheet = sheets_api.spreadsheets().values().get(
            spreadsheetId=spreadsheet_id, range=sheet_query).execute()
        new_sheet = sheet['values']
        # Pad short rows out to the full expected width.
        for idx, new_sheet_row in enumerate(new_sheet):
            if len(new_sheet_row) < vrs.row_length:
                new_sheet[idx].extend([''] * (vrs.row_length -
                                              len(new_sheet_row)))

        # Add a spacer dict to separate days.
        spacer_dict = {
            'time': None,
            'mentor': None,
            'room_num': None,
            'room_name': None,
            'day': day
        }
        for key in name_dict:
            name_dict[key].append(spacer_dict)

        for row in new_sheet:
            timeslot = row[0]
            # Iterate over rooms.
            for room_num in range(1, len(room_mapping) + 1):
                # Get descriptive variables of room.
                room_dict = room_mapping[room_num]
                room_name = room_dict['name']
                mentor_name = row[room_dict['mentor_col']]
                for col_num in room_dict['check_range']:
                    name = utils.process_name(row[col_num])
                    # NOTE(review): a processed name missing from
                    # name_dict raises KeyError here — presumably
                    # dr.empty_name_dict covers every team; confirm.
                    if name and name != 'not_found':
                        name_dict[name].append({
                            'time': timeslot,
                            'mentor': mentor_name,
                            'room_num': str(room_num),
                            'room_name': room_name,
                            'day': day
                        })

    email_sender.send_weekly_mail(name_dict)