def main(start_date, end_date, email_recp):
    """Driver: pull expense emails for a date range, write them to a Google
    Sheet, then email a generated expense report.

    Args:
        start_date: inclusive range start, any format `parse` accepts.
        end_date: exclusive range end (one day past the last day processed).
        email_recp: recipient address for the generated report.
    """
    process_date_from = parse(start_date)
    process_date_to = parse(end_date)
    # end_date is exclusive, so step back one day before comparing months;
    # the sheet layout is one tab per month, hence the same-month restriction.
    if process_date_from.month != (process_date_to + timedelta(days=-1)).month:
        logger.error('Please enter dates for same month. Exiting...')
        return

    # Create Spreadsheet if not already done
    spreadsheet_id = gdrive.get_sheet_id(SHEET_NAME)
    if not spreadsheet_id:
        spreadsheet_id = gsheet.create_spreadsheet(SHEET_NAME)
        logger.info(f"spreadsheet_id={spreadsheet_id}")
    if not spreadsheet_id:
        logger.error('unable to create spreadsheet. Exiting!!!')
        return

    # Add a sheet (tab) named YYYYMM for the current month
    current_sheet_name = process_date_from.strftime("%Y%m")
    gsheet.add_sheet(spreadsheet_id, current_sheet_name)

    # Get the emails for the given date range
    mime_messages = gmail.get_mails(process_date_from, process_date_to)
    # lazy %-args avoid formatting when the level is disabled;
    # also fixes the "messgaes" typo in the original message
    logger.info('total number of messages = %s', len(mime_messages))

    # Parse and extract the data from emails
    records = []
    for message_id, mime_message in mime_messages.items():
        data = gmail_parser.parse(mime_message)
        if not data:
            logger.warning("couldn't parse %s", mime_message['subject'])
            continue
        data = __enrich(data, mime_message, message_id)
        records.append(data)
        logger.debug(data)

    # Write the data to google sheet; missing fields become empty cells
    HEADER = ['date', 'amount', 'seller', 'tx_type', 'category', 'id']
    request = [[record.get(name) or '' for name in HEADER] for record in records]
    if request:
        gsheet.write_to_sheet(spreadsheet_id, current_sheet_name, request)
        # re-runs over the same range must not duplicate rows
        gsheet.de_duplicate(spreadsheet_id, current_sheet_name)

    # Generate Expense report and email it.
    report_date = process_date_from
    expense_report = report.generate(spreadsheet_id, current_sheet_name,
                                     process_date_from, process_date_to)
    logger.debug(expense_report)
    send_mail.send_message(
        'Expense report : ' + report_date.strftime('%Y-%m-%d'),
        expense_report, email_recp)
def main(argv):
    """Load car-sales JSON, summarize it, render a PDF report, and email it."""
    sales_data = load_data("car_sales.json")
    summary_lines = process_data(sales_data)
    html_summary = "<br/>".join(summary_lines)
    plain_summary = "<\n>".join(summary_lines)

    # TODO: turn this into a PDF report
    # Convert car data from json/dictionary to a two-dimensional
    # array (list of lists) and render everything into a PDF.
    sales_table = cars_dict_to_table(sales_data)
    report.generate("/home/<username>/report.pdf", "Car Sales History",
                    html_summary, sales_table)

    # TODO: send the PDF report as an email attachment
    sender = "*****@*****.**"
    receiver = "*****@*****.**"
    subject = "Sales summary for last month"
    message = emails.generate(sender, receiver, subject, plain_summary,
                              "/home/<username>/report.pdf")
    emails.send(message)
def generate_from_spreadsheet(key, token, username, password, request):
    """Export a Google spreadsheet as .xls, run report generation on it, and
    upload the result back.

    Args:
        key: document key identifying the spreadsheet.
        token: unused here; kept for caller compatibility.
        username/password: Google ClientLogin credentials.
        request: Django-style request; its session flags spreadsheet mode.

    Returns:
        (message, output_link, title) — message is 'ok' on success, otherwise
        a human-readable error with empty link/title.
    """
    message = 'ok'  # status message returned to indicate success/failure
    try:
        # Log in to the Docs service and locate the document entry.
        gd_client = gdata.docs.service.DocsService()
        gd_client.email = username
        gd_client.password = password
        gd_client.ssl = True
        gd_client.source = "My Fancy Spreadsheet Downloader"
        gd_client.ProgrammaticLogin()
        uri = 'http://docs.google.com/feeds/documents/private/full/%s' % key
        entry = gd_client.GetDocumentListEntry(uri)
        title = entry.title.text

        # Exporting a spreadsheet requires a spreadsheets-scoped token, so log
        # in to the Spreadsheets service and temporarily swap tokens.
        spreadsheets_client = gdata.spreadsheet.service.SpreadsheetsService()
        spreadsheets_client.email = gd_client.email
        spreadsheets_client.password = gd_client.password
        spreadsheets_client.source = "My Fancy Spreadsheet Downloader"
        spreadsheets_client.ProgrammaticLogin()
        docs_auth_token = gd_client.GetClientLoginToken()
        gd_client.SetClientLoginToken(
            spreadsheets_client.GetClientLoginToken())

        # Timestamp-derived name keeps concurrent downloads from colliding.
        # NOTE(review): component order is year+day+month+... — looks
        # unintentional, but only uniqueness matters here, so left as-is.
        now = datetime.datetime.now()
        uploaded_file_name = str(now.year) + str(now.day) + str(
            now.month) + str(now.hour) + str(now.minute) + str(
                now.second) + '.xls'
        gd_client.Export(entry, FILE_UPLOAD_PATH + '/' + uploaded_file_name)
        gd_client.SetClientLoginToken(docs_auth_token)  # restore docs token
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any gdata/auth/IO failure maps to this message.
        return "Wrong spreadsheet link or you do not have permission to modify the file, please check again!", "", ""

    # Run the actual report generation on the downloaded file.
    request.session['is_spreadsheet'] = True
    message, response = generate(uploaded_file_name, request)
    request.session['is_spreadsheet'] = None
    if message != 'ok':
        return message, "", ""
    message, output_link = upload_result(uploaded_file_name, title,
                                         username, password)
    return message, output_link, title  # return the status message
def generate_from_spreadsheet(key, token, username, password, request):
    """Export a Google spreadsheet as .xls, run report generation on it, and
    upload the result back.

    Returns (message, output_link, title); message is 'ok' on success,
    otherwise an error string with empty link/title.
    """
    message = 'ok'  # status message returned to indicate success/failure
    try:
        # Authenticate against the Docs service and resolve the document.
        gd_client = gdata.docs.service.DocsService()
        gd_client.email = username
        gd_client.password = password
        gd_client.ssl = True
        gd_client.source = "My Fancy Spreadsheet Downloader"
        gd_client.ProgrammaticLogin()
        uri = 'http://docs.google.com/feeds/documents/private/full/%s' % key
        entry = gd_client.GetDocumentListEntry(uri)
        title = entry.title.text

        # Swap in a spreadsheets-scoped token for the export, keeping the
        # original docs token to restore afterwards.
        spreadsheets_client = gdata.spreadsheet.service.SpreadsheetsService()
        spreadsheets_client.email = gd_client.email
        spreadsheets_client.password = gd_client.password
        spreadsheets_client.source = "My Fancy Spreadsheet Downloader"
        spreadsheets_client.ProgrammaticLogin()
        docs_auth_token = gd_client.GetClientLoginToken()
        gd_client.SetClientLoginToken(spreadsheets_client.GetClientLoginToken())

        # Unique, timestamp-derived file name for the downloaded copy.
        now = datetime.datetime.now()
        uploaded_file_name = str(now.year)+str(now.day)+str(now.month)+str(now.hour)+str(now.minute)+str(now.second) + '.xls'
        gd_client.Export(entry, FILE_UPLOAD_PATH + '/' + uploaded_file_name)
        gd_client.SetClientLoginToken(docs_auth_token)  # restore docs token
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed.
        return "Wrong spreadsheet link or you do not have permission to modify the file, please check again!", "", ""

    # Run the actual report generation on the downloaded file.
    request.session['is_spreadsheet'] = True
    message, response = generate(uploaded_file_name, request)
    request.session['is_spreadsheet'] = None
    if message != 'ok':
        return message, "", ""
    message, output_link = upload_result(uploaded_file_name, title, username, password)
    return message, output_link, title  # return the status message
        # NOTE(review): fragment — the enclosing `try:` (and presumably a
        # re-prompt loop) starts before this chunk; indentation of the first
        # lines is reconstructed. Confirm against the full file.
        test = datetime.strptime(endDate, '%Y-%m-%d')  # validate YYYY-MM-DD
        invalid = False
    except:
        # Unparseable date: prompt again (empty input defaults to startDate,
        # presumably handled by surrounding code — TODO confirm).
        endDate = input(
            f'Enter report end date (YYYY-MM-DD). *Enter* to use [{ startDate }]'
        )

# Persist messages to a file only when persistDatabase is set non-empty;
# otherwise use a throwaway in-memory database.
database = 'messages.db' if (os.getenv('persistDatabase') != '') else ':memory:'
conn = sqlite3.connect(database)
conn.row_factory = sqlite3.Row  # rows addressable by column name

# Always download into an in-memory DB; for a persisted DB, download only
# when skipDownload is explicitly 'False'.
if (database == ':memory:') or (database != ':memory:' and os.getenv('skipDownload') == 'False'):
    data.importData(conn, teamsAccessToken, startDate, endDate)

# Filter criteria come straight from environment variables (values may be
# None when unset — TODO confirm report.generate tolerates that).
criteria = {
    'mentioningMe': os.getenv('mentioningMe'),
    'mentioningAll': os.getenv('mentioningAll'),
    'directMessage': os.getenv('directMessage')
}
report.generate(conn, teamsAccessToken, startDate, endDate, criteria)
    # NOTE(review): fragment — this `return` is the tail of a function whose
    # definition starts before this chunk; indentation reconstructed.
    return result

# Query URL metrics for each chunk of domains, appending results to disk as
# we go so a failed request only loses the current chunk.
for chunk in domain_chunks:
    log('--- Starting request ---')
    metrics = None
    try:
        metrics = client.urlMetrics(chunk)
    except MozscapeError as e:
        # Skip the whole chunk on API error; already-written chunks persist.
        log('ERROR! : %s' % (e))
        continue
    results = []
    # Metrics come back positionally aligned with the requested chunk.
    for idx, domain in enumerate(chunk):
        metric = metrics[idx]
        result = get_result(metric)
        log_domain(result)
        results.append(result)
    write_chunk(results)
    results = []
    # Throttle to stay inside the API's rate limit.
    log('--- Sleeping for %s seconds. ---' % (str(settings.REQUEST_INTERVAL)))
    time.sleep(settings.REQUEST_INTERVAL)

# Convert the accumulated results into a timestamped CSV report.
log('--- Converting results.ji to csv format. ---')
timestamp = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')
report.generate(filename=timestamp + '.csv')
log('--- Completed converting results.ji to csv format. ---')
import os

# Ensure the working directories exist. os.makedirs(..., exist_ok=True)
# replaces the original exists()+makedirs/mkdir pattern: it is atomic w.r.t.
# the check (no TOCTOU race) and uses one consistent API for all three dirs.
for _folder in ('data', 'graphs', 'reports'):
    os.makedirs(_folder, exist_ok=True)

print("Parsing dependency graph...")
import depgraph
depgraph.build_from_dotfile()

print("Calculating measurement values...")
import measures
measures.calc()

print("Creating report...")
import report
report.generate()

print("Done.")
def evaluate(user_func, *args, pdf=False, png=False, timeseries=False, powerLoss=0.8,
             energyOutput=False, locations=None, year="2016", printToScreen=True):
    """ Calculates effective emissions of the function

    Parameters:
        user_func: user's function + associated args
        pdf (bool): whether a PDF report should be generated
        png (bool): whether energy-mix / emissions charts should be generated
        timeseries (bool): whether wattage timeseries plots should be generated
        powerLoss (float): PSU efficiency rating
        energyOutput (bool): return value also includes information about
            energy usage, not just function's return
        locations (list of strings): list of locations to be compared;
            defaults to ["Mongolia", "Iceland", "Switzerland"]
        year (str): year of dataset to be used
        printToScreen (bool): get information in the command line

    Returns:
        The user function's return value, or (total_time, kwh, return_value)
        when energyOutput is True; None if RAPL/GPU support is missing or an
        error occurs.
    """
    # Fix for the mutable-default-argument pitfall: the previous
    # locations=[...] default was shared across calls.
    if locations is None:
        locations = ["Mongolia", "Iceland", "Switzerland"]
    try:
        utils.setGlobal(printToScreen)
        if (utils.valid_cpu() or utils.valid_gpu()):
            # Measure baseline and process wattage while running user_func.
            result, return_value, watt_averages, files, total_time, time_baseline, \
                reading_baseline_wattage, time_process, reading_process_wattage = energy(
                    user_func, *args, powerLoss=powerLoss, year=year,
                    printToScreen=printToScreen, timeseries=timeseries)
            location, default_location, comparison_values, default_emissions = \
                get_comparison_data(result, locations, year, printToScreen)
            breakdown = energy_mix(location, year=year)
            emission, state_emission = emissions(result, breakdown, location,
                                                 year, printToScreen)
            if printToScreen:
                utils.log("Assumed Carbon Equivalencies")
            if printToScreen:
                utils.log("Process Energy", result)
            func_info = [user_func.__name__, *args]
            kwh_and_emissions = [result, emission, state_emission]
            if pdf:
                report.generate(location, watt_averages, breakdown, kwh_and_emissions,
                                func_info, comparison_values, default_emissions,
                                default_location)
            if png:
                # generate energy mix pie chart
                energy_dict = {"Coal": breakdown[0], "Petroleum": breakdown[1],
                               "Natural Gas": breakdown[2], "Low Carbon": breakdown[3]}
                figtitle = "Location: " + location
                # Filename is the location with spaces replaced by underscores.
                location_split = location.split()
                filename = location_split[0]
                for i in range(1, len(location_split)):
                    filename += "_" + location_split[i]
                filename += ".png"
                if locate.in_US(location):
                    # US data labels this category "Oil" rather than "Petroleum".
                    energy_dict["Oil"] = energy_dict.pop("Petroleum")
                    figtitle = figtitle + ", USA"
                graph.pie_chart(energy_dict, figtitle, filename)
                # generate emissions comparison bar charts
                png_bar_chart(location, emission, default_emissions)
            if timeseries:
                graph.timeseries(time_baseline, reading_baseline_wattage,
                                 "Baseline Wattage Timeseries")
                graph.timeseries(time_process, reading_process_wattage,
                                 "Process Wattage Timeseries")
            if energyOutput:
                return (total_time, result, return_value)
            else:
                return return_value
        else:
            utils.log("The energy-usage package only works on Linux kernels "
                      "with Intel processors that support the RAPL interface and/or machines with"
                      " an Nvidia GPU. Please try again on a different machine.")
    except Exception as e:
        # Best-effort boundary: report the error and fall through (returns None).
        print("\n" + str(e))