Code example #1
def main():
    verbose = False
    screenshot = None
    spreadsheet_name = "Webhelper"
    webhelper_rulefile = os.path.dirname(os.path.realpath(__file__)) + os.sep + "webhelper_rules.txt"

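    # Consume recognized command-line flags directly from sys.argv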
    if '-v' in sys.argv:
        sys.argv.remove('-v')
        verbose = True

    if '-s' in sys.argv:
        screenshot = sys.argv[sys.argv.index('-s')+1]
        sys.argv.remove('-s')
        sys.argv.remove(screenshot)

    if '-n' in sys.argv:
        spreadsheet_name = sys.argv[sys.argv.index('-n')+1]
        sys.argv.remove('-n')
        sys.argv.remove(spreadsheet_name)

    if '-r' in sys.argv:
        webhelper_rulefile = sys.argv[sys.argv.index('-r')+1]
        sys.argv.remove('-r')
        sys.argv.remove(webhelper_rulefile)
        if verbose:
            print "using rule file",webhelper_rulefile

    if '-h' in sys.argv or '--help' in sys.argv:
        print "Usage: %s [-h] [--help] [-v] [--export-google] [-s <filename to save .png screenshot to>] [-r <webhelper rule file>] [-n <CSV file prefix or spreadsheet name>]" % (sys.argv[0],)
        sys.exit(1)

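    # Start a headless PhantomJS browser; dcap (desired capabilities) is presumably defined at module level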
    driver = webdriver.PhantomJS(desired_capabilities=dcap)
    #driver = webdriver.Remote(command_executor='http://127.0.0.1:4444/wd/hub', desired_capabilities=dcap)

    driver.set_window_size(1200,600)
    driver.implicitly_wait(10)

    spreadsheet = None
    if '--export-google' in sys.argv:
        sys.argv.remove('--export-google')
        from webhelper_google import GoogleSpreadsheet
        spreadsheet = GoogleSpreadsheet(spreadsheet_name)
    else:
        spreadsheet = CSVSpreadsheet(spreadsheet_name)

    f = open(webhelper_rulefile)
    rule = f.read()
    f.close()
    (rules, subrules) = parse_rules(rule)

    values = {}

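    # Collect the names of rules whose steps include an "export" command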
    exportrules = []
    for rule in rules:
        for step in rule['steps']:
            if step['command'].find("export") >= 0 and rule['name'] not in exportrules:
                exportrules.append(rule['name'])

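    # Run every rule, recording its value and exporting the results of export rules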
    for rule_to_run in rules:
        if verbose:
            print "Starting to run rule "+rule_to_run['name']
        try:
            value = run_rule(driver, rule_to_run, subrules, verbose)
        except WebDriverException as e:
            driver.save_screenshot('/tmp/webhelper.png')
            print "Rule failed with following message.  Saved screenshot to '/tmp/webhelper.png'"
            print e
            sys.exit(1)
        if verbose:
            print "Ran rule "+rule_to_run['name']
        values[rule_to_run['name']] = value
        if verbose:
            print `value`
        if rule_to_run['name'] in exportrules:
            if type(value) != type([]):
                value = []
            if len(value) > 0 and type(value[0]) != type([]):
                value = [value]
            if verbose:
                print "Exporting %s data to %s" % (rule_to_run['name'], spreadsheet,)
            spreadsheet.export_data(value, rule_to_run['name'])
            if verbose:
                print "Finished exporting %s data to %s" %(rule_to_run['name'], spreadsheet,)
            
        if screenshot:
            driver.save_screenshot(screenshot)
    if verbose:
        print "Done running rules"
Code example #2
def main():
    verbose = False
    spreadsheet_name = "Webhelper"
    webhelper_rulefile = os.path.dirname(os.path.realpath(__file__)) + os.sep + "webhelper_rules.txt"

    if '-v' in sys.argv:
        sys.argv.remove('-v')
        verbose = True

    if '-n' in sys.argv:
        spreadsheet_name = sys.argv[sys.argv.index('-n')+1]
        sys.argv.remove('-n')
        sys.argv.remove(spreadsheet_name)

    if '-r' in sys.argv:
        webhelper_rulefile = sys.argv[sys.argv.index('-r')+1]
        sys.argv.remove('-r')
        sys.argv.remove(webhelper_rulefile)

    if '-h' in sys.argv or '--help' in sys.argv:
        print "Usage: %s [-h] [--help] [-v] [--import-google] [--export-google] [-r <webhelper rule file>] [-n <CSV file prefix or spreadsheet name>]" % (sys.argv[0],)
        sys.exit(1)

    import_spreadsheet = None
    if '--import-google' in sys.argv:
        sys.argv.remove('--import-google')
        from webhelper_google import GoogleSpreadsheet
        import_spreadsheet = GoogleSpreadsheet(spreadsheet_name)
    else:
        import_spreadsheet = CSVSpreadsheet(spreadsheet_name)

    export_spreadsheet = None
    if '--export-google' in sys.argv:
        sys.argv.remove('--export-google')
        from webhelper_google import GoogleSpreadsheet
        # If we have already opened this google spreadsheet, just use the same handle
        if import_spreadsheet.__class__ == GoogleSpreadsheet:
            export_spreadsheet = import_spreadsheet
        else:
            export_spreadsheet = GoogleSpreadsheet(spreadsheet_name)
    else:
        export_spreadsheet = CSVSpreadsheet(spreadsheet_name)

    f = open(webhelper_rulefile)
    rule = f.read()
    f.close()
    (rules, subrules) = parse_rules(rule)

    values = {}

    exportrules = []
    for rule in rules:
        for step in rule['steps']:
            if step['command'].find("export") >= 0 and rule['name'] not in exportrules:
                exportrules.append(rule['name'])

    for rule in exportrules:
        if verbose:
            print "Importing %s data from %s" % (rule, import_spreadsheet,)
        rows = import_spreadsheet.import_data(rule)
        if verbose:
            print "Finished importing %s data from %s" %(rule, import_spreadsheet,)

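        # Normalize the imported rows into a simple Date/Amount/Note table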
        financial_data_rows = []
        if len(rows) > 0:
            # Let's look for a header
            max_populated_cells = 0
            header_index = None
            for ind, row in enumerate(rows):
                # Check whether any cell in this row is purely numeric; if so, this row is probably not a header
                all_alphanum = True
                for cell in row:
                    if cell.replace(',','').replace('$', '').replace('.', '').replace(' ', '').isdigit():
                        all_alphanum = False
                        break
                if not all_alphanum:
                    continue
                if len(filter(lambda val:val, row)) > max_populated_cells:
                    max_populated_cells = len(filter(lambda val:val, row))
                    header_index = ind

            # If we didn't even find a header with at least 3 columns, that doesn't count as a header
            if max_populated_cells < 3:
                header_index = None

            if header_index != None and verbose:
                print "Found header at row %d" % (header_index,)

            found_date = None
            found_note = None
            found_amount = None
            found_debit = None
            found_credit = None

            # Look for key information, date of transaction, note and dollar amount
            if header_index != None:
                for ind, cell in enumerate(rows[header_index]):
                    if found_date == None and cell.lower().find("date") >= 0:
                        found_date = ind
                    elif found_note == None and (cell.lower().find("description") >= 0 or cell.lower().find("note") >= 0 or cell.lower().find("memo") >= 0):
                        found_note = ind
                    elif found_amount == None and cell.lower().find("amount") >= 0:
                        found_amount = ind
                    elif found_debit == None and cell.lower().find("debit") >= 0:
                        found_debit = ind                    
                    elif found_credit == None and cell.lower().find("credit") >= 0:
                        found_credit = ind                    
            else:
                for ind, cell in enumerate(rows[0]):
                    if found_amount == None and cell.replace(',','').replace('$', '').replace('.', '').replace(' ', '').replace('-', '').isdigit():
                        found_amount = ind
                    elif found_date == None and cell.replace('/', '').replace(' ', '').isdigit():
                        found_date = ind
                    elif found_note == None and not cell.replace(',','').replace('$', '').replace('.', '').replace(' ', '').isdigit():
                        found_note = ind
                header_index = -1

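            # Build the output header from whichever key columns were detected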
            financial_data_rows.append([])
            if found_date != None:
                financial_data_rows[-1].append("Date")
            if found_amount != None or (found_debit != None and found_credit != None):
                financial_data_rows[-1].append("Amount")
            if found_note != None:
                financial_data_rows[-1].append("Note")

            if verbose:
                print "Built header: "+str(financial_data_rows[-1])

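            # Convert each data row below the header, standardizing dates, amounts (debits become negative) and notes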
            for row in rows[header_index+1:]:
                new_row = []
                if found_date != None:
                    if len(row) > found_date:
                        new_row.append(standardize_date(row[found_date]))
                    else:
                        new_row.append("")
                if found_amount != None or (found_debit != None and found_credit != None):
                    amount = None
                    if found_amount != None and len(row) > found_amount:
                        amount = row[found_amount]
                    elif found_debit != None and len(row) > found_debit and standardize_amount(row[found_debit]):
                        amount = "-" + row[found_debit]
                    elif found_credit != None and len(row) > found_credit and standardize_amount(row[found_credit]):
                        amount = row[found_credit]

                    if amount:
                        try:
                            new_row.append("%0.2f" % (round(float(standardize_amount(amount)), 2)))
                        except:
                            new_row.append("")
                    else:
                        new_row.append("")

                # If date and amount are blank, maybe this is another row of notes
                if found_note != None and found_date != None and not new_row[0] and (found_amount != None or (found_debit != None and found_credit != None)) and not new_row[-1]:
                    if found_note != None and len(row) > found_note and row[found_note]:
                        financial_data_rows[-1][-1] += " " + standardize_note(row[found_note])
                    # Whether there were additional notes or not, let's skip any rows without proper information
                    continue

                if found_note != None:
                    if len(row) > found_note:
                        new_row.append(standardize_note(row[found_note]))
                    else:
                        new_row.append("")

                # Finally, add the new row
                financial_data_rows.append(new_row)

        if verbose:
            print "Exporting %s__financial data to %s" % (rule, export_spreadsheet,)
        export_spreadsheet.export_data(financial_data_rows, "%s__financial" % (rule,))
        if verbose:
            print "Finished exporting %s__financial data to %s" %(rule, export_spreadsheet,)
Code example #3
File: webhelper_export.py  Project: JPry/webhelper
def main():
    verbose = False
    screenshot = None
    spreadsheet_name = "Webhelper"
    webhelper_rulefile = os.path.dirname(os.path.realpath(__file__)) + os.sep + "webhelper_rules.txt"

    if "-v" in sys.argv:
        sys.argv.remove("-v")
        verbose = True

    if "-s" in sys.argv:
        screenshot = sys.argv[sys.argv.index("-s") + 1]
        sys.argv.remove("-s")
        sys.argv.remove(screenshot)

    if "-n" in sys.argv:
        spreadsheet_name = sys.argv[sys.argv.index("-n") + 1]
        sys.argv.remove("-n")
        sys.argv.remove(spreadsheet_name)

    if "-r" in sys.argv:
        webhelper_rulefile = sys.argv[sys.argv.index("-r") + 1]
        sys.argv.remove("-r")
        sys.argv.remove(webhelper_rulefile)

    if "-h" in sys.argv or "--help" in sys.argv:
        print "Usage: %s [-h] [--help] [-v] [--export-google] [-s <filename to save .png screenshot to>] [-r <webhelper rule file>] [-n <CSV file prefix or spreadsheet name>]" % (sys.argv[0],)
        sys.exit(1)

    driver = webdriver.PhantomJS(desired_capabilities=dcap)
    # driver = webdriver.Remote(command_executor='http://127.0.0.1:4444/wd/hub', desired_capabilities=dcap)

    driver.set_window_size(1200, 600)
    driver.implicitly_wait(10)

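    # Export either to a Google spreadsheet (--export-google) or to CSV files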
    spreadsheet = None
    if "--export-google" in sys.argv:
        sys.argv.remove("--export-google")
        from webhelper_google import GoogleSpreadsheet

        spreadsheet = GoogleSpreadsheet(spreadsheet_name)
    else:
        spreadsheet = CSVSpreadsheet(spreadsheet_name)

    f = open(webhelper_rulefile)
    rule = f.read()
    f.close()
    rules = parse_rules(rule)

    values = {}

    exportrules = []
    for rule in rules:
        for step in rule["steps"]:
            if step["command"].find("export") >= 0 and rule["name"] not in exportrules:
                exportrules.append(rule["name"])

    for rule_to_run in rules:
        if verbose:
            print "Starting to run rule " + rule_to_run["name"]
        value = run_rule(driver, rule_to_run, verbose)
        if verbose:
            print "Ran rule " + rule_to_run["name"]
        values[rule_to_run["name"]] = value
        if verbose:
            print `value`
        if rule_to_run["name"] in exportrules:
            if type(value) != type([]):
                value = []
            if len(value) > 0 and type(value[0]) != type([]):
                value = [value]
            if verbose:
                print "Exporting %s data to %s" % (rule_to_run["name"], spreadsheet)
            spreadsheet.export_data(value, rule_to_run["name"])
            if verbose:
                print "Finished exporting %s data to %s" % (rule_to_run["name"], spreadsheet)

        if screenshot:
            driver.save_screenshot(screenshot)
    if verbose:
        print "Done running rules"
Code example #4
def main():
    verbose = False
    spreadsheet_name = "Webhelper"
    webhelper_rulefile = os.path.dirname(os.path.realpath(__file__)) + os.sep + "webhelper_rules.txt"

    if '-v' in sys.argv:
        sys.argv.remove('-v')
        verbose = True

    if '-n' in sys.argv:
        spreadsheet_name = sys.argv[sys.argv.index('-n') + 1]
        sys.argv.remove('-n')
        sys.argv.remove(spreadsheet_name)

    if '-r' in sys.argv:
        webhelper_rulefile = sys.argv[sys.argv.index('-r') + 1]
        sys.argv.remove('-r')
        sys.argv.remove(webhelper_rulefile)

    if '-h' in sys.argv or '--help' in sys.argv:
        print "Usage: %s [-h] [--help] [-v] [--import-google] [--export-google] [-r <webhelper rule file>] [-n <CSV file prefix or spreadsheet name>]" % (sys.argv[0],)
        sys.exit(1)

    import_spreadsheet = None
    if '--import-google' in sys.argv:
        sys.argv.remove('--import-google')
        from webhelper_google import GoogleSpreadsheet
        import_spreadsheet = GoogleSpreadsheet(spreadsheet_name)
    else:
        import_spreadsheet = CSVSpreadsheet(spreadsheet_name)

    export_spreadsheet = None
    if '--export-google' in sys.argv:
        sys.argv.remove('--export-google')
        from webhelper_google import GoogleSpreadsheet
        # If we have already opened this google spreadsheet, just use the same handle
        if import_spreadsheet.__class__ == GoogleSpreadsheet:
            export_spreadsheet = import_spreadsheet
        else:
            export_spreadsheet = GoogleSpreadsheet(spreadsheet_name)
    else:
        export_spreadsheet = CSVSpreadsheet(spreadsheet_name)

    f = open(webhelper_rulefile)
    rule = f.read()
    f.close()
    rules = parse_rules(rule)

    values = {}

    exportrules = []
    for rule in rules:
        for step in rule['steps']:
            if step['command'].find("export") >= 0 and rule['name'] not in exportrules:
                exportrules.append(rule['name'])

    for rule in exportrules:
        if verbose:
            print "Importing %s__financial data from %s" % (rule, import_spreadsheet,)
        rows = import_spreadsheet.import_data("%s__financial" % (rule,))
        if verbose:
            print "Finished importing %s__financial data from %s" % (rule, import_spreadsheet,)

        if verbose:
            print "Importing %s__financial_recent data from %s" % (rule, import_spreadsheet,)
        recentrows = import_spreadsheet.import_data("%s__financial_recent" % (rule,))
        if verbose:
            print "Finished importing %s__financial_recent data from %s" % (rule, import_spreadsheet,)

        if verbose:
            print "Reading previous %s__financial_diff data from %s" % (rule, export_spreadsheet,)
        data_rows_diff = export_spreadsheet.import_data("%s__financial_diff" % (rule,))
        if verbose:
            print "Finished reading previous %s__financial_diff data from %s" % (rule, export_spreadsheet,)

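        # lresolved maps indices of matched rows in recentrows to indices in rows; rresolved is the reverse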
        lresolved = {}
        rresolved = {}
        if len(rows) > 1 and len(recentrows) > 1 and rows[0] != recentrows[0]:
            print "ERROR: The headers on the data from %s__financial_recent do not match the headers from %s__financial" % (rule, rule)
            continue
        # Check for equals
        for ind, row in enumerate(recentrows[1:]):
            for ind2, row2 in enumerate(rows[1:]):
                if row == row2:
                    lresolved[ind] = ind2
                    rresolved[ind2] = ind

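        # Special Date/Amount/Note case: pair remaining rows as "Modify" entries (note-only, date-only, then both)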
        if len(lresolved.keys()) < len(recentrows) and len(rows) > 1 and len(recentrows) > 1 and "Note" in rows[0] and "Date" in rows[0] and "Amount" in rows[0]:
            # Make header for special Date/Amount/Note case
            lnote = recentrows[0].index("Note")
            rnote = rows[0].index("Note")
            ldate = recentrows[0].index("Date")
            rdate = rows[0].index("Date")
            lamount = recentrows[0].index("Amount")
            ramount = rows[0].index("Amount")
            header = ["Change Type", "Date", "Amount", "Note", "New Date", "New Note"]
            if len(data_rows_diff) > 0:
                if data_rows_diff[0] != header:
                    print "ERROR: The nature of the data seems to have changed such that we now have Date/Amount/Note but we formerly didn't.  Skipping %s__financial." % (rule,)
                    continue
            else:
                data_rows_diff.append(header)

            # Check for note-only changes
            for ind, row in enumerate(recentrows[1:]):
                if lresolved.has_key(ind):
                    continue
                for ind2, row2 in enumerate(rows[1:]):
                    if rresolved.has_key(ind2):
                        continue
                    if row[ldate] == row2[rdate] and row[lamount] == row2[ramount]:
                        data_rows_diff.append(["Modify", row[ldate], row[lamount], row[lnote], "", row2[rnote]])
                        lresolved[ind] = ind2
                        rresolved[ind2] = ind

            if len(lresolved.keys()) < len(recentrows):
                # Check for date-only changes by a certain threshold
                for ind, row in enumerate(recentrows[1:]):
                    if lresolved.has_key(ind):
                        continue
                    for ind2, row2 in enumerate(rows[1:]):
                        if rresolved.has_key(ind2):
                            continue
                        if abs(date_to_sec(row[ldate]) - date_to_sec(row2[rdate])) < DATE_THRESHOLD and row[lamount] == row2[ramount] and row[lnote] == row2[rnote]:
                            data_rows_diff.append(["Modify", row[ldate], row[lamount], row[lnote], row2[rdate], ""])
                            lresolved[ind] = ind2
                            rresolved[ind2] = ind

            if len(lresolved.keys()) < len(recentrows):
                # Check for note and date changes by a certain threshold
                for ind, row in enumerate(recentrows[1:]):
                    if lresolved.has_key(ind):
                        continue
                    for ind2, row2 in enumerate(rows[1:]):
                        if rresolved.has_key(ind2):
                            continue
                        if abs(date_to_sec(row[ldate]) - date_to_sec(row2[rdate])) < DATE_THRESHOLD and row[lamount] == row2[ramount]:
                            data_rows_diff.append(
                                ["Modify", row[ldate], row[lamount], row[lnote], row2[rdate], row2[rnote]])
                            lresolved[ind] = ind2
                            rresolved[ind2] = ind

        elif len(lresolved.keys()) < len(recentrows) or len(rresolved.keys()) < len(rows):
            # Make diff header for normal case
            header = []
            if len(rows) > 1:
                header = ["Change Type"] + rows[0]
            elif len(recentrows) > 1:
                header = ["Change Type"] + recentrows[0]
            if "Date" in header and "Amount" in header and "Note" in header:
                header.extend(["New Date", "New Note"])

            if len(data_rows_diff) > 0:
                if data_rows_diff[0] != header:
                    print "ERROR: The nature of the data seems to have changed such that we now have different columns than we used to.  Skipping %s__financial." % (rule,)
                    continue
            else:
                data_rows_diff.append(header)

        # Check for deleted rows
        for ind, row in enumerate(recentrows[1:]):
            if not lresolved.has_key(ind):
                data_rows_diff.append(["Delete"] + row)
                # Add blank spots to end of row if we are in the special Date/Amount/Note case
                if len(data_rows_diff[-1]) < len(data_rows_diff[0]):
                    data_rows_diff[-1].extend([""] * (len(data_rows_diff[0]) - len(data_rows_diff[-1])))

        # Check for new rows
        for ind, row in enumerate(rows[1:]):
            if not rresolved.has_key(ind):
                data_rows_diff.append(["Add"] + row)
                # Add blank spots to end of row if we are in the special Date/Amount/Note case
                if len(data_rows_diff[-1]) < len(data_rows_diff[0]):
                    data_rows_diff[-1].extend([""] * (len(data_rows_diff[0]) - len(data_rows_diff[-1])))

        if verbose:
            print "Exporting %s__financial_diff data to %s" % (rule, export_spreadsheet,)
        export_spreadsheet.export_data(data_rows_diff, "%s__financial_diff" % (rule,))
        if verbose:
            print "Finished exporting %s__financial_diff data to %s" % (rule, export_spreadsheet,)

        if verbose:
            print "Saving %s__financial_recent data to %s" % (rule, import_spreadsheet,)
        import_spreadsheet.export_data(rows, "%s__financial_recent" % (rule,))
        if verbose:
            print "Finished saving %s__financial_recent data to %s" % (rule, import_spreadsheet,)
Code example #5
def main():
    verbose = False
    spreadsheet_name = "Webhelper"
    webhelper_rulefile = os.path.dirname(os.path.realpath(__file__)) + os.sep + "webhelper_rules.txt"

    if '-v' in sys.argv:
        sys.argv.remove('-v')
        verbose = True

    if '-n' in sys.argv:
        spreadsheet_name = sys.argv[sys.argv.index('-n')+1]
        sys.argv.remove('-n')
        sys.argv.remove(spreadsheet_name)

    if '-r' in sys.argv:
        webhelper_rulefile = sys.argv[sys.argv.index('-r')+1]
        sys.argv.remove('-r')
        sys.argv.remove(webhelper_rulefile)

    if '-h' in sys.argv or '--help' in sys.argv:
        print "Usage: %s [-h] [--help] [-v] [--import-google] [--export-google] [-r <webhelper rule file>] [-n <CSV file prefix or spreadsheet name>]" % (sys.argv[0],)
        sys.exit(1)

    import_spreadsheet = None
    if '--import-google' in sys.argv:
        sys.argv.remove('--import-google')
        from webhelper_google import GoogleSpreadsheet
        import_spreadsheet = GoogleSpreadsheet(spreadsheet_name)
    else:
        import_spreadsheet = CSVSpreadsheet(spreadsheet_name)

    export_spreadsheet = None
    if '--export-google' in sys.argv:
        sys.argv.remove('--export-google')
        from webhelper_google import GoogleSpreadsheet
        # If we have already opened this google spreadsheet, just use the same handle
        if import_spreadsheet.__class__ == GoogleSpreadsheet:
            export_spreadsheet = import_spreadsheet
        else:
            export_spreadsheet = GoogleSpreadsheet(spreadsheet_name)
    else:
        export_spreadsheet = CSVSpreadsheet(spreadsheet_name)

    f = open(webhelper_rulefile)
    rule = f.read()
    f.close()
    (rules, subrules) = parse_rules(rule)

    values = {}

    exportrules = []
    for rule in rules:
        for step in rule['steps']:
            if step['command'].find("export") >= 0 and rule['name'] not in exportrules:
                exportrules.append(rule['name'])

    for rule in exportrules:
        if verbose:
            print "Importing %s__financial data from %s" % (rule, import_spreadsheet,)
        rows = import_spreadsheet.import_data("%s__financial" % (rule,))
        if verbose:
            print "Finished importing %s__financial data from %s" %(rule, import_spreadsheet,)

        if verbose:
            print "Importing %s__financial_recent data from %s" % (rule, import_spreadsheet,)
        recentrows = import_spreadsheet.import_data("%s__financial_recent" % (rule,))
        if verbose:
            print "Finished importing %s__financial_recent data from %s" %(rule, import_spreadsheet,)

        if verbose:
            print "Reading previous first and last rows of %s__financial_diff data from %s" % (rule, export_spreadsheet,)
        data_rows_diff = export_spreadsheet.import_first_and_last_data("%s__financial_diff" % (rule,))
        data_rows_append = []
        if verbose:
            print "Finished reading first and last rows of %s__financial_diff data from %s" %(rule, export_spreadsheet,)

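        # lresolved / rresolved map matched row indices between recentrows and rows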
        lresolved = {}
        rresolved = {}
        if len(rows) > 1 and len(recentrows) > 1 and rows[0] != recentrows[0]:
            print "ERROR: The headers on the data from %s__financial_recent do not match the headers from %s__financial" % (rule, rule)
            continue

        # Copy the original before we transform it
        origrows = copy.deepcopy(rows)
        # Transform duplicate rows to uniquely identify them
        if len(rows) > 0 and "Note" in rows[0]:
            dups = {}
            keyfields = [rows[0].index("Note")]
            if "Date" in rows[0]:
                keyfields.append(rows[0].index("Date"))
            if "Amount" in rows[0]:
                keyfields.append(rows[0].index("Amount"))
            for ind, row in enumerate(rows[1:]):
                key = []
                for keyind in keyfields:
                    key.append(row[keyind])
                key = tuple(key)
                if dups.has_key(key):
                    dups[key] += 1
                    # Keep incrementing the number if we somehow collide with strings that were used by our data source
                    while dups.has_key((rows[ind+1][keyfields[0]] + " - " + str(dups[key]),)+key[1:]):
                        dups[key] += 1
                    # Add string to the note to make it unique!
                    rows[ind+1][keyfields[0]] += " - " + str(dups[key])
                    # Now mark this note as used also
                    dups[(rows[ind+1][keyfields[0]],)+key[1:]] = 1
                else:
                    dups[key] = 1

        # Transform duplicate recent rows to uniquely identify them
        if len(recentrows) > 0 and "Note" in recentrows[0]:
            dups = {}
            keyfields = [recentrows[0].index("Note")]
            if "Date" in recentrows[0]:
                keyfields.append(recentrows[0].index("Date"))
            if "Amount" in recentrows[0]:
                keyfields.append(recentrows[0].index("Amount"))
            for ind, row in enumerate(recentrows[1:]):
                key = []
                for keyind in keyfields:
                    key.append(row[keyind])
                key = tuple(key)
                if dups.has_key(key):
                    dups[key] += 1
                    # Keep incrementing the number if we somehow collide with strings that were used by our data source
                    while dups.has_key((recentrows[ind+1][keyfields[0]] + " - " + str(dups[key]),)+key[1:]):
                        dups[key] += 1
                    # Add string to the note to make it unique!
                    recentrows[ind+1][keyfields[0]] += " - " + str(dups[key])
                    # Now mark this note as used also
                    dups[(recentrows[ind+1][keyfields[0]],)+key[1:]] = 1
                else:
                    dups[key] = 1

        # Check for equals
        for ind, row in enumerate(recentrows[1:]):
            for ind2, row2 in enumerate(rows[1:]):
                if row == row2:
                    lresolved[ind] = ind2
                    rresolved[ind2] = ind

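        # Every diff row appended in this run shares one datestamp and gets a sequential change number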
        current_datestamp = datetime.datetime.now().strftime('%m-%d-%Y %H:%M:%S')
        current_change_number = 1

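        # Special Date/Amount/Note case: record modifications (note-only, date-only, or both) before adds and deletes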
        if len(rows) > 0 and len(recentrows) > 0 and "Note" in rows[0] and "Date" in rows[0] and "Amount" in rows[0]:
            # Make header for special Date/Amount/Note case
            lnote = recentrows[0].index("Note")
            rnote = rows[0].index("Note")
            ldate = recentrows[0].index("Date")
            rdate = rows[0].index("Date")
            lamount = recentrows[0].index("Amount")
            ramount = rows[0].index("Amount")
            header = ["Change Number", "Change Datestamp", "Change Type", "Date", "Amount", "Note", "New Date", "New Note"]
            if len(data_rows_diff) > 0:
                if data_rows_diff[0] != header:
                    print "ERROR: The nature of the data seems to have changed such that we now have Date/Amount/Note but we formerly didn't.  Skipping %s__financial." % (rule,)
                    continue
                if len(data_rows_diff) > 1:
                    current_change_number = int(data_rows_diff[-1][0]) + 1
            else:
                data_rows_append.append(header)

            # Check for note-only changes
            for ind, row in enumerate(recentrows[1:]):
                if lresolved.has_key(ind):
                    continue
                for ind2, row2 in enumerate(rows[1:]):
                    if rresolved.has_key(ind2):
                        continue
                    if row[ldate] == row2[rdate] and row[lamount] == row2[ramount]:
                        data_rows_append.append([str(current_change_number), current_datestamp, "Modify", row[ldate], row[lamount], row[lnote], "", row2[rnote]])
                        current_change_number += 1
                        lresolved[ind] = ind2
                        rresolved[ind2] = ind

            if len(lresolved.keys()) < len(recentrows):
                # Check for date-only changes by a certain threshold
                for ind, row in enumerate(recentrows[1:]):
                    if lresolved.has_key(ind):
                        continue
                    for ind2, row2 in enumerate(rows[1:]):
                        if rresolved.has_key(ind2):
                            continue
                        if abs(date_to_sec(row[ldate]) - date_to_sec(row2[rdate])) < DATE_THRESHOLD and row[lamount] == row2[ramount] and row[lnote] == row2[rnote]:
                            data_rows_append.append([str(current_change_number), current_datestamp, "Modify", row[ldate], row[lamount], row[lnote], row2[rdate], ""])
                            current_change_number += 1
                            lresolved[ind] = ind2
                            rresolved[ind2] = ind

            if len(lresolved.keys()) < len(recentrows):
                # Check for note and date changes by a certain threshold
                for ind, row in enumerate(recentrows[1:]):
                    if lresolved.has_key(ind):
                        continue
                    for ind2, row2 in enumerate(rows[1:]):
                        if rresolved.has_key(ind2):
                            continue
                        if abs(date_to_sec(row[ldate]) - date_to_sec(row2[rdate])) < DATE_THRESHOLD and row[lamount] == row2[ramount]:
                            data_rows_append.append([str(current_change_number), current_datestamp, "Modify", row[ldate], row[lamount], row[lnote], row2[rdate], row2[rnote]])
                            current_change_number += 1
                            lresolved[ind] = ind2
                            rresolved[ind2] = ind

        elif len(lresolved.keys()) < len(recentrows) or len(rresolved.keys()) < len(rows):
            # Make diff header for normal case
            header = []
            if len(rows) > 1:
                header = ["Change Number", "Change Datestamp", "Change Type"]+rows[0]
            elif len(recentrows) > 1:
                header = ["Change Number", "Change Datestamp", "Change Type"]+recentrows[0]
            if "Date" in header and "Amount" in header and "Note" in header:
                header.extend(["New Date", "New Note"])

            if len(data_rows_diff) > 0:
                if data_rows_diff[0] != header:
                    print "ERROR: The nature of the data seems to have changed such that we now have different columns than we used to.  Skipping %s__financial." % (rule,)
                    continue
                if len(data_rows_diff) > 1:
                    current_change_number = int(data_rows_diff[-1][0]) + 1
            else:
                data_rows_append.append(header)

        # Check for deleted rows
        for ind, row in enumerate(recentrows[1:]):
            if not lresolved.has_key(ind):
                data_rows_append.append([str(current_change_number), current_datestamp, "Delete"] + row)
                # Add blank spots to end of row if we are in the special Date/Amount/Note case
                if len(data_rows_append[-1]) < len(data_rows_append[0]):
                    data_rows_append[-1].extend([""] * (len(data_rows_append[0])-len(data_rows_append[-1])))
                current_change_number += 1

        # Check for new rows
        for ind, row in enumerate(rows[1:]):
            if not rresolved.has_key(ind):
                data_rows_append.append([str(current_change_number), current_datestamp, "Add"] + row)
                # Add blank spots to end of row if we are in the special Date/Amount/Note case
                if len(data_rows_append[-1]) < len(data_rows_append[0]):
                    data_rows_append[-1].extend([""] * (len(data_rows_append[0])-len(data_rows_append[-1])))
                current_change_number += 1

        if verbose:
            print "Exporting %s__financial_diff data to %s" % (rule, export_spreadsheet,)
        export_spreadsheet.export_append_data(data_rows_append, "%s__financial_diff" % (rule,))
        if verbose:
            print "Finished exporting %s__financial_diff data to %s" %(rule, export_spreadsheet,)

        if verbose:
            print "Saving %s__financial_recent data to %s" % (rule, import_spreadsheet,)
        import_spreadsheet.export_data(origrows, "%s__financial_recent" % (rule,))
        if verbose:
            print "Finished saving %s__financial_recent data to %s" %(rule, import_spreadsheet,)