def test_file_removed_and_added_as_dir(self):
    root, ref = self.root, self.ref
    ref['file1'].delete()
    ref.new_directory('file1')
    root.update(ref)
    eq_(3, root.dircount)
    eq_(0, root.filecount)
def test_qif_import_tries_native_dateformat_first():
    # When guessing date format in a QIF file, try the *native* date format first, that is,
    # mm/dd/yy.
    app = TestApp(app=Application(ApplicationGUI(), date_format='dd/MM/yy'))
    app.doc.parse_file_for_import(testdata.filepath('qif/ambiguous_date.qif'))
    # We parsed "01/02/03" with mm/dd/yy
    eq_(app.itable[0].date_import, '02/01/03')
def test_bound_amount_correctly_imported(app):
    # The bound transaction correctly has its amount imported. Previously, we would set the
    # split's amount but we would never balance the txn. Ref #351.
    app.show_account('Account 1')
    eq_(app.etable[0].credit, 'CAD 42.00')
    app.show_account('Account 4')
    eq_(app.etable[0].debit, 'CAD 42.00')
def test_escaping(self):
    root = Root(threaded=False)
    try:
        d1 = root.new_directory('foo\'bar')
        eq_('foo\'bar', d1.name)
    except sqlite.OperationalError:
        self.fail()
def check(str_date, expected_date):
    # To test the date format guessing part, we create a QIF, which uses date guessing.
    app = TestApp()
    contents = "!Type:Bank\nD{str_date}\nT42.32\n^".format(str_date=str_date)
    hsio.open(filepath, 'wt', encoding='utf-8').write(contents)
    app.doc.parse_file_for_import(filepath)
    eq_(app.itable[0].date_import, expected_date)
def test_add_referenceless_entries_to_reference_account(self, app):
    # It's possible to add more than one referenceless entry to a referenced account.
    # Previously, TransactionList considered 2 transactions with a None reference as conflictual.
    # We start with one entry.
    app.add_entry()
    app.add_entry()
    eq_(app.etable_count(), 3)
def test_entry_count(self, app):
    # The number of entries is the same as if the import was made once.
    # Previously, the transaction reference would be lost in a transaction conflict resolution.
    app.show_nwview()
    app.bsheet.selected = app.bsheet.assets[0]
    app.show_account()
    eq_(app.etable_count(), 3)
def test_rescan_with_utf8_encoding(self, app):
    # Selecting the utf-8 encoding and clicking rescan re-opens the file with the correct
    # encoding.
    app.csvopt.encoding_index = 1
    app.csvopt.rescan()
    eq_(app.csvopt.lines[1][1], 'fôø')
    eq_(app.csvopt.lines[2][1], 'bàr')
def test_asset_names_after_qif_import(app):
    # All accounts are added despite name collisions. The name collision for 'Account 1' is
    # resolved by appending ' 1', and the next collision by appending ' 2' instead.
    expected = [
        'Account 1', 'Account 1 1', 'Account 1 2', 'Account 2', 'Interest', 'Salary', 'Cash',
        'Utilities',
    ]
    actual = app.account_names()
    eq_(actual, expected)
def test_same_date_same_amount():
    # There was a bug in QIF loading where two transactions with the same date and the same
    # amount, regardless of whether they were transfers, would be detected as "duplicates" and
    # de-duplicated.
    loader = Loader(USD)
    loader.parse(testdata.filepath('qif', 'same_date_same_amount.qif'))
    loader.load()
    eq_(len(loader.transactions), 3)
def test_select_fortis_layout_then_reload(self, app):
    # Simply accessing a layout while a CSV with few columns is loaded should not remove
    # columns from that layout.
    app.csvopt.select_layout('fortis')
    app.doc.parse_file_for_import(testdata.filepath('csv/fortis.csv'))
    app.csvopt.select_layout('fortis')
    eq_(app.csvopt.columns[6], CsvField.Transfer)
def test_with_cat():
    # Some files have a "!Type:Cat" section with buggy "D" lines.
    loader = Loader(USD)
    loader.parse(testdata.filepath('qif', 'with_cat.qif'))
    loader.load()
    eq_(len(loader.account_infos), 1)
    eq_(len(loader.transaction_infos), 1)
def test_transfer_space_in_account_names():
    # When "L" lines have a space character at the end (before the "]" bracket), it doesn't
    # prevent us from correctly matching the account with seen account names.
    loader = Loader(USD)
    loader.parse(testdata.filepath('qif', 'transfer_space_in_account_names.qif'))
    loader.load()
    eq_(len(loader.transactions), 1)  # the transaction hasn't been doubled
def test_autoswitch():
    # autoswitch.qif has an autoswitch section with accounts containing "D" lines.
    loader = Loader(USD)
    loader.parse(testdata.filepath('qif', 'autoswitch.qif'))
    loader.load()
    eq_(len(loader.account_infos), 50)
    eq_(len(loader.transaction_infos), 37)
def test_autoswitch_buggy():
    # Some QIF exporters put another !Option:AutoSwitch after having cleared it.
    loader = Loader(USD)
    loader.parse(testdata.filepath('qif', 'autoswitch_buggy.qif'))
    loader.load()
    eq_(len(loader.account_infos), 50)
    eq_(len(loader.transaction_infos), 37)
def test_should_all_go_in_the_same_group(self):
    l = [NamedObject("a b"), NamedObject("a b"), NamedObject("a b"), NamedObject("a b")]
    # All four objects are identical, so they must all end up in a single group.
    matches = getmatches(l)
    r = get_groups(matches)
    eq_(1, len(r))
def test_first_field_not_account():
    # Previously, when the first field was not an account, a dummy "Account" field was added.
    loader = Loader(USD)
    loader.parse(testdata.filepath('qif', 'first_field_not_account.qif'))
    loader.load()
    accounts = loader.account_infos
    eq_(len(accounts), 1)
def test_with_fields(self):
    o1 = NamedObject("foo bar - foo bleh")
    o2 = NamedObject("foo bar - bleh bar")
    o1.words = getfields(o1.name)
    o2.words = getfields(o2.name)
    m = getmatches([o1, o2])[0]
    eq_(50, m.percentage)
def test_with_fields_no_order(self):
    o1 = NamedObject("foo bar - foo bleh")
    o2 = NamedObject("bleh bang - foo bar")
    o1.words = getfields(o1.name)
    o2.words = getfields(o2.name)
    m = getmatches([o1, o2], no_field_order=True)[0]
    eq_(m.percentage, 50)
def test_previous_completion_twice(app):
    # previous_completion() twice returns the second previous completion, skipping duplicates.
    ce = app.completable_edit('description')
    ce.text = 'd'
    ce.down()
    ce.down()
    eq_(ce.completion, 'escription')
def test_null_and_unrelated_objects(self):
    l = [
        NamedObject("foo bar"),
        NamedObject("bar bleh"),
        NamedObject(""),
        NamedObject("unrelated object"),
    ]
    r = getmatches(l)
    eq_(len(r), 1)
    m = r[0]
    eq_(m.percentage, 50)
    assert_match(m, 'foo bar', 'bar bleh')
def test_next_completion_twice(app):
    # next_completion() twice returns the second next completion, skipping duplicates.
    ce = app.completable_edit('description')
    ce.text = 'd'
    ce.up()
    ce.up()
    eq_(ce.completion, 'esc1')
def test_autofill_convert_amount_field(app):
    # autofill_columns can be given 'increase' and 'decrease'. Both are converted into
    # 'amount'.
    app.etable.add()
    row = app.etable.selected_row
    row.description = 'Deposit'
    eq_(app.etable[1].increase, '42.00')
def test_save_and_load_different_reconciliation_date(app):
    # The reconciliation date is correctly saved and loaded.
    newapp = app.save_and_load()
    newapp.show_nwview()
    newapp.bsheet.selected = newapp.bsheet.assets[0]
    newapp.show_account()
    eq_(newapp.etable[0].reconciliation_date, '12/07/2008')
def test_autofill_column_selection_for_description(app):
    # Hidden columns are not autofilled.
    app.set_column_visible('payee', False)
    app.etable.add()
    row = app.etable.selected_row
    row.description = 'Deposit'
    eq_(app.etable[1].payee, '')
def test_persistance(self, tmpdir):
    DBNAME = tmpdir.join('hstest.db')
    c = Cache(str(DBNAME))
    c['foo'] = [(1, 2, 3)]
    del c
    c = Cache(str(DBNAME))
    eq_([(1, 2, 3)], c['foo'])
def test_reconciliation_balance(app):
    # Unreconciled entries return a None balance, and reconciled entries return a
    # reconciliation balance.
    eq_(app.etable[0].balance, '')
    row = app.etable.selected_row
    row.toggle_reconciled()
    eq_(app.etable[0].balance, '-42.00')
def test_amount_completion(app):
    # Upon setting the description, set the amount to the amount of the first matching entry
    # with the same description.
    app.etable.add()
    row = app.etable.selected_row
    row.description = 'first'
    eq_(app.etable[app.etable.selected_indexes[0]].increase, '102.00')
def test_by_id(self):
    # It's possible to use the cache by referring to the files by their row_id.
    c = Cache()
    b = [(0, 0, 0), (1, 2, 3)]
    c['foo'] = b
    foo_id = c.get_id('foo')
    eq_(c[foo_id], b)
def test_previous_completion_after_description(app):
    # previous_completion() after a complete_description() returns the previous matching
    # description.
    ce = app.completable_edit('description')
    ce.text = 'd'
    ce.down()
    eq_(ce.completion, 'esc1')
def test_account_names(self, app):
    # Non-empty accounts from both files are imported.
    expected = ['815-30219-11111-EOP', '815-30219-12345-EOP', 'Cash']
    eq_(app.account_names(), expected)
def test_second_side_matches(app):
    # When importing entries from Account 3, these entries are matched correctly.
    app.mw.parse_file_for_import(testdata.filepath('moneyguru', 'with_references3.moneyguru'))
    # The target account should be correct, and all entries should be matched.
    eq_(app.iwin.selected_target_account_index, 3)  # Account 4
    eq_(len(app.itable), 1)  # 1 entry means they all match
def test_first_side_matches(app):
    # When importing entries from Account 1, these entries are matched correctly.
    app.mw.parse_file_for_import(testdata.filepath('moneyguru', 'with_references1.moneyguru'))
    # All entries should be matched.
    eq_(len(app.itable), 2)  # 2 entries means they all match
def test_editing_an_entry_doesnt_change_the_order(self, app):
    # Editing the first entry doesn't change its position.
    row = app.etable.selected_row
    row.increase = '42'
    app.etable.save_edits()
    eq_(app.etable[0].description, 'first')
def test_account_was_imported(self, app):
    # The fact that the account was in a group didn't prevent it from being imported.
    eq_(app.bsheet.assets[0].name, 'Some Asset')
def test_account_names(self, app):
    # All non-empty accounts have been imported.
    eq_(app.account_names(), ['815-30219-11111-EOP', 'NEW_ACCOUNT'])
def test_account_names(self, app):
    # Non-empty accounts from both files are imported.
    eq_(app.account_names(), ['815-30219-12345-EOP', 'Desjardins EOP'])
def test_stat_line(self):
    expected = '0 / 2 (0.00 B / 2.00 B) duplicates marked.'
    eq_(expected, self.results.stat_line)
def test_groups(self):
    eq_(2, len(self.results.groups))
def test_account_names(self, app):
    # Checks that the import was done.
    #
    # This test only checks the account names. More precise tests are in ofx_test.py.
    eq_(app.account_names(), ['815-30219-11111-EOP', '815-30219-12345-EOP'])
def test_stat_line(self): eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
def test_filter_is_case_insensitive(self):
    self.results.apply_filter(None)
    self.results.apply_filter('FOO')
    eq_(1, len(self.results.dupes))
def test_filters_build_on_one_another(self):
    self.results.apply_filter(r'bar')
    eq_(1, len(self.results.groups))
    eq_(0, len(self.results.dupes))
def test_set_group(self):
    # We want the new group to be filtered.
    self.objects, self.matches, self.groups = GetTestGroups()
    self.results.groups = self.groups
    eq_(1, len(self.results.groups))
    assert self.results.groups[0] is self.groups[0]
def test_dupes_reconstructed_filtered(self):
    # make_ref resets self.__dupes to None. When it's reconstructed, we want it filtered.
    dupe = self.results.dupes[0]  # 3rd object
    self.results.make_ref(dupe)
    eq_(1, len(self.results.dupes))
    assert self.results.dupes[0] is self.objects[0]
def test_mark_all_only_affects_filtered_items(self):
    # When performing actions like mark_all() and mark_none() in a filtered environment, only
    # mark items that are actually in the filter.
    self.results.mark_all()
    self.results.apply_filter(None)
    eq_(self.results.mark_count, 1)
def test_dupes(self):
    # There are 2 objects matching. The first one is ref. Only the 3rd one is supposed to be
    # in dupes.
    eq_(1, len(self.results.dupes))
    assert self.results.dupes[0] is self.objects[2]
def test_include_ref_dupes_in_filter(self):
    # When only the ref of a group matches the filter, include it in the group.
    self.results.apply_filter(None)
    self.results.apply_filter(r'foo bar')
    eq_(1, len(self.results.groups))
    eq_(0, len(self.results.dupes))
def test_apply_filter_works_on_paths(self):
    # apply_filter() searches on the whole path, not just on the filename.
    self.results.apply_filter('basepath')
    eq_(len(self.results.groups), 2)
def test_cancel_filter(self):
    self.results.apply_filter(None)
    eq_(3, len(self.results.dupes))
    eq_(2, len(self.results.groups))
def test_save_to_xml(self):
    self.objects[0].is_ref = True
    self.objects[0].words = [['foo', 'bar']]
    f = io.BytesIO()
    self.results.save_to_xml(f)
    f.seek(0)
    doc = ET.parse(f)
    root = doc.getroot()
    eq_('results', root.tag)
    eq_(2, len(root))
    eq_(2, len([c for c in root if c.tag == 'group']))
    g1, g2 = root
    eq_(6, len(g1))
    eq_(3, len([c for c in g1 if c.tag == 'file']))
    eq_(3, len([c for c in g1 if c.tag == 'match']))
    d1, d2, d3 = [c for c in g1 if c.tag == 'file']
    eq_(op.join('basepath', 'foo bar'), d1.get('path'))
    eq_(op.join('basepath', 'bar bleh'), d2.get('path'))
    eq_(op.join('basepath', 'foo bleh'), d3.get('path'))
    eq_('y', d1.get('is_ref'))
    eq_('n', d2.get('is_ref'))
    eq_('n', d3.get('is_ref'))
    eq_('foo,bar', d1.get('words'))
    eq_('bar,bleh', d2.get('words'))
    eq_('foo,bleh', d3.get('words'))
    eq_(3, len(g2))
    eq_(2, len([c for c in g2 if c.tag == 'file']))
    eq_(1, len([c for c in g2 if c.tag == 'match']))
    d1, d2 = [c for c in g2 if c.tag == 'file']
    eq_(op.join('basepath', 'ibabtu'), d1.get('path'))
    eq_(op.join('basepath', 'ibabtu'), d2.get('path'))
    eq_('n', d1.get('is_ref'))
    eq_('n', d2.get('is_ref'))
    eq_('ibabtu', d1.get('words'))
    eq_('ibabtu', d2.get('words'))
def test_groups(self):
    eq_(1, len(self.results.groups))
    assert self.results.groups[0] is self.groups[0]
def test_stat_line(self): eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line) self.results.mark(self.objects[1]) eq_("1 / 3 (1.00 KB / 1.01 KB) duplicates marked.", self.results.stat_line) self.results.mark_invert() eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line) self.results.mark_invert() self.results.unmark(self.objects[1]) self.results.mark(self.objects[2]) self.results.mark(self.objects[4]) eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line) self.results.mark(self.objects[0]) #this is a ref, it can't be counted eq_("2 / 3 (2.00 B / 1.01 KB) duplicates marked.", self.results.stat_line) self.results.groups = self.groups eq_("0 / 3 (0.00 B / 1.01 KB) duplicates marked.", self.results.stat_line)
def test_LoadXML(self):
    def get_file(path):
        return [f for f in self.objects if str(f.path) == path][0]

    self.objects[0].is_ref = True
    self.objects[4].name = 'ibabtu 2'  # we can't have 2 files with the same path
    f = io.BytesIO()
    self.results.save_to_xml(f)
    f.seek(0)
    app = DupeGuru()
    r = Results(app)
    r.load_from_xml(f, get_file)
    eq_(2, len(r.groups))
    g1, g2 = r.groups
    eq_(3, len(g1))
    assert g1[0].is_ref
    assert not g1[1].is_ref
    assert not g1[2].is_ref
    assert g1[0] is self.objects[0]
    assert g1[1] is self.objects[1]
    assert g1[2] is self.objects[2]
    eq_(['foo', 'bar'], g1[0].words)
    eq_(['bar', 'bleh'], g1[1].words)
    eq_(['foo', 'bleh'], g1[2].words)
    eq_(2, len(g2))
    assert not g2[0].is_ref
    assert not g2[1].is_ref
    assert g2[0] is self.objects[3]
    assert g2[1] is self.objects[4]
    eq_(['ibabtu'], g2[0].words)
    eq_(['ibabtu'], g2[1].words)
def test_sort_empty_list(self):
    # There was an infinite loop when sorting an empty list.
    app = DupeGuru()
    r = app.results
    r.sort_dupes('name')
    eq_([], r.dupes)
def check_groups(o):
    eq_(3, len(g1))
    eq_(2, len(g2))
    return True
def test_initial_value(self):
    root = Root(threaded=False)
    v = root.new_directory('foo')
    eq_(Path(''), v.initial_path)
def test_dupe_list_update_on_remove_duplicates(self):
    o1, o2, o3, o4, o5 = self.objects
    eq_(3, len(self.results.dupes))
    self.results.remove_duplicates([o2])
    eq_(2, len(self.results.dupes))
def test_invalidate_path(self):
    self.f.path
    self.v.mode = MODE_PHYSICAL
    eq_(('initial', 'file'), self.f.path)
def test_get_dupe_list(self):
    eq_([self.objects[1], self.objects[2], self.objects[4]], self.results.dupes)