def setUp(self):
    """Build a fully wired SyncHandler backed by an in-memory SQLite database.

    Creates fresh Person / OAuth consumer / Splitwise / Database / Fixer
    collaborators for every test so no state leaks between tests.
    """
    self.person = Person(
        "Test User",
        1234,
        "*****@*****.**",
        defaultdict(lambda: "Expense"),
        "GBP",
        "abc",
        "xyz",
        [],
    )
    self.consumer = oauth2.Consumer("def", "jkl")
    self.splitwise = Splitwise(self.consumer, self.person)
    # In-memory DB: cheap, isolated, thrown away after each test.
    self.db = Database("sqlite:///:memory:")
    self.db.create_tables()
    self.fixer = Fixer()
    self.sync_handler = SyncHandler(
        db=self.db,
        person=self.person,
        splitwise=self.splitwise,
        fixer=self.fixer,
    )
def setUp(self):
    """Create a fresh Fixer instance before each test."""
    self.fixer = Fixer()
class TestFixer(unittest.TestCase):
    """Unit tests for Fixer currency validation and currency conversion."""

    @classmethod
    def _load_file_content(cls, filename):
        """Return the text content of *filename*, resolved relative to this file."""
        absolute_filename = os.path.abspath(
            os.path.join(os.path.dirname(__file__), filename))
        with open(absolute_filename, 'r') as f:
            return f.read()

    def setUp(self):
        self.fixer = Fixer()

    def testValidateCurrency(self):
        """Known ISO currency codes validate; made-up codes do not."""
        for code in ("EUR", "GBP", "DKK", "SEK"):
            # assertEqual/assertTrue: `assertEquals` is a deprecated alias
            # removed in Python 3.12, so the modern names are used throughout.
            self.assertTrue(self.fixer._validate_currency(code),
                            "%s should be a valid known currency" % code)
        for code in ("JSP", "RYU"):
            self.assertFalse(self.fixer._validate_currency(code),
                             "%s should not be a valid known currency" % code)

    def testCurrencyConversionWithSameCurrency(self):
        """Converting a currency to itself must always yield rate 1.0."""
        self.assertEqual(
            self.fixer.get_conversion_rate(for_date=date(2015, 4, 18),
                                           from_currency="GBP",
                                           to_currency="GBP"),
            1.0,
            "Conversion rate for same currency should be 1.0")
        self.assertEqual(
            self.fixer.get_conversion_rate(for_date=date(2015, 6, 18),
                                           from_currency="EUR",
                                           to_currency="EUR"),
            1.0,
            "Conversion rate for same currency should be 1.0")

    def testCurrencyConversion(self):
        """Rates come from fixer.io with the date encoded in the request URL."""
        conversions = {
            "2016-10-14": self._load_file_content("data/currency/2016-10-14.json"),
            "2016-10-21": self._load_file_content("data/currency/2016-10-21.json"),
        }
        requested_urls = []

        def mock_request(url):
            # Record every URL so the test can assert the exact endpoints hit.
            requested_urls.append(url)
            for day, payload in conversions.items():
                if url.endswith(day):
                    return payload
            # Original silently returned None here, which produced a confusing
            # downstream failure; fail loudly instead.
            raise AssertionError("unexpected URL requested: %s" % url)

        m = Mock()
        m.side_effect = mock_request
        self.fixer._request = m

        sek_rate = self.fixer.get_conversion_rate(for_date=date(2016, 10, 14),
                                                  from_currency="SEK",
                                                  to_currency="GBP")
        self.assertEqual(sek_rate, 0.09)
        self.assertEqual(requested_urls[0], "http://api.fixer.io/2016-10-14")

        eur_rate = self.fixer.get_conversion_rate(for_date=date(2016, 10, 21),
                                                  from_currency="EUR",
                                                  to_currency="GBP")
        self.assertEqual(eur_rate, 0.89)
        self.assertEqual(requested_urls[1], "http://api.fixer.io/2016-10-21")
def read_token(token_file='api.token'):
    """Return the bot API token stored in *token_file*, stripped of whitespace."""
    with open(token_file) as token_handle:
        return token_handle.read().strip()


def main():
    """Wire up all bot handlers and start polling for updates."""
    updater = Updater(token=read_token())
    dispatcher = updater.dispatcher
    # Register one handler per supported command / message type,
    # in the same order the dispatcher should try them.
    dispatcher.add_handler(CommandHandler('start', start))
    dispatcher.add_handler(MessageHandler(Filters.text, echo))
    dispatcher.add_handler(CommandHandler('caps', caps, pass_args=True))
    dispatcher.add_handler(CommandHandler('convert', convert, pass_args=True))
    dispatcher.add_error_handler(error)
    updater.start_polling()
    updater.idle()


if __name__ == '__main__':
    f = Fixer()
    main()
def run_merge_test(cnn, bbox=[0, 1024, 0, 1024], data='gt',
                   slices=[70, 71, 72, 73, 74], min_pixels=1000, N=20,
                   oversampling=False, keep_zeros=True, verbose=False):
    # Experiment driver (Python 2): injects artificial merge errors into
    # ground-truth segmentations, asks the CNN-based Fixer to undo them, and
    # accumulates variation-of-information (VI) improvements per border rank.
    #
    # NOTE(review): `bbox` and `slices` are mutable default arguments — shared
    # across calls; confirm no caller mutates them.
    # NOTE(review): `bbox` is rebound below by the Uglify.merge_label unpack,
    # so subsequent slices crop with the *returned* bbox — confirm intended.
    if data == 'rhoana':
        # Only the ground-truth ('gt') path is implemented.
        print 'not implemented yet'
        return [], []
    print '-' * 80
    print '-' * 80
    print 'New Experiment:'
    print ' Data:', data
    print ' Slices:', slices
    print ' No. splits for borders:', N
    print ' Keep zeros in segmentation:', keep_zeros
    # global_vis = []
    global_vi_diffs = []
    # global_surenesses = []
    # global_merge_pairs = []
    for s in slices:
        if verbose:
            print '-' * 80
            print 'Working on slice', s
        # load slice
        input_image, input_prob, input_gold, input_rhoana = Util.read_section(
            s, keep_zeros=keep_zeros)
        # apply bbox
        input_image = input_image[bbox[0]:bbox[1], bbox[2]:bbox[3]]
        input_prob = input_prob[bbox[0]:bbox[1], bbox[2]:bbox[3]]
        input_gold = input_gold[bbox[0]:bbox[1], bbox[2]:bbox[3]]
        input_rhoana = input_rhoana[bbox[0]:bbox[1], bbox[2]:bbox[3]]
        # Pad the gold segmentation so crops near the edge stay in-bounds.
        framed_gold = Util.frame_image(input_gold, shape=(200, 200))
        hist = Util.get_histogram(framed_gold.astype(np.uint64))
        # Visit labels in random order (range() returns a list in Python 2,
        # which np.random.shuffle mutates in place).
        labels = range(len(hist))
        np.random.shuffle(labels)
        slice_vi_diffs = []
        for l in labels:
            if l == 0:
                # 0 is background / boundary, never a mergeable label.
                continue
            if len(framed_gold[framed_gold == l]) < min_pixels:
                # Skip labels too small to produce a meaningful merge error.
                continue
            neighbors = Util.grab_neighbors(framed_gold, l)
            np.random.shuffle(neighbors)
            good_neighbors = []
            for n in neighbors:
                if n == 0:
                    continue
                if len(framed_gold[framed_gold == n]) < min_pixels:
                    continue
                good_neighbors.append(n)
            if len(good_neighbors) > 0:
                for n in good_neighbors:
                    # print 'merging', l, n
                    # Ground truth for this pair: label l marked 1, label n
                    # marked 2 — i.e. the segmentation *before* the merge error.
                    before_merge_error = np.zeros(framed_gold.shape)
                    before_merge_error[framed_gold == l] = 1
                    before_merge_error[framed_gold == n] = 2
                    before_merge_error = mh.croptobbox(before_merge_error)
                    # NOTE(review): this unpack rebinds the `bbox` parameter.
                    cropped_image, cropped_prob, cropped_segmentation, cropped_binary, bbox, real_border = Uglify.merge_label(
                        input_image, input_prob, framed_gold, l, n, crop=True)
                    # VI between the true two-label patch and the merged patch
                    # (10-px margins trimmed on all sides).
                    vi_before = Util.vi(
                        before_merge_error[10:-10, 10:-10].astype(np.uint8),
                        mh.croptobbox(cropped_binary)[10:-10, 10:-10].astype(
                            np.uint8))
                    # print 'VI after merge error:', vi_before
                    borders, best_border_image, result, result_no_border, results_no_border, predictions = Fixer.fix_single_merge(
                        cnn, cropped_image, cropped_prob, cropped_binary,
                        real_border=real_border, N=N, erode=True,
                        oversampling=False)
                    if result_no_border.shape[0] == 0:
                        # Fixer produced nothing usable for this pair.
                        continue
                    if best_border_image.max() == 0:
                        # print 'no solution'
                        continue
                    # if before_merge_error.shape[0] != result_no_border.shape[0] or before_merge_error.shape[1] != result_no_border.shape[1]:
                    # result_no_border = np.resize(result_no_border, before_merge_error.shape)
                    # if before_merge_error.size != mh.croptobbox(r)
                    # compare_result = np.zeros(before_merge_error.shape, dtype=np.uint8)
                    # compare_result[:] = result_no_border[101:-101, 101:-101][0:before_merge_error.shape[0], 0:before_merge_error.shape[1]]
                    best_vi = np.inf  # NOTE(review): assigned but never read.
                    vi_diffs = []
                    # sorted_predictions = sorted(predictions)
                    for r in results_no_border:
                        if r.shape[0] == 0:
                            continue
                        r = r[100:-100, 100:-100]
                        # Align candidate and ground-truth patches around their
                        # centers before comparing (integer division — Py2 `/`).
                        result_no_border_center = (r.shape[0] / 2,
                                                   r.shape[1] / 2)
                        before_merge_center = (
                            before_merge_error.shape[0] / 2 - 10,
                            before_merge_error.shape[1] / 2 - 10)
                        r = r[result_no_border_center[0] - before_merge_center[0]:
                              result_no_border_center[0] + before_merge_center[0],
                              result_no_border_center[1] - before_merge_center[1]:
                              result_no_border_center[1] + before_merge_center[1]]
                        b = before_merge_error[result_no_border_center[0] - before_merge_center[0]:
                                               result_no_border_center[0] + before_merge_center[0],
                                               result_no_border_center[1] - before_merge_center[1]:
                                               result_no_border_center[1] + before_merge_center[1]]
                        vi_after_fixing = Util.vi(b.astype(np.uint8),
                                                  r.astype(np.uint8))
                        # Positive diff == the fix improved VI over the error.
                        vi_diffs.append(vi_before - vi_after_fixing)
                    # now we have vi_diffs for this one merge error
                    slice_vi_diffs.append((vi_diffs, predictions))
        global_vi_diffs.append(slice_vi_diffs)
    # Aggregate VI improvement per prediction-rank bin (top-5 borders).
    vi_correction_bins = [0, 0, 0, 0, 0]
    bin_counts = [0, 0, 0, 0, 0]
    for s in global_vi_diffs:
        for merge_errors in s:
            vi_diff_per_error = merge_errors[0]
            predictions_per_error = merge_errors[1]
            # sort by prediction
            found_borders = sorted(zip(vi_diff_per_error,
                                       predictions_per_error),
                                   key=lambda x: x[1])
            for i in range(5):
                if len(found_borders) > i:
                    for j in range(i, len(found_borders)):
                        # NOTE(review): if len(found_borders) > 5 the index j
                        # overruns the 5-slot bins (IndexError) — confirm
                        # callers guarantee at most 5 candidates.
                        print i, j, len(vi_correction_bins), len(
                            found_borders)
                        vi_correction_bins[j] += found_borders[j][0]
                        bin_counts[j] += 1
    for i in range(5):
        # NOTE(review): ZeroDivisionError if a bin never received a sample.
        vi_correction_bins[i] /= bin_counts[i]
    return global_vi_diffs, vi_correction_bins
def run_split_test(cnn, bbox=[0, 1024, 0, 1024], data='gt', slices=[70, 71, 72, 73, 74], oversampling=False, smallest_first=False, no_splits=2, keep_zeros=True, fill_zeros=False, verbose=False): ''' data: gt/rhoana slices: [x,y,z..] oversampling: True/False smallest_first: True/False no_splits: 1/2/3.. ''' print '-' * 80 print '-' * 80 print 'New Experiment:' print ' Data:', data print ' Slices:', slices print ' Oversampling:', oversampling print ' Merge smallest first:', smallest_first print ' No. splits to uglify:', no_splits print ' Keep zeros in segmentation:', keep_zeros global_eds = [] global_ris = [] global_vis = [] global_vi_diffs = [] global_ed_diffs = [] global_surenesses = [] global_ed_surenesses = [] global_merge_pairs = [] global_ugly_segmentations = [] global_best_indices = [] for s in slices: if verbose: print '-' * 80 print 'Working on slice', s # load slice input_image, input_prob, input_gold, input_rhoana = Util.read_section( s, keep_zeros=keep_zeros, fill_zeros=fill_zeros) # apply bbox input_image = input_image[bbox[0]:bbox[1], bbox[2]:bbox[3]] input_prob = input_prob[bbox[0]:bbox[1], bbox[2]:bbox[3]] input_gold = input_gold[bbox[0]:bbox[1], bbox[2]:bbox[3]] input_rhoana = input_rhoana[bbox[0]:bbox[1], bbox[2]:bbox[3]] # choose segmentation based on data mode framed_gold = Util.frame_image(input_gold) framed_rhoana = Util.frame_image(input_rhoana) if data == 'gt': segmentation = framed_gold # if GT, uglify the segmenation based on the number of splits ugly_segmentation = Uglify.split(input_image, input_prob, segmentation, n=no_splits) else: segmentation = framed_gold ugly_segmentation = framed_rhoana global_ugly_segmentations.append(ugly_segmentation) before_vi = Util.vi(ugly_segmentation, segmentation) # before_ri = Util.ri(ugly_segmentation, segmentation) before_ed = Util.ed(ugly_segmentation, segmentation) if verbose: print 'Labels before:', len(Util.get_histogram(segmentation)) print 'Labels after:', len( 
Util.get_histogram(ugly_segmentation)) print 'VI after uglifying:', before_vi # # now run the fixer # vi_s, ed_s, merge_pairs, surenesses = Fixer.splits( cnn, input_image, input_prob, ugly_segmentation, segmentation, smallest_first=smallest_first, oversampling=oversampling, verbose=verbose) best_index = vi_s.index(np.min(vi_s)) best_vi = vi_s[best_index] best_sureness = surenesses[best_index] best_ed_index = ed_s.index(np.min(ed_s)) best_ed = ed_s[best_ed] best_sureness_ed = surenesses[best_ed] vi_diff = before_vi - best_vi ed_diff = before_ed - best_ed global_vis.append(best_vi) global_vi_diffs.append(vi_diff) global_surenesses.append(best_sureness) global_merge_pairs.append(merge_pairs) global_best_indices.append(best_index) global_eds.append(best_ed) global_ed_diffs.append(ed_diff) global_surenesses_ed.append(best_sureness_ed) # # now all done # print 'VI:' Util.stats(global_vis) print 'VI before-after:' Util.stats(global_vi_diffs) print 'Surenesses:' Util.stats(global_surenesses) print 'ED:' Util.stats(global_eds) print 'ED before-after:' Util.stats(global_ed_diffs) print 'ED Surenesses:' Util.stats(global_surenesses_ed) return global_vis, global_vi_diffs, global_surenesses, global_eds, global_ed_diffs, global_surenesses_ed, global_merge_pairs, global_best_indices, global_ugly_segmentations
def onClosing(self):
    """Persist the collected typos via Fixer before the window closes."""
    Fixer.save(self.typos)
# Interface expected of each cppType object (per the original stubs):
#   get_align(), get_size(), get_offset(fieldname), get_ref_qualifier(),
#   spelling, __eq__, __ne__ — only spelling / kind / get_ref_qualifier()
#   are used below.


def printType(cppType, columnWidth, printer=printf):
    """Emit one type's spelling, kind and ref-qualifier as aligned columns.

    *printer* receives the column format string followed by the three values.
    """
    row_format = '{{:<{0}}}{{:<{0}}}{{:<{0}}}'.format(columnWidth)
    printer(row_format,
            '"' + cppType.spelling + '"',
            cppType.kind,
            cppType.get_ref_qualifier())


if __name__ == '__main__':
    util = Fixer('Print all of the types in a translation unit.')
    util.add_argument('--column-width', dest='columnWidth', default=45,
                      help='Width in characters of each output column')
    args, translationUnit = util.setup()

    # Collect every type reachable from the translation unit's cursor.
    grabber = TypeGrabber()
    traverse(translationUnit.cursor, grabber)

    if len(grabber.types):
        printf('{{:<{0}}}{{:<{0}}}{{:<{0}}}'.format(args.columnWidth),
               '---- Name ----', '---- Kind ----', '---- Ref Qualifier ----')
        for cppType in grabber.types:
            printType(cppType, args.columnWidth)