def load_region_data(self, data_date, zfile):
    """Load Region rows (region -> state mapping) from the daily feed.

    Reads "<data_date>_region.txt" out of *zfile*, skips the header row,
    and bulk-inserts Region records in batches of ~1000.

    Raises:
        OaHException: if no rows were read, or the table's row count does
            not match the number of rows parsed from the file.
    """
    filename = "%s_region.txt" % data_date
    # Convert the YYYYMMDD string into an aware datetime for data_timestamp.
    stamp = timezone.make_aware(
        datetime.strptime(data_date, '%Y%m%d'),
        timezone.get_current_timezone())
    rows = iter(reader(StringIO.StringIO(zfile.read(filename)), delimiter='\t'))
    next(rows)  # discard the header line
    pending = []
    seen = 0
    for fields in rows:
        seen += 1
        region = Region()
        region.region_id = int(fields[0])
        region.state_id = fields[1]
        region.data_timestamp = stamp
        pending.append(region)
        if len(pending) > 1000:
            # Batching the bulk_creates prevents the command from
            # running out of memory.
            Region.objects.bulk_create(pending)
            del pending[:]
    Region.objects.bulk_create(pending)
    if not seen or Region.objects.count() != seen:
        raise OaHException("Couldn't load region data from %s" % zfile.filename)
def load_region_data(self, data_date, region_filename):
    """Load Region rows (region -> state mapping) from a tab-delimited file.

    Opens *region_filename*, skips the header row, and bulk-inserts a
    Region record per data row, stamping each with *data_date*.

    Args:
        data_date: value stored on each record's ``data_timestamp``
            (presumably an aware datetime — confirm against callers).
        region_filename: path to the tab-delimited region file.
    """
    with open(region_filename) as region_csv:
        region_reader = reader(region_csv, delimiter="\t")
        iter_region = iter(region_reader)
        next(iter_region)  # skip the header line
        regions = []
        for row in iter_region:
            r = Region()
            r.region_id = int(row[0])
            r.state_id = row[1]
            r.data_timestamp = data_date
            regions.append(r)
            if len(regions) > 1000:
                # Batching the bulk_creates prevents the command from
                # running out of memory.
                Region.objects.bulk_create(regions)
                # BUG FIX: was `region[:] = []` (undefined name) — a file
                # with more than ~1001 rows raised NameError and would
                # re-insert earlier batches. Clear the batch list in place.
                regions[:] = []
        Region.objects.bulk_create(regions)
def load_region_data(self, data_date, zfile):
    """Load Region rows (region -> state mapping) from the daily feed.

    Pulls "<data_date>_region.txt" from *zfile*, drops the header, and
    inserts the rows via batched bulk_create calls. Raises OaHException
    when nothing was read or the resulting table count disagrees with the
    number of rows parsed.
    """
    filename = "%s_region.txt" % data_date
    # Re-bind data_date as an aware datetime built from the YYYYMMDD string.
    data_date = timezone.make_aware(
        datetime.strptime(data_date, '%Y%m%d'),
        timezone.get_current_timezone())
    csv_rows = iter(
        reader(StringIO.StringIO(zfile.read(filename)), delimiter='\t'))
    next(csv_rows)  # header row — not data
    batch, total_regions = [], 0
    for record in csv_rows:
        total_regions += 1
        entry = Region()
        entry.region_id = int(record[0])
        entry.state_id = record[1]
        entry.data_timestamp = data_date
        batch.append(entry)
        # Batching the bulk_creates prevents the command from
        # running out of memory.
        if len(batch) > 1000:
            Region.objects.bulk_create(batch)
            batch[:] = []
    Region.objects.bulk_create(batch)
    if not total_regions or Region.objects.count() != total_regions:
        raise OaHException("Couldn't load region data from %s" % zfile.filename)
def test_compare_scenarios_output(self, mock_get_rates):
    """ .. the function."""
    mock_get_rates.return_value = {'data': {'3.750': '0.125'}}
    data = {
        '1': ['3.750', '0.125'],
        '2': ['11', '12'],
    }
    # Trim the scenario table down to just the three entries under test.
    self.c.test_scenarios = {
        key: self.c.test_scenarios[key] for key in ('1', '2', '16')
    }
    # Seed a Region row so scenario lookups have data to hit.
    region = Region()
    region.region_id = 11
    region.state_id = 'VA'
    region.data_timestamp = datetime.now()
    region.save()
    result = self.c.compare_scenarios_output(data)
    self.assertTrue(
        "The following scenarios don't match: ['2'] " in self.c.messages)
def test_compare_scenarios_output(self, mock_get_rates):
    """ .. the function."""
    mock_get_rates.return_value = {'data': {'3.750': '0.125'}}
    data = {'1': ['3.750', '0.125'], '2': ['11', '12']}
    # Keep only scenarios 1, 2 and 16 for this check.
    scenarios = self.c.test_scenarios
    self.c.test_scenarios = {
        '1': scenarios['1'],
        '2': scenarios['2'],
        '16': scenarios['16'],
    }
    # Persist a Region record the comparison can resolve against.
    region_row = ['11', 'VA']
    saved = Region()
    saved.region_id = int(region_row[0])
    saved.state_id = region_row[1]
    saved.data_timestamp = datetime.now()
    saved.save()
    result = self.c.compare_scenarios_output(data)
    expected = "The following scenarios don't match: ['2'] "
    self.assertTrue(expected in self.c.messages)