def dataset(data, headers=None):
    """`data` is a list of dicts."""
    dataset = Dataset()
    dataset.dict = data
    if headers:
        dataset.headers = headers
    return dataset
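A quick usage sketch for the helper above, assuming `Dataset` comes from `tablib`; the rows and the header override below are made up for illustration.

rows = [{"name": "ada", "score": 9}, {"name": "bob", "score": 7}]  # hypothetical rows
ds = dataset(rows, headers=["name", "score"])  # optional header relabel
print(ds.csv)  # tablib derives columns from the dict keys and renders CSV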
def read_database():
    with open(COMO_BATTERY_FILE, 'r') as como:
        data = Dataset(headers=['time', 'capacity', 'cycles'])
        # http://stackoverflow.com/questions/10206905/
        # how-to-convert-json-string-to-dictionary-and-save-order-in-keys
        data.dict = json.loads(
            zlib.decompress(como.read()),
            object_pairs_hook=collections.OrderedDict)
        return data
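`create_database()` is referenced in `cmd_import` below but not shown; a minimal sketch of what it might look like, assuming it seeds COMO_BATTERY_FILE with an empty, zlib-compressed JSON dataset using the same headers (written in Python 3 style, with explicit bytes).

def create_database():
    # hypothetical counterpart to read_database(): same headers, empty payload
    dataset = Dataset(headers=['time', 'capacity', 'cycles'])
    with open(COMO_BATTERY_FILE, 'wb') as como:
        como.write(zlib.compress(dataset.json.encode('utf-8')))
    return dataset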
def bulk_import_daily_values(resource_cls: DailyValueResource.__class__, query):
    ds = Dataset()
    ds.dict = query
    resource = resource_cls()
    result = resource.import_data(ds, dry_run=False)
    if result.has_errors():
        logger.error("Import failed. Showing first 10 errors.")
        for row in result.rows[:10]:  # result.rows holds one RowResult per imported row
            for error in row.errors:
                logger.error(error.error)
    else:
        logger.info("Import success! :: %s", str(result.totals))
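A hedged usage sketch, assuming DailyValueResource is a django-import-export resource class; the dict keys below are hypothetical and would need to match the fields the resource declares.

rows = [
    {"date": "2024-01-01", "value": 1.25},  # hypothetical column names
    {"date": "2024-01-02", "value": 1.40},
]
bulk_import_daily_values(DailyValueResource, rows)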
def print_dict(self, data):
    """Print dataset generated by a command to the standard output"""
    dataset = Dataset()
    dataset.dict = data
    if dataset.height:
        if self.output == "table":
            click.echo(tabulate(dataset.dict, headers="keys"))
        else:
            # we will probably implement JSON output only in the long run
            # and get rid of the `tablib` dependency
            click.echo(dataset.export(self.output))
    click.echo(
        "\n({} emoji{})".format(
            dataset.height, "" if dataset.height == 1 else "s"),
        err=True,
    )
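For context, a standalone sketch of the two output branches above with made-up emoji rows: tabulate's headers="keys" mode takes the list of dicts directly, while tablib handles the other export formats.

from tablib import Dataset
from tabulate import tabulate

data = [{"emoji": "sparkles", "count": 3}, {"emoji": "bug", "count": 1}]  # sample rows

dataset = Dataset()
dataset.dict = data
print(tabulate(dataset.dict, headers="keys"))  # the "table" branch
print(dataset.export("json"))                  # any other tablib-supported format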
def cmd_import(args):
    if not os.path.exists(COMO_BATTERY_FILE):
        current_dataset = create_database()
    else:
        current_dataset = read_database()
    if os.path.exists(args.get(0)):
        import_dataset = Dataset()
        with open(os.path.expanduser(args.get(0)), "r") as import_file:
            import_dataset.csv = import_file.read()
        import_dataset.dict = map(import_format, import_dataset.dict)
        new = current_dataset.stack(import_dataset).sort('time')
        with open(COMO_BATTERY_FILE, 'w') as como:
            como.write(zlib.compress(new.json))
        puts(colored.white("battery statistics imported"))
    else:
        error("Couldn't open file: %s" % args.get(0))
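The merge step relies on tablib's stack() and sort(); a small illustration with throwaway rows, reusing the header set from read_database above.

from tablib import Dataset

existing = Dataset(headers=['time', 'capacity', 'cycles'])
existing.append(['2014-02-01T08:00:00', 5214, 301])

imported = Dataset(headers=['time', 'capacity', 'cycles'])
imported.append(['2014-01-15T08:00:00', 5300, 290])

merged = existing.stack(imported).sort('time')  # rows appended, then ordered by 'time'
print(merged.csv)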
        op = arr[0]
        values = [get_value(source, fips, field) for field in arr[1:]]
        if op == 'INVERT':
            return 100 - float(values[0])
        elif op == 'SUM':
            return sum(values)
        else:
            raise Exception('invalid operation')
    else:
        return get_value(source, fips, field_info)


for dashboard, headings in config.items():
    for heading, fields in headings.items():
        for field in fields:
            for county in counties:
                fips = county['fips']
                value = get_interpreted_value(*field['source'], fips)
                rows.append({
                    'County Name': county['name'],
                    'County FIPS Code': fips,
                    'Variable Name': field['name'],
                    'County Value': value,
                    'Value Type': 'Percent'  # could be dynamic
                })

final = Dataset()
final.dict = rows
open('data/final.csv', 'w', newline='\n').write(final.csv)
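A tiny standalone check of the two operations above, using made-up values: INVERT flips a percentage, SUM totals the looked-up fields.

def apply_op(op, values):
    # mirrors the dispatch inside get_interpreted_value
    if op == 'INVERT':
        return 100 - float(values[0])
    elif op == 'SUM':
        return sum(values)
    raise Exception('invalid operation')

assert apply_op('INVERT', [37.5]) == 62.5
assert apply_op('SUM', [10, 20, 5]) == 35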
for dashboard in config.values():
    for heading in dashboard.values():
        for field in heading:
            source_info, field_info = field['source']
            table = source_info.split('_')[1]
            if ':' in field_info:
                fields = field_info.split(':')[1:]
            else:
                fields = [field_info]
            # print(f"{table}: {','.join(fields)}")
            if table not in queries:
                queries[table] = []
            query = queries[table]
            for field in fields:
                if field not in query:
                    query.append(field)

for table, fields in queries.items():
    url = get_url(table.lower(), fields)
    json = loads(get(url).text)
    dataset = Dataset()
    dataset.dict = to_dict_array(json)
    open(f'data/acs_{table.lower()}.csv', 'w', newline='').write(dataset.csv)

open('data/acs.txt', 'w').write('done')
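`to_dict_array` isn't shown; a hypothetical version, assuming the Census ACS API's usual JSON shape where the first row holds the column names and the remaining rows hold values.

def to_dict_array(rows):
    # first row = header names, remaining rows = values (assumed response shape)
    headers, *data = rows
    return [dict(zip(headers, row)) for row in data]

# to_dict_array([["NAME", "B01001_001E"], ["Example County", "12345"]])
# -> [{"NAME": "Example County", "B01001_001E": "12345"}]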