def proc(startDate, endDate, save, force):
    """Aggregate (or drop/list) metrics for every asset named on the CLI.

    startDate/endDate bound the aggregation window; save and force are
    forwarded unchanged to the aggregator's run() method.  Reads the
    module-level ``args`` namespace and ``appLog`` logger.
    """
    for asset in args.assets:
        db = postgresFactory(*args.database)
        _, _, _, aggregator = dbObjectsFactory(asset, db, appLog)
        # An explicit --metrics list wins; otherwise use every known metric.
        if len(args.metrics) == 0:
            selected = aggregator.getMetricNames()
        else:
            selected = args.metrics
        # Drop anything the user excluded (copy either way, as before).
        selected = [
            m for m in selected
            if len(args.excludemetrics) == 0 or m not in args.excludemetrics
        ]
        if args.drop:
            aggregator.drop(selected)
        elif args.list:
            print(aggregator.getMetricNames())
        else:
            aggregator.run(selected, startDate, endDate, save, force)
argParser.add_argument("--group", type=str, default="default", help="statistics group to run")
args = argParser.parse_args()

asset = args.asset
drop = args.drop
givenDate = args.date
group = args.group
directory = args.directory
# Normalize the output directory so the asset filename can be appended.
if directory[-1] != "/":
    directory += "/"

dbConfig, _ = readConfigFile(args.config)
db = postgresFactory(dbConfig)
_, _, _, aggregator = dbObjectsFactory(asset, db)

if drop:
    aggregator.drop(group=group)
elif len(givenDate) > 0:
    # A specific date was requested: aggregate just that day.
    givenDate = dateutilParser.parse(givenDate)
    aggregator.run(givenDate, group)
else:
    aggregator.run(group=group)

# Dump the compiled statistics for this group to <directory>/<asset>.csv,
# one "date,<metric...>" row per day; None values become empty cells.
header, data = aggregator.compile(group=group)
with open("%s%s.csv" % (directory, asset), "w") as f:
    f.write(",".join(["date"] + header) + "\n")
    for row in data:
        f.write(row[0].strftime('%Y-%m-%d') + ",")
        # NOTE(review): the source was truncated mid-expression here; the
        # closing of the join plus the trailing newline is reconstructed to
        # match the header-row write above — confirm against the original file.
        f.write(",".join([
            str(value) if value is not None else ""
            for value in row[1:]
        ]) + "\n")
argParser.add_argument("--drop-index", dest="dropIndex", action="store_true", help="remove table indexes")
argParser.add_argument("--vacuum", action="store_true", help="vacuum outputs table")
args = argParser.parse_args()

logging.basicConfig(
    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
appLog = logging.getLogger("bitsql:{0}".format(args.asset))
appLog.setLevel(logging.DEBUG)


def _buildSchema():
    # Construct the schema object for the selected asset; called lazily so a
    # database connection is only opened when an action was actually chosen.
    schema, _, _, _ = dbObjectsFactory(args.asset,
                                       postgresFactory(*args.database),
                                       appLog)
    return schema


# Exactly one maintenance action is performed per invocation.
if args.dropDb:
    _buildSchema().drop()
elif args.addIndex:
    _buildSchema().addIndexes()
elif args.dropIndex:
    _buildSchema().dropIndexes()
elif args.vacuum:
    _buildSchema().vacuum()
else:
    print("no action chosen, exiting")
argParser.add_argument("--drop-db", dest="dropDb", action="store_true", help="drop tables that contain data of the given asset")
argParser.add_argument("--add-index", dest="addIndex", action="store_true", help="add indexes to tables")
argParser.add_argument("--drop-index", dest="dropIndex", action="store_true", help="remove table indexes")
argParser.add_argument("--loop", action="store_true", help="run export continously, in a loop")
argParser.add_argument("--config", default="config.json", help="path to configuration file")
argParser.add_argument("--vacuum", action="store_true", help="vacuum outputs table")
args = argParser.parse_args()

asset = args.asset
addIndex = args.addIndex
dropIndex = args.dropIndex
dropDb = args.dropDb
loop = args.loop
vacuum = args.vacuum
configFilePath = args.config

dbConfig, nodesConfig = readConfigFile(configFilePath)


def _makeSchema():
    # Build the schema object for this asset; deferred so the database is
    # only touched when one of the maintenance flags was given.
    schema, _, _, _ = dbObjectsFactory(asset, postgresFactory(dbConfig))
    return schema


# Maintenance flags are mutually exclusive; with none set, run the export.
if dropDb:
    _makeSchema().drop()
elif addIndex:
    _makeSchema().addIndexes()
elif dropIndex:
    _makeSchema().dropIndexes()
elif vacuum:
    _makeSchema().vacuum()
else:
    runExport(asset, nodesConfig[asset], dbConfig, loop=loop)