def entrypoint(ctx, create_tables=None, insert_data=None, reload=None, one_off=None, schedule_run_time=None):
    """Service entry point: wait for backing services, then perform the requested actions.

    Parameters
    ----------
    ctx : dict
        Configuration mapping; read keys: "dbhost", "dbport",
        "redisQueueHost", "redisQueuePort", "redisLockHost", "redisLockPort".
    create_tables : optional
        When truthy, create the database tables.
    insert_data : optional
        When truthy, insert data.
    reload : optional
        When truthy, schedule a daily pipeline run at ``schedule_run_time``
        and loop forever servicing the schedule (never returns).
    one_off : optional
        When truthy, run the pipeline once.
    schedule_run_time : str, optional
        Time-of-day string passed to ``schedule.every().day.at(...)``;
        required when ``reload`` is truthy.
    """
    # Block until all backing services accept connections.
    waitForDatabaseToStart(ctx["dbhost"], int(ctx["dbport"]))
    waitForRedisToStart(ctx["redisQueueHost"], int(ctx["redisQueuePort"]))
    waitForRedisToStart(ctx["redisLockHost"], int(ctx["redisLockPort"]))
    logger.info("create_tables=" + str(create_tables))
    logger.info("insert_data=" + str(insert_data))
    logger.info("one_off=" + str(one_off))
    # Fixed typo in log label: was "realod".
    logger.info("reload=" + str(reload))
    with Lock(G_LOCK):
        if create_tables:
            try:
                _createTables(ctx)
            except Exception as e:
                logger.error("pipeline encountered an error when creating tables" + str(e))
        if insert_data:
            try:
                _insertData(ctx)
            except Exception as e:
                logger.error("pipeline encountered an error when inserting data" + str(e))
        if one_off:
            try:
                _runPipeline(ctx)
            except Exception as e:
                logger.error("pipeline encountered an error during one off run" + str(e))
        if reload:
            # Use the unlocked _runPipeline here: G_LOCK is already held by this
            # `with` block, so the original `runPipeline` wrapper would try to
            # re-acquire it and deadlock on a non-reentrant lock.
            schedule.every().day.at(schedule_run_time).do(lambda: _runPipeline(ctx))
            while True:
                schedule.run_pending()
                # Poll once per second so the scheduled time is honored closely;
                # the original sleep(1000) could delay a run by up to ~17 minutes.
                time.sleep(1)
def backUpDatabase(ctx, ts):
    """Back up the database for timestamp *ts*, serialized under the global lock."""
    with Lock(G_LOCK):
        outcome = _backUpDatabase(ctx, ts)
    return outcome
def runPipeline(ctx):
    """Run the pipeline once; G_LOCK ensures runs never overlap."""
    with Lock(G_LOCK):
        outcome = _runPipeline(ctx)
    return outcome
def syncDatabase(ctx):
    """Synchronize the database, serialized under the global lock."""
    with Lock(G_LOCK):
        outcome = _syncDatabase(ctx)
    return outcome
def updateDataIntoTableColumn(ctx, table, column, f, kvp):
    """Update *column* of *table* via the locked worker; returns its result."""
    with Lock(G_LOCK):
        outcome = _updateDataIntoTableColumn(ctx, table, column, f, kvp)
    return outcome
def updateDataIntoTable(ctx, table, f, kvp):
    """Update rows of *table* via the locked worker; returns its result."""
    with Lock(G_LOCK):
        outcome = _updateDataIntoTable(ctx, table, f, kvp)
    return outcome
def insertDataIntoTable(ctx, table, f, kvp):
    """Insert rows into *table* via the locked worker; returns its result."""
    with Lock(G_LOCK):
        outcome = _insertDataIntoTable(ctx, table, f, kvp)
    return outcome
def insertData(ctx):
    """Insert data under the global lock.

    Returns the worker's result — previously it was discarded, unlike every
    other locked wrapper in this file, which all return the inner call.
    """
    with Lock(G_LOCK):
        return _insertData(ctx)
def createTables(ctx):
    """Create the database tables under the global lock.

    Returns the worker's result — previously it was discarded, unlike every
    other locked wrapper in this file, which all return the inner call.
    """
    with Lock(G_LOCK):
        return _createTables(ctx)
def restoreDatabase(ctx, ts):
    """Restore the database from the backup at *ts*, serialized under the global lock."""
    with Lock(G_LOCK):
        outcome = _restoreDatabase(ctx, ts)
    return outcome
def deleteBackup(ctx, ts):
    """Delete the backup at *ts*, serialized under the global lock."""
    with Lock(G_LOCK):
        outcome = _deleteBackup(ctx, ts)
    return outcome
def memcache_lock(lock_id):
    """Build a memcache-backed Lock for *lock_id*.

    NOTE(review): ``lock_type`` and ``memcache_client`` are not defined in
    this block — presumably module-level globals. Confirm ``lock_type``
    exists; otherwise the log call raises NameError.
    """
    logger.info(f"Using '{lock_type}' lock type.")
    return Lock(lock_id, client=memcache_client)