def parseRules(self):
    '''
    Iterate over each line and send relevant lines to the proper translator.
    A line can be a continuation of the previous line. The rule I use is:
    each rule starts with a "table_identifier:", so the next time I reach a
    "table_identifier:" it marks a new rule/line for me.
    '''
    all_rules_unparsed = self.file_content.split("\n")
    current_rule = ''
    for unparsed_rule_string in all_rules_unparsed:
        unparsed_rule_string = unparsed_rule_string.strip()
        if len(unparsed_rule_string) == 0 or unparsed_rule_string[0] == '#':
            # this is an empty line or a comment
            continue
        if ':' in unparsed_rule_string:
            # We start a new rule; first, take care of the previous one.
            # The length check handles the first iteration -- not pretty, but simple and robust.
            if len(current_rule) > 0:
                L.debug("Reading Rule [{}]".format(current_rule))
                # parse the rule
                self._tokenize_parse_single_rule(current_rule)
            # we have a ':' so we start a new rule
            current_rule = unparsed_rule_string
        else:
            # we concatenate; we need to be aware of ' ', ':', ',' and '|' separators.
            # Check the last char to decide how to concatenate.
            concat_char = ' '
            if current_rule.endswith((':', ',', '|')):
                concat_char = ''
            current_rule = current_rule + concat_char + unparsed_rule_string
    # last iteration
    L.debug("Reading Rule [{}]".format(current_rule))
    # parse the rule
    self._tokenize_parse_single_rule(current_rule)
    return self
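# A sketch of the rule-file format this parser expects (contents hypothetical,
# inferred from the docstring and the separator handling above). A line that
# contains ':' starts a new rule; a line without ':' is folded into the
# previous rule:
#
#   # comments and blank lines are skipped
#   users: id|int, name|varchar
#   orders: id|int,
#     user_id|int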
def get_global_stats(timespan):
    """Handle the querying of match win/loss info.
    Returns a dict of win/loss counters keyed by match mode."""
    victories = dict()
    cachestring = "globalstatsalltime"
    if timespan[0] != "all":
        cachestring = 'globalstats{}{}'.format(timespan[1].year, timespan[1].month)
    q = cache.get(cachestring)
    if q is None:
        logging.debug('Cache miss on globalstats')
        m = match_stats(timespan)
        for match in m:
            if match.mode not in victories:
                victories[match.mode] = {'wins': 0, 'losses': 0}
            if match.victory is True:
                victories[match.mode]['wins'] += 1
            else:
                victories[match.mode]['losses'] += 1
        cache.set(cachestring, victories, timeout=15 * 60)  # 15 minutes
    else:
        victories = q
        logging.debug('Cache hit on globalstats')
    return victories
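# Usage sketch (hypothetical; `cache` and `match_stats` are module-level
# helpers assumed by the function above, and the timespan tuple shape is
# inferred from the code):
#
# stats = get_global_stats(("month", datetime.date.today()))
# # -> e.g. {'solo': {'wins': 3, 'losses': 5}, ...}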
def get(self, endpoint, params=None, headers=None):
    endpoint = endpoint if endpoint.startswith(
        "http") else f"https://api.vk.com/method/{endpoint}"
    logging.debug(f"API endpoint the request will be sent to: {endpoint}")
    response = self.session.get(endpoint, params=params, headers=headers)
    return response.json()
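# Usage sketch (hypothetical; `api` is an instance of the class above and
# TOKEN is a valid VK access token). A bare method name is expanded to the
# full https://api.vk.com/method/ URL:
#
# data = api.get("users.get", params={"user_ids": "1", "v": "5.131",
#                                     "access_token": TOKEN})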
def start_polling(client):
    try:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        users = asyncio.get_event_loop().run_until_complete(
            redis.execute("SMEMBERS", "users"))['details']
        logging.debug("Users in Redis: " + str(users))
        while True:
            for user in users:
                user_id = user
                user = asyncio.get_event_loop().run_until_complete(
                    hgetall(user))
                if parse_as_boolean(user['active']):
                    # TODO: use Dramatiq instead of this hand-rolled code
                    result = poll_user(user, user_id, client)
                    logging.debug(
                        "Polled user {0}, result: {1}".format(user_id, result))
                sleep(0.1)
    except Exception as e:
        logging.error(
            "An error occurred while trying to start polling all VK accounts.",
            exc_info=True)
        return e
def refresh(self):
    response = requests.post(constants.OAUTH_TOKEN, headers={
        'Authorization': 'basic {}'.format(self.fortnite_token)
    }, data={
        'grant_type': 'refresh_token',
        'refresh_token': '{}'.format(self.refresh_token),
        'includePerms': True
    }).json()
    access_token = response.get('access_token')
    self.session.headers.update(
        {'Authorization': 'bearer {}'.format(access_token)})
    self.refresh_token = response.get('refresh_token')
    self.expires_at = utils.convert_iso_time(response.get('expires_at'))
    logging.info("Access token for the Fortnite API has been regenerated.")
    logging.debug(
        "Access Token: {0}; Refresh Token: {1}; Expires At: {2}.".format(
            access_token, self.refresh_token, self.expires_at))
def callback(bot, call):
    try:
        call = call.callback_query
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        if call.message:
            if call.data.startswith("channel_counters"):
                counter = call.data.split("|", 2)
                if counter[1] == "time":
                    bot.answer_callback_query(
                        callback_query_id=call.id,
                        text="🕒 This post was published at: {0} MSK.".format(
                            datetime.fromtimestamp(
                                int(counter[2])).strftime("%d.%m.%y, %H:%M:%S")),
                        show_alert=True,
                        cache_time=30)
                elif counter[1] == "likes":
                    bot.answer_callback_query(
                        callback_query_id=call.id,
                        text="💖 Number of likes: {0}.".format(counter[2]),
                        show_alert=True,
                        cache_time=30)
                elif counter[1] == "comments":
                    bot.answer_callback_query(
                        callback_query_id=call.id,
                        text="💬 Number of comments: {0}.".format(counter[2]),
                        show_alert=True,
                        cache_time=30)
                elif counter[1] == "reposts":
                    bot.answer_callback_query(
                        callback_query_id=call.id,
                        text="🔁 Number of reposts: {0}.".format(counter[2]),
                        show_alert=True,
                        cache_time=30)
                elif counter[1] == "views":
                    bot.answer_callback_query(
                        callback_query_id=call.id,
                        text="👁 Number of views: {0}.".format(counter[2]),
                        show_alert=True,
                        cache_time=30)
                elif counter[1] == "poll":
                    poll = loop.run_until_complete(
                        redis.execute("GET", "poll&" + str(counter[2])))
                    if not poll:
                        logging.debug("Poll name is None; most likely this poll is not in the cache.")
                        refresh_stats(bot, call, expired=1)
                        return
                    bot.answer_callback_query(
                        callback_query_id=call.id,
                        text="📋 Poll title: {0}.".format(poll[0:170]),
                        show_alert=True,
                        cache_time=30)
                elif counter[1] == "poll_ans":
                    poll_answer = loop.run_until_complete(
                        redis.execute("GET", "poll_answer&" + str(counter[2])))
                    if not poll_answer:
                        logging.debug("Poll answer is None; most likely this poll is not in the cache.")
                        refresh_stats(bot, call, expired=1)
                        return
                    else:
                        poll_answer = poll_answer.split("?|&|&|!", 1)
                        bot.answer_callback_query(
                            callback_query_id=call.id,
                            text="❎ Number of votes for {0}: {1} votes.".format(
                                poll_answer[0][0:140], poll_answer[1]),
                            show_alert=True,
                            cache_time=30)
            elif call.data.startswith("channel_refresh_stats"):
                refresh_stats(bot, call)
                bot.answer_callback_query(callback_query_id=call.id, show_alert=False)
    except Exception as e:
        logging.error("An exception occurred while trying to execute the method.",
                      exc_info=True)
        return e
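# The callback_data format this handler expects, inferred from the
# split("|", 2) above, is "channel_counters|<counter_type>|<value>",
# e.g. (hypothetical values):
#
#   channel_counters|likes|42
#   channel_counters|time|1546300800
#   channel_counters|poll|123456789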
def getComposedCommand(self):
    """
    Compose the command with the params set earlier
    :return: execCmd and optsList
    """
    argsList = ["/%s:%s" % (k, v) for k, v in self.settings.items()]
    argsList.extend(self.args)
    logging.debug("Running command:\n%s %s" % (self.executable, " ".join(argsList)))
    return self.executable, argsList
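# Usage sketch (hypothetical; assumes the instance was configured with
# `executable`, `settings`, and `args` by the surrounding class):
#
# import subprocess
# executable, argsList = cmd.getComposedCommand()
# subprocess.call([executable] + argsList)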
def test_with_schema():
    '''
    Runs the entire schema checker on the test server.
    No params needed.
    '''
    args = TheBabaClass()
    args.handle_all = True
    args.cnx = app.db.get_test_Server_connection()
    L.debug(args)
    app.schemachk.run(args)
def changeDB(self, db, file_content):
    '''
    Changes the DB we are connected to
    '''
    # This means the DB is going to be created in the script
    if "CREATE DATABASE" in file_content:
        return
    if not app.db.change_db(db):
        L.debug("CODE:\n{}".format(file_content))
        raise app.db.My.errors.ProgrammingError(
            "ERROR: Could not run command. db [{}] does not exist. Use -v to get more info.".format(db))
def getComposedCommand(self):
    argsList = []
    for key, value in self.settings.items():
        argsList.append("-%s" % key)
        argsList.append(value)
    if self.args:
        argsList.extend(self.args)
    argsList.append(self.host)
    print(argsList)
    logging.debug("Running command:\n%s %s" % (self.executable, " ".join(argsList)))
    return self.executable, argsList
def restoreSettings(self):
    settings = QSettings("MyRDP")
    try:
        self.restoreGeometry(settings.value("geometry").toByteArray())
        self.restoreState(settings.value("windowState").toByteArray())
    except Exception:
        logging.debug("No settings to restore")
    # restore tray icon state
    trayIconVisibility = settings.value("trayIconVisibility").toBool()
    self.tray.setVisible(trayIconVisibility)
def tracked_dbs(object_type, assets_path=False):
    '''
    Reads the object type and returns the list of DBs maintained under this object type.
    Object type: View, Trigger, Stored Procedure, Function
    '''
    if assets_path:
        object_path = assets_path + '/' + object_type
    else:
        object_path = config.assets_folder + '/' + object_type
    L.debug("Meta for folder [{}]".format(object_path))
    # TODO: see if I need to add the ignore list here
    return [right_side_db for right_side_db in os.listdir(object_path)
            if '.' not in right_side_db]
def run(args):
    '''
    Extract the args, populate with defaults where relevant, and decide what to run.
    This one seems to be procedural in nature, hmmmm.
    '''
    # Start
    # L.debug('INPUT')
    # L.debug(args)
    # L.debug(upgrade_config.__repr__())
    L.debug(
        "\n\n--------------------------------------------------- START UPGRADING --------------------------------------------------------\n\n"
    )
    # Sync rcom_sql_upgrades table with the file system
    sync_files_to_db()

    commands = deque([])
    # --mark_completed -> will mark a file as completed (sometimes you will run
    # files manually and want the system to know it)
    commands.appendleft(
        app.upgrade.commands.MarkCompleted(args.file_name_to_mark_complete))
    # --unblock -> blocking action, will exit
    commands.appendleft(app.upgrade.commands.Unblock(args.file_name_to_unblock))
    # --archive
    commands.appendleft(app.upgrade.commands.Archive(args.archive_files))
    # Validate System -> no flag; this always happens, unless blocking/unblocking
    # happens (then we don't get here).
    # After unblock, which might remove problematic files, I am doing validations
    # on the system; no point continuing if issues are found.
    commands.appendleft(app.upgrade.commands.ValidateSystem())
    # --test
    commands.appendleft(
        app.upgrade.commands.Test(args.test_upgrade, args.handle_all,
                                  args.limit_files))
    # --with_schema
    commands.appendleft(
        app.upgrade.commands.TestServerSchema(args.with_schema_checker,
                                              args.test_upgrade, args))
    # --limit=X || --all
    commands.appendleft(
        app.upgrade.commands.Upgrade(args.handle_all, args.limit_files))

    # go go go
    run_commands(commands)
async def create_tables():
    try:
        await Psql.execute(
            'CREATE TABLE IF NOT EXISTS "public"."channels" ("chat_id" bigint, "community_id" int NOT NULL, '
            '"owner_id" int NOT NULL, "access_token" text NOT NULL, "channel_link" text NOT NULL, '
            'PRIMARY KEY ("chat_id"), UNIQUE ("community_id"), UNIQUE ("channel_link"));'
        )
        logging.debug("The channels table was successfully created in PostgreSQL.")
        return "OK"
    except Exception as e:
        logging.error("An error occurred while trying to create tables in PostgreSQL.",
                      exc_info=True)
        return e
def loadTables(self):
    '''
    Load ALL the table names the current db has.
    I know I can cache this action, but I would rather have clear code
    than optimized code at this stage.
    '''
    sql = "SELECT table_name FROM information_schema.tables WHERE table_schema='{}'".format(
        self.current_db)
    cursor = app.db.get_connection().cursor()
    cursor.execute(sql)
    self.tables_list = {res: [] for res, in cursor}
    L.debug(str(self.tables_list))
    return self
def addotp():
    try:
        if 'otp_token' not in request.json:
            return jsonify({'message': 'Error in params'})
        else:
            otp_token = request.json['otp_token']
            id = OTPLIST_TABLE.insert({'otp_token': otp_token,
                                       "created_at": datetime.now()})
            resp = jsonify({'message': 'Token added successfully!'})
            resp.status_code = 200
            return resp
    except Exception as e:
        logging.debug("-------EXCEPTION-in--{}---".format(e.args))
        return jsonify({'message': "Exception occurred: {}".format(e.args)})
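# Hypothetical client call for this Flask handler (the route path and port are
# assumptions; the handler reads JSON with an "otp_token" key):
#
#   curl -X POST http://localhost:5000/addotp \
#        -H "Content-Type: application/json" \
#        -d '{"otp_token": "123456"}'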
async def execute(cls, *args):
    try:
        logging.debug("Passed arguments: " + str(args))
        psql_connection = await asyncpg.connect(host=config.databaseHost,
                                                database=config.databaseName,
                                                user=config.databaseUsername,
                                                port=config.databasePort)
        await psql_connection.execute(*args)
        await psql_connection.close()
        return "OK"
    except Exception as e:
        logging.error("An error occurred while trying to execute the SQL query.",
                      exc_info=True)
        return e
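# Usage sketch (assumes this classmethod lives on the Psql helper class used
# elsewhere in this codebase; the table and columns are hypothetical).
# asyncpg uses $1-style positional placeholders:
#
# status = await Psql.execute(
#     "INSERT INTO channels (chat_id, community_id) VALUES ($1, $2)", 100, 200)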
async def connection():
    try:
        psql_connection = await asyncpg.connect(host=config.databaseHost,
                                                database=config.databaseName,
                                                user=config.databaseUsername,
                                                port=config.databasePort)
        logging.info("The connection to the PostgreSQL database can be established successfully.")
        logging.debug("Returned response: " + str(psql_connection))
        await psql_connection.close()
        return "OK"
    except Exception as e:
        logging.error("An error occurred while trying to connect to the PostgreSQL database.",
                      exc_info=True)
        return e
async def connection():
    try:
        response = await Redis.execute("ping")
        logging.info("The connection to the Redis server can be established successfully.")
        logging.debug("Returned response: " + str(response))
        return "OK"
    except Exception as e:
        logging.error("An exception occurred while trying to establish a connection with Redis.",
                      exc_info=True)
        return e
def _cleanTriggers(self):
    '''
    Load relevant mysql triggers and drop them
    '''
    # iterate on each db to get the list of triggers
    for database_name in self.what_to_handle['t']:
        self.cnx.database = database_name
        self.cursor.execute("SHOW TRIGGERS")
        for trigger_name in [trigger for (trigger, *_) in self.cursor]:
            sql = "DROP TRIGGER {db}.{name}".format(db=database_name, name=trigger_name)
            L.debug(sql)
            if self.args.dry_run:
                L.warning("Dry dropping trigger {db}.{name}".format(db=database_name, name=trigger_name))
            else:
                self.cursor.execute(sql)
def _cleanViews(self):
    '''
    Load relevant mysql views and drop them
    '''
    # iterate on each db to get the list of views
    for database_name in self.what_to_handle['w']:
        self.cnx.database = database_name
        self.cursor.execute("SHOW FULL TABLES IN {} WHERE TABLE_TYPE LIKE 'VIEW'".format(database_name))
        for view_name in [view for (view, *_) in self.cursor]:
            sql = "DROP VIEW {db}.{name}".format(db=database_name, name=view_name)
            L.debug(sql)
            if self.args.dry_run:
                L.warning("Dry dropping view {db}.{name}".format(db=database_name, name=view_name))
            else:
                self.cursor.execute(sql)
async def fetch(cls, *args):
    try:
        logging.debug("Passed arguments: " + str(args))
        psql_connection = await asyncpg.connect(host=config.databaseHost,
                                                database=config.databaseName,
                                                user=config.databaseUsername,
                                                port=config.databasePort)
        result = await psql_connection.fetch(*args)
        await psql_connection.close()
        # The result can conveniently be accessed like: result[0]['COLUMN']
        return result
    except Exception as e:
        logging.error("An error occurred while trying to fetch data from PostgreSQL.",
                      exc_info=True)
        return e
async def fetchrow(cls, *args):
    try:
        logging.debug("Passed arguments: " + str(args))
        psql_connection = await asyncpg.connect(host=config.databaseHost,
                                                database=config.databaseName,
                                                user=config.databaseUsername,
                                                port=config.databasePort)
        result = await psql_connection.fetchrow(*args)
        await psql_connection.close()
        return result
    except Exception as e:
        logging.error("An error occurred while trying to fetch a row from PostgreSQL.",
                      exc_info=True)
        return e
async def execute(cls, *args):
    try:
        logging.debug("Passed arguments: " + str(args))
        redis_connection = await aioredis.create_connection(
            (config.redisHost, config.redisPort), encoding="UTF-8")
        result = await redis_connection.execute(*args, encoding="UTF-8")
        redis_connection.close()
        await redis_connection.wait_closed()
        return result
    except Exception as e:
        logging.error("An exception occurred while trying to execute the Redis statement.",
                      exc_info=True)
        return e
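# Usage sketch (aioredis 1.x command-style API; `Redis` is assumed to be the
# enclosing helper class, and the keys are hypothetical):
#
# value = await Redis.execute("GET", "some_key")
# members = await Redis.execute("SMEMBERS", "users")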
def _internal_test_rule(self):
    '''
    Remember! The results of describe [table_name] are:
    Field, Type, Null, Key, Default, Extra
    '''
    cursor = self._get_cursor()
    cursor.execute(self.sql)
    fields = [all_fields[0] for all_fields in cursor]
    for check_field in self.params:
        L.info("checking that field [{}] exists in:".format(check_field))
        L.debug(str(fields))
        if check_field not in fields:
            self.has_errors = True
            self.dynamic_error_str += " [" + check_field + "]"
    return self
def _cleanFunctions(self):
    '''
    Load relevant mysql functions and drop them
    '''
    # first load the functions
    sql = "SHOW FUNCTION STATUS WHERE Db NOT IN(" + self.ignore_dbs_str + ") "
    sql += "AND Db IN('" + "','".join(self.what_to_handle['f']) + "')"
    L.debug(sql)
    self.cursor.execute(sql)
    res = [(Db, Name) for (Db, Name, *_) in self.cursor]
    for mysql_func in res:
        sql = "DROP FUNCTION {db}.{name}".format(db=mysql_func[0], name=mysql_func[1])
        L.debug(sql)
        if self.args.dry_run:
            L.warning("Dry dropping function {db}.{name}".format(db=mysql_func[0], name=mysql_func[1]))
        else:
            self.cursor.execute(sql)
def _cleanSP(self):
    '''
    Load relevant stored procedures and drop them
    '''
    # first load the stored procedures
    sql = "SHOW PROCEDURE STATUS WHERE Db NOT IN(" + self.ignore_dbs_str + ") "
    sql += "AND Db IN('" + "','".join(self.what_to_handle['s']) + "')"
    L.debug(sql)
    self.cursor.execute(sql)
    res = [(Db, Name) for (Db, Name, *_) in self.cursor]
    for sp in res:
        sql = "DROP PROCEDURE {db}.{name}".format(db=sp[0], name=sp[1])
        L.debug(sql)
        if self.args.dry_run:
            L.warning("Dry dropping sp {db}.{name}".format(db=sp[0], name=sp[1]))
        else:
            self.cursor.execute(sql)
def send_bulk(es, indexed_data):
    for index, datas in indexed_data.items():
        actions = list()
        for i in datas:
            _id = i.pop("id")
            template = {
                "_index": index,
                "_type": index,
                "_id": _id,
            }
            _tmp = {"_source": {"timestamp": datetime.datetime.now()}}
            _tmp['_source'].update(i)
            template.update(_tmp)
            logging.debug(template)
            actions.append(template)
        res = helpers.bulk(es, actions)
    return res
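# Hypothetical input shape for send_bulk (the index and field names are
# illustrative; each item must carry an "id" key, which becomes the
# Elasticsearch document _id):
#
# indexed_data = {
#     "metrics": [{"id": 1, "cpu": 0.42}, {"id": 2, "cpu": 0.13}],
# }
# send_bulk(es_client, indexed_data)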
def __init__(self, fortnite_token, launcher_token, password, email):
    password_response = requests.post(constants.OAUTH_TOKEN, headers={
        'Authorization': 'basic {}'.format(launcher_token)
    }, data={
        'grant_type': 'password',
        'username': '{}'.format(email),
        'password': '{}'.format(password),
        'includePerms': True
    }).json()
    access_token = password_response.get('access_token')
    exchange_response = requests.get(constants.OAUTH_EXCHANGE, headers={
        'Authorization': 'bearer {}'.format(access_token)
    }).json()
    code = exchange_response.get('code')
    token_response = requests.post(constants.OAUTH_TOKEN, headers={
        'Authorization': 'basic {}'.format(fortnite_token)
    }, data={
        'grant_type': 'exchange_code',
        'exchange_code': '{}'.format(code),
        'includePerms': True,
        'token_type': 'egl'
    }).json()
    access_token = token_response.get('access_token')
    refresh_token = token_response.get('refresh_token')
    expires_at = utils.convert_iso_time(token_response.get('expires_at'))
    self.session = Session(access_token, refresh_token, expires_at, fortnite_token)
    logging.info("Access token for the Fortnite API has been obtained.")
    logging.debug(
        "Access Token: {0}; Refresh Token: {1}; Expires At: {2}.".format(
            access_token, refresh_token, expires_at))
def process(self, db, file_content, filename):
    '''
    Loops on the file itself and parses it (using a parser) into a buffer,
    later to be written into the output file.
    '''
    looking_for_header = True
    looking_for_header_args = False
    looking_for_body = False
    # once I start looking into the args string, I no longer start from '(',
    # as it can be the '(' in INT(11)
    not_yet_started_args = True
    SP = SpDataParser(self._current_path + '/' + filename)
    for line in file_content.splitlines():
        if looking_for_body:
            SP.addBodyLine(line)
        if looking_for_header:
            # method name
            if "CREATE PROCEDURE" in line and len(line) > (len("CREATE PROCEDURE") + 8):
                start_funcname = line.find("CREATE PROCEDURE")
                end_funcname_location = line.find('(')
                SP.addSPName(line[start_funcname + len("CREATE PROCEDURE") + 1:end_funcname_location], db)
                looking_for_header = False
                looking_for_header_args = True
        # method arguments
        if looking_for_header_args:
            if not_yet_started_args:
                start_args = line.find('(')
                not_yet_started_args = False
            else:
                start_args = 0
            end_args = line.find('BEGIN')
            if end_args > -1:
                # Means we got to the end of the args section
                looking_for_header_args = False
                looking_for_body = True
            SP.addRawArgStr(line[start_args + 1:])
    # Write parsed stuff into the output file
    self.doc_file.write(str(SP))
    L.debug(str(SP))
async def connection():
    try:
        response = await Redis.execute("ping")
        logging.debug("Returned response: " + str(response))
        if str(response['details']) == "PONG":
            logging.info("The connection to Redis can be established successfully.")
            return {"status": "OK", "details": response['details']}
        else:
            logging.error("An error occurred while trying to establish a connection with Redis.")
            return {"status": "ERROR", "details": response['details']}
    except Exception as e:
        logging.error("An error occurred while trying to establish a connection with Redis.",
                      exc_info=True)
        return {"status": "ERROR", "details": str(e)}
def postCalcFolder(self):
    '''Open the output file'''
    self.doc_file = open(self.assets_path + "/autocompletion/php/SP.php", "w")
    header = """
<?php
/**
* Autocompletion stub
* You call (dbname)_(stored_procedure_name)
*/
class SP{{

    /**
    * @return {}
    */
    static function call(){{
        return new self;
    }}
""".format(self._return_type)
    L.debug(header)
    self.doc_file.write(header)
async def execute(cls, *args, can_cause_exception=False):
    try:
        logging.debug("Passed arguments: " + str(args))
        redis_connection = await aioredis.create_connection(
            (config.REDIS_HOST, config.REDIS_PORT), encoding="UTF-8")
        result = await redis_connection.execute(*args, encoding="UTF-8")
        redis_connection.close()
        await redis_connection.wait_closed()
        return {"status": "OK", "details": result}
    except Exception as e:
        if not can_cause_exception:
            logging.error("An error occurred while executing the Redis query.",
                          exc_info=True)
        else:
            logging.debug(
                "An error occurred while executing the Redis query, "
                "but the parameter can_cause_exception=True.",
                exc_info=True)
        return {"status": "ERROR", "details": str(e)}
def process(self, db, file_content, filename):
    '''
    Just run the sqls
    '''
    L.debug(file_content)
    self.cursor.execute(file_content)  # TODO: add multi=True
def sync_files_to_db():
    '''
    Check all NON-completed files in the db; if they no longer exist in the file system -> delete the entry.
    Check the file system for any file not yet in the db; create an entry with [pending_completion] status.
    Reset the status of all [completed in test] files to be [pending completion].
    '''
    # db boilerplate
    cnx = app.db.get_connection()
    # need this to know which files I already processed
    cnx.database = upgrade_config['upgrade_tracking_database']
    cursor = cnx.cursor()

    # read all files
    files_in_file_system = os.listdir(config.assets_folder + "/upgrades/current")

    # -------------------------------------------------------------------------------------------------
    # Check all NON-completed files in the db; if no longer in the file system -> delete the entry
    # -------------------------------------------------------------------------------------------------
    find_files_sql = "SELECT file_name FROM {}.rcom_sql_upgrades WHERE execution_status <> 'completed'".format(
        upgrade_config['upgrade_tracking_database'])
    cursor.execute(find_files_sql)
    res = cursor.fetchall()
    files_to_delete_from_db = []
    for db_file, in res:  # the extra ',' unpacks the tuple directly, in this line
        if db_file in files_in_file_system:
            # ALL GOOD, DO NOTHING
            continue  # to next file
        else:
            L.warning('No longer in file system, deleting from rcom_sql_upgrades [{}]'.format(db_file))
            files_to_delete_from_db.append(db_file)
    if len(files_to_delete_from_db) > 0:
        sql_in = "('" + "','".join(files_to_delete_from_db) + "')"
        cursor = cnx.cursor()
        sql = "DELETE FROM {}.rcom_sql_upgrades WHERE file_name IN {}".format(
            upgrade_config['upgrade_tracking_database'], sql_in)
        # L.debug(sql)
        cursor.execute(sql)

    # -------------------------------------------------------------------------------------------------
    # Check the file system for any file not yet in the db; create an entry with [pending_completion] status
    # -------------------------------------------------------------------------------------------------
    values = ["('" + file_name + "'," + get_file_execution_order(file_name) + ",NULL,'pending_completion',NULL)"
              for file_name in files_in_file_system
              if not any(ignored_partial_string in file_name
                         for ignored_partial_string in config.ignore_files_dirs_with)]  # ignored-files list filter
    if len(values) > 0:
        values = ','.join(values)
        sql = "INSERT IGNORE INTO {}.rcom_sql_upgrades VALUES {}".format(
            upgrade_config['upgrade_tracking_database'], values)
        # L.debug(sql)
        cursor.execute(sql)

    # -------------------------------------------------------------------------------------------------
    # Reset the status of all [completed in test] files to be [pending completion]
    # -------------------------------------------------------------------------------------------------
    L.debug('reset completed in test to be pending completion')
    update_sql = "UPDATE {}.rcom_sql_upgrades SET execution_status='pending_completion' WHERE execution_status='completed_in_test'".format(
        upgrade_config['upgrade_tracking_database'])
    cursor.execute(update_sql)

    # SAVING ALL CHANGES TO DB
    cnx.commit()
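# Column layout implied by the INSERT above (an inference from the VALUES
# tuple, not a confirmed schema): rcom_sql_upgrades(file_name,
# execution_order, <NULL column>, execution_status, <NULL column>), where
# execution_status is one of 'pending_completion', 'completed_in_test',
# or 'completed'.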