async def done_caller(self, future, method_index, retdata):
    """Deserialize an RMI reply payload and complete the waiting future.

    Looks up the invoked method's signature, deserializes each declared
    return value (awaiting any asynchronous ones), unwraps a single return
    value from its tuple, and resolves `future` with the result.
    """
    method = self.entity_info.get_method(self.context_name, method_index)
    _, return_types = method.signature
    serialized_rets = BinarySerialization(retdata)
    collected = []
    for param_index, return_type in enumerate(return_types):
        try:
            value = return_type.deserialize(
                serialized_rets.get_data()[param_index])
        except SerializationError:
            ERROR_MSG(
                f"Failed to return result by method {method.name}, return value {param_index} invalid"
            )
            raise
        # Asynchronously produced values must be awaited before delivery.
        if isinstance(value, AsyncObj):
            value = await value
        collected.append(value)
    # A single return value is delivered bare, not wrapped in a tuple.
    rets = collected[0] if len(collected) == 1 else tuple(collected)
    try:
        future.set_result(rets)
    except asyncio.InvalidStateError:
        # Future was already resolved or cancelled; log and move on.
        ERROR_MSG(
            f"Failed to done future {future} (of method {method}), retdata: {retdata}"
        )
def work(city,
         building_name: str,
         citizens_ids: List[int32],
         ability_type_name: str,
         amount: int = 1,
         **kwargs) -> WorkingResult:
    """Perform an ability-typed work action in a city building with citizens.

    Citizens are grouped by concrete ability name for `ability_type_name`;
    for each citizen the ability's base parameters are scaled by the
    citizen's ability level and dispatched to the matching CityCitizens
    handler, then experience is granted.

    @param city: city entity owning the citizens (project type)
    @param building_name: name of the building the work happens in
    @param citizens_ids: ids of the citizens to put to work
    @param ability_type_name: CityCitizens handler / ability type name
    @param amount: experience granted per citizen (0 grants none)
    @param kwargs: extra parameters forwarded to the handler
    @return: the accumulated WorkingResult
    """
    result = WorkingResult()
    # NOTE(review): this proceeds when Citizens IS locked OR amount == 0;
    # a `not ...locked` guard would be the more usual shape — confirm intent.
    if city.Citizens.locked or amount == 0:
        citizens_by_abilities = get_citizens_with_ability_names_by_ids_and_ability_type(
            city, citizens_ids, ability_type_name, building_name)
        for ability_name, citizens in citizens_by_abilities.items():
            ability_info = CitizenAbilities.get_by("Name", ability_name)
            for citizen in citizens:
                current_experince = citizen.Abilities[ability_name]
                multiplier = ability_info['ExperienceMultiplier']
                ability_level = get_ability_level(current_experince,
                                                 multiplier)
                leveled_ability_info = CitizenAbilitiesLeveling.get_by(
                    "Level", ability_level)
                # Start from a copy of the base parameters, then overlay
                # per-level bonuses without mutating the catalog entry.
                parameters = deepcopy(ability_info['BaseParameters'])
                if leveled_ability_info:
                    for parameter_name in parameters.keys():
                        if parameter_name in ability_info['BaseParameters']:
                            if isinstance(parameters[parameter_name],
                                          (int, float)):
                                # Numeric parameters scale with the
                                # per-parameter leveled multiplier.
                                parameters[
                                    parameter_name] += leveled_ability_info[
                                        'Parameters'][
                                            parameter_name] * ability_info[
                                                'LeveledParametersMultipliers'][
                                                    parameter_name]
                            else:
                                # Non-numeric parameters are replaced outright
                                # when a leveled value exists.
                                if parameter_name in leveled_ability_info[
                                        'Parameters']:
                                    parameters[
                                        parameter_name] = leveled_ability_info[
                                            'Parameters'][parameter_name]
                # Handlers are looked up by name on CityCitizens.
                func = getattr(CityCitizens, ability_type_name, None)
                if func is not None:
                    INFO_MSG(
                        f"Working {ability_type_name} in {building_name} with citizen {citizen}"
                    )
                    try:
                        func(result, **parameters, **kwargs)
                    except Exception as e:
                        ERROR_MSG(
                            f"Something went wrong in CityCitizens::{ability_type_name}"
                        )
                        print_exc()
                else:
                    ERROR_MSG(f"Ability {ability_type_name} not released")
                if amount > 0:
                    citizen.Abilities[ability_name] += amount
    return result
async def __aexit__(self, exc_type, exc_val, exc_tb):
    """Commit the variable transaction on clean exit, roll back otherwise.

    On a clean exit every tracked variable is serialized and written to the
    database in one transactional compare-and-set; if the database reports
    stale old values the local changes are rolled back and TransactionError
    is raised.  Returning True suppresses the in-flight exception, False
    lets it propagate.
    """
    do_not_raise_exc = True
    if exc_type is None:
        # Clean exit: collect serialized snapshots of all tracked variables.
        new_variables = TArray[FPropertyInfo]()
        for context, varname in self.vars:
            var = getattr(context, varname)
            info = FPropertyInfo(EntityDBID=context.dbid,
                                 EntityClass=context.__class__.__name__,
                                 PropertyName=var.property_name,
                                 PropertyTypeName=var.get_type_name(),
                                 SerializedValue=var.serialize())
            new_variables.append(info)
        # Atomic DB update: succeeds only if the old values are still current.
        success = await Globals.this_service.db.UpdateVariablesTransactionally(
            self.old_variables, new_variables)
        if success:
            self.unlock_all()
            INFO_MSG("%s done" % self, depth=1)
            # Replication.__exit__ runs only on the successful-commit path.
            Replication.__exit__(self, exc_type, exc_val, exc_tb)
        else:
            # Values changed outside this transaction: undo local changes.
            self.rollback("DB error")
            self.unlock_all()
            ERROR_MSG(
                "%s failed, because values in database not actual. Variables rolled back"
                % self,
                depth=1)
            raise TransactionError("Variables changed outside transaction")
    elif exc_type == TransactionExitException:
        # Deliberate interruption from code: roll back and swallow it.
        self.rollback("Interrupted")
        self.unlock_all()
        do_not_raise_exc = True
        WARN_MSG(
            "Transaction %s interrupted from code. Variables rolled back" %
            self,
            depth=1)
    else:
        # Unexpected exception in the transaction body: roll back, re-raise.
        self.rollback("Code error")
        self.unlock_all()
        do_not_raise_exc = False
        ERROR_MSG(
            "%s failed, because errors in code. Variables rolled back" % self,
            depth=1)
    # for context, varname in self.vars:
    #     var = getattr(context, varname)
    #     if var.locked:
    #         var.unlock()
    return do_not_raise_exc
async def perform(self):
    """Start the current match (original docstring: "Запуск текущего матча").

    Wakes up a dedicated server, sets up the game, assigns teams, registers
    every player, then tells each player's session to join the server.

    @raise MatchMakingError: when no dedicated server could be woken up
    """
    INFO_MSG(f"Performing match {self}")
    if self.dedicated_server:
        # Already has a server: log and bail out (the `return ERROR_MSG(...)`
        # idiom is used throughout this file).
        return \
            ERROR_MSG(f"The match {self} already performing")
    self.performing = True
    self.update_state(EMatchState.Preparing)
    try:
        await self.wake_up_dedicated_server_for_match()
    except WakeupError:
        # No server available: undo state changes, surface a matchmaking error.
        self.rollback()
        raise MatchMakingError("Unable to wakeup dedicated server for match")
    await self.dedicated_server.SetupGame(self.max_players)
    self.assign_teams()
    self.started = True
    for player_info in self.players.values():
        await self.register_player_info(player_info)
    self.update_state(EMatchState.InGame)
    self.dedicated_server.MatchStart(self.id)
    self.handle_start()
    # Finally direct every player's session to connect to the server.
    for session, player_info in self.players.items():
        player_info.join(self.ip, self.port)
async def __aenter__(self):
    """Acquire locks on every transaction variable before entering.

    Spins while any variable is still locked by another transaction,
    waiting up to 2 seconds per variable per pass, then locks all
    variables for this transaction, records a savepoint and enters the
    replication context.

    @return: the result of Replication.__enter__
    """
    while any([
            getattr(context, varname).locked
            for context, varname in self.vars
    ]):
        for context, varname in self.vars:
            var = getattr(context, varname)
            if var.locked:
                try:
                    await asyncio.wait_for(var.waitforunlock(), 2)
                except asyncio.TimeoutError:
                    # Bug fix: the message claimed "5 seconds" while the
                    # actual wait_for timeout above is 2 seconds.
                    ERROR_MSG(
                        "Variable %s locked by %s more for 2 seconds! Check your code for nested transactions with the same variables"
                        % (varname, var.locker),
                        depth=1)
                    # raise TransactionError("Variables locked too long")
    INFO_MSG("Entered %s with %s" % (self, (self.vars, )), depth=1)
    # All variables are free now: take the locks for this transaction.
    for context, varname in self.vars:
        var = getattr(context, varname)
        await var.lock(self)
    self.savepoint()
    return Replication.__enter__(self)
async def wake_up_dedicated_server_for_match(self):
    """Wake up a dedicated server for this match.

    (Original docstring: "Поднять сервер для этого матча".)
    Resolves the map, allocates a port, requests a dedicated server from
    the supervisor, prepares the match on it and records ip/port.

    @return: the dedicated server mailbox, or the ERROR_MSG result when
             the map is missing
    """
    INFO_MSG(f"Waking up dedicated server for match {self}")
    map_info: FMapInfo = Maps.get_by("Name", self.map_name)
    if not map_info:
        return ERROR_MSG(f"Missing map {self.map_name}")
    port = self.match_making.new_dedicated_server_port()
    map_parameters = f"{ map_info.Asset }?listen&port={ port }&game={ get_uclass_true_path(map_info.GameMode) }"
    kw_params = dict(port=str(port),
                     dedic_id=self.id,
                     MaxPlayers=map_info.PlayersMax,
                     BotsCount=map_info.PlayersMax)  # MaxPlayers is BotsCount
    # Fix: base_ip was assigned but never used; only the port is needed here.
    _, base_port = self.service.endpoint
    self.dedicated_server = await self.base.supervisor.RequestDedicatedServer(
        map_parameters, self.service.exposed_ip, base_port, {}, kw_params)
    await self.dedicated_server.PrepareMatch()
    INFO_MSG(f"Dedicated server waked up for match {self}")
    # Clean up if the dedicated server connection is ever lost.
    self.dedicated_server.add_lost_callback(
        lambda: self.on_dedicated_server_dropped(self.dedicated_server))
    self.port = port
    # NOTE(review): ip comes from Globals.this_service.exposed_ip while the
    # request above used self.service.exposed_ip — confirm these agree.
    self.ip = Globals.this_service.exposed_ip
    return self.dedicated_server
def __run__(cls, *args, **kwargs):
    """Bootstrap and run a service class on the asyncio event loop.

    Registers an internal access token, instantiates the service, wires
    SIGTERM/SIGINT to its handler, awaits its async initialization and
    `done()`, then runs the loop forever.  On unexpected exceptions an
    interactive post-mortem prompt is shown to the operator.
    """
    Globals.access_token = Access().register_access("SUPER INTERNAL ACCESS",
                                                    AccessLevel.Internal)
    Globals.service_name = cls.__name__ + CommandLine.get_arguments().postfix
    service_instance = cls(*args, **kwargs)
    Globals.this_service = service_instance
    signal.signal(signal.SIGTERM, service_instance.sigterm_handler)
    # signal.signal(signal.SIGBREAK, service_instance.sigterm_handler)
    signal.signal(signal.SIGINT, service_instance.sigterm_handler)
    try:
        asyncio.get_event_loop().run_until_complete(
            service_instance)  # this calls __await__
        asyncio.get_event_loop().run_until_complete(service_instance.done())
        asyncio.get_event_loop().run_forever()
    except KeyboardInterrupt:
        INFO_MSG("Preparing to shut down service...")
    except Exception as exc:
        from traceback import print_exc
        ERROR_MSG("Exception raised:", exc)
        print_exc()
        INFO_MSG("Press enter to continue")
        input()
        # Interactive post-mortem: ask the operator who hit the crash and why.
        print("What's your name?\n> ", end="")
        name = input()
        print(f"{name}, how did you come to this?\n> ", end="")
        reason = input()
        WARN_MSG(
            f'The program has been stopped, the {name} provoked an error, and says: "{reason}". Dismiss him'
        )
def __init__(self, *vars):
    """Open a replication context over one or more replicated variables.

    @param vars: replicated variable objects; each must expose `owner`
                 and `property_name`
    """
    assert len(vars) > 0 or error(
        "To start replication context one or more variables must be passed",
        depth=1)
    self.vars = [(var.owner, var.property_name) for var in vars]
    # Bug fix: the original checked `None in self.vars`, which can never be
    # true (elements are (owner, property_name) tuples); check the names.
    if any(property_name is None for _, property_name in self.vars):
        ERROR_MSG("None property name passed into Replication")
    for var in vars:
        if isinstance(var, MapBase):
            # todo: temporary for valid buffer
            var.replication_buffer.append((SliceReplicationKind.Nop, None))
async def execute_rmi(self, executor_connection, entity_id, method_index,
                      future_id, access_token, params):
    """Dispatch a remote method invocation to a locally registered entity.

    Unknown entity ids are logged and ignored; exceptions raised by the
    entity are printed and forwarded back to the caller's future.
    """
    entity = self.entities.get(entity_id)
    if entity is None:
        ERROR_MSG("Failed to call rmi %i. Entity with id %i not exists!" %
                  (method_index, entity_id))
        return
    try:
        await entity.execute_rmi(executor_connection, method_index,
                                 future_id, access_token, params)
    except Exception as err:
        ERROR_MSG("Failed to execute rmi on %s" % (entity))
        from traceback import print_exc
        print_exc()
        # Ship the exception back so the remote future does not hang.
        entity.send_exception(executor_connection, err, future_id)
def merge_items(self, source_id, target_id):
    """Merge the item stack at source_id into the stack at target_id.

    Swaps the two slots when either stack is already at MaxStack;
    otherwise moves as many units as capacity allows and drops the source
    item once it is empty.  Requires ItemsOrder/Items to be locked.
    """
    if ensure_locked(self.ItemsOrder, self.Items):
        source_item = self.Items[source_id]
        target_item = self.Items[target_id]
        # item_info aliases the *source* item's catalog entry below.
        source_item_info = item_info = ItemsCatalog.get_by(
            "Name", source_item["Name"])
        target_item_info = ItemsCatalog.get_by("Name", target_item["Name"])
        if source_item_info is None or target_item_info is None:
            ERROR_MSG(
                f"Failed to merge items with Names "
                f"{source_item['Name']} & { target_item['Name']}. ItemsCatalog corrupted!"
            )
            return
        if source_item_info["Name"] != target_item_info["Name"]:
            ERROR_MSG(
                f"Failed to merge items with different ID {source_item_info['Name']} & {target_item_info['Name']}"
            )
            return
        if source_item.Count >= item_info[
                "MaxStack"] or target_item.Count >= item_info["MaxStack"]:
            # Either stack is already full: just swap the slots.
            self.swap_items(source_id, target_id)
        else:
            if (source_item.Count +
                    target_item.Count) <= item_info["MaxStack"]:
                # Everything fits into the target; the source is consumed.
                target_item.Count = source_item.Count + target_item.Count
                self.drop_item(source_id)
            else:
                # Only part of the source fits: free capacity of the target.
                loc_count_item = item_info["MaxStack"] - self.Items[
                    target_id].Count
                # NOTE(review): since source+target > MaxStack here,
                # source.Count > loc_count_item always holds, so the else
                # branch below looks unreachable — confirm.
                if self.Items[source_id].Count >= loc_count_item:
                    # NOTE(review): the edit() contexts look swapped — the
                    # target item is modified under edit(source_id) and vice
                    # versa; confirm which key each edit() should mark dirty.
                    with self.Items.edit(source_id):
                        target_item.Count += loc_count_item
                    with self.Items.edit(target_id):
                        source_item.Count -= loc_count_item
                    if source_item.Count == 0:
                        with self.Items.edit(source_id):
                            self.drop_item(source_id)
                else:
                    with self.Items.edit(target_id):
                        target_item.Count += source_item.Count
                    self.drop_item(source_id)
def try_exc(self, future_id, cls_name, args):
    """Deliver a remote exception into the locally pending future.

    Resolves `cls_name` through ExceptionsRegistry; unknown classes are
    logged and replaced with a generic RemoteServerException so the
    waiting caller still gets an error.
    """
    entry = self.futures.get(future_id)
    if entry is None:
        # No future is registered under this id; nothing to deliver to.
        return
    exc_cls = ExceptionsRegistry.find(cls_name)
    if exc_cls is None:
        ERROR_MSG("Invalid incoming exception %s" % cls_name)
        from Core.ClientConnectionHandler import RemoteServerException
        entry['future'].set_exception(RemoteServerException())
    else:
        entry['future'].set_exception(exc_cls(args))
async def exec_raw(self, *args, **kwargs):
    """Execute a raw SQL command through the pooled connection.

    (Original docstring: "Выполнить сырую команду".)

    @param args, kwargs: parameters forwarded to the SQL executor
    @return: the open query result
    @raise psycopg2.ProgrammingError: re-raised after logging the query
    """
    try:
        async with self.connection as conn:
            return await conn.execute(*args, **kwargs)
    except psycopg2.ProgrammingError as err:
        # Log the offending statement before letting the error propagate.
        ERROR_MSG("Unable to perform query: %s, \n%s" % (err, args), depth=1)
        raise
async def TellPID(self, connection, pid: int32):
    """Record a child process reporting its PID back to this service.

    (Original docstring: "Сообщить PID этому сервису".)
    Resolves the wakeup future registered by open_process with a fresh
    client connection to the child's endpoint.

    @param connection: connection of the process reporting in
    @param pid: OS process id previously recorded in self.processes
    """
    if pid in self.processes and not self.processes[pid]['future'].done():
        INFO_MSG(
            f"{connection} ({self.processes[pid]['name']}) tells pid {pid} in {round(time() - self.processes[pid]['opened_at'], 6)} seconds"
        )
        self.processes[pid]['future'].set_result(
            await self.create_client_connection(
                self.processes[pid]['endpoint']))
    else:
        # Bug fix: the implicit string concatenation was missing a separating
        # space ("...told PID.Probably...") and contained typos
        # ("runned", "lanuched").
        ERROR_MSG(
            "Failed to tell PID! "
            f"There are no processes run with pid {pid} or this process already told PID. "
            "Probably you ran a process which launched a child process (be sure if this is UE4 dedicated server, "
            "check the right path, not a PROJECTNAME.exe in root dir)")
async def __ainit__(self, host, port, database, user, password):
    """Asynchronously connect the database engine.

    @param host: database host
    @param port: database port
    @param database: database name
    @param user: login role
    @param password: login password
    """
    await super().__ainit__()
    try:
        DB_INFO(
            f"Login to database {host}:{port}: {database} with user {user}"
        )
        self.engine = await create_engine(user=user,
                                          database=database,
                                          host=host,
                                          password=password,
                                          port=port)
    except psycopg2.OperationalError:
        ERROR_MSG(
            "Operational error, probably database engine settings is wrong. Check your Config"
        )
        # Bug fix: exit with a non-zero status so supervisors/scripts can
        # detect the failed startup (previously sys.exit(0)).
        sys.exit(1)
    DB_INFO("Connected to DB '%s' by user '%s' at '%s'" %
            (database, user, host))
async def async_wakeup_service_locally(self,
                                       service_path,
                                       arguments,
                                       port,
                                       is_python_process=True,
                                       index=0,
                                       name=None):
    """Spawn a service process on this host and wait for its PID callback.

    @param service_path: script/executable path passed to open_process
    @param arguments: dict of extra '-key=value' parameters (None -> empty)
    @param port: tcp port the new service must listen on
    @param is_python_process: run through the configured python interpreter
    @param index: instance index used to build the log postfix
    @param name: human-readable name stored with the process record
    @return: (service_connection, proc) on success, or None on failure
    """
    INFO_MSG(
        f"{service_path}, {arguments}, {port}, {is_python_process}, {index}, {name}"
    )
    if arguments is None:
        arguments = dict()
    # Each child gets its own internal-level access token.
    access_token = Access().generate(AccessLevel.Internal)
    # WARN_MSG(f"Opening (causer {self.exposed_ip}")
    proc = self.open_process(service_path,
                             is_python_process=is_python_process,
                             service_port=port,
                             causer_ip=self.endpoint[0],
                             causer_exposed_ip=self.exposed_ip,
                             causer_port=self.endpoint[1],
                             postfix=('[%i]' % index) if index else "",
                             region=self.config.get_region(),
                             **arguments,
                             access_token=access_token,
                             is_child_process=True)
    # The future below is resolved by TellPID when the child reports in.
    future_data = self.processes[proc.pid] = {
        'future': asyncio.Future(loop=asyncio.get_event_loop()),
        'endpoint': (self.endpoint[0], port),
        'opened_at': time(),
        'name': name
    }
    try:
        service_connection = await future_data['future']
    except Exception as e:
        ERROR_MSG("Something went wrong in future", e)
        return None
    return service_connection, proc
def open_process(self,
                 service_path,
                 arguments=None,
                 is_python_process=True,
                 extented_param=None,
                 **kwargs):
    """Spawn a child process, converting kwargs into '-key=value' flags.

    Every keyword argument is validated against the CommandLine.Arguments
    declarations; unknown keys are logged and skipped.

    @param service_path: executable/script path, or a list of argv parts
    @param arguments: extra positional command-line arguments (optional)
    @param is_python_process: prepend the configured python interpreter
    @param extented_param: single extra parameter appended before the flags
                           (e.g. the UE4 map string)
    @param kwargs: parameters converted to '-key=value' flags
    @return: the started subprocess.Popen object
    """
    # Bug fix: `arguments=list()` was a shared mutable default argument.
    if arguments is None:
        arguments = list()
    cmd_kwargs = list()
    kwargs['no_color_patterns'] = CommandLine.get_arguments(
    ).no_color_patterns
    for key, value in kwargs.items():
        if CommandLine.has_arg(key):
            arg_type = CommandLine.get_arg_type(key)
            kwarg = "-%s=%s" % (key, arg_type(value))
            cmd_kwargs.append(kwarg)
        else:
            ERROR_MSG(
                "Unable to pass %s parameter, not exists in CommandLine.py: Arguments class"
                % key)
    cmd_args = list()
    python_executable_name = ConfigGlobals.PythonExecutable
    if is_python_process:
        cmd_args.append(python_executable_name)
    # Idiom fix: the original used a conditional *expression* purely for its
    # side effects; a plain if/else states the intent.
    if isinstance(service_path, list):
        cmd_args.extend(service_path)
    else:
        cmd_args.append(service_path)
    if extented_param is not None:
        cmd_args.append(extented_param)
    cmd_args.extend(cmd_kwargs)
    cmd_args.extend(arguments)
    process = subprocess.Popen(cmd_args, shell=False)
    INFO_MSG(f"Opening process {process.pid}", cmd_args)
    return process
async def UpdateVariablesTransactionally_cycle(self, future, olds, news):
    """Run one compare-and-set cycle for transactional variable updates.

    Renders a plpgsql function that checks every old value (with FOR
    UPDATE row locks) and, only if all still match, writes the new
    values; the boolean outcome resolves `future`.

    @param future: asyncio.Future resolved with True (committed) / False
                   (old values were stale)
    @param olds: iterable of (entity_class, variable_name, data, entity_dbid)
    @param news: same shape, the values to write on success
    """
    async with self.driver.connection as conn:
        # NOTE(review): values are interpolated directly into SQL by Jinja —
        # safe only while olds/news come from trusted, pre-serialized data.
        Templ = Template("""
        CREATE OR REPLACE FUNCTION public.last_transaction_function() RETURNS BOOLEAN AS $$
        BEGIN
        {% for entity_class, variable_name, data, entity_dbid in old_variables %}
            IF (SELECT "{{variable_name}}" FROM "class_{{entity_class}}" WHERE db_id={{entity_dbid}} FOR UPDATE LIMIT 1) <> {{data}} THEN
                RETURN FALSE;
            END IF;
        {% endfor %}
        {% for entity_class, variable_name, data, entity_dbid in new_variables -%}
            UPDATE "class_{{entity_class}}" SET "{{variable_name}}"={{data}} WHERE db_id={{entity_dbid}};
        {% endfor %}
            RETURN TRUE;
        END
        $$ LANGUAGE 'plpgsql' VOLATILE CALLED ON NULL INPUT SECURITY INVOKER;
        START TRANSACTION READ WRITE;
        SELECT * FROM public.last_transaction_function();
        """).render(old_variables=olds, new_variables=news)
        try:
            res = await conn.execute(Templ)
        except psycopg2.ProgrammingError:
            ERROR_MSG("Unable to perform query %s" % Templ)
            raise
        await conn.execute("""
        COMMIT;
        """)
        # NOTE(review): assumes exactly one result row; a second row would
        # make set_result raise InvalidStateError — confirm.
        for r in res:
            future.set_result(r[0])
async def handle_message(self, data):
    """Handle one incoming serialized message and dispatch it by type.

    (Original docstring: "Обработка входящего сообщения".)
    Supported types: rmi_call (execute an RMI on a local entity),
    rmi_future (deliver a result), rmi_error / rmi_exception (deliver
    failures to the waiting future).
    """
    try:
        message_proxy = BinarySerialization(data).proxy()
    except Exception as e:
        ERROR_MSG("Unable to make proxy for data %s: %s" % (data, e))
        return
    try:
        message_type = message_proxy >> int
        message_data = message_proxy >> bytes
    except Exception:
        # Bug fix: the original bare `except:` only printed the proxy and
        # fell through, then raised NameError below on the unbound
        # message_type/message_data; also narrowed the bare except.
        print(message_proxy)
        return
    if message_type == ConnectionMessageTypes.rmi_call:
        proxy = BinarySerialization(message_data).proxy()
        entity_id = proxy >> int
        gen_sig = proxy >> str
        method_index = proxy >> int
        future_id = proxy >> int
        access_token = proxy >> str
        params = proxy >> bytes
        # Reject calls generated against a different entity schema.
        if gen_sig == Globals.generator_signature:
            await EntitiesDispatcher().execute_rmi(self.client_connection,
                                                   entity_id, method_index,
                                                   future_id, access_token,
                                                   params)
        else:
            EntitiesDispatcher().remote_response_error(
                self.client_connection, entity_id, future_id,
                "Generator signature mismatch")
    elif message_type == ConnectionMessageTypes.rmi_future:
        proxy = BinarySerialization(message_data).proxy()
        entity_id = proxy >> int
        method_index = proxy >> int
        future_id = proxy >> int
        returns = proxy >> bytes
        if future_id != -1:
            EntitiesDispatcher().yield_rmi_result(future_id, entity_id,
                                                  method_index, returns)
    elif message_type == ConnectionMessageTypes.rmi_error:
        proxy = BinarySerialization(message_data).proxy()
        error_source = proxy >> str
        error_message = proxy >> str
        future_id = proxy >> int
        WARN_MSG("Error from %s: %s" % (error_source, error_message))
        if future_id != -1:
            EntitiesDispatcher().yield_rmi_error(future_id)
    elif message_type == ConnectionMessageTypes.rmi_exception:
        proxy = BinarySerialization(message_data).proxy()
        exception_source = proxy >> str
        exception_class = proxy >> str
        exception_args = proxy >> str
        future_id = proxy >> int
        WARN_MSG("Exception from %s: %s" %
                 (exception_source, exception_class))
        if future_id != -1:
            EntitiesDispatcher().yield_rmi_exception(future_id,
                                                     exception_class,
                                                     exception_args)
    # NOTE(review): unconditional per-message delay — looks like throttling;
    # confirm it is still needed.
    await asyncio.sleep(0.1)
def method_caller(self, method_index, *args, **kwargs):
    """Fire-and-forget RMI call through this mailbox (future id -1 = no reply).

    NOTE(review): **kwargs are accepted but never forwarded to
    send_method_call — confirm whether dropping them is intentional.
    """
    if not self:
        return \
            ERROR_MSG("Call to invalid mailbox %s" % self, depth=1)
    self.send_method_call(method_index, -1, *args)
async def __ainit__(self):
    """Asynchronous service bootstrap: config, endpoint, tcp server, callback.

    Reads this service's section from Configuration, resolves the endpoint
    and exposed ip (command-line arguments override the config), starts the
    tcp server unless NoServer is set, runs start(), and finally reports
    its PID back to the parent ("causer") service when one was given.
    """
    await super().__ainit__()
    self.started_time = time()
    args = self.get_args()
    self.next_service_port = self.child_service_port_start
    cls_name = self.__class__.__name__
    config: AppConfigType = Configuration()[cls_name]
    if not config:
        ERROR_MSG("Failed to read config! There is no section for %s" %
                  cls_name)
    self.config = config
    Globals.no_logging = self.config.DisableLog
    # INFO_MSG('~')
    if self.config.Kind in [AppKind.Single, AppKind.Static]:
        # Fixed services take endpoint/exposed ip straight from the config.
        self.endpoint = self.config.get_endpoint()
        self.exposed_ip = self.config.get_exposed_ip()
    # INFO_MSG("Test")
    if args.service_port is not None:
        # The command line overrides the configured port.
        self.endpoint = self.endpoint[0], args.service_port
    # ERROR_MSG(f"T {args.causer_exposed_ip}")
    if not args.causer_exposed_ip or args.causer_exposed_ip == '0.0.0.0':
        try:
            self.exposed_ip = self.config.get_exposed_ip()
        except NotImplementedError:
            # No way to determine an exposed ip for this service kind.
            self.exposed_ip = '...'
    serving = not self.config.NoServer
    self.tcp_server = await create_server(self.endpoint, serving)
    Globals.disabled_log_categories = ConfigGlobals.DisabledLogs
    if not self.get_args().silent:
        INFO_MSG("%s started! with %s" % (cls_name, self.get_args()))
        INFO_MSG("%s started at %s (%s)" %
                 (cls_name, self.endpoint if serving else "None",
                  self.get_region()))
        INFO_MSG("Version: %s, generator signature: %s" %
                 (Globals.version, Globals.generator_signature))
        INFO_MSG("Description: %s" % self.config.Description)
    self.processes = dict()
    self.postfix = self.get_args().postfix
    if args.causer_exposed_ip and args.causer_exposed_ip != '0.0.0.0':
        # A parent service handed us our externally visible ip explicitly.
        self.exposed_ip = args.causer_exposed_ip
        INFO_MSG(f"Causer exposed {self.exposed_ip}")
    await self.start()
    self.dedicated_servers = list()
    if args.causer_ip is not None and args.causer_port is not None:
        # Report our PID back to the parent service that spawned us.
        INFO_MSG(f"Call to causer: {args.causer_ip}:{args.causer_port}")
        mbox = await Service.make_mailbox(
            "base", "Service", (args.causer_ip, args.causer_port))
        await mbox.TellPID(os.getpid())
async def async_wakeup_dedicated_server_locally(self,
                                                service_path,
                                                map_name,
                                                base_ip,
                                                base_port,
                                                ue4_arguments,
                                                arguments,
                                                keyword_arguments,
                                                port,
                                                is_python_process=True,
                                                index=0,
                                                name=None):
    """Spawn a UE4 dedicated server locally and wait for its PID callback.

    @param service_path: path to the dedicated server executable/script
    @param map_name: map asset string; ue4_arguments are appended as
                     ?key=value pairs to form the UE4 map parameter
    @param base_ip, base_port: base service endpoint handed to the server
    @param ue4_arguments: dict rendered into the map parameter (or None)
    @param arguments: extra positional command-line arguments
    @param keyword_arguments: extra '-key=value' parameters (None -> empty)
    @param port: service port for the new process
    @param is_python_process: run through the python interpreter if True
    @param index: instance index used for the log postfix
    @param name: human-readable process name
    @return: (service_connection, proc) on success, None on failure
    @raise WakeupError: when the dedicated server limit is reached
    """
    if len(self.dedicated_servers) >= self.max_dedicated_servers:
        raise WakeupError(
            "Unable to wakeup dedicated server. Out of limit")
    if keyword_arguments is None:
        keyword_arguments = dict()
    extented_param = None
    if ue4_arguments is not None:
        # Build the UE4 map parameter: MapName?k0=v0&k1=v1...
        extented_param = map_name
        for idx, (key, value) in enumerate(ue4_arguments.items()):
            extented_param += ("?" if idx == 0 else "&") + "%s=%s" % (key,
                                                                      value)
    INFO_MSG("Opening '%s' '%s'" % (service_path, extented_param))
    access_token = Access().generate(AccessLevel.Internal)
    custom_local_network_version = Configuration(
    )['UE4App'].CustomLocalNetworkVersion
    custom_local_network_version = custom_local_network_version if custom_local_network_version else 0
    proc = self.open_process(
        service_path,
        is_python_process=is_python_process,
        service_port=port,
        causer_exposed_ip=self.exposed_ip,
        causer_ip=self.endpoint[0],
        causer_port=self.endpoint[1],
        base_ip=base_ip,
        base_port=base_port,
        postfix=('[%i]' % index) if index else "[0]",
        local_network_version=custom_local_network_version,
        arguments=arguments,
        **keyword_arguments,
        access_token=access_token,
        is_child_process=True,
        extented_param=extented_param)
    # Resolved by TellPID once the server process reports in.
    future_data = self.processes[proc.pid] = {
        'future': asyncio.Future(loop=asyncio.get_event_loop()),
        'endpoint': (self.endpoint[0], port),
        'opened_at': time(),
        'name': name
    }
    try:
        service_connection = await future_data['future']
        self.dedicated_servers.append(service_connection)
        # Drop the server from the registry when its connection is lost.
        service_connection.add_lost_callback(
            lambda conn: self.dedicated_servers.remove(conn))
        INFO_MSG("Total dedicated servers %i" %
                 len(self.dedicated_servers))
    except Exception as e:
        ERROR_MSG("Something went wrong in future", e)
        return None
    return service_connection, proc
async def CreateClassTable(self, class_name: FString,
                           fields: TMap[FString, FString]):
    """Create or migrate the database table backing an entity class.

    (Original docstring: "Создать таблицу класса"; class_name — имя
    класса, fields — поля {имя: тип}.)

    Compares `fields` with the schema snapshot stored in public.classes,
    ALTERs the existing "class_<name>" table (add / retype / drop
    columns), upserts the new schema json and finally creates the table
    if it does not exist yet.

    @param class_name: entity class name (trusted; used in SQL identifiers)
    @param fields: mapping of field name -> field type name
    """
    INFO_MSG(f"Create class table {class_name}, {fields}")
    # await self.driver.exec_raw("""  DROP TABLE IF EXISTS public.Class_{0};  """.format(class_name))
    # Build the column DDL fragment used by the final CREATE TABLE below.
    fields_substitute = str()
    for field_name, field_typedata in fields.items():
        T = self.find_type(field_typedata)
        pg_spec = T.pg_spec if T else 'INTEGER'
        default = ConfigurationGenerator(
        ).generated_entities_info.get_by_name(class_name).get_property(
            'base', field_name).default
        fields_substitute += ', "%s" %s DEFAULT %s' % (field_name, pg_spec,
                                                       pg_str(default))
    # NOTE(review): this assignment looks unrelated to table creation —
    # confirm whether it is intentional.
    self.username = None
    # NOTE(review): SQL is composed via str.format — safe only while
    # class_name/fields come from generated, trusted metadata.
    current_data = await self.driver.exec_raw("""
        SELECT class_data FROM public.classes WHERE class_name='{0}';
    """.format(class_name))
    for c in current_data:
        r = c['class_data']
        deleted_columns = list()
        new_columns = list()
        changed_columns = list()
        alter_strings = list()
        # Diff the requested fields against the stored schema snapshot.
        for column_name, column_type in fields.items():
            T = self.find_type(column_type)
            pg_spec = T.pg_spec if T else 'INTEGER'
            default = ConfigurationGenerator(
            ).generated_entities_info.get_by_name(class_name).get_property(
                'base', column_name).default
            default = pg_str(default)
            if column_name not in r:
                new_columns.append((column_name, column_type))
                alter_strings.append("ADD COLUMN {0} {1} DEFAULT {2}".format(
                    column_name, pg_spec, default))
            elif column_name in r and r[column_name] != column_type:
                changed_columns.append(
                    (column_name, r[column_name], column_type))
                alter_strings.append(
                    "ALTER COLUMN {0} TYPE {1}, ALTER COLUMN {0} SET DEFAULT {2}"
                    .format(column_name, pg_spec, default))
        # Columns present in the snapshot but no longer declared get dropped.
        for column_name, column_type in r.items():
            if column_name not in fields:
                alter_strings.append("DROP COLUMN {0}".format(column_name))
                deleted_columns.append(column_name)
        if deleted_columns or changed_columns or new_columns:
            try:
                await self.driver.exec_raw("""
                    ALTER TABLE IF EXISTS "class_{0}" {1};
                """.format(class_name, ", \n".join(alter_strings)))
            except Exception as e:
                ERROR_MSG("An exception occurred, returning...", e)
                return
            if deleted_columns:
                WARN_MSG("Deleted columns in %s %i: [%s]" %
                         (class_name, len(deleted_columns),
                          ", ".join(deleted_columns)))
            if changed_columns:
                WARN_MSG("Changed columns in %s %i: [%s]" %
                         (class_name, len(changed_columns), ", ".join(
                             ["%s from %s to %s" % c
                              for c in changed_columns])))
            if new_columns:
                INFO_MSG("New columns in %s %i: [%s]" %
                         (class_name, len(new_columns),
                          ", ".join("%s %s" % c for c in new_columns)))
    INFO_MSG(class_name, fields, fields_substitute)
    # Record (or refresh) the schema snapshot for this class.
    await self.driver.exec_raw("""
        INSERT INTO public.classes (class_name, class_data) VALUES ('{0}', '{1}')
        ON CONFLICT (class_name) DO UPDATE SET class_data = '{1}';
    """.format(class_name, json.dumps(fields)))
    await self.driver.exec_raw("""
        CREATE TABLE IF NOT EXISTS "class_{0}" (
            rec_id SERIAL PRIMARY KEY NOT NULL,
            db_id SERIAL
            {1}
        );
        CREATE UNIQUE INDEX IF NOT EXISTS "class_{0}_rec_id_uindex" ON "class_{0}" (rec_id);
    """.format(class_name, fields_substitute))
    INFO_MSG(fields)
def join(self, ip, port):
    """Direct this player's session to join a dedicated server.

    @param ip: dedicated server ip
    @param port: dedicated server port
    """
    if not self.session:
        # Bug fix: previously this only logged and then dereferenced the
        # invalid session anyway (AttributeError on None); bail out using
        # the `return ERROR_MSG(...)` idiom used elsewhere in this file.
        return ERROR_MSG("Session is invalid")
    self.session.join_session(ip, port)
async def generate_bugreport(username, title, description, image):
    """Create an automatic bug report in Redmine with an attached screenshot.

    (Original docstring: "Создаёт автоматический отчёт об ошибке в
    редмайне с картинкой и текстом".)

    @param username: reporter's name, embedded into the issue body
    @param title: issue subject (prefixed with [AUTO])
    @param description: issue body text
    @param image: raw jpeg bytes uploaded as an attachment
    @return: True once the issue is created, False when Redmine is not
             configured
    """
    redmine_config = ConfigGlobals.Redmine
    if not redmine_config:
        ERROR_MSG("redmine bugtracker not configured")
        return False
    redmine_host = redmine_config.get("host", None)
    if not redmine_host:
        ERROR_MSG("redmine host not configured")
        return False
    api_access_key = redmine_config.get(
        "api_access_key",
        None)  # 'd41d39caf172705b8b63b290cec7c577b780cd11'
    if not api_access_key:
        ERROR_MSG("redmine api_access_key not configured")
        return False
    project_id = redmine_config.get("project_id", 1)
    async with aiohttp.ClientSession() as session:
        # Step 1: upload the image to obtain an attachment token.
        url = redmine_host + '/uploads.json'
        headers = {
            'content-type': 'application/octet-stream',
            'X-Redmine-API-Key': api_access_key
        }
        token = ""
        result = await session.post(url, data=image, headers=headers)
        async with result as resp:
            try:
                response_json = await resp.json()
                INFO_MSG("Getting token")
                token = response_json.get("upload", {}).get("token", None)
            except Exception as e:
                # NOTE(review): on upload failure the issue is still created
                # below with an empty/None token — confirm this is intended.
                print_exc()
        # Step 2: create the issue referencing the uploaded image.
        url = redmine_host + '/issues.json'
        data = f"""
        <?xml version="1.0"?>
        <issue>
            <project_id>{project_id}</project_id>
            <subject>[AUTO] {title}</subject>
            <description>
h2. Баг от пользователя +{username}+ сгенерирован автоматически

h1. Проблема:

{description}
            </description>
            <priority_id>4</priority_id>
            <category_id>1</category_id>
            <uploads type="array">
                <upload>
                    <token>{token}</token>
                    <filename>image.jpg</filename>
                    <description>Screenshot</description>
                    <content_type>image/jpeg</content_type>
                </upload>
            </uploads>
        </issue>
        """
        headers = {
            'content-type': 'application/xml',
            'X-Redmine-API-Key': api_access_key
        }
        result = await session.post(url, data=data, headers=headers)
        async with result as resp:
            response_json = await resp.json()
            # NOTE(review): assumes the POST succeeded; a missing 'issue'
            # key would raise KeyError here — confirm upstream handling.
            issue = response_json['issue']
            issue_id = issue['id']
            INFO_MSG(
                f"Bug report generated by {username} at {redmine_host}/issues/{issue_id}"
            )
    return True