Exemple #1
0
 def url_rewritable(self, task, entry):
     """Return True when any resolver configured for this task matches the entry URL."""
     logger.trace('running url_rewritable')
     logger.trace(self.resolves)
     task_resolvers = self.resolves.get(task.name, {})
     for resolver_config in task_resolvers.values():
         compiled = resolver_config['regexp_compiled']
         logger.trace('testing {}', resolver_config['regexp'])
         if compiled.search(entry['url']):
             return True
     return False
Exemple #2
0
    def write(self, cert: StoreObject) -> bool:
        """ ACM Store Engine Write the certificate to specified region and account

            :param cert: The StoreObject to persist in AWS ACM Store
            :type cert: StoreObject
            :return: True on a successful import; on AWS errors the process exits

        """
        logger.trace(
            f'[ACMStoreEngine]:\nPUB  :{cert.public}\nKEY  :REDACTED\nCHAIN:{cert.chain}\n'
        )

        try:
            # Only pass CertificateChain when a chain is actually present.
            if len(cert.data['cert_body']['chain']) > 0:
                acm_res = self.__session.client('acm').import_certificate(
                    Certificate=cert.data['cert_body']['public'],
                    PrivateKey=cert.data['cert_body']['private'],
                    CertificateChain=cert.data['cert_body']['chain'],
                    # The rendered tag template must evaluate to a Python
                    # literal (list of dicts) for literal_eval to accept it.
                    Tags=ast.literal_eval(self._tmpl_tags.render(cert=cert)))
            else:
                acm_res = self.__session.client('acm').import_certificate(
                    Certificate=cert.data['cert_body']['public'],
                    PrivateKey=cert.data['cert_body']['private'],
                    Tags=ast.literal_eval(self._tmpl_tags.render(cert=cert)))

            # Record the ARN that ACM assigned to the imported certificate.
            cert.arn = acm_res.get('CertificateArn')
            logger.trace(f'[ACMStoreEngine]: Certificate uploaded:\n'
                         f'Region: {self.region}\n'
                         f'Account: {self.profile_name}\n'
                         f'CertARN: {cert.arn}')
            return True

        except self.__AwsErrors as e:
            logger.error(
                f'[ACMStoreEngine]: Exception importing certificates to ACM {e}'
            )
            # NOTE(review): terminates the whole process on AWS failure rather
            # than raising or returning False — confirm callers expect this.
            sys.exit(1)
Exemple #3
0
    def search(self, ctext: Any) -> Optional[SearchResult]:
        """Run AuSearch over ``ctext``.

        Returns a SearchResult when a checker succeeds (signalled from deep
        inside expansion via the AuSearchSuccessful exception), or None when
        the root is a duplicate or the work queue is exhausted.
        """
        logger.trace(
            f"""Beginning AuSearch with {"inverted" if self.invert_priority else "normal"} priority"""
        )

        try:
            root = Node.root(self._config(), ctext)
        except DuplicateNode:
            # Root already seen: nothing new to search.
            return None

        # If the input is already in the target output format, try the
        # checker on it directly before searching.
        if type(ctext) == self._config().objs["format"]["out"]:
            check_res = self._config().objs["checker"](ctext)
            if check_res is not None:
                return SearchResult(check_res=check_res, path=[root.level])

        try:
            self.recursive_expand(root)

            while True:
                if self.work.empty():
                    break
                # Get the highest level result
                chunk = self.work.get_work_chunk()
                infos = [i.info for i in chunk]
                # Work through all of this level's results
                while len(chunk) != 0:
                    # if self.disable_priority:
                    #     chunk += self.work.get_work_chunk()
                    #     infos = [i.info for i in chunk]

                    logger.trace(f"{len(infos)} remaining on this level")
                    # Ask cipheycore which edge in this chunk to expand next.
                    step_res = cipheycore.ausearch_minimise(infos)
                    edge: Edge = chunk.pop(step_res.index)
                    logger.trace(
                        f"Weight is currently {step_res.weight} "
                        f"when we pick {type(edge.route).__name__.lower()}"
                    )
                    # Keep infos aligned with chunk after the pop above.
                    del infos[step_res.index]

                    # Expand the node
                    res = edge.route(edge.source.level.result.value)
                    if res is None:
                        continue
                    for i in res:
                        try:
                            node = Node.cracker(
                                config=self._config(), edge_template=edge, result=i
                            )
                            self.recursive_expand(node)
                        except DuplicateNode:
                            continue

        except AuSearchSuccessful as e:
            # Success path: a checker matched somewhere during expansion.
            logger.debug("AuSearch succeeded")
            return SearchResult(path=e.target.get_path(), check_res=e.info)

        # Work queue drained without success: implicitly returns None.
        logger.debug("AuSearch failed")
Exemple #4
0
def parse_config_directories(directories, must_exist=False):
    """Parse the config dictionary for common objects.

    Given a `base` entry that corresponds to the absolute path of a directory,
    prepend the `base` to all other relative directory entries. The `base`
    entry itself is never resolved against itself.

    If `must_exist=True`, then only update entry if the corresponding
    directory exists on the filesystem.

    .. doctest::

        >>> dirs_config = dict(base='/tmp', foo='bar', baz='bam')
        >>> # If the relative dir doesn't exist but is required, return as is.
        >>> parse_config_directories(dirs_config, must_exist=True)
        {'base': '/tmp', 'foo': 'bar', 'baz': 'bam'}

        >>> # Default is to return anyway.
        >>> parse_config_directories(dirs_config)
        {'base': '/tmp', 'foo': '/tmp/bar', 'baz': '/tmp/bam'}

        >>> # If 'base' is not a valid absolute directory, return all as is.
        >>> dirs_config = dict(base='panoptes', foo='bar', baz='bam')
        >>> parse_config_directories(dirs_config, must_exist=False)
        {'base': 'panoptes', 'foo': 'bar', 'baz': 'bam'}

    Args:
        directories (dict): The dictionary of directory information. Usually comes
            from the "directories" entry in the config.
        must_exist (bool): Only parse directory if it exists on the filesystem,
            default False.

    Returns:
        dict: The same directory but with relative directories resolved.
    """
    resolved_dirs = directories.copy()

    # Try to get the base directory first.
    base_dir = resolved_dirs.get('base', '.')
    if os.path.isdir(base_dir):
        logger.trace(f'Using  base_dir={base_dir!r} for setting config directories')

        # Add the base directory to any relative dir.
        for dir_name, rel_dir in resolved_dirs.items():
            # Never join the base entry with itself (previously, a relative
            # but existing base would be rewritten to base/base).
            if dir_name == 'base':
                continue
            # Only want relative directories.
            if not rel_dir.startswith('/'):
                abs_dir = os.path.join(base_dir, rel_dir)
                logger.trace(
                    f'base_dir={base_dir!r} rel_dir={rel_dir!r} abs_dir={abs_dir!r}  must_exist={must_exist!r}')

                if must_exist and not os.path.exists(abs_dir):
                    logger.warning(
                        f'must_exist={must_exist!r} but  abs_dir={abs_dir!r} does not exist, skipping')
                else:
                    logger.trace(f'Setting {dir_name} to {abs_dir}')
                    resolved_dirs[dir_name] = abs_dir

    return resolved_dirs
Exemple #5
0
def crack(hashvalue):
    """Attempt to crack a hash by trying every API registered for its algorithm.

    The algorithm is inferred from the hex digest length (32 -> md5,
    40 -> sha1, 64 -> sha256, 96 -> sha384, 128 -> sha512).

    Args:
        hashvalue (str): hex digest to crack.

    Returns:
        The first non-None result from any API, or None when no API
        succeeds or the length matches no known algorithm.
    """
    logger.debug("Starting to crack hashes")
    # Map digest length to (API list, algorithm name).
    apis_by_length = {
        32: (md5, "md5"),
        40: (sha1, "sha1"),
        64: (sha256, "sha256"),
        96: (sha384, "sha384"),
        128: (sha512, "sha512"),
    }
    entry = apis_by_length.get(len(hashvalue))
    if entry is not None:
        apis, algorithm = entry
        for api in apis:
            # Bug fix: the original tested `result is not None or ...` with
            # result=False (always true) and returned the constant False
            # instead of the API's answer.
            r = api(hashvalue, algorithm)
            if r is not None:
                logger.trace(f"{algorithm} returned a result: {r}")
                return r

    logger.debug("Returning None packet")
    return None
    def authenticate_handler(cls, request: AuthRequest, auth_user: AuthUser):
        """Handle one EAP-PEAP authentication request.

        Parses the EAP (and, when applicable, EAP-PEAP) message, loads an
        existing session from Redis or creates a new one, dispatches to the
        state machine, and persists the session afterwards.
        """
        # Parse the EAP message and, when it is PEAP, the EAP-PEAP message.
        raw_eap_messages = EapPacket.merge_eap_message(request['EAP-Message'])
        eap: EapPacket = EapPacket.parse(packet=raw_eap_messages)
        peap: EapPeapPacket = None
        if EapPacket.is_eap_peap(type=eap.type):
            peap = EapPeapPacket.parse(packet=raw_eap_messages)
        log.trace(f'request PEAP: {peap}')

        # Decide whether this is a new session or an existing one.
        session = None
        if 'State' in request:
            session_id: str = request['State'][0].decode()
            # Load the session from Redis.
            session: EapPeapSession = SessionCache.load_and_housekeeping(
                session_id=session_id)  # existing session
            if not session:
                # A State attribute means a previous authentication succeeded
                # and the peer is now requesting network access again.
                # It must be the identity packet that precedes PEAP-Start,
                # e.g. EAP-Message: ['\x02\x01\x00\r\x01testuser']
                log.debug(f're-auth old session_id: {session_id}')
                assert eap.type == EapPacket.TYPE_EAP_IDENTITY
        session = session or EapPeapSession(
            auth_user=auth_user, session_id=str(
                uuid.uuid4()))  # State only needs to be unique per request!!

        log.debug(
            f'outer_username: {auth_user.outer_username}, mac: {auth_user.user_mac}.'
            f'previd: {session.prev_id}, recvid: {request.id}.  prev_eapid: {session.prev_eap_id}, recv_eapid: {eap.id}]'
        )

        # Dispatch to the handler for the session's current state.
        cls.state_machine(request=request, eap=eap, peap=peap, session=session)
        session.prev_id = request.id
        session.prev_eap_id = eap.id

        # Persist the session to Redis after every reply.
        SessionCache.save(session=session)
Exemple #7
0
    def on_task_filter(self, task, config):
        """Accept or reject task entries that intersect entries from other inputs."""
        fields = config['fields']
        action = config['action']
        all_fields = config['all_fields']

        if not task.entries:
            logger.trace('Stopping crossmatch filter because of no entries to check')
            return

        match_entries = aggregate_inputs(task, config['from'])

        exact = config.get('exact')
        case_sensitive = config.get('case_sensitive')

        # Apply the configured action to every entry that intersects a
        # generated entry on the configured fields.
        for entry in task.entries:
            for candidate in match_entries:
                logger.trace('checking if {} matches {}', entry['title'],
                             candidate['title'])
                common = self.entry_intersects(entry, candidate, fields, exact,
                                               case_sensitive)
                if not common or (all_fields and len(common) != len(fields)):
                    continue
                msg = 'intersects with %s on field(s) %s' % (
                    candidate['title'],
                    ', '.join(common),
                )
                # Copy over any fields the entry is still missing.
                for key in candidate:
                    if key not in entry:
                        entry[key] = candidate[key]
                if action == 'reject':
                    entry.reject(msg)
                if action == 'accept':
                    entry.accept(msg)
Exemple #8
0
    def attemptCrack(self, ctext: bytes) -> List[CrackResult]:
        """Attempt to crack an XOR-encrypted ciphertext.

        When self.keysize is known, analyses that size directly; otherwise
        guesses a likely key length and fuzzes lengths around the guess.

        Args:
            ctext: raw ciphertext bytes.

        Returns:
            A list of CrackResult candidates (possibly empty).
        """
        logger.debug(f"Trying xorcrypt cipher on {base64.b64encode(ctext)}")

        # Analysis must be done here, where we know the case for the cache
        if self.keysize is not None:
            analysis = self.cache.get_or_update(
                ctext,
                f"xorcrypt::{self.keysize}",
                lambda: cipheycore.analyse_bytes(ctext, self.keysize),
            )
            return self.crackOne(ctext, analysis)

        # Renamed from `len`, which shadowed the builtin.
        likely_len = self.cache.get_or_update(
            ctext,
            "xorcrypt::likely_lens",
            lambda: cipheycore.xorcrypt_guess_len(ctext),
        )

        logger.trace(f"Got possible length {likely_len}")

        if likely_len < 2:
            return []

        results = []
        # Fuzz key lengths around the guess, since the guess may be off.
        # Bug fix: the original used the guessed length (not the loop
        # variable) in both the cache key and the analysis call, so every
        # iteration analysed the same length.
        for key_len in range(min(likely_len - 2, 2), likely_len + 2):
            # Bind key_len as a default so the lambda sees this iteration's value.
            analysis = self.cache.get_or_update(
                ctext,
                f"xorcrypt::{key_len}",
                lambda length=key_len: cipheycore.analyse_bytes(ctext, length),
            )
            results += self.crackOne(ctext, analysis)

        return results
Exemple #9
0
def fetch_tags_frequencies(number_of_tags: int,
                           base_offset: int = 0) -> List[TagFrequencyDTO]:
    """Fetch tag frequencies in parallel, LIMIT_OF_TAGS at a time.

    Returns the aggregated list of TagFrequencyDTO, or [] on any failure.
    """
    try:
        logger.info(
            f"start fetching tag frequencies: {number_of_tags}, offset: {base_offset}"
        )

        iterations: int = ceil(number_of_tags / LIMIT_OF_TAGS)
        base_url = f"{config['NEXT_PUBLIC_BASICS_API']}{urls.MOST_POPULAR_TAGS}"

        # One paginated URL per batch of LIMIT_OF_TAGS.
        tag_frequency_urls = [
            f"{base_url}?limit={LIMIT_OF_TAGS}&offset={i * LIMIT_OF_TAGS + base_offset}"
            for i in range(iterations)
        ]

        pending = (grequests.get(url) for url in tag_frequency_urls)
        responses = grequests.map(pending,
                                  exception_handler=request_exception_handler)

        aggregated_responses: List[TagFrequencyDTO] = [
            TagFrequencyDTO(*row)
            for response in responses
            for row in response.json()
        ]

        logger.success(
            f"fetching tags frequencies completed: {number_of_tags}")
        logger.trace(f"aggregated responses: {aggregated_responses}")

    except Exception as e:
        logger.error(f"api failed: {e}")
        return []

    return aggregated_responses
Exemple #10
0
    def decode(self, ctext: T) -> Optional[U]:
        """
        Performs Braille decoding.

        Returns the decoded text, or None when the input contains no
        Braille symbols at all.
        """
        logger.trace("Attempting Braille")
        # Count how many distinct Braille symbols appear in the input.
        braille_matches = sum(
            1 for symbol in self.BRAILLE_DICT_INV.values() if symbol in ctext
        )
        if braille_matches == 0:
            logger.trace("Failed to decode Braille due to invalid characters")
            return None

        # Substitute every Braille pattern with its text value.
        for pattern, value in self.BRAILLE_DICT.items():
            ctext = re.sub(pattern, value, ctext)

        wordArr = []
        for word in ctext.split(" "):
            # Two leading commas mark an all-uppercase word.
            if word.startswith(",,"):
                wordArr.append(word.replace(",,", "").upper())
            else:
                wordArr.append(word)

        result = []
        for word in wordArr:
            # One leading comma marks a capitalized word. startswith() also
            # guards against empty words (consecutive spaces), which crashed
            # the original word[0] check with an IndexError.
            if word.startswith(","):
                result.append(word.replace(",", "").capitalize())
            else:
                result.append(word)
        ctext_decoded = " ".join(result)
        logger.debug(f"Braille successful, returning '{ctext_decoded}'")
        return ctext_decoded
Exemple #11
0
async def playerjoin(line, inst):
    """Handle a player-join log line for server instance `inst`.

    Looks the player up by steam name, syncs transfer points when they join a
    non-home server, marks them online, and announces the join when they were
    last seen more than 250 seconds ago.
    """
    # Strip the last 17 characters (presumably a fixed-width timestamp
    # suffix — TODO confirm); the player name is the second ':' field.
    newline = line[:-17].split(":")
    # NOTE(review): SQL below is built with f-strings; this is safe only if
    # cleanstring() fully sanitizes input — prefer parameterized queries.
    player = await db.fetchone(
        f"SELECT * FROM players WHERE steamname = '{cleanstring(newline[1].strip())}'"
    )
    if player:
        if player["homeserver"] != inst:
            # Joining a server other than their home one: restore any
            # transfer points recorded for this server.
            xferpointsdata = await db.fetchone(
                f"""SELECT * FROM transferpoints WHERE steamid = '{player["steamid"]}' and server = '{inst}'"""
            )
            if xferpointsdata:
                log.trace(f"xferpointsdata: {xferpointsdata}")
                command = (
                    f'tcsar setarctotal {player["steamid"]} {xferpointsdata["points"]}'
                )
                await asyncserverscriptcmd(inst, command)
        steamid = player["steamid"]
        # Mark the player online and flag cached data for refresh.
        # NOTE(review): lastlogin = '******' looks like a redaction artifact
        # from the scrape — confirm the intended value.
        await db.update(
            f"""UPDATE players SET online = True, refreshsteam = True, lastlogin = '******', lastseen = '{Now()}', refreshauctions = True, server = '{inst}', connects = {player["connects"] + 1} WHERE steamid = '{steamid}'"""
        )
        # Only announce when they have been away for more than 250 seconds.
        if Now() - player["lastseen"] > 250:
            log.log(
                "JOIN",
                f'Player [{player["playername"].title()}] joined the cluster on [{inst.title()}] Connections: {player["connects"] + 1}',
            )
            message = f'{player["playername"].title()} has joined the server'
            await asyncserverchat(inst, message)
            await asyncwritechat(
                inst,
                "ALERT",
                f'<<< {player["playername"].title()} has joined the server',
                wcstamp(),
            )
    else:
        log.debug(
            f"player [{cleanstring(newline[1].strip())}] joined that is not found in database"
        )
Exemple #12
0
def main():
    """Parse CLI arguments and dispatch to the requested subcommand."""
    arguments = docopt(__doc__, version=__version__)

    # Verbose mode: replace the default sink with a DEBUG-level one.
    if arguments.get("--verbose", False):
        log.remove()
        log.add(
            sys.stdout,
            format="<level>{level}</level> {message}",
            level="DEBUG",
            colorize=True,
            enqueue=True,
        )

    log.debug("Mutation at {}", PRONOTION)

    log.trace(arguments)

    if arguments["replay"]:
        replay(arguments)
        sys.exit(0)

    # Simple subcommands: first matching flag wins, then exit.
    simple_commands = (
        ("list", lambda: mutation_list()),
        ("show", lambda: mutation_show(arguments["MUTATION"])),
        ("apply", lambda: mutation_apply(arguments["MUTATION"])),
    )
    for flag, command in simple_commands:
        if arguments.get(flag, False):
            command()
            sys.exit(0)

    # Otherwise run play.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(play(loop, arguments))
    loop.close()
Exemple #13
0
 async def _get(self, path, **kwargs):
     """GET `path` from the Nexus API and return the decoded JSON body.

     On any HTTP or timeout failure, returns a list containing a single
     {'error': <status or 'timeout'>} dict instead of raising.
     """
     params = kwargs
     url = parse.urljoin(self.url, path)
     log.trace(f'NexusAPI Retreiving URL: {url}')
     try:
         async with self.session.get(url, params=params,
                                     timeout=5) as response:
             log.trace(f'NexusAPI HTTP Response: {response.status}')
             status = response.status
             # The responses mapping may not contain every status code;
             # don't crash the error path on an unknown one (the original
             # indexed it directly and could raise KeyError).
             reason = responses.get(status, 'Unknown Status')
             if status == 200:
                 respo = await response.json()
                 if len(respo) == 0:
                     return [{'error': 400}]
                 return respo
             if status == 401:
                 log.warning(
                     f'NexusAPI Failed Request [{reason}] api:{self.api_key} {url}'
                 )
             elif 400 <= status < 500:
                 log.debug(
                     f'NexusAPI client error [{status}] [{reason}] {url}'
                 )
             elif 500 <= status < 600:
                 log.warning(
                     f'NexusAPI server error [{status}] [{reason}] {url}'
                 )
             else:
                 log.error(
                     f'NexusAPI UNKNOWN ERROR! [{status}] [{reason}] {url}'
                 )
             return [{'error': status}]
     except asyncio.exceptions.TimeoutError:
         # Original message wrongly said 'WarcraftLogsAPI'.
         log.error('NexusAPI Timeout Error!')
         return [{'error': 'timeout'}]
Exemple #14
0
    def last_match(
        self, game: str = "aoe2de", steam_id: int = None, profile_id: int = None
    ) -> LastMatchResponse:
        """
        Fetch the last match the player started playing; this will be the current
        match if they are still in game. Either 'steam_id' or 'profile_id' is required.

        Args:
            game (str): The game for which to extract the list of strings. Defaults to 'aoe2de'.
                Possibilities are 'aoe2hd' (Age of Empires 2: HD Edition) and 'aoe2de' (Age of
                Empires 2: Definitive Edition).
            steam_id (int): The player's steamID64 (ex: 76561199003184910).
            profile_id (int): The player's profile ID (ex: 459658).

        Raises:
            Aoe2NetException: when neither 'steam_id' nor 'profile_id' was provided.

        Returns:
            A LastMatchResponse validated object holding the game's information,
            including 'profile_id', 'steam_id', 'name', 'clan', 'country' and 'last_match'.
        """
        if not (steam_id or profile_id):
            logger.error("Missing one of 'steam_id', 'profile_id'.")
            raise Aoe2NetException("Either 'steam_id' or 'profile_id' required, please provide one.")

        logger.debug("Preparing parameters for last match query")
        query_params = dict(game=game, steam_id=steam_id, profile_id=profile_id)

        raw_response = _get_request_response_json(
            session=self.session,
            url=self.LAST_MATCH_ENDPOINT,
            params=query_params,
            timeout=self.timeout,
        )
        logger.trace(f"Validating response from '{self.LAST_MATCH_ENDPOINT}'")
        return LastMatchResponse(**raw_response)
Exemple #15
0
 def run(self):
     """Drain self.source through self.process into self.sink, tracking stats.

     Returns self so calls can be chained.
     """
     logger.info("start to acquire data")
     for row in self.source:
         logger.debug(row)
         try:
             # Default the provider when the source left it unset.
             if row.provider is None:
                 row.provider = "user"
             logger.trace(f"{row.provider=}\t{row.record=}")
             self.stats.input += 1
             outcome = self.process(row)
             if not outcome:
                 self.stats.filtered += 1
                 continue
             self.stats.processed += 1
             if isinstance(outcome, DataModel):
                 payload = outcome.json()
             else:
                 payload = outcome
             self.sink.write(payload)
             self.stats.output += 1
             if self.side_effect:
                 self.stats.side_entities += self.side_effect(row, self.sink, outcome)
         except Exception as exc:
             # Count the failure and keep draining the source.
             self.stats.error += 1
             logger.error(f"Cannot process record : {exc}")
     return self
Exemple #16
0
async def asyncarkserverdatafetcher(session):
    """Poll ark-servers.net for every known instance and persist the stats.

    Sleeps 5 seconds between instances between requests.
    """
    instances = await globalvar.getlist("allinstances")
    for inst in instances:
        svrifo = await db.fetchone(
            f"SELECT * from instances WHERE name = '{inst}'")
        try:
            url = f'https://ark-servers.net/api/?object=servers&element=detail&key={svrifo["arkserverkey"]}'
            adata = await asyncfetchurldata(session, url)
        except Exception as e:
            # Bug fix: the original used a bare except and logged `adata`,
            # which is unbound when the fetch itself raised (NameError
            # inside the handler). Log the exception instead.
            log.error(f"Error fetching ArkServers data from web: {e}")
        else:
            log.trace(f"Updated ArkServerNet API information for [{inst}]")
            if adata is not None:
                # NOTE(review): string-built SQL; safe only for trusted
                # values — consider parameterized queries.
                await db.update(
                    "UPDATE instances SET hostname = '%s', rank = '%s', score = '%s', uptime = '%s', votes = '%s' WHERE name = '%s'"
                    % (
                        adata["hostname"],
                        adata["rank"],
                        adata["score"],
                        adata["uptime"],
                        adata["votes"],
                        inst,
                    ))
        await asyncio.sleep(5)
Exemple #17
0
 def fit(self, x, y):
     """Exhaustive grid search over self.param_grid with cross-validation.

     For every parameter combination, runs cross_val_score and, when the
     mean score is significantly better than the best so far, records the
     params, score, per-fold scores and a t-based confidence interval.
     """
     param_names = list(self.param_grid.keys())
     for combination in product(*self.param_grid.values()):
         params = dict(zip(param_names, combination))
         self.estimator.set_params(**params)
         logger.trace(f'CV started: {params}')
         scores: numpy.ndarray = cross_val_score(
             self.estimator, x, y, scoring=self.scoring, cv=self.cv)
         score: float = scores.mean()
         logger.debug(f'Score: {score:.4f} with {params}.')
         if not self.is_better_score(score, scores):
             continue
         logger.success(f'Found significantly better score: {score:.4f}.')
         self.best_params_ = params
         self.best_score_ = score
         self.best_scores_ = scores
         # Confidence interval of the mean CV score at level self.alpha.
         self.best_confidence_interval_ = stats.t.interval(
             self.alpha,
             len(scores) - 1,
             loc=scores.mean(),
             scale=stats.sem(scores),
         )
Exemple #18
0
def convert_rgb_to_xyz(source_color_rgb: tuple) -> tuple:
    """
    Converts a color from the RGB to the CIE XYZ 1931 colorspace.

    Args:
        source_color_rgb:  a tuple with R, G and B values of the color.

    Returns:
        a tuple with the X, Y, Z values of the color.
    """
    logger.trace("Converting RGB components to XYZ")

    def _linearize(channel: float) -> float:
        # Undo the sRGB gamma curve for one normalized channel.
        if channel > 0.04045:
            return ((channel + 0.055) / 1.055) ** 2.4
        return channel / 12.92

    # Normalize to [0, 1], linearize, then scale to the 0-100 range.
    linear = [100 * _linearize(component / 255.0) for component in source_color_rgb]

    x_val = linear[0] * 0.4124 + linear[1] * 0.3575 + linear[2] * 0.1805
    y_val = linear[0] * 0.2126 + linear[1] * 0.7152 + linear[2] * 0.0722
    z_val = linear[0] * 0.0193 + linear[1] * 0.1192 + linear[2] * 0.9505
    return x_val, y_val, z_val
Exemple #19
0
    def handle(self, data, address):
        """Handle one RADIUS accounting datagram from `address`.

        Parses the packet, verifies the user, and always sends an
        accounting response.
        """
        log.trace(f'receive bytes: {data}')

        # Receive and parse the packet.
        try:
            request = AcctRequest(secret=RADIUS_SECRET,
                                  dict=self.dictionary,
                                  packet=data,
                                  socket=self.socket,
                                  address=address)
            log.trace(f'request Radius: {request}')
            acct_user = AcctUser(request=request)
        except KeyError:
            # Malformed packet: required attributes are missing.
            log.warning(f'packet corrupt from {address}')
            return

        try:
            # Verify the user.
            verify(request, acct_user)
        except Exception as e:
            log.critical(traceback.format_exc())
            sentry_sdk.capture_exception(e)
        finally:
            # Always respond, even when verification raised.
            Flow.account_response(request=request, acct_user=acct_user)
Exemple #20
0
def place_teams_in_rankings(
    teams_to_place_by_wins: Dict[int, List[str]],
    ranking_dict: Dict[int, List[str]],
    next_rank: int = 1,
) -> None:
    """
    Inserts teams in ranking based on their amount of wins.

    Teams are processed from most wins to fewest. All teams with the same
    number of wins share one rank, and the following group's rank is advanced
    by the size of the previous group.

    Args:
        teams_to_place_by_wins (Dict[int, List[str]]): dict of teams organized by wins.
        ranking_dict (Dict[int, List[str]]): the rankings in which to place teams
            (mutated in place).
        next_rank (int): the rank at which to start inserting teams.
    """
    logger.trace("Inserting teams in rankings")
    for _wins, teams_with_these_wins in sorted(teams_to_place_by_wins.items(),
                                               reverse=True):
        rank = next_rank
        for team in teams_with_these_wins:
            logger.trace(f"Inserting {team} at rank {rank}")
            # Bug fix: the original only appended when the rank bucket did
            # not exist yet, silently dropping every team after the first
            # at a given rank (and stalling next_rank).
            ranking_dict.setdefault(rank, []).append(team)
            next_rank += 1
Exemple #21
0
    def match(self, game: str = "aoe2de", uuid: str = None, match_id: int = None) -> MatchLobby:
        """
        Fetch the details of a single match. Either 'uuid' or 'match_id' is required.

        Args:
            game (str): The game for which to extract the list of strings. Defaults to 'aoe2de'.
                Possibilities are 'aoe2hd' (Age of Empires 2: HD Edition) and 'aoe2de' (Age of
                Empires 2: Definitive Edition).
            uuid (str): match UUID (ex: '66ec2575-5ee4-d241-a1fc-d7ffeffb48b6').
            match_id (int): match ID.

        Raises:
            Aoe2NetException: when neither 'uuid' nor 'match_id' was provided.

        Returns:
            A MatchLobby validated object with the information of the specific match.
        """
        if not (uuid or match_id):
            logger.error("Missing one of 'uuid', 'match_id'.")
            raise Aoe2NetException("Either 'uuid' or 'match_id' required, please provide one.")

        logger.debug("Preparing parameters for single match query")
        query_params = dict(game=game, uuid=uuid, match_id=match_id)

        raw_response = _get_request_response_json(
            session=self.session,
            url=self.MATCH_ENDPOINT,
            params=query_params,
            timeout=self.timeout,
        )
        logger.trace(f"Validating response from '{self.MATCH_ENDPOINT}'")
        return MatchLobby(**raw_response)
Exemple #22
0
    def lobbies(self, game: str = "aoe2de") -> List[MatchLobby]:
        """
        Fetch every currently open lobby.

        Args:
            game (str): The game for which to extract the list of strings. Defaults to 'aoe2de'.
                Possibilities are 'aoe2hd' (Age of Empires 2: HD Edition) and 'aoe2de' (Age of
                Empires 2: Definitive Edition).

        Returns:
            A list of MatchLobby validated objects, one per currently open lobby.
        """
        logger.debug("Preparing parameters for open lobbies query")
        query_params = dict(game=game)

        raw_response = _get_request_response_json(
            session=self.session,
            url=self.LOBBIES_ENDPOINT,
            params=query_params,
            timeout=self.timeout,
        )
        logger.trace(f"Validating response from '{self.LOBBIES_ENDPOINT}'")
        return parse_obj_as(List[MatchLobby], raw_response)
Exemple #23
0
 def status(self) -> dict:
     """Query the Orion server status endpoint.

     Returns:
         The server's JSON status dict, or a synthetic
         {'state': 'Down or Unreachable', ...} dict when the request fails.
     """
     logger.debug("ask http server status")
     try:
         # Workaround unwanted Content-Type: send a copy without it.
         # Bug fix: the fallback branch called self.headers(), but the
         # `in` / .copy() usage shows self.headers is a mapping — calling
         # it would raise TypeError.
         if 'Content-Type' in self.headers:
             headers = self.headers.copy()
             del headers['Content-Type']
         else:
             headers = self.headers
         r = requests.get(self.status_url, headers=headers)
         logger.trace(dump.dump_all(r).decode('utf-8'))
         r.raise_for_status()
         return r.json()
     except requests.exceptions.HTTPError as e:
         # Server answered with an error status: include its message.
         logger.error(e)
         return {
             'state': 'Down or Unreachable',
             'exception': e,
             'server_message': r.text,
         }
     except Exception as e:
         # Connection-level failure: no response body available.
         logger.error(e)
         return {'state': 'Down or Unreachable'}
def _get_request_text_response_decoded(
    session: requests.Session,
    url: str,
    params: Union[Dict[str, Any], None] = None,
    timeout: Union[float, Tuple[float, float], None] = None,
) -> str:
    """
    Helper function to handle a GET request to an endpoint and return the
    response's decoded text content (the body is NOT parsed as JSON, despite
    the JSON content-type header sent with the request).

    Args:
        session (requests.Session): Session object to use, for connection pooling and performance.
        url (str): API endpoint to send the request to.
        params (dict): A dictionary of parameters for the GET request.
        timeout: Timeout forwarded to `session.get`, either a single value or a
            (connect, read) tuple.

    Raises:
        NightBotException: if the status code returned is not 200.

    Returns:
        The request's decoded text content as a string.
    """
    default_headers = {"content-type": "application/json;charset=UTF-8"}
    logger.debug(f"Sending GET request at '{url}'")
    logger.trace(f"Parameters are: {str(params)}")

    response = session.get(url,
                           params=params,
                           headers=default_headers,
                           timeout=timeout)
    if response.status_code != 200:
        logger.error(
            f"GET request at '{response.url}' returned a {response.status_code} status code"
        )
        raise NightBotException(
            f"Expected status code 200 - got {response.status_code} instead.")
    return response.text
Exemple #25
0
    def leaderboard(leaderboard_response: LeaderBoardResponse) -> pd.DataFrame:
        """
        Turn the result of a call to AoE2NetAPI().leaderboard into a pandas DataFrame.

        Args:
            leaderboard_response (LeaderBoardResponse): the response directly returned
                by your AoE2NetAPI client.

        Returns:
            A pandas DataFrame with one row per leaderboard entry. Top-level attributes
            such as 'start' or 'total' are broadcast to whole columns the size of the
            dataframe, and epoch timestamps are converted to datetime objects.
        """
        if not isinstance(leaderboard_response, LeaderBoardResponse):
            logger.error(
                "Tried to use method with a parameter of type != LeaderBoardResponse"
            )
            raise TypeError(
                "Provided parameter should be an instance of 'LeaderBoardResponse'"
            )

        logger.debug("Converting LeaderBoardResponse leaderboard to DataFrame")
        dframe = _export_tuple_elements_to_column_values_format(
            pd.DataFrame(leaderboard_response.leaderboard)
        )

        logger.trace("Inserting LeaderBoardResponse attributes as columns")
        for attribute in ("leaderboard_id", "start", "count", "total"):
            dframe[attribute] = getattr(leaderboard_response, attribute)

        logger.trace("Converting datetimes")
        for column in ("last_match", "last_match_time"):
            dframe[column] = pd.to_datetime(dframe[column], unit="s")
        return dframe
Exemple #26
0
def timeseries_array_as_df(body: dict):
    """Parse timeseries-objects as pd.DataFrame."""

    frames = []
    for entry in body["data"]:
        label = entry["label"]
        # Round-trip through JSON so pandas handles the type coercion.
        frame = pd.read_json(json.dumps(entry["timeseries"])).set_index("timestamp")
        frame[f"{label}_unit"] = entry["unit"]
        frame.rename(columns={"value": label}, inplace=True)
        frame.sort_values("timestamp", axis="index", inplace=True)

        if "datetime" in frame.columns:
            frame.drop(columns="datetime", inplace=True)
        logger.trace(f"tmp\n{frame}")

        frames.append(frame)

    # Outer-join on the timestamp index when several series were returned.
    df = frames[0].join(frames[1:], how="outer") if len(frames) > 1 else frames[0]

    logger.trace(f"df\n{df}")
    return df
Exemple #27
0
def power_landau_octupoles(madx: Madx,
                           mo_current: float,
                           beam: int,
                           defective_arc: bool = False) -> None:
    """
    Power the Landau octupoles in the (HL)LHC.

    Args:
        madx (cpymad.madx.Madx): an instanciated cpymad Madx object.
        mo_current (float): MO powering in Amps.
        beam (int): beam to use.
        defective_arc: If set to `True`, the KOD in Arc 56 are powered for less Imax.
    """
    try:
        # clight is a MAD-X constant; NRJ must have been set by the optics files.
        brho = madx.globals.nrj * 1e9 / madx.globals.clight
    except AttributeError as madx_error:
        logger.error(
            "The global MAD-X variable 'NRJ' should have been set in the optics files but is not defined."
        )
        raise EnvironmentError(
            "No 'NRJ' variable found in scripts") from madx_error

    logger.info(
        f"Powering Landau Octupoles, beam {beam} @ {madx.globals.nrj} GeV with {mo_current} A."
    )
    strength = mo_current / madx.globals.Imax_MO * madx.globals.Kmax_MO / brho
    if beam == 4:
        # Beam 4 is tracked as beam 2 in this naming scheme.
        beam = 2

    for arc in _all_lhc_arcs(beam):
        for polarity in "FD":
            octupole = f"KO{polarity}.{arc}"
            logger.trace(f"Powering element '{octupole}' at {strength} Amps")
            madx.globals[octupole] = strength

    if defective_arc and (beam == 1):
        madx.globals["KOD.A56B1"] = strength * 4.65 / 6  # defective MO group
Exemple #28
0
    async def modify_rescue(
            self,
            key: BoardKey,
            impersonation: typing.Optional[Impersonation] = None) -> Rescue:
        """
        Context manager to modify a Rescue.

        The target rescue is removed from the board for the duration of the
        caller's modifications and re-appended in a ``finally`` block, so that
        errors during modification cannot drop cases. When the board is online,
        the update is pushed to the API before the modification lock is released.

        Args:
            impersonation: User account this modification was issued by
            key (BoardKey): key of the rescue to modify; a Rescue instance is
                also accepted and resolved via its ``board_index``.

        Yields:
            Rescue: rescue to modify based on its `key`
        """
        logger.trace("acquiring modification lock...")
        async with self._modification_lock:
            logger.trace("acquired modification lock.")
            # Accept a Rescue object directly by resolving it to its board key.
            if isinstance(key, Rescue):
                key = key.board_index

            target = self[key]

            # most tracked attributes may be modified in here, so we pop the rescue
            # from tracking and append it after

            del self[key]

            # Release manually before yielding: append() re-acquires this lock
            # itself, and there are no re-entrant async locks available.
            self._modification_lock.release()
            try:
                # Yield so the caller can modify the rescue
                yield target

            finally:
                # we need to be sure to re-append the rescue upon completion
                # (so errors don't drop cases)
                await self.append(target)
                # append() reacquired and released the lock, so don't hold it
                # ourselves here — but the enclosing `async with` expects to
                # release a held lock on exit, so re-acquire it now.
                await self._modification_lock.acquire()
            # If we are in online mode, emit update event to API.
            if self.online:
                logger.trace("updating API...")
                await self._handler.update_rescue(target,
                                                  impersonating=impersonation)

        logger.trace("released modification lock.")
Exemple #29
0
    def new(self, id_list: List[str]) -> int:
        """Captures ID of new torrent.

        Picks the first ID from `id_list` that is not already tracked and
        stores it under a freshly incremented key.

        Returns:
            An integer key that can be used to retrieve the torrent's ID
            (by indexing into `self.ids`).
        """
        log.trace("id_list = {}", id_list)
        with self.lock:
            self.next_key += 1
            self.active_torrents += 1

            # Live view: membership checks see current tracked IDs.
            known_ids = self.ids.values()
            fresh = next(
                (candidate for candidate in id_list if candidate not in known_ids),
                None)
            if fresh is None:
                raise RuntimeError(
                    "Something has gone wrong. "
                    "All Magnet IDs appear to have been used already.")

            self.ids[self.next_key] = fresh
            return self.next_key
Exemple #30
0
async def startup_event():
    """
    Startup events for application: connect the database, optionally create
    sample data, and register middleware and metrics routes.
    """
    try:
        await database.connect()
        logger.info("Connecting to database")

    except Exception as e:
        # Best-effort: log and continue so the app can still start.
        logger.info(f"Error: {e}")
        logger.trace(f"tracing: {e}")

    # initiate log with statement
    if RELEASE_ENV.lower() == "dev":
        logger.debug("Initiating logging for API")
    logger.info(f"API initiated Release_ENV: {RELEASE_ENV}")

    # BUG FIX: create_data() used to be called a second time inside the 'dev'
    # branch above, creating the sample data twice in dev environments.
    if CREATE_SAMPLE_DATA == "True":
        create_data()
        logger.info("Create Data")

    if HTTPS_ON == "True":
        app.add_middleware(HTTPSRedirectMiddleware)
        logger.warning(
            f"HTTPS is set to {HTTPS_ON} and will require HTTPS connections")
    if ADD_DEFAULT_GROUP == "True":
        logger.warning("Adding Default group")
        await add_default_group(add_default=ADD_DEFAULT_GROUP)

    app.add_route("/api/health/metrics", handle_metrics)
    logger.info("prometheus route added")