Example #1
def handle_event(event: dict, channel: str, channel_id: str, message: str,
                 sc: SlackClient, logger: logging.Logger) -> None:
    pretty_event = pformat(event)
    logger.debug(f"Event received:\n{pretty_event}")

    subtype = event.get('subtype')
    user = event.get('user')

    if subtype in ('group_join', 'channel_join') and user:

        # We will use the event's channel ID to send a response and refer to
        # users by their display_name in accordance with new guidelines.
        # https://api.slack.com/changelog/2017-09-the-one-about-usernames
        event_channel_id = event.get('channel')
        user_profile = event.get('user_profile') or {}
        username = user_profile.get('display_name')
        user_mention = f"<@{user}>"
        message = message.replace('{user}', user_mention)

        if event_channel_id == channel_id:
            try:
                sc.rtm_send_message(event_channel_id, message)
                logger.info(f"Welcomed {username} to #{channel}")
            except AttributeError:
                logger.error(f"Couldn't send message to #{channel}")
Example #2
def is_satisfied(requirement: Requirement, logger: logging.Logger) -> bool:
    try:
        requirement.check()
        logger.debug("Requirement '%s' satisfied", requirement.description)
        return True
    except Exception as e:
        logger.error("Requirement '%s' not satisfied: %s", requirement.description, e)
        return False
Example #3
 def __init__(self, name: str, logger: Logger, ttl: int, config: IdPConfig, lock=None):
     self.logger = logger
     self._cache: ExpiringCache
     if (config.redis_sentinel_hosts or config.redis_host) and config.session_app_key:
         self._cache = ExpiringCacheCommonSession(name, logger, ttl, config, secret=config.session_app_key)
     else:
         # This is used in tests
         self._cache = ExpiringCacheMem(name, logger, ttl, lock)
     logger.debug('Set up IDP ticket cache {!s}'.format(self._cache))
Example #4
class CAENRFIDEventArgs:
	'''This class defines the CAENRFID event arguments.'''
	def __init__(self):
		self._log = Logger("CAENRFIDEventArgs")
		self._log.debug( "Class %s created", self.__class__.__name__ )		

	def getData(self):
		'''Returns the event object value.'''
		raise NotImplementedError("Not implemented yet!")
Example #5
File: func_tests.py Project: 5nizza/aisy
def run_tests(
    test_files,
    run_tool: "func(file, result_file)->(rc, out, err)",
    check_answer: "func(test_file, result_file, rc, out, err)->(rc, out, err)",
    stop_on_error,
    logger: Logger,
    output_folder=None,
):
    """
    :param output_folder: if not None, intermediate results are saved there.
                          Files in that folder will be overwritten.
    """

    if output_folder:
        output_dir = output_folder
        makedirs(output_dir, exist_ok=True)
    else:
        output_dir = get_tmp_dir_name()

    logger.info("using " + output_dir + " as the temporal folder")

    failed_tests = list()
    for test in test_files:
        logger.info("testing {test}..".format(test=test))

        log_stream = open(_generate_name(output_dir, test) + ".log", "w")

        result_file = _generate_name(output_dir, test) + ".model"
        r_rc, r_out, r_err = run_tool(test, result_file)

        logger.debug(rc_out_err_to_str(r_rc, r_out, r_err))
        print(rc_out_err_to_str(r_rc, r_out, r_err), file=log_stream)

        c_rc, c_out, c_err = check_answer(test, result_file, r_rc, r_out, r_err)
        logger.debug(rc_out_err_to_str(c_rc, c_out, c_err))
        print(rc_out_err_to_str(c_rc, c_out, c_err), file=log_stream)
        log_stream.close()

        if c_rc != 0:
            logger.info("    FAILED")
            failed_tests.append(test)
            if stop_on_error:
                break

    if failed_tests:
        logger.info(
            "The following tests failed: %s \n%s",
            "".join("\n    " + t for t in failed_tests),
            "See logs in " + output_dir,
        )
    else:
        logger.info("ALL TESTS PASSED")

    if not output_folder and not failed_tests:
        shutil.rmtree(output_dir)

    return not failed_tests
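
The run_tool and check_answer parameters are described only by the string annotations above. As a rough sketch, conforming callbacks might look like this (the tool command and the pass criterion are assumptions, not part of the original project):

import os
import subprocess

def run_tool(test_file, result_file):
    # 'mytool' is a placeholder command; replace with the tool under test.
    proc = subprocess.run(['mytool', test_file, '-o', result_file],
                          capture_output=True, text=True)
    return proc.returncode, proc.stdout, proc.stderr

def check_answer(test_file, result_file, rc, out, err):
    # Deem the test passed if the tool exited cleanly and wrote a result file.
    ok = (rc == 0) and os.path.isfile(result_file)
    return (0 if ok else 1), '', ''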
Example #6
def delete_cookie(name: str, logger: logging.Logger, config: IdPConfig) -> None:
    """
    Ask browser to delete a cookie.

    :param name: cookie name as string
    :param logger: logging instance
    :param config: IdPConfig instance
    """
    logger.debug("Delete cookie: {!s}".format(name))
    return set_cookie(name, '/', logger, config, value='')
Example #7
def readUrlsFromFile():
    global urlFile
    global urls
    global logger
    with open(urlFile, 'r') as f:
        for line in f:
            line = line.rstrip("\r\n")
            logger.debug("Loading URL %s from %s", line, urlFile)
            urls.append(line)
    if len(urls) < 1:
        print("No urls were able to be loaded from %s, exiting!" % urlFile)
        exit(1)
Example #8
class CAENRFIDTag:
	'''This class is used to define objects representing the tags. These objects are used as return value for the inventory methods and as arguments for many tag access methods.'''
	def __init__(self):
		self._log = Logger("CAENRFIDTag")
		self._log.debug( "Class %s created", self.__class__.__name__ )

	class MemBanks:
		RESERVED = 0  # Indicates the reserved bank
		EPC = 1       # Indicates the EPC bank
		TID = 2       # Indicates the TID bank
		USER = 3      # Indicates the USER bank

	def GetId(self):
		'''Returns the tag's ID (the EPC code in Gen2 tags).'''
		pass

	def GetLength(self):
		'''Returns the tag's ID length.'''
		pass

	def GetPC(self):
		'''Returns the tag's PC code'''
		pass

	def GetReadPoint(self):
		'''Returns the read point that has detected the tag.'''
		pass

	def GetRSSI(self):
		'''Returns the RSSI value measured for the tag.'''
		pass

	def GetSource(self):
		'''Returns the name of the logical source that has detected the tag.'''
		pass

	def GetTID(self):
		'''Returns the tag's TID (valid only for EPC Class 1 Gen 2 tags).'''
		pass

	def GetTimeStamp(self):
		'''Gets the Tag's TimeStamp.'''
		pass

	def GetType(self):
		'''Returns the air protocol of the tag.'''
		pass

	def GetXPC(self):
		'''Returns the tag’s XPC words.'''
		pass
Example #9
def run(sc: SlackClient, channel: str, message: str, retries: int,
        logger: logging.Logger) -> None:
    if sc.rtm_connect():
        logger.info("Connected to Slack")

        channel_id = find_channel_id(channel, sc)
        logger.debug(f"Found channel ID {channel_id} for #{channel}")

        logger.info(f"Listening for joins in #{channel}")

        retry_count = 0
        backoff = 0.5

        while True:
            try:
                # Handle dem events!
                for event in sc.rtm_read():
                    handle_event(event, channel, channel_id, message, sc, logger)

                # Reset exponential backoff retry strategy every time we
                # successfully loop. Failure would have happened in rtm_read()
                retry_count = 0

                time.sleep(0.5)

            # This is necessary to handle an error caused by a bug in Slack's
            # Python client. For more information see
            # https://github.com/slackhq/python-slackclient/issues/127
            #
            # The TimeoutError could be more elegantly resolved by making a PR
            # to the websocket-client library and letting them coerce that
            # exception to a WebSocketTimeoutException.
            except (websocket.WebSocketConnectionClosedException, TimeoutError):
                logger.error("Lost connection to Slack, reconnecting...")
                if not sc.rtm_connect():
                    logger.info("Failed to reconnect to Slack")
                    if retry_count >= retries:
                        sys.exit(bail(
                            'fatal',
                            'red',
                            "Too many failed reconnect attempts, shutting down")
                        )
                    time.sleep((backoff ** 2) / 4)
                else:
                    logger.info("Reconnected to Slack")

                retry_count += 1

    else:
        sys.exit(bail('fatal', 'red', "Couldn't connect to Slack"))
Example #10
File: core.py Project: neveralso/JStack
def main_loop(judge_logger: logging.Logger):
    """
    A loop that polls the judge database for new runs.
    :param judge_logger: logger for the judge daemon
    :return:
    """
    try:
        db_conn = JudgeDBConnection(judge_config)
        while True:
            time.sleep(3)
            db_conn.has_new_run()
            judge_logger.debug('Judge daemon runs for 3s.')
    except Exception as e:
        judge_logger.error(e)
        return
Example #11
def read_cookie(name: str, logger: Logger) -> Optional[str]:
    """
    Read a browser cookie.

    :param logger: logging logger
    :returns: string with cookie content, or None
    :rtype: string | None
    """
    cookie = cherrypy.request.cookie
    logger.debug('Reading cookie(s): {}'.format(cookie))
    cookie = cookie.get(name)
    if not cookie:
        logger.debug('No {} cookie'.format(name))
        return None
    return cookie.value
Example #12
def main():
    global start
    global logger
    
    for i in range(maxThreadCount):
        logger.debug("Starting thread #%d", i)
        t = ThreadUrl(queue)
        t.daemon = True
        t.start()

    #populate queue with data
    for j in range(maxQueryCount):
        logger.debug("Populating URL #%d", j)
        queue.put(urls[randint(0, len(urls) - 1)])
           
    start = time.time()
    queue.join()
Example #13
File: util.py Project: SUNET/eduid-IdP
def get_requested_authn_context(idp: Saml2Server, saml_req: IdP_SAMLRequest, logger: Logger) -> Optional[str]:
    """
    Check if the SP has explicit Authn preferences in the metadata (some SPs are not
    capable of conveying this preference in the RequestedAuthnContext)
    """
    res = saml_req.get_requested_authn_context()

    attributes = saml_req.sp_entity_attributes

    if 'http://www.swamid.se/assurance-requirement' in attributes:
        # XXX don't just pick the first one from the list - choose the most applicable one somehow.
        new_authn = attributes['http://www.swamid.se/assurance-requirement'][0]
        logger.debug(f'Entity {saml_req.sp_entity_id} has AuthnCtx preferences in metadata. '
                     f'Overriding {res} -> {new_authn}')
        res = new_authn

    return res
Example #14
    def __init__(self, maker_key: str, event_name: str, logger: logging.Logger):
        """
        :param maker_key: The authenticating maker key
        :param event_name: The name of the event to trigger
        :param logger: Logger for logging purposes
        """
        if logger is None:
            raise Exception("Logger is missing!")
        if not maker_key:
            logger.debug('IFTTT: NO maker key provided')
            raise Exception("NO maker key provided!")
        if not event_name:
            logger.debug('IFTTT: NO event name provided')
            raise Exception("NO event name provided!")

        self.maker_key = maker_key
        self.event_name = event_name
        self.logger = logger
Example #15
def get_idpauthn_cookie(logger: Logger) -> Optional[bytes]:
    """
    Decode information stored in the 'idpauthn' browser cookie.

    The idpauthn cookie holds a value used to lookup `userdata' in context.sso_sessions.

    :param logger: logging logger
    :returns: string with cookie content, or None
    :rtype: string | None
    """
    _authn = read_cookie('idpauthn', logger)
    if _authn:
        try:
            cookie_val = base64.b64decode(_authn)
            logger.debug('idpauthn cookie value={!r}'.format(cookie_val))
            return cookie_val
        except binascii.Error:
            logger.debug('Could not b64 decode idpauthn value: {!r}'.format(_authn))
            raise
    return None
Example #16
File: data.py Project: morganics/BayesPy
    def coerce_to_numeric(df: pd.DataFrame, logger: logging.Logger, cutoff=0.10, ignore=None):
        ignore = ignore or []  # avoid a mutable default argument
        for col in df.columns:
            if col in ignore:
                continue
            values = df[col].dropna().unique()
            ratio = 0
            pre_length = len(values)

            if pre_length > 0:
                new_values = pd.to_numeric(df[col].dropna().unique(), errors='coerce')
                post_length = len(new_values[~np.isnan(new_values)])
                ratio = (pre_length - post_length) / pre_length

            if ratio <= cutoff:
                logger.debug("Converting column {} to numeric (ratio: {})".format(col, ratio))
                df[col] = pd.to_numeric(df[col], errors='coerce')
            else:
                logger.debug("Not converting column {} (ratio: {})".format(col, ratio))

        return df
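
Treating coerce_to_numeric as a plain function for illustration, a quick usage sketch with made-up data:

import logging
import pandas as pd

df = pd.DataFrame({'age': ['1', '2', 'x'], 'name': ['a', 'b', 'c']})
df = coerce_to_numeric(df, logging.getLogger(__name__), cutoff=0.5)
# 'age' is coerced to numeric (1 of 3 unique values fails, ratio ~0.33 <= 0.5),
# while 'name' stays as text (all 3 fail, ratio 1.0 > 0.5).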
Example #17
    def save_state(self, logger: logging.Logger) -> None:
        # If Redis is enabled, store the current state
        if self._redis_enabled:
            logger.debug(
                'Saving %s state: _referendum_count=%s, '
                '_public_prop_count=%s, _council_prop_count=%s, '
                '_validator_set_size=%s',
                self.name, self._referendum_count, self._public_prop_count,
                self._council_prop_count, self._validator_set_size)

            # Set values
            self._redis.hset_multiple(self._redis_hash, {
                Keys.get_blockchain_referendum_count(self.name):
                    self._referendum_count,
                Keys.get_blockchain_public_prop_count(self.name):
                    self._public_prop_count,
                Keys.get_blockchain_council_prop_count(self.name):
                    self._council_prop_count,
                Keys.get_blockchain_validator_set_size(self.name):
                    self._validator_set_size
            })
Example #18
def load_config_file(path: str, logger: logging.Logger) -> configobj.ConfigObj:
    if path and not os.path.exists(path):
        logger.error(
            "Config file '{}' does not exist. Falling back to "
            "default.".format(path)
        )
        path = "tmp.config"

    logger.debug("Loading config file.")

    try:
        configspec = os.path.join(util.get_script_path(), "configspec.config")
        config = configobj.ConfigObj(path, configspec=configspec)
    except Exception as e:
        msg = f"{e} error occurred during reading of config file. Aborting now."
        logger.critical(msg)
        raise ValueError(msg) from e

    config = validate_config_file(config, logger)

    return config
Example #19
def write_message(logger: logging.Logger, fhandle: BinaryIO,
                  message: str) -> None:
    """Write message into file handle.

    Sets up SIGALRM and raises `ValueError` if alarm is due.
    """
    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(5)
    try:
        fhandle_stat = os.fstat(fhandle.fileno())
        is_fifo = stat.S_ISFIFO(fhandle_stat.st_mode)
        if not is_fifo:
            raise ValueError('fhandle is expected to be a FIFO pipe')

        logger.debug('Will write %s', repr(message))
        fhandle.write(message.encode('utf-8'))
    finally:
        signal.alarm(0)
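
The signal_handler installed above is not shown in this snippet; a minimal sketch consistent with the docstring ("raises ValueError if alarm is due") would be:

def signal_handler(signum, frame):
    # Assumed handler: turn SIGALRM into the ValueError the docstring promises.
    raise ValueError('Timed out while writing to fhandle')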
Example #20
def do_command(cmd: str,
               cwd: str = None,
               logger: logging.Logger = None) -> Optional[str]:
    """
    Spawn process, print stdout/stderr to console.
    Throws exception on non-zero returncode.
    """
    if logger:
        logger.debug('cmd: %s', cmd)
    proc = subprocess.Popen(cmd,
                            cwd=cwd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode:
        msg = 'ERROR: command failed with return code {}'.format(proc.returncode)
        if stderr:
            msg = 'ERROR\n {} '.format(stderr.decode('utf-8').strip())
        raise Exception(msg)
    if stdout:
        return stdout.decode('utf-8').strip()
    return None
Example #21
def read_chars_groups(chars_groups_file: str,
                      logger: logging.Logger = None) -> Tuple[dict, int]:
    if logger is None:
        logger = logging.getLogger(__name__)

    logger.debug('Reading the char groups: "%s"' % chars_groups_file)

    with open(chars_groups_file, mode='r', encoding='utf8') as f:
        chars_groups = f.readlines()
        chars_groups = [x.strip('\n') for x in chars_groups]
        char_dim = len(chars_groups) + 1  # plus one for unknown characters
        chars_dict = {'UNK': 0}
        for i, chars_group in enumerate(chars_groups):
            for char in chars_group:
                if char in chars_dict:
                    raise ValueError('Duplicated character')
                chars_dict[char] = i + 1

        logger.debug('%d char groups were read' % len(chars_groups))

    return chars_dict, char_dim
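
A quick usage sketch: with a groups file containing the two lines "ab" and "cd", each character maps to its 1-based group index, and index 0 is reserved for unknown characters:

import logging

chars_dict, char_dim = read_chars_groups('groups.txt',
                                         logging.getLogger(__name__))
# chars_dict == {'UNK': 0, 'a': 1, 'b': 1, 'c': 2, 'd': 2}; char_dim == 3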
Example #22
def format_xml(input_string: str, logger: logging.Logger) -> str:
    """Format xml

    Args:
        input_string: xml to format
        logger: logger instance

    Returns:
        formatted xml
    """
    logger.debug('before:\n{}'.format(input_string))

    # https://stackoverflow.com/questions/14479656/empty-lines-while-using-minidom-toprettyxml
    parsed = xml.dom.minidom.parseString(input_string)
    result_with_empty_lines = parsed.toprettyxml(indent=' '*4)
    output = '\n'.join(
        [line for line in result_with_empty_lines.split('\n') if line.strip()])

    logger.debug('after:\n{}'.format(output))

    return output
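
Usage sketch: the result is the same document re-indented with four spaces, with the blank lines minidom would otherwise emit stripped out:

import logging

pretty = format_xml('<a><b>text</b></a>', logging.getLogger(__name__))
# pretty holds the XML declaration followed by the indented <a> and <b> elements.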
Example #23
def _get_sensors(log: logging.Logger) -> List[TempSensor]:
    """
    Function to return the sensors. In case they get disconnected, there has to be a
    way to re-create the sensor objects. The first sensor in the list will be the
    left side.

    Returns
    -------
    list:
        List of temperature sensors
    """

    sensors = []
    sides = ["left", "right"]
    log.debug("Fetching sensors")
    for index, sensor in enumerate(SENSORS):
        print(f"Init: {sides[index]} Pin: {sensor}")
        sensors.append(TempSensor(sensor, sides[index]))

    log.debug(f"Fetched sensors {sensors}")
    return sensors
Example #24
 def test_debug(self):
     logger = Logger(__name__)
     logger.debug = mock.MagicMock()
     logging.debug(logger, 'hi', {'a': 'b'})
     logger.debug.assert_called_with(
         'hi\n%s',
         '\n`a`: b',
         exc_info=False,
         extra={'data': {
             'a': 'b'
         }},
     )
Example #25
    def save_state(self, logger: logging.Logger) -> None:
        # If Redis is enabled, store the current state
        if self._redis_enabled:
            logger.debug(
                'Saving %s state: _referendum_count=%s, '
                '_public_prop_count=%s, _council_prop_count=%s, '
                '_validator_set_size=%s', self.name, self._referendum_count,
                self._public_prop_count, self._council_prop_count,
                self._validator_set_size)

            # Set values
            self._redis.set_multiple({
                self._redis_prefix + '_referendum_count':
                self._referendum_count,
                self._redis_prefix + '_public_prop_count':
                self._public_prop_count,
                self._redis_prefix + '_council_prop_count':
                self._council_prop_count,
                self._redis_prefix + '_validator_set_size':
                self._validator_set_size
            })
Example #26
def year_span(target_year: int,
              base_year: int,
              yr_span: int,
              hdr_span: int,
              logger: lg.Logger = None) -> int:
    """
    Calculate which row to update, factoring in the header row placed every $hdr_span years.
    :param   target_year: year to calculate for
    :param   base_year: starting year in the sheet
    :param   yr_span: number of rows between equivalent positions in adjacent years, not including header rows
    :param   hdr_span: number of rows between header rows
    :param   logger: optional
    :return  span as int
    """
    if logger:
        logger.debug(
            F"target year = {target_year}; base year = {base_year}; year span = {yr_span}; header span = {hdr_span}"
        )
    year_diff = target_year - base_year
    hdr_adjustment = 0 if hdr_span <= 0 else (year_diff // hdr_span)
    return (year_diff * yr_span) + hdr_adjustment
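
A worked example of the row arithmetic: with base_year=2010, yr_span=4, and a header row every hdr_span=5 years, target_year=2021 gives year_diff = 11, hdr_adjustment = 11 // 5 = 2, so the span is 11 * 4 + 2 = 46 rows:

year_span(2021, 2010, 4, 5)  # -> 46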
Example #28
def log_scaffold_stats(data: MoleculeDataset,
                       index_sets: List[Set[int]],
                       num_scaffolds: int = 10,
                       num_labels: int = 20,
                       logger: logging.Logger = None) -> List[Tuple[List[float], List[int]]]:
    # print some statistics about scaffolds
    target_avgs = []
    counts = []
    for index_set in index_sets:
        data_set = [data[i] for i in index_set]
        targets = [d.targets for d in data_set]
        targets = np.array(targets, dtype=float)
        target_avgs.append(np.nanmean(targets, axis=0))
        counts.append(np.count_nonzero(~np.isnan(targets), axis=0))
    stats = [(target_avgs[i][:num_labels], counts[i][:num_labels]) for i in range(min(num_scaffolds, len(target_avgs)))]

    if logger is not None:
        logger.debug('Label averages per scaffold, in decreasing order of scaffold frequency, '
                     f'capped at {num_scaffolds} scaffolds and {num_labels} labels: {stats}')

    return stats
Example #29
def start_node_monitor(node_monitor: NodeMonitor, monitor_period: int,
                       logger: logging.Logger):
    # Start
    while True:
        # Read node data
        try:
            logger.debug('Reading %s.', node_monitor.node)
            node_monitor.monitor()
            logger.debug('Done reading %s.', node_monitor.node)
        except ReqConnectionError:
            node_monitor.node.set_as_down(node_monitor.channels, logger)
        except ReadTimeout:
            node_monitor.node.set_as_down(node_monitor.channels, logger)
        except (IncompleteRead, ChunkedEncodingError, ProtocolError) as e:
            logger.error(
                'Error when reading data from %s: %s. '
                'Alerter will continue running normally.', node_monitor.node,
                e)
        except Exception as e:
            logger.exception(e)
            raise e

        # Save all state
        node_monitor.save_state()
        node_monitor.node.save_state(logger)

        # Sleep
        logger.debug('Sleeping for %s seconds.', monitor_period)
        time.sleep(monitor_period)
Example #30
async def is_node_ready(
    task: RowProxy,
    graph: nx.DiGraph,
    db_connection: SAConnection,
    _logger: logging.Logger,
) -> bool:
    query = comp_tasks.select().where(
        and_(
            comp_tasks.c.node_id.in_(list(graph.predecessors(task.node_id))),
            comp_tasks.c.project_id == task.project_id,
        ))
    result = await db_connection.execute(query)
    tasks = await result.fetchall()

    _logger.debug("TASK %s ready? Checking ..", task.internal_id)
    for dep_task in tasks:
        job_id = dep_task.job_id
        if not job_id:
            return False
        _logger.debug(
            "TASK %s DEPENDS ON %s with stat %s",
            task.internal_id,
            dep_task.internal_id,
            dep_task.state,
        )
        if dep_task.state != StateType.SUCCESS:
            return False
    _logger.debug("TASK %s is ready", task.internal_id)
    return True
Example #31
def SimpleSubprocess(Name: str,
                     Command: str,
                     Logger: logging.Logger,
                     CheckPipefail: bool = False,
                     Env: Union[str, None] = None,
                     AllowedCodes: Union[list, None] = None) -> bytes:

    # Avoid a mutable default argument
    if AllowedCodes is None:
        AllowedCodes = []

    # Timestamp
    StartTime = time.time()

    # Compose command
    Command = (f"source {Env}; " if Env is not None else f"") + (
        f"set -o pipefail; " if CheckPipefail else f"") + Command
    Logger.debug(Command)

    # Shell
    Shell = subprocess.Popen(Command,
                             shell=True,
                             executable="/bin/bash",
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    Stdout, Stderr = Shell.communicate()
    if Shell.returncode != 0 and Shell.returncode not in AllowedCodes:
        ErrorMessages = [
            f"Command '{Name}' has returned non-zero exit code [{str(Shell.returncode)}]",
            f"Command: {Command}", f"Details: {Stderr.decode('utf-8')}"
        ]
        for line in ErrorMessages:
            Logger.error(line)
        raise OSError(f"{ErrorMessages[0]}\n{ErrorMessages[2]}")
    if Shell.returncode in AllowedCodes:
        Logger.warning(
            f"Command '{Name}' has returned ALLOWED non-zero exit code [{str(Shell.returncode)}]"
        )

    # Timestamp
    Logger.info(f"{Name} - %s" % (SecToTime(time.time() - StartTime)))

    # Return
    return Stdout[:-1]
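
Usage sketch (the name and shell pipeline are illustrative; the return value is the raw stdout bytes minus the trailing newline):

import logging

out = SimpleSubprocess(Name="CountHosts",
                       Command="cat /etc/hosts | wc -l",
                       Logger=logging.getLogger(__name__),
                       CheckPipefail=True)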
Example #32
def communicate(tensors: List[torch.Tensor],
                communication_op: Any,
                logger: logging.Logger = None) -> None:
    """
    Communicate a list of tensors
    Args:
        tensors (Iterable[Tensor]): list of tensors
        communication_op: a method or partial object which takes a tensor as
            input and communicates it. It can be a partial object around
            something like torch.distributed.all_reduce
        logger: optional logger for debug messages
    """
    tensors_by_dtype = group_by_dtype(tensors)
    for tensors_with_same_dtype in tensors_by_dtype.values():
        flat_tensor = flatten_tensors(tensors_with_same_dtype)
        if logger is not None:
            logger.debug("Flatten completed")
        communication_op(tensor=flat_tensor)
        if logger is not None:
            logger.debug("Commmunication completed")
        with torch.no_grad():
            for f, t in zip(
                    unflatten_tensors(flat_tensor, tensors_with_same_dtype),
                    tensors_with_same_dtype,
            ):
                t.copy_(f)
        if logger is not None:
            logger.debug("Unflatten completed")
Example #33
def start_network_monitor(network_monitor: NetworkMonitor, monitor_period: int,
                          logger: logging.Logger):
    # Start
    while True:
        # Read network data
        try:
            logger.debug('Reading network data.')
            network_monitor.monitor()
            logger.debug('Done reading network data.')
        except NoLiveFullNodeException:
            network_monitor.channels.alert_major(
                CouldNotFindLiveFullNodeAlert(network_monitor.monitor_name))
        except (ReqConnectionError, ReadTimeout):
            network_monitor.last_full_node_used.set_as_down(
                network_monitor.channels, logger)
        except (IncompleteRead, ChunkedEncodingError, ProtocolError) as e:
            network_monitor.channels.alert_error(
                ErrorWhenReadingDataFromNode(
                    network_monitor.last_full_node_used))
            logger.error('Error when reading data from %s: %s',
                         network_monitor.last_full_node_used, e)
        except Exception as e:
            logger.exception(e)
            raise e

        # Save all state
        network_monitor.save_state()

        # Sleep
        if not network_monitor.is_syncing():
            logger.debug('Sleeping for %s seconds.', monitor_period)
            time.sleep(monitor_period)
Example #34
async def ensure(
    log: logging.Logger, client: aiodocker.docker.Docker,
    config: DockerVolumeConfig
) -> AsyncGenerator[aiodocker.volumes.DockerVolume, None]:
    "Creates or returns an existing volume"
    volume: aiodocker.volumes.DockerVolume
    volume = aiodocker.volumes.DockerVolume(docker=client, name=config.name)
    try:
        volume_details = await volume.show()
        log.debug(f"found volume {config.name}")
    except aiodocker.exceptions.DockerError as e:
        log.debug(f"error finding docker volume {config.name}: {e}")
        if e.status != 404:
            raise  # don't silently yield a volume that was never confirmed
        volume = await create(client,
                              name=config.name,
                              labels=config.labels,
                              driver=config.driver)
        log.debug(f"create volume {config.name} response: {volume}")
        log.info(f"created volume {config.name}")

    try:
        yield volume
    finally:
        if config.delete:
            await delete(client, volume)
            log.info(f"deleted volume {config.name}")
        else:
            log.info(f"did not delete volume {config.name}")
Example #35
def _get_user_info_from_jwt(logger: logging.Logger) -> Tuple[Any, Any, Optional[Any]]:
    logger.debug("headers: %s", json.dumps(dict(request.headers)))
    encoded_jwt = request.headers["x-amzn-oidc-data"]
    logger.debug("encoded_jwt 'x-amzn-oidc-data':\n %s", encoded_jwt)
    jwt_headers = encoded_jwt.split(".")[0]
    decoded_jwt_headers_bytes = base64.b64decode(jwt_headers)
    decoded_jwt_headers = decoded_jwt_headers_bytes.decode("utf-8")
    decoded_json = json.loads(decoded_jwt_headers)
    kid = decoded_json["kid"]
    region = os.environ["AWS_REGION"]
    # Step 2: Get the public key from regional endpoint
    url = "https://public-keys.auth.elb." + region + ".amazonaws.com/" + kid
    req = requests.get(url)
    pub_key = req.text
    # Step 3: Get the payload
    payload = jwt.decode(encoded_jwt, pub_key, algorithms=["ES256"])
    logger.debug("payload:\n %s", payload)

    username = payload["username"]
    if "preferred_username" in payload:
        username = payload["preferred_username"]

    email = payload["email"]

    groups = None
    if "custom:groups" in payload:
        groups = payload["custom:groups"].strip("][").split(", ")

    return email, username, groups
Example #36
def login(logger: logging.Logger, app: Flask) -> Any:
    logger.debug("cookies: %s", json.dumps(request.cookies))
    email, username, groups = _get_user_info_from_jwt(logger)

    # If we have groups, then the provider sent them.
    # Match them to the proper user groups
    if groups is not None:
        logger.info("We got groups in the auth payload, we need to align them to teams")
        user_groups = _get_user_groups_from_provider(logger, list(groups))
    else:
        logger.info("No groups in auth payload, we are fetchng the from the Cognito User Pool")
        user_groups = _get_user_groups_from_jwt(logger)

    logger.debug("username: %s, email: %s, groups: %s", username, email, user_groups)
    ready = _is_profile_ready_for_user(logger, username, email)
    logger.debug("user space is READY? %s", ready)

    client_id, cognito_domain, hostname, logout_uri = get_logout_url(logger)
    env_name = os.environ["ENV_NAME"]
    return render_template(
        "index.html",
        title="login",
        username=username,
        hostname=hostname,
        logout_uri=logout_uri,
        client_id=client_id,
        cognito_domain=cognito_domain,
        teams=user_groups,
        env_name=env_name,
    )
Example #37
def voting(logger: logging.Logger):
    connection = sql.createDBConnection(sql.DB_FILE)

    time.sleep(30)  # staggers voting actions and persistence actions

    while True:
        try:
            postIDList = sql.fetchPostsNeedingVotingFromDB(connection)
            for postID in postIDList:
                try:
                    logger.debug(postID)
                    submission = reddit.submission(id=postID)
                    if submission is not None:
                        if sql.isVoteable(connection, submission.id):
                            votingAction(submission, connection, logger)
                        sql.removePostFromDB(connection, submission)
                    logger.debug(f"Processed voting on {submission}")
                    time.sleep(
                        5
                    )  # Throttles the bot some to avoid hitting the rate limit
                except Exception as innerException:
                    logger.warning(
                        f"There was an issue processing voting for post {postID}"
                    )
                    logger.warning(
                        "The post was removed from the database and will not be processed"
                    )
                    logger.warning("Printing stack strace...")
                    logger.warning(innerException)
                    sql.removePostFromDB(connection, submission)

            time.sleep(
                300
            )  # No need to query the DB constantly doing voting. 300s = 5m
        except Exception as outerException:
            logger.warning(
                "The voting thread raised an exception. It will try to continue."
            )
            logger.warning("Printing stack strace...")
            logger.warning(outerException)
Example #38
def copy_unique_filepairs(difference_matrix, src_path, list_land, list_port,
                          img_path,
                          logger: logging.Logger):
    logger.info('Copying new unique files...')

    cnt = 0
    while True:
        n = len(list_land)
        m = len(list_port)
        if not n:
            break

        arg_land, arg_port = np.unravel_index(np.argmin(difference_matrix), (n, m))

        logger.debug('  {0}'.format(list_land[arg_land]))
        logger.debug('  {0}'.format(list_port[arg_port]))
        logger.debug('    {0}'.format(list_land[arg_land] + 'XXXX' +
                                      list_port[arg_port] + '.jpg'))

        shutil.copyfile(src_path + list_land[arg_land],
                        img_path +
                        list_land[arg_land] + '-land-' +
                        list_port[arg_port] + '.jpg')
        shutil.copyfile(src_path + list_port[arg_port],
                        img_path +
                        list_land[arg_land] + '-port-' +
                        list_port[arg_port] + '.jpg')
        cnt += 1

        del list_land[arg_land], list_port[arg_port]
        difference_matrix = np.delete(difference_matrix, arg_land, 0)
        difference_matrix = np.delete(difference_matrix, arg_port, 1)

    logger.debug('{0} files copied'.format(cnt))
Example #39
def calculate_unperturbated_empiricals(
    default_vs30,
    extended_period,
    fsf,
    im_config,
    n_processes,
    sim_root,
    empirical_im_logger: Logger = get_basic_logger(),
):
    events = load_fault_selection_file(fsf)
    empirical_im_logger.debug(
        f"Loaded {len(events)} events from the fault selection file"
    )
    events = [
        name if count == 1 else get_realisation_name(name, 1)
        for name, count in events.items()
    ]
    tasks = create_event_tasks(
        events, sim_root, im_config, default_vs30, extended_period, empirical_im_logger
    )

    pool = Pool(min(n_processes, len(tasks)))
    empirical_im_logger.debug("Running empirical im calculations")
    pool.starmap(calculate_empirical, tasks)
    empirical_im_logger.debug("Empirical ims calculated")
Example #40
def download_prices_from_s3(bucket: ServiceResource, dir_prices: Path,
                            remote_dir_prices: Path, missing_rics: List[str],
                            logger: logging.Logger) -> None:

    dir_prices.mkdir(parents=True, exist_ok=True)

    for ric in missing_rics:

        remote_filename = ric2filename(remote_dir_prices, ric, 'csv.gz')

        basename = remote_filename.name
        dest_parent = dir_prices
        dest = dest_parent / Path(basename)

        if dest.is_file():
            logger.debug('skip downloading {}'.format(basename))
        else:
            logger.debug('start downloading {}'.format(basename))
            try:
                bucket.download_file(Key=str(remote_filename),
                                     Filename=str(dest))
            except ClientError as e:
                code = e.response.get('Error', {}).get('Code', '')
                if str(code) == str(HTTPStatus.NOT_FOUND.value):
                    logger.critical('{} is not found'.format(
                        str(remote_filename)))
            logger.debug('end downloading {}'.format(basename))
Example #41
def pp_syllogism(logger: logging.Logger, task, response=None):
    logger.debug(pp_statement(task[0]) + '\n' + pp_statement(task[1]))
    if response:
        logger.debug("-----------------")
        for r in response:
            logger.debug(pp_statement(r))
    return None
Example #42
def rewrite_imports(
    doc: Tree.Document, zip_paths: Dict[str, str], logger: logging.Logger
) -> List[str]:
    # rewrite doc source_lines, changing import statements to refer to relative path in zip
    source_lines = doc.source_lines.copy()

    for imp in doc.imports:
        lo = imp.pos.line - 1
        hi = imp.pos.end_line
        found = False
        for lineno in range(lo, hi):
            line = source_lines[lineno]
            old_uri = imp.uri
            new_uri = os.path.relpath(
                zip_paths[imp.doc.pos.abspath], os.path.dirname(zip_paths[doc.pos.abspath])
            )
            old_uri_pattern = f'"{old_uri}"'
            if old_uri_pattern in line:
                found = True
            line2 = line.replace(old_uri_pattern, f'"{new_uri}"')
            if line != line2:
                logger.debug(doc.pos.abspath)
                logger.debug("  " + line)
                logger.debug("  => " + line2)
                source_lines[lineno] = line2
        assert found

    return source_lines
Example #43
def start_network_monitor(network_monitor: NetworkMonitor, monitor_period: int,
                          logger: logging.Logger):
    # Start
    while True:
        # Read network data
        try:
            logger.debug('Reading network data.')
            network_monitor.monitor()
            logger.debug('Done reading network data.')
        except NoLiveFullNodeException:
            network_monitor.channels.alert_major(
                CouldNotFindLiveFullNodeAlert())
        except (ConnectionError, ReadTimeout) as conn_err:
            network_monitor.last_full_node_used.set_as_down(
                network_monitor.channels, conn_err, logger)
        except (urllib3.exceptions.IncompleteRead,
                http.client.IncompleteRead) as incomplete_read:
            network_monitor.channels.alert_error(
                ErrorWhenReadingDataFromNode(
                    network_monitor.last_full_node_used))
            logger.error('Error when reading data from %s: %s',
                         network_monitor.last_full_node_used, incomplete_read)
        except Exception as e:
            logger.error(e)
            raise e

        # Save all state
        network_monitor.save_state()

        # Sleep
        logger.debug('Sleeping for %s seconds.', monitor_period)
        time.sleep(monitor_period)
Example #44
def start_github_monitor(github_monitor: GitHubMonitor, monitor_period: int,
                         logger: logging.Logger):
    # Set up alert limiter
    github_error_alert_limiter = TimedTaskLimiter(
        InternalConf.github_error_interval_seconds)

    # Start
    while True:
        # Read GitHub releases page
        try:
            logger.debug('Reading %s.', github_monitor.releases_page)
            github_monitor.monitor()
            logger.debug('Done reading %s.', github_monitor.releases_page)

            # Save all state
            github_monitor.save_state()

            # Reset alert limiter
            github_error_alert_limiter.reset()
        except (ConnectionError, ReadTimeout) as conn_err:
            if github_error_alert_limiter.can_do_task():
                github_monitor.channels.alert_error(
                    CannotAccessGitHubPageAlert(github_monitor.releases_page))
                github_error_alert_limiter.did_task()
            logger.error('Error occurred when accessing {}: {}.'
                         ''.format(github_monitor.releases_page, conn_err))
        except JSONDecodeError as json_error:
            logger.error(json_error)  # Ignore such errors
        except Exception as e:
            logger.error(e)
            raise e

        # Sleep
        logger.debug('Sleeping for %s seconds.', monitor_period)
        time.sleep(monitor_period)
Example #45
def start_node_monitor(node_monitor: NodeMonitor, monitor_period: int,
                       logger: logging.Logger):
    # Start
    while True:
        # Read node data
        try:
            logger.debug('Reading %s.', node_monitor.node)
            node_monitor.monitor()
            logger.debug('Done reading %s.', node_monitor.node)
        except ConnectionError as conn_err:
            node_monitor.node.set_as_down(node_monitor.channels, conn_err,
                                          logger)
        except ReadTimeout as read_timeout:
            node_monitor.node.set_as_down(node_monitor.channels, read_timeout,
                                          logger)
        except (urllib3.exceptions.IncompleteRead,
                http.client.IncompleteRead) as incomplete_read:
            logger.error('Error when reading data from {}: {}. '
                         'Alerter will continue running normally.'
                         ''.format(node_monitor.node, incomplete_read))
        except Exception as e:
            logger.error(e)
            raise e

        # Save all state
        node_monitor.save_state()
        node_monitor.node.save_state(logger)

        # Sleep
        logger.debug('Sleeping for %s seconds.', monitor_period)
        time.sleep(monitor_period)
Example #46
def __disable_profiler(
    profiler: Optional[Profile],
    profiling_dir: Optional[str],
    pstat_filename: Optional[str],
    logger: logging.Logger,
):
    """
    Disable given profiler and dump pipelinewise stats into a pStat file
    Args:
        profiler: optional instance of cprofile.Profiler to disable
        profiling_dir: profiling dir where pstat file will be created
        pstat_filename: custom pstats file name, the extension .pstat will be appended to the name
        logger: Logger instance to do some info and debug logging
    """
    if profiler is not None:
        logger.debug('disabling profiler and dumping stats...')

        profiler.disable()

        if not pstat_filename.endswith('.pstat'):
            pstat_filename = f'{pstat_filename}.pstat'

        dump_file = os.path.join(profiling_dir, pstat_filename)

        logger.debug('Attempting to dump profiling stats in file "%s" ...',
                     dump_file)
        profiler.dump_stats(dump_file)
        logger.debug('Profiling stats dump successful')

        logger.info('Profiling stats files are in folder "%s"', profiling_dir)

        profiler.clear()
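
A rough usage sketch pairing the helper with cProfile (assumes the profiling directory already exists and the helper is reachable under its module-level name):

import cProfile
import logging

profiler = cProfile.Profile()
profiler.enable()
sum(x * x for x in range(1_000_000))  # the code being profiled
__disable_profiler(profiler, '/tmp/profiling', 'run1',
                   logging.getLogger(__name__))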
Example #47
def _find_reports(report_type: str, input_file_directory: str, log: logging.Logger) -> list:
    """ Go through the input_file_directory and find all personnel dosimetry reports of the specified type

    :param report_type: 'ORIGINAL' or 'NEW' for separating the files that only contain updates from the reports
                        containing all personnel for that measurement period
    :param input_file_directory: Path to directory containing dose report files to be parsed
    :return: List of file paths to *.xls and *.xlsx files found in input_file_directory
    """
    reports = []

    # List file paths to original or new reports depending on input report_type
    if report_type.upper() == 'ORIGINAL':
        log.debug('Searching for original Landauer reports')
        reports = [os.path.join(input_file_directory, filename) for filename in os.listdir(input_file_directory) if
                   (filename.lower().endswith('.xls') or filename.lower().endswith('.xlsx'))
                   and 'NEW' not in filename.upper()]

    elif report_type.upper() == 'NEW':
        log.debug('Searching for Landauer reports marked as new')
        reports = [os.path.join(input_file_directory, filename) for filename in os.listdir(input_file_directory) if
                   (filename.lower().endswith('.xls') or filename.lower().endswith('.xlsx'))
                   and 'NEW' in filename.upper()]

    log.debug('Finished search for Landauer personnel dosimetry reports')
    return reports
Example #48
def set_cookie(name: str, path: str, logger: logging.Logger, config: IdPConfig, value: str, b64: bool = True) -> None:
    """
    Ask browser to store a cookie.

    Since eduID.se is HTTPS only, the cookie parameter `Secure' is set.

    :param name: Cookie identifier (string)
    :param path: The path specification for the cookie
    :param logger: logging instance
    :param config: IdPConfig instance
    :param value: The value to assign to the cookie
    """
    cookie = cherrypy.response.cookie
    if b64:
        cookie[name] = b64encode(value)
    else:
        cookie[name] = value
    cookie[name]['path'] = path
    if not config.insecure_cookies:
        cookie[name]['secure'] = True  # ask browser to only send cookie using SSL/TLS
    cookie[name]['httponly'] = True # protect against common XSS vulnerabilities
    logger.debug("Set cookie {!r} : {}".format(name, cookie))
Example #49
def setup_localisations(logger: logging.Logger):
    """Setup gettext localisations."""
    import gettext
    import locale
    # Get the 'en_US' style language code
    lang_code = locale.getdefaultlocale()[0]

    # Allow overriding through command line.
    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            if arg.casefold().startswith('lang='):
                lang_code = arg[5:]
                break

    # Expands single code to parent categories.
    expanded_langs = gettext._expand_lang(lang_code)

    logger.info('Language: {!r}', lang_code)
    logger.debug('Language codes: {!r}', expanded_langs)

    for lang in expanded_langs:
        try:
            file = open('../i18n/{}.mo'.format(lang), 'rb')
        except FileNotFoundError:
            pass
        else:
            with file:
                trans = gettext.GNUTranslations(file)
            break
    else:
        # No translations, fallback to English.
        # That's fine if the user's language is actually English.
        if 'en' not in expanded_langs:
            logger.warning(
                "Can't find translation for codes: {!r}!",
                expanded_langs,
            )
        trans = gettext.NullTranslations()
    # Add these functions to builtins, plus _=gettext
    trans.install(['gettext', 'ngettext'])
Example #50
def get_debug_logger(name, strm=None):
    """Creates a basic debug log function with prettyprint capabilities.

    A basic logger is created.
    The logger's ``debug`` method is returned.
    The logger itself is returned as ``return.logger``.
    The handler is returned as ``return.handler``.
    A pretty-printing version of the log function is returned as ``return.pp``.

    >>> from sys import stdout
    >>> debug = get_debug_logger('boogie', strm=stdout)
    >>> debug('Git yer gittin it on on and boogie!')
    Git yer gittin it on on and boogie!
    >>> debug.pp(debug.__dict__)  # doctest: +ELLIPSIS
    { 'handler': <logging.StreamHandler object at 0x...>,
      'logger': <logging.Logger object at 0x...>,
      'pp': <function <lambda> at 0x...>}

    Subsequent loggers do not issue duplicate output.
    >>> debug_two = get_debug_logger('boogie', strm=stdout)
    >>> debug('Hit me one time!  OW!')
    Hit me one time!  OW!

    How does that work?
    >>> debug.logger is debug_two.logger
    False

    So logging.Logger(name) doesn't always return the same object.
    """
    from logging import Logger, StreamHandler, DEBUG
    logger = Logger(name)
    debug = lambda *args, **kwargs: logger.debug(*args, **kwargs)
    debug.logger = logger

    handler = StreamHandler(stream=strm)
    logger.addHandler(handler)
    debug.handler = handler

    from pprint import PrettyPrinter
    pformat = PrettyPrinter(indent=2).pformat
    debug.pp = lambda *args, **kwargs: debug(pformat(*args, **kwargs))
    return debug
Example #51
def create_html_response(binding: str, http_args: dict, start_response: Callable, logger: Logger) -> bytes:
    """
    Create a HTML response based on parameters compiled by pysaml2 functions
    like apply_binding().

    :param binding: SAML binding
    :param http_args: response data
    :param start_response: WSGI-like start_response function
    :param logger: logging logger

    :return: HTML response
    """
    if binding == BINDING_HTTP_REDIRECT:
        # XXX This URL extraction code is untested in practice, but it appears
        # there should be HTTP headers in http_args['headers']
        urls = [v for (k, v) in http_args['headers'] if k == 'Location']
        logger.debug('Binding {!r} redirecting to {!r}'.format(binding, urls))
        if 'url' in http_args:
            del http_args['headers']  # less debug log below
            logger.debug('XXX there is also a "url" in http_args :\n{!s}'.format(pprint.pformat(http_args)))
            if not urls:
                urls = [http_args.get('url')]
        raise cherrypy.HTTPRedirect(urls)

    # Parse the parts of http_args we know how to parse, and then warn about any remains.
    message = http_args.pop('data')
    status = http_args.pop('status', '200 Ok')
    headers = http_args.pop('headers', [])
    headers_lc = [x[0].lower() for x in headers]
    if 'content-type' not in headers_lc:
        _content_type = http_args.pop('content', 'text/html')
        headers.append(('Content-Type', _content_type))

    if http_args != {}:
        logger.debug('Unknown HTTP args when creating {!r} response :\n{!s}'.format(
            status, pprint.pformat(http_args)))

    start_response(status, headers)
    if not isinstance(message, six.binary_type):
        message = message.encode('utf-8')
    return message
Example #52
File: printer.py Project: Helgart/raiden
class Printer:
	"""
		Printer utility, can display depending on 4 mods : debug, info, warning and error
		Will be writen again later with a proper logger implementation
		Bear with it for now !
	"""

	__metaclass__ = Singleton

	DEBUG = 0
	INFO = 1
	WARNING = 2
	ERROR = 3

	def __init__(self):
		self.level = self.INFO
		self.logger = None

	def setLogger(self, filepath, level):
		""" Define logger """

		if not os.path.isdir(os.path.dirname(filepath)):
			raise Exception("Unknown directory " + os.path.dirname(filepath))

		## Why ? well ... https://docs.python.org/2/library/logging.html#levels
		logLevel = 10 if not level else int(level) * 10
		handler = FileHandler(filepath)
		formatter = Formatter('%(asctime)s - %(levelname)-8s - %(message)s')
		handler.setFormatter(formatter)
		
		self.logger = Logger('main')
		self.logger.addHandler(handler)
		self.logger.setLevel(logLevel)

	def debug(self, origin, message):
		""" print a debug message """

		if self.logger:
			self.logger.debug(message, {'origin' : origin})

		if self.level > self.DEBUG:
			return

		print('[DEBUG][' + str(datetime.datetime.now()) + '][' + origin + '] ' + message)

	def info(self, origin, message):
		""" print an info message """

		if self.logger:
			self.logger.info(message, {'origin' : origin})

		if self.level > self.INFO:
			return

		print(Color.INFO + '[INFO][' + str(datetime.datetime.now()) + '][' + origin + '] ' + message + Color.ENDC)

	def warning(self, origin, message):
		""" print a warning message """

		if self.logger:
			self.logger.warning(message, {'origin' : origin})

		if self.level > self.WARNING:
			return

		print(Color.WARNING + '[WARNING][' + str(datetime.datetime.now()) + '][' + origin + '] ' + message + Color.ENDC)

	def error(self, origin, message):
		""" print an error message """

		if self.logger:
			self.logger.error(message, {'origin' : origin})

		if self.level > self.ERROR:
			return

		print(Color.FAIL + '[ERROR][' + str(datetime.datetime.now()) + '][' + origin + '] ' + message + Color.ENDC)
Example #53
def debugError(_logger: logging.Logger, msg: Any) -> None:
    """Log error messages."""
    if pyppeteer.DEBUG:
        _logger.error(msg)
    else:
        _logger.debug(msg)
Example #54
	def __init__(self, filename, openflags='wb'):
		self.needs_close = 1
		super(OutputFile, self).__init__(filename, openflags)

if __name__ == '__main__':
	try:
		parser = ArgumentParser()
		parser.add_argument('-d', '--debug', action='store_const', const=DEBUG, default=WARNING, dest='loglevel',
			help='enable debug tracing')
		parser.add_argument('-f', '--file', action='store', default=stdout, type=OutputFile,
			help='the file to which output will be directed')
		group = parser.add_mutually_exclusive_group(required=True)
		group.add_argument(nargs='?', action='store', dest='target', choices=get_targets(),
			help='display the contents of this target', metavar="TARGET")
		group.add_argument('-l', '--list', action='store_const', const=print_targets_list, default=print_target, dest='action',
			help='list available targets')

		namespace = parser.parse_args()

		handler.setLevel(namespace.loglevel)

		logger.debug(namespace)

		namespace.action(namespace)

	finally:
		if ('namespace' in vars() and hasattr(namespace, 'file')
		  and hasattr(namespace.file, 'needs_close')):
			logger.debug('closing file {0}'.format(namespace.file))
			namespace.file.close()
Example #55
 def _fix_error(self, fsck_dir: Path, log: logging.Logger, error: Error) -> bool:
     log.info(f"Processing error: {error}")
     detail = error.detailed_description()
     if detail:
         log.debug(detail)
     return error.repair(log=log, overlay=self.overlay, fsck_dir=fsck_dir)
Example #56
File: logger.py Project: HPCL/autoperf
 def debug(self, msg, *args, **kwargs):
     msg = "%s# %s" % (MyLogger.indent, msg)
     Logger.debug(self, msg, *args, **kwargs)
Example #57
 def debug(self, msg, *args, **kwargs):
     kwargs = self.configKeys(**kwargs)
     return OriginalLogger.debug(self, msg, *args, **kwargs)
Example #58
File: utils.py Project: BenVlodgi/BEE2.4
def setup_localisations(logger: logging.Logger) -> None:
    """Setup gettext localisations."""
    from srctools.property_parser import PROP_FLAGS_DEFAULT
    import gettext
    import locale

    # Get the 'en_US' style language code
    lang_code = locale.getdefaultlocale()[0]

    # Allow overriding through command line.
    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            if arg.casefold().startswith('lang='):
                lang_code = arg[5:]
                break

    # Expands single code to parent categories.
    expanded_langs = gettext._expand_lang(lang_code)

    logger.info('Language: {!r}', lang_code)
    logger.debug('Language codes: {!r}', expanded_langs)

    # Add these to Property's default flags, so config files can also
    # be localised.
    for lang in expanded_langs:
        PROP_FLAGS_DEFAULT['lang_' + lang] = True

    lang_folder = install_path('i18n')

    for lang in expanded_langs:
        try:
            file = open(lang_folder / (lang + '.mo'), 'rb')
        except FileNotFoundError:
            continue
        with file:
            trans = gettext.GNUTranslations(file)  # type: gettext.NullTranslations
            break
    else:
        # No translations, fallback to English.
        # That's fine if the user's language is actually English.
        if 'en' not in expanded_langs:
            logger.warning(
                "Can't find translation for codes: {!r}!",
                expanded_langs,
            )
        trans = gettext.NullTranslations()
    # Add these functions to builtins, plus _=gettext
    trans.install(['gettext', 'ngettext'])

    # Some lang-specific overrides..

    if trans.gettext('__LANG_USE_SANS_SERIF__') == 'YES':
        # For Japanese/Chinese, we want a 'sans-serif' / gothic font
        # style.
        try:
            from tkinter import font
        except ImportError:
            return
        font_names = [
            'TkDefaultFont',
            'TkHeadingFont',
            'TkTooltipFont',
            'TkMenuFont',
            'TkTextFont',
            'TkCaptionFont',
            'TkSmallCaptionFont',
            'TkIconFont',
            # Note - not fixed-width...
        ]
        for font_name in font_names:
            font.nametofont(font_name).configure(family='sans-serif')