Example #1
# -*-coding:utf-8-*-
# @Author: haiyu.ma
# @Created: 2020-06-05 0:25
# Use the default RootLogger to emit logs (by default only WARNING and above are output)
import logging
from logging.handlers import RotatingFileHandler
import time
from Common import dir_config

fmt = "%(asctime)s %(levelname)s %(filename)s " \
      "%(funcName)s [line:%(lineno)s %(levelname)s 日志信息:%(message)s]"

datefmt = "%a, %d %b %Y %H:%M:%S"

handler_1 = logging.StreamHandler()

curTime = time.strftime("%Y-%m-%d %H%M", time.localtime())

handler_2 = RotatingFileHandler(dir_config.logs_dir +
                                "/Web_Autotest_{0}.log".format(curTime),
                                backupCount=10,
                                encoding='utf-8')
# Configure the root logger's output format and destinations
logging.basicConfig(format=fmt,
                    datefmt=datefmt,
                    level=logging.INFO,
                    handlers=[handler_1, handler_2])

logging.info("hehehe")
Example #2
import logging

logger = logging.getLogger("CSGO Update Bot Logger")
logger.setLevel(logging.INFO)

formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')

console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)

logger.addHandler(console_handler)
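
Note that the handler's DEBUG threshold has no visible effect here: a record must first pass the logger's INFO level before any handler sees it. A small sketch of that interplay, continuing from the objects above:

logger.debug("dropped: DEBUG is below the logger's INFO level")
logger.info("printed: INFO passes both the logger and the handler")

logger.setLevel(logging.DEBUG)
logger.debug("now printed: both logger and handler accept DEBUG records")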
Example #3
def setup_logger(output=None,
                 distributed_rank=0,
                 *,
                 color=True,
                 name="detectron2",
                 abbrev_name=None):
    """
    Initialize the detectron2 logger and set its verbosity level to "DEBUG".

    Args:
        output (str): a file name or a directory to save log. If None, will not save log file.
            If ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        name (str): the root module name of this logger
        abbrev_name (str): an abbreviation of the module, to avoid long names in logs.
            Set to "" to not log the root module in logs.
            By default, will abbreviate "detectron2" to "d2" and leave other
            modules unchanged.

    Returns:
        logging.Logger: a logger
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    if abbrev_name is None:
        abbrev_name = "d2" if name == "detectron2" else name

    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s",
        datefmt="%m/%d %H:%M:%S")
    # stdout logging: master only
    if distributed_rank == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        if color:
            formatter = _ColorfulFormatter(
                colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
                datefmt="%m/%d %H:%M:%S",
                root_name=name,
                abbrev_name=str(abbrev_name),
            )
        else:
            formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    # file logging: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        if distributed_rank > 0:
            filename = filename + ".rank{}".format(distributed_rank)
        PathManager.mkdirs(os.path.dirname(filename))

        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)

    return logger
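
A hedged usage sketch for the function above; it assumes detectron2 is installed and that this is the helper exposed as detectron2.utils.logger.setup_logger (output path and messages are illustrative):

from detectron2.utils.logger import setup_logger

logger = setup_logger(output="./logs", distributed_rank=0, name="detectron2")
logger.info("goes to stdout on rank 0 and to ./logs/log.txt")
logger.debug("DEBUG reaches both handlers, since each is set to DEBUG")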
Example #4
import sys
import logging
import logging.handlers
import logging.config

# setting root logger
_root_logger = logging.getLogger('')
_root_logger.setLevel(logging.DEBUG)

# create log formatter
_simpleFormatter = logging.Formatter(
    fmt=
    '%(levelname)-8s %(asctime)s [%(module)s %(funcName)s %(lineno)-4s] %(message)s'
)

# setting console handler
_consoleHandler = logging.StreamHandler(sys.stdout)
_consoleHandler.setLevel(logging.DEBUG)
_consoleHandler.setFormatter(_simpleFormatter)

_root_logger.addHandler(_consoleHandler)

# setting file handler
_fileHandler = logging.handlers.RotatingFileHandler(
    filename='gameinn_client.log',
    maxBytes=1000000,
    backupCount=3,
    encoding='utf-8')
_fileHandler.setLevel(logging.INFO)
_fileHandler.setFormatter(_simpleFormatter)

_root_logger.addHandler(_fileHandler)
Example #5
is_shell = False

try:
    # SJVA
    from framework.util import Util
    package_name = __name__.split('.')[0]
    logger = logging.getLogger(package_name)
    is_shell = False
except:
    is_shell = True

####################################################
if is_shell:
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())


def log_debug(msg, *args, **kwargs):
    if logger is not None:
        logger.debug(msg, *args, **kwargs)
    else:
        Log(msg, *args, **kwargs)


def log_error(msg, *args, **kwargs):
    if logger is not None:
        logger.error(msg, *args, **kwargs)
    else:
        Log(msg, *args, **kwargs)
Example #6
def main():	
    validate_host()	
    install_prefix = os.environ.get('TLJH_INSTALL_PREFIX', '/opt/tljh')	
    hub_prefix = os.path.join(install_prefix, 'hub')	

    # Set up logging to print to a file and to stderr	
    os.makedirs(install_prefix, exist_ok=True)	
    file_logger_path = os.path.join(install_prefix, 'installer.log')	
    file_logger = logging.FileHandler(file_logger_path)	
    # installer.log should be readable only by root	
    os.chmod(file_logger_path, 0o500)	

    file_logger.setFormatter(logging.Formatter('%(asctime)s %(message)s'))	
    file_logger.setLevel(logging.DEBUG)	
    logger.addHandler(file_logger)	

    stderr_logger = logging.StreamHandler()	
    stderr_logger.setFormatter(logging.Formatter('%(message)s'))	
    stderr_logger.setLevel(logging.INFO)	
    logger.addHandler(stderr_logger)	
    logger.setLevel(logging.DEBUG)	

    logger.info('Checking if TLJH is already installed...')	
    if os.path.exists(os.path.join(hub_prefix, 'bin', 'python3')):	
        logger.info('TLJH already installed, upgrading...')	
        initial_setup = False	
    else:	
        logger.info('Setting up hub environment')	
        initial_setup = True	
        # Install software-properties-common, so we can get add-apt-repository	
        # That helps us make sure the universe repository is enabled, since	
        # that's where the python3-pip package lives. In some very minimal base	
        # VM images, it looks like the universe repository is disabled by default,	
        # causing bootstrapping to fail.	
        run_subprocess(['apt-get', 'update', '--yes'])	
        run_subprocess(['apt-get', 'install', '--yes', 'software-properties-common'])	
        run_subprocess(['add-apt-repository', 'universe'])	

        run_subprocess(['apt-get', 'update', '--yes'])	
        run_subprocess(['apt-get', 'install', '--yes', 	
            'python3',	
            'python3-venv',	
            'python3-pip',	
            'git'	
        ])	
        logger.info('Installed python & virtual environment')	
        os.makedirs(hub_prefix, exist_ok=True)	
        run_subprocess(['python3', '-m', 'venv', hub_prefix])	
        logger.info('Set up hub virtual environment')	

    if initial_setup:	
        logger.info('Setting up TLJH installer...')	
    else:	
        logger.info('Upgrading TLJH installer...')	

    pip_flags = ['--upgrade']	
    if os.environ.get('TLJH_BOOTSTRAP_DEV', 'no') == 'yes':	
        pip_flags.append('--editable')	
    tljh_repo_path = os.environ.get(	
        'TLJH_BOOTSTRAP_PIP_SPEC',	
        'git+https://github.com/jupyterhub/the-littlest-jupyterhub.git'	
    )	

    run_subprocess([	
        os.path.join(hub_prefix, 'bin', 'pip'),	
        'install'	
    ] + pip_flags + [tljh_repo_path])	
    logger.info('Setup tljh package')	

    logger.info('Starting TLJH installer...')	
    os.execv(	
        os.path.join(hub_prefix, 'bin', 'python3'),	
        [	
            os.path.join(hub_prefix, 'bin', 'python3'),	
            '-m',	
            'tljh.installer',	
        ] + sys.argv[1:]	
    )	
Example #7
    def __init__(self, **kwargs):
        self.logger = logging.getLogger('gbdxtools')
        self.logger.setLevel(logging.ERROR)
        self.console_handler = logging.StreamHandler()
        self.console_handler.setLevel(logging.ERROR)
        self.formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.console_handler.setFormatter(self.formatter)
        self.logger.addHandler(self.console_handler)
        self.logger.info('Logger initialized')

        if 'host' in kwargs:
            self.root_url = 'https://%s' % kwargs.get('host')

        if (kwargs.get('username') and kwargs.get('password')
                and kwargs.get('client_id') and kwargs.get('client_secret')):
            self.gbdx_connection = gbdx_auth.session_from_kwargs(**kwargs)
        elif kwargs.get('gbdx_connection'):
            self.gbdx_connection = kwargs.get('gbdx_connection')
        elif self.gbdx_connection is None:
            # This will throw an exception if your .ini file is not set properly
            self.gbdx_connection = gbdx_auth.get_session(
                kwargs.get('config_file'))

        def expire_token(r, *args, **kw):
            """
            Requests a new token if 401, retries request, mainly for auth v2 migration
            :param r:
            :param args:
            :param kw:
            :return:
            """
            if r.status_code == 401:
                try:
                    # remove hooks so it doesn't get into infinite loop
                    r.request.hooks = None
                    # expire the token
                    gbdx_auth.expire_token(
                        token_to_expire=self.gbdx_connection.token,
                        config_file=kwargs.get('config_file'))
                    # re-init the session
                    self.gbdx_connection = gbdx_auth.get_session(
                        kwargs.get('config_file'))
                    # make original request, triggers new token request first
                    return self.gbdx_connection.request(
                        method=r.request.method, url=r.request.url)

                except Exception as e:
                    r.request.hooks = None
                    print(
                        "Error expiring token from session, Reason {}".format(e))

        if self.gbdx_connection is not None:

            self.gbdx_connection.hooks['response'].append(expire_token)

            # status_forcelist=[500, 502, 504]))
            self.gbdx_connection.mount(VIRTUAL_IPE_URL,
                                       HTTPAdapter(max_retries=5))

        self.gbdx_futures_session = FuturesSession(
            session=self.gbdx_connection, max_workers=64)

        if 'GBDX_USER' in os.environ:
            header = {'User-Agent': os.environ['GBDX_USER']}
            self.gbdx_futures_session.headers.update(header)
            self.gbdx_connection.headers.update(header)
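
The expire_token callback above is attached through the requests hook mechanism; a minimal, self-contained sketch of that mechanism (the URL and handler are illustrative only):

import requests

def log_status(response, *args, **kwargs):
    # Called by requests after every response on this session.
    print("got", response.status_code, "from", response.url)

session = requests.Session()
session.hooks["response"].append(log_status)
session.get("https://httpbin.org/status/401")  # the hook fires and prints the 401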
Example #8
def main():
    strtime = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime())

    logging.basicConfig(level=logging.DEBUG)

    fmt = logging.Formatter(
        fmt='%(asctime)s: %(module)s.%(funcName)s +%(lineno)s: %(levelname)-8s %(message)s',
        datefmt='%H:%M:%S')
    for handler in logging.getLogger().handlers:
        handler.setFormatter(fmt)

    _scoring_help = "\n".join([format_help_for_epilog(scoring.get_scoring_argparse(name), prefix=" scoring: ")
                               for name in sorted(scoring.allScoringFunctions)]) + "\n"

    parser = UnderscoreArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False,
                                      epilog=_scoring_help)

    requiredArgs = parser.add_argument_group('required arguments')
    optionalArgs = parser.add_argument_group('optional arguments')
    optionalArgs.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
                              help='show this help message and exit')

    requiredArgs.add_argument("--scoring-function",
                              help='Scoring function to use. Allowed values are: ' +
                                   ', '.join(sorted(scoring.allScoringFunctions.keys())),
                              metavar="<scoring>", type=str, required=True,
                              choices=sorted(list(set(
                                  list(scoring.allScoringFunctions.keys()) + [name.replace("-", "_") for name in
                                                                              scoring.allScoringFunctions.keys()]))))
    optionalArgs.add_argument("--name", help="Name of the experiment. Default: if no name is provided and the "
                                             "script is running within SLURM it uses the name provided by "
                                             "SLURM_JOB_NAME otherwise noname",
                              type=str,
                              default=None,
                              metavar="<str>")
    optionalArgs.add_argument("--description", help="Description of the experiment. Currently just used in "
                                                    "Vizor. Default N/A", type=str,
                              default="N/A", metavar="<str>")

    optionalArgs.add_argument("--prior", help='Prior to use. Default priors/ChEMBL/Prior.ckpt', type=str,
                              default='priors/ChEMBL/Prior.ckpt', metavar="<{}>".format(str(FilePath.__name__)))
    optionalArgs.add_argument("--agent", help='Agent to use. If None the agent is initialized from the prior.',
                              type=str, default='None', metavar="<{}>".format(str(FilePath.__name__)))

    optionalArgs.add_argument("--steps", help='Iterations to run. Default: 500', type=int, default=500, metavar="<int>")
    optionalArgs.add_argument("--reset", help="Number of iteration after which the Agent is reset after the first "
                                              "time the average score is above reset-cutoff-score."
                                              "Default 0 (not active)",
                              type=int, default=0, metavar="<int>")
    optionalArgs.add_argument("--reset-cutoff-score", help="Average Score which have to be reached to start the "
                                                           "reset countdown of the Agent. Default 0.6",
                              type=float, default=0.6, metavar="<float>")
    optionalArgs.add_argument("--sigma", help='Scoring Sigma. Default: 120', type=float, default=120, metavar="<int>")

    optionalArgs.add_argument("--temperature", "-t",
                              help=("Temperature for the sequence sampling. Has to be larger than 0. "
                                    "Values below 1 make the RNN more confident in it's generation, "
                                    "but also more conservative. Values larger than 1 result in more random sequences. "
                                    "[DEFAULT: 1.0]"),
                              type=float, default=1.0, metavar="<float>")

    optionalArgs.add_argument("--debug", "-v", help='Verbose messages', action='store_true', default=False)
    optionalArgs.add_argument("--noteset", "-vv", help='More verbose messages', action='store_true', default=False)

    optionalArgs.add_argument("--experience", help='Enable experience replay. Default False', type=bool,
                              default=False, metavar="<bool>")
    optionalArgs.add_argument("--lr", help='Optimizer learning rate. Default: 0.0001', type=float, default=0.0001,
                              metavar="<float>")
    optionalArgs.add_argument("--batch-size", help='How many compounds are sampled per step. Default: 128', type=int,
                              default=128, metavar="<int>")

    optionalArgs.add_argument("--logdir",
                              help="Dictionary to save the log. Default ~/REINVENT/logs/<name>",
                              type=str, metavar="<{}>".format(str(FilePath.__name__)),
                              default=None)
    optionalArgs.add_argument("--resultdir",
                              help="Dictionary to save the results.  Default ~/REINVENT/results/<name>",
                              type=str, metavar="<{}>".format(str(FilePath.__name__)),
                              default=None)

    args, extra_args = parser.parse_known_args()

    # Setup the name
    if args.name is None:
        if "SLURM_JOB_NAME" in os.environ:
            args.name = os.environ["SLURM_JOB_NAME"]
        else:
            args.name = "noname"

    # Setup the logdir and resultdir
    if args.logdir is None:
        args.logdir = os.path.join(os.path.expanduser('~'), "REINVENT/logs/{}".format(args.name))
    if args.resultdir is None:
        args.resultdir = os.path.join(os.path.expanduser('~'), "REINVENT/results/{}".format(args.name))

    args.logdir = os.path.normpath(args.logdir)
    args.resultdir = os.path.normpath(args.resultdir)

    if os.path.exists(args.logdir):
        new_logdir = find_dir_suffix(args.logdir)
        logging.info("Logdir already exists. Using {} instead".format(new_logdir))
        args.logdir = new_logdir
    if os.path.exists(args.resultdir):
        new_resultdir = find_dir_suffix(args.resultdir)
        logging.info("Resultdir already exists. Using {} instead".format(new_resultdir))
        args.resultdir = new_resultdir

    os.makedirs(args.logdir)
    os.makedirs(args.resultdir)

    # Set up the logging
    fh = logging.FileHandler(os.path.join(args.logdir, 'output.log'))
    fh.setLevel(logging.INFO)
    dh = logging.FileHandler(os.path.join(args.logdir, 'debug.log'))
    dh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler(sys.stdout)
    if args.noteset:
        ch.setLevel(logging.NOTSET)
    elif args.debug:
        ch.setLevel(logging.DEBUG)
    else:
        ch.setLevel(logging.INFO)
    logginghandler = [fh, dh, ch]
    for handler in logging.getLogger().handlers[:]:
        logging.getLogger().removeHandler(handler)
    for handler in logginghandler:
        handler.setFormatter(fmt)
        logging.getLogger().addHandler(handler)

    # first we get the scoring function
    scoring_parser = scoring.get_scoring_argparse(args.scoring_function)
    scoring_args, extra_args = scoring_parser.parse_known_args(extra_args)
    scoring_function = scoring.get_scoring_function(args.scoring_function, **vars(scoring_args))

    # lets hope we have no arguments left. Otherwise we fail
    if len(extra_args) > 0:
        print("\n\033[91mERROR: unrecognized arguments: " + " ".join(extra_args) + "\033[0m\n")
        parser.print_help()
        with contextlib.suppress(FileNotFoundError):
            os.remove(os.path.join(args.logdir, 'output.log'))
        with contextlib.suppress(FileNotFoundError):
            os.remove(os.path.join(args.logdir, 'debug.log'))
        with contextlib.suppress(FileNotFoundError):
            os.rmdir(args.logdir)
        with contextlib.suppress(FileNotFoundError):
            os.rmdir(args.resultdir)
        exit(2)

    prior = models.reinvent.Model.load_from_file(args.prior)
    if args.agent == "None":
        agent = models.reinvent.Model.load_from_file(args.prior)
    else:
        agent = models.reinvent.Model.load_from_file(args.agent)

    metadata = {"name": args.name, "description": args.description, "date": strtime, "commit": get_commit_hash(),
                'arguments': sys.argv}
    metadata = json.dumps(metadata, sort_keys=True, indent=4, separators=(',', ': '))
    with open(args.logdir + "/metadata.json", 'w') as f:
        f.write(metadata + "\n")

    reinforcement.reinforcement_learning(agent=agent, prior=prior,
                                         scoring_function=scoring_function,
                                         n_steps=args.steps,
                                         experience_replay=args.experience, reset=args.reset,
                                         reset_score_cutoff=args.reset_cutoff_score,
                                         logdir=args.logdir, resultdir=args.resultdir,
                                         lr=args.lr, sigma=args.sigma,
                                         batch_size=args.batch_size,
                                         temperature=args.temperature)
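
The root-handler swap in main() above (drop every handler basicConfig installed, then attach file and console handlers sharing one formatter) reduces to this self-contained sketch (file names and format are placeholders):

import logging
import sys

fmt = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
root = logging.getLogger()
root.setLevel(logging.DEBUG)

for handler in root.handlers[:]:   # iterate over a copy while removing
    root.removeHandler(handler)

for handler in (logging.FileHandler("output.log"),
                logging.StreamHandler(sys.stdout)):
    handler.setFormatter(fmt)
    root.addHandler(handler)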
Example #9
def get_console_handler():
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(filename)s - %(levelname)s - %(message)s'))
    return console_handler
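
A possible way to use the helper above (logger name and message are arbitrary):

import logging
import sys

logger = logging.getLogger("my_app")
logger.setLevel(logging.INFO)
logger.addHandler(get_console_handler())
logger.info("formatted and written to stdout")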
Example #10
def main():
    try:
        args = parse_args()

        logging.addLevelName(RESULT, "RESULT")
        logging.basicConfig(format="%(levelname)-8s %(message)s",
                            handlers=[logging.StreamHandler(sys.stdout)],
                            level=args.loglevel)
        # disable the logging for the 'urllib3' lib
        logging.getLogger("urllib3").setLevel(logging.CRITICAL)

        # help required to:
        # tidy-up this piece of code
        if args.subcommand == "nessus":
            if args.output_file and not args.list:
                output_file = "{}.xlsx".format(args.output_file)
            elif not args.output_file and not args.list:
                output_file = "nessus-results_{}".format(
                    time.strftime("%Y%m%d-%H%M%S"))
            else:
                output_file = "N/A"
        elif args.subcommand == "nmap":
            if args.output_file:
                output_file = "{}".format(args.output_file)
            else:
                output_file = "nmap-results_{}".format(
                    time.strftime("%Y%m%d-%H%M%S"))

        if args.subcommand == "nessus":
            # variables summary
            logging.info("Nessus login: {}".format(args.login))
            logging.info("Nessus password: {}".format(args.password))
            if args.folders:
                logging.info("Nessus folder(s): {}".format(";".join(
                    sorted(args.folders))))
            if args.scans:
                logging.info("Nessus scan(s): {}".format(";".join(
                    sorted(args.scans))))
            logging.info("Nessus URL: https://{}:{}".format(
                args.host, args.port))
            if args.config_file:
                logging.info(
                    "Configuration file for Nessus vulnerabilities: {}".format(
                        args.config_file.name))
            logging.info(
                "XLSX results output_file: {}.xlsx".format(output_file))

            scanner = ness6rest.Scanner(insecure=True,
                                        login=args.login,
                                        password=args.password,
                                        url="https://{}:{}".format(
                                            args.host, args.port))

            if args.list:
                if args.list == "folders":
                    if args.folders:
                        results = nessus.get_folders(scanner, args.folders)
                    else:
                        results = nessus.get_all_folders(scanner)
                elif args.list == "scans":
                    if args.folders:
                        results = nessus.fetch_scans(scanner, args.folders)
                    elif args.scans:
                        results = nessus.get_scans(scanner, args.scans)
                    else:
                        results = nessus.get_all_scans(scanner)

                for result in results:
                    logging.log(RESULT, "{}".format(result["name"]))
            elif args.folders or args.scans:
                if args.folders:
                    scans = nessus.fetch_scans(scanner, args.folders)
                elif args.scans:
                    scans = nessus.get_scans(scanner, args.scans)

                if scans:
                    workbook = xlsxwriter.Workbook(
                        "{}.xlsx".format(output_file))

                    logging.log(
                        RESULT,
                        "generating 'Host vs Vulnerabilities' worksheet...")
                    parse_ness_host_vulns(workbook,
                                          scanner,
                                          scans,
                                          config_file=args.config_file)

                    logging.log(
                        RESULT,
                        "generating 'Vulnerability vs Hosts' worksheet...")
                    parse_ness_vuln_hosts(workbook,
                                          scanner,
                                          scans,
                                          config_file=args.config_file)

                    logging.log(RESULT,
                                "generating 'Host vs OSs' worksheet...")
                    parse_ness_host_oss(workbook, scanner, scans)
                    logging.log(RESULT,
                                "generating 'OS vs Hosts' worksheet...")
                    parse_ness_os_hosts(workbook, scanner, scans)

                    workbook.close()

        elif args.subcommand == "nmap":
            # variables summary
            # help required to:
            # add regex support
            input_files = []
            for input_file in args.input_files:
                input_files.append(input_file.name)
            logging.info("Nmap XML results file(s): {}".format(";".join(
                sorted(input_files))))
            logging.info("XLSX results file: {}.xlsx".format(output_file))

            workbook = xlsxwriter.Workbook("{}.xlsx".format(output_file))
            logging.log(RESULT, "generating 'Host vs Services' worksheet...")
            parse_nmap_host_services(workbook, input_files)
            logging.log(RESULT, "generating 'Host vs OSs' worksheet...")
            parse_nmap_host_oss(workbook, input_files)
            logging.log(RESULT, "generating 'OS vs Hosts' worksheet...")
            parse_nmap_os_hosts(workbook, input_files)

            workbook.close()
    except KeyboardInterrupt:
        logging.exception("'CTRL+C' pressed, exiting...")
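
RESULT is a custom level passed to logging.addLevelName above but never defined in this fragment; a minimal sketch of how such a level might be declared (the value 25, between INFO and WARNING, is an assumption):

import logging

RESULT = 25  # assumed: between INFO (20) and WARNING (30)
logging.addLevelName(RESULT, "RESULT")
logging.basicConfig(format="%(levelname)-8s %(message)s", level=logging.INFO)
logging.log(RESULT, "shown, because RESULT is above the configured INFO level")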
Example #11
def main():
    signal.signal(signal.SIGTERM, signal_handler)

    parser = argparse.ArgumentParser(
        description='Export ES query results to Prometheus.')
    parser.add_argument(
        '-e',
        '--es-cluster',
        default='localhost',
        help=
        'addresses of nodes in an Elasticsearch cluster to run queries on. Nodes should be separated by commas e.g. es1,es2. Ports can be provided if non-standard (9200) e.g. es1:9999 (default: localhost)'
    )
    parser.add_argument(
        '--ca-certs',
        help=
        'path to a CA certificate bundle. Can be absolute, or relative to the current working directory. If not specified, SSL certificate verification is disabled.'
    )
    parser.add_argument(
        '-p',
        '--port',
        type=int,
        default=9206,
        help='port to serve the metrics endpoint on. (default: 9206)')
    parser.add_argument(
        '--query-disable',
        action='store_true',
        help=
        'disable query monitoring. Config file does not need to be present if query monitoring is disabled.'
    )
    parser.add_argument(
        '-c',
        '--config-file',
        default='exporter.cfg',
        help=
        'path to query config file. Can be absolute, or relative to the current working directory. (default: exporter.cfg)'
    )
    parser.add_argument('--cluster-health-disable',
                        action='store_true',
                        help='disable cluster health monitoring.')
    parser.add_argument(
        '--cluster-health-timeout',
        type=float,
        default=10.0,
        help=
        'request timeout for cluster health monitoring, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--cluster-health-level',
        default='indices',
        choices=['cluster', 'indices', 'shards'],
        help=
        'level of detail for cluster health monitoring.  (default: indices)')
    parser.add_argument('--nodes-stats-disable',
                        action='store_true',
                        help='disable nodes stats monitoring.')
    parser.add_argument(
        '--nodes-stats-timeout',
        type=float,
        default=10.0,
        help=
        'request timeout for nodes stats monitoring, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--nodes-stats-metrics',
        type=nodes_stats_metrics_parser,
        help=
        'limit nodes stats to specific metrics. Metrics should be separated by commas e.g. indices,fs.'
    )
    parser.add_argument('--indices-stats-disable',
                        action='store_true',
                        help='disable indices stats monitoring.')
    parser.add_argument(
        '--indices-stats-timeout',
        type=float,
        default=10.0,
        help=
        'request timeout for indices stats monitoring, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--indices-stats-mode',
        default='cluster',
        choices=['cluster', 'indices'],
        help='detail mode for indices stats monitoring. (default: cluster)')
    parser.add_argument(
        '--indices-stats-metrics',
        type=indices_stats_metrics_parser,
        help=
        'limit indices stats to specific metrics. Metrics should be separated by commas e.g. indices,fs.'
    )
    parser.add_argument(
        '--indices-stats-fields',
        type=indices_stats_fields_parser,
        help=
        'include fielddata info for specific fields. Fields should be separated by commas e.g. indices,fs. Use \'*\' for all.'
    )
    parser.add_argument('-j',
                        '--json-logging',
                        action='store_true',
                        help='turn on json logging.')
    parser.add_argument(
        '--log-level',
        default='INFO',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='detail level to log. (default: INFO)')
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='turn on verbose (DEBUG) logging. Overrides --log-level.')
    args = parser.parse_args()

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(
        log_format) if args.json_logging else logging.Formatter(log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, args.log_level)
    logging.basicConfig(handlers=[log_handler],
                        level=logging.DEBUG if args.verbose else log_level)
    logging.captureWarnings(True)

    port = args.port
    es_cluster = args.es_cluster.split(',')
    if args.ca_certs:
        es_client = Elasticsearch(es_cluster,
                                  verify_certs=True,
                                  ca_certs=args.ca_certs)
    else:
        es_client = Elasticsearch(es_cluster, verify_certs=False)

    scheduler = None

    if not args.query_disable:
        scheduler = sched.scheduler()

        config = configparser.ConfigParser()
        config.read_file(open(args.config_file))

        query_prefix = 'query_'
        queries = {}
        for section in config.sections():
            if section.startswith(query_prefix):
                query_name = section[len(query_prefix):]
                query_interval = config.getfloat(section,
                                                 'QueryIntervalSecs',
                                                 fallback=15)
                query_timeout = config.getfloat(section,
                                                'QueryTimeoutSecs',
                                                fallback=10)
                query_indices = config.get(section,
                                           'QueryIndices',
                                           fallback='_all')
                query = json.loads(config.get(section, 'QueryJson'))

                queries[query_name] = (query_interval, query_timeout,
                                       query_indices, query)

        if queries:
            for name, (interval, timeout, indices, query) in queries.items():
                func = partial(run_query, es_client, name, indices, query,
                               timeout)
                run_scheduler(scheduler, interval, func)
        else:
            logging.warning('No queries found in config file %s',
                            args.config_file)

    if not args.cluster_health_disable:
        REGISTRY.register(
            ClusterHealthCollector(es_client, args.cluster_health_timeout,
                                   args.cluster_health_level))

    if not args.nodes_stats_disable:
        REGISTRY.register(
            NodesStatsCollector(es_client,
                                args.nodes_stats_timeout,
                                metrics=args.nodes_stats_metrics))

    if not args.indices_stats_disable:
        parse_indices = args.indices_stats_mode == 'indices'
        REGISTRY.register(
            IndicesStatsCollector(es_client,
                                  args.indices_stats_timeout,
                                  parse_indices=parse_indices,
                                  metrics=args.indices_stats_metrics,
                                  fields=args.indices_stats_fields))

    logging.info('Starting server...')
    start_http_server(port)
    logging.info('Server started on port %s', port)

    try:
        if scheduler:
            scheduler.run()
        else:
            while True:
                time.sleep(5)
    except KeyboardInterrupt:
        pass

    shutdown()
Example #12
import logging
import gnupg
import os

log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
#handler = logging.FileHandler('gpg_handler.log')
handler2 = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
handler2.setFormatter(formatter)
# log.addHandler(handler)
log.addHandler(handler2)


class Gpg:
    def __init__(self, gnupghome=''):
        self.gnupghome = gnupghome
        log.info('New session.\n')
        if len(gnupghome) == 0:
            log.debug("Path not specified. Setting default gnupg directory.")
            log.debug(
                'Creating gnupg instance in {}'.format("default directory."))
            self.gpg = gnupg.GPG()
            return None
        else:
            log.debug("Setting {} as gnupg directory".format(gnupghome))
            if os.path.exists(gnupghome):
                log.debug('Creating gnupg instance in {}'.format(gnupghome))
                self.gpg = gnupg.GPG(gnupghome=gnupghome)
Example #13
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time

# import class to process stream data
from unicorn_binance_websocket_api_process_streams import BinanceWebSocketApiProcessStreams

# https://docs.python.org/3/library/logging.html#logging-levels
logging.getLogger('unicorn-log').setLevel(logging.INFO)
logging.getLogger('unicorn-log').addHandler(logging.StreamHandler())

# create instance of BinanceWebSocketApiManager and provide the function for stream processing
binance_websocket_api_manager = BinanceWebSocketApiManager(
    BinanceWebSocketApiProcessStreams.process_stream_data)

# define markets
markets = {'bnbbtc', 'ethbtc', 'btcusdt', 'bchabcusdt', 'eosusdt'}

markets_mega_list = {
    'xrpusdt', 'rvnbtc', 'ltcusdt', 'adausdt', 'eosusdt', 'xmrusdt', 'xmrbnb',
    'neousdt', 'bnbusdt', 'adabtc', 'ethusdt', 'trxbtc', 'bchabcbtc', 'ltcbtc',
    'xrpbtc', 'ontbtc', 'bttusdt', 'eosbtc', 'xlmbtc', 'bttbtc', 'tusdusdt',
    'xlmusdt', 'qkcbtc', 'zrxbtc', 'neobtc', 'adaeth', 'icxusdt', 'btctusd',
    'icxbtc', 'btcusdc', 'wanbtc', 'zecbtc', 'wtcbtc', 'batbtc', 'adabnb',
    'etcusdt', 'qtumusdt', 'xmrbtc', 'trxeth', 'adatusd', 'trxxrp', 'trxbnb',
Example #14
# login manager 
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.login_view = 'login'
login_manager.login_message = "Please login to Access ps Dashboard, Contact psRO incase you dont have an id or any password issues"

@login_manager.user_loader
def load_user(user_id):
	return db.session.query(User).get(user_id)

# Load custom Jinja filters from the `filters` module.
filters.init_app(app)

# logging configuration when debug is set to false
if not app.debug:
	app.logger.addHandler(logging.StreamHandler())  # log to stderr
	app.logger.setLevel(logging.INFO)


@app.errorhandler(404)
def error_not_found(err):
	return render_template('error/not_found.html'), 404

@app.route('/')
def index_page():
	return render_template('index.html')

@app.route('/searchdoc/',methods=['GET','POST'])
@login_required
def search_doc():
	loading=False
Example #15
    def run_desktop_test(self, suite=None, test_file=None, debugger=None,
        debugger_args=None, shuffle=False, keep_open=False, rerun_failures=False,
        no_autorun=False, repeat=0, run_until_failure=False, slow=False,
        chunk_by_dir=0, total_chunks=None, this_chunk=None):
        """Runs a mochitest.

        test_file is a path to a test file. It can be a relative path from the
        top source directory, an absolute filename, or a directory containing
        test files.

        suite is the type of mochitest to run. It can be one of ('plain',
        'chrome', 'browser', 'metro', 'a11y').

        debugger is a program name or path to a binary (presumably a debugger)
        to run the test in. e.g. 'gdb'

        debugger_args are the arguments passed to the debugger.

        shuffle is whether test order should be shuffled (defaults to false).

        keep_open denotes whether to keep the browser open after tests
        complete.
        """
        if rerun_failures and test_file:
            print('Cannot specify both --rerun-failures and a test path.')
            return 1

        # Need to call relpath before os.chdir() below.
        test_path = ''
        if test_file:
            test_path = self._wrap_path_argument(test_file).relpath()

        failure_file_path = os.path.join(self.statedir, 'mochitest_failures.json')

        if rerun_failures and not os.path.exists(failure_file_path):
            print('No failure file present. Did you run mochitests before?')
            return 1

        from StringIO import StringIO
        from automation import Automation

        # runtests.py is ambiguous, so we load the file/module manually.
        if 'mochitest' not in sys.modules:
            import imp
            path = os.path.join(self.mochitest_dir, 'runtests.py')
            with open(path, 'r') as fh:
                imp.load_module('mochitest', fh, path,
                    ('.py', 'r', imp.PY_SOURCE))

        import mochitest

        # This is required to make other components happy. Sad, isn't it?
        os.chdir(self.topobjdir)

        automation = Automation()

        # Automation installs its own stream handler to stdout. Since we want
        # all logging to go through us, we just remove their handler.
        remove_handlers = [l for l in logging.getLogger().handlers
            if isinstance(l, logging.StreamHandler)]
        for handler in remove_handlers:
            logging.getLogger().removeHandler(handler)

        runner = mochitest.Mochitest(automation)

        opts = mochitest.MochitestOptions(automation)
        options, args = opts.parse_args([])

        appname = ''
        if sys.platform.startswith('darwin'):
            appname = os.path.join(self.distdir, self.substs['MOZ_MACBUNDLE_NAME'],
            'Contents', 'MacOS', 'webapprt-stub' + automation.BIN_SUFFIX)
        else:
            appname = os.path.join(self.distdir, 'bin', 'webapprt-stub' +
            automation.BIN_SUFFIX)

        # Need to set the suite options before verifyOptions below.
        if suite == 'plain':
            # Don't need additional options for plain.
            pass
        elif suite == 'chrome':
            options.chrome = True
        elif suite == 'browser':
            options.browserChrome = True
        elif suite == 'metro':
            options.immersiveMode = True
            options.browserChrome = True
        elif suite == 'a11y':
            options.a11y = True
        elif suite == 'webapprt-content':
            options.webapprtContent = True
            options.app = appname
        elif suite == 'webapprt-chrome':
            options.webapprtChrome = True
            options.app = appname
            options.browserArgs.append("-test-mode")
        else:
            raise Exception('None or unrecognized mochitest suite type.')

        options.autorun = not no_autorun
        options.closeWhenDone = not keep_open
        options.shuffle = shuffle
        options.consoleLevel = 'INFO'
        options.repeat = repeat
        options.runUntilFailure = run_until_failure
        options.runSlower = slow
        options.testingModulesDir = os.path.join(self.tests_dir, 'modules')
        options.extraProfileFiles.append(os.path.join(self.distdir, 'plugins'))
        options.symbolsPath = os.path.join(self.distdir, 'crashreporter-symbols')
        options.chunkByDir = chunk_by_dir
        options.totalChunks = total_chunks
        options.thisChunk = this_chunk

        options.failureFile = failure_file_path

        if test_path:
            test_root = runner.getTestRoot(options)
            test_root_file = mozpack.path.join(self.mochitest_dir, test_root, test_path)
            if not os.path.exists(test_root_file):
                print('Specified test path does not exist: %s' % test_root_file)
                print('You may need to run |mach build| to build the test files.')
                return 1

            options.testPath = test_path

        if rerun_failures:
            options.testManifest = failure_file_path

        if debugger:
            options.debugger = debugger

        if debugger_args:
            if options.debugger == None:
                print("--debugger-args passed, but no debugger specified.")
                return 1
            options.debuggerArgs = debugger_args

        options = opts.verifyOptions(options, runner)

        if options is None:
            raise Exception('mochitest option validator failed.')

        automation.setServerInfo(options.webServer, options.httpPort,
            options.sslPort, options.webSocketPort)


        # We need this to enable colorization of output.
        self.log_manager.enable_unstructured()

        # Output processing is a little funky here. The old make targets
        # grepped the log output from TEST-UNEXPECTED-* and printed these lines
        # after test execution. Ideally the test runner would expose a Python
        # API for obtaining test results and we could just format failures
        # appropriately. Unfortunately, it doesn't yet do that. So, we capture
        # all output to a buffer then "grep" the buffer after test execution.
        # Bug 858197 tracks a Python API that would facilitate this.
        test_output = StringIO()
        handler = logging.StreamHandler(test_output)
        handler.addFilter(UnexpectedFilter())
        handler.setFormatter(StructuredHumanFormatter(0, write_times=False))
        logging.getLogger().addHandler(handler)

        result = runner.runTests(options)

        # Need to remove our buffering handler before we echo failures or else
        # it will catch them again!
        logging.getLogger().removeHandler(handler)
        self.log_manager.disable_unstructured()

        if test_output.getvalue():
            result = 1
            for line in test_output.getvalue().splitlines():
                self.log(logging.INFO, 'unexpected', {'msg': line}, '{msg}')

        return result
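
The capture-to-buffer trick above (a StreamHandler writing into an in-memory buffer that is scanned after the run) in a self-contained form (the filter keyword and messages are illustrative):

import io
import logging

buffer = io.StringIO()
capture = logging.StreamHandler(buffer)
capture.addFilter(lambda record: "UNEXPECTED" in record.getMessage())

root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(capture)
root.info("TEST-PASS | fine")
root.info("TEST-UNEXPECTED-FAIL | broken")
root.removeHandler(capture)  # stop capturing before reporting

for line in buffer.getvalue().splitlines():
    print("captured:", line)  # only the UNEXPECTED line passed the filter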
Example #16
        
        """
        for i in range(1,5):
            print '--- data.%d ---' % i
            f = open('/tmp/mercator.due/data.%d' % i, 'r')
            while True:    
                try:
                    print cPickle.load(f)
                except EOFError:
                    break
            f.close()
        """
        
        print(due)
        
        self.assertEqual(due.file_version, 4)
        self.assertEqual(len(due._buffer), 0)
        
        self.assertEqual(due.receiver.results, 'B3.0 B2.1 B2.2 B1.0 A1.0 A2.0 A3.0 A3.1 A4.2 A0.0 A5.0 ')
        


if __name__ == '__main__':
    logger = logging.getLogger('')
    logger.setLevel(logging.WARNING)
    #logger.setLevel(logging.DEBUG)
    h = logging.StreamHandler(sys.stderr)       
    logger.addHandler(h) 

    unittest.main()
    
Example #17
def main(body, title, config, attach, urls, notification_type, theme, tag,
         input_format, dry_run, recursion_depth, verbose, disable_async,
         debug, version):
    """
    Send a notification to all of the specified servers identified by their
    URLs the content provided within the title, body and notification-type.

    For a list of all of the supported services and information on how to
    use them, check out at https://github.com/caronc/apprise
    """
    # Note: Click ignores the return values of functions it wraps, If you
    #       want to return a specific error code, you must call sys.exit()
    #       as you will see below.

    debug = True if debug else False
    if debug:
        # Verbosity must be a minimum of 3
        verbose = 3 if verbose < 3 else verbose

    # Logging
    ch = logging.StreamHandler(sys.stdout)
    if verbose > 3:
        # -vvvv: Most Verbose Debug Logging
        logger.setLevel(logging.TRACE)

    elif verbose > 2:
        # -vvv: Debug Logging
        logger.setLevel(logging.DEBUG)

    elif verbose > 1:
        # -vv: INFO Messages
        logger.setLevel(logging.INFO)

    elif verbose > 0:
        # -v: WARNING Messages
        logger.setLevel(logging.WARNING)

    else:
        # No verbosity means we display ERRORS only AND any deprecation
        # warnings
        logger.setLevel(logging.ERROR)

    # Format our logger
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    # Update our asyncio logger
    asyncio_logger = logging.getLogger('asyncio')
    for handler in logger.handlers:
        asyncio_logger.addHandler(handler)
    asyncio_logger.setLevel(logger.level)

    if version:
        print_version_msg()
        sys.exit(0)

    # Simple Error Checking
    notification_type = notification_type.strip().lower()
    if notification_type not in NOTIFY_TYPES:
        logger.error(
            'The --notification-type (-n) value of {} is not supported.'
            .format(notification_type))
        # 2 is the same exit code returned by Click if there is a parameter
        # issue.  For consistency, we also return a 2
        sys.exit(2)

    input_format = input_format.strip().lower()
    if input_format not in NOTIFY_FORMATS:
        logger.error(
            'The --input-format (-i) value of {} is not supported.'
            .format(input_format))
        # 2 is the same exit code returned by Click if there is a parameter
        # issue.  For consistency, we also return a 2
        sys.exit(2)

    # Prepare our asset
    asset = AppriseAsset(
        # Our body format
        body_format=input_format,

        # Set the theme
        theme=theme,

        # Async mode is only used for Python v3+ and allows a user to send
        # all of their notifications asynchronously.  This was made an option
        # in case there are problems in the future where it's better that
        # everything run sequentially/synchronously instead.
        async_mode=disable_async is not True,
    )

    # Create our Apprise object
    a = Apprise(asset=asset, debug=debug, location=ContentLocation.LOCAL)

    # The priorities of what is accepted are parsed in order below:
    #    1. URLs by command line
    #    2. Configuration by command line
    #    3. URLs by environment variable: APPRISE_URLS
    #    4. Configuration by environment variable: APPRISE_CONFIG
    #    5. Default Configuration File(s) (if found)
    #
    if urls:
        if tag:
            # Ignore any tags specified
            logger.warning(
                '--tag (-g) entries are ignored when using specified URLs')
            tag = None

        # Load our URLs (if any defined)
        for url in urls:
            a.add(url)

        if config:
            # Provide a warning to the end user if they specified both
            logger.warning(
                'You defined both URLs and a --config (-c) entry; '
                'Only the URLs will be referenced.')

    elif config:
        # We load our configuration file(s) now only if no URLs were specified
        # Specified config entries trump all
        a.add(AppriseConfig(
            paths=config, asset=asset, recursion=recursion_depth))

    elif os.environ.get('APPRISE_URLS', '').strip():
        logger.debug('Loading provided APPRISE_URLS environment variable')
        if tag:
            # Ignore any tags specified
            logger.warning(
                '--tag (-g) entries are ignored when using specified URLs')
            tag = None

        # Attempt to use our APPRISE_URLS environment variable (if populated)
        a.add(os.environ['APPRISE_URLS'].strip())

    elif os.environ.get('APPRISE_CONFIG', '').strip():
        logger.debug('Loading provided APPRISE_CONFIG environment variable')
        # Fall back to config environment variable (if populated)
        a.add(AppriseConfig(
            paths=os.environ['APPRISE_CONFIG'].strip(),
            asset=asset, recursion=recursion_depth))
    else:
        # Load default configuration
        a.add(AppriseConfig(
            paths=[f for f in DEFAULT_SEARCH_PATHS if isfile(expanduser(f))],
            asset=asset, recursion=recursion_depth))

    if len(a) == 0:
        logger.error(
            'You must specify at least one server URL or populated '
            'configuration file.')
        print_help_msg(main)
        sys.exit(1)

    # each --tag entry comprises a comma-separated 'and' list;
    # we 'or' together each of the --tag 'and' sets specified.
    tags = None if not tag else [parse_list(t) for t in tag]

    if not dry_run:
        if body is None:
            logger.trace('No --body (-b) specified; reading from stdin')
            # if no body was specified, then read from STDIN
            body = click.get_text_stream('stdin').read()

        # now print it out
        result = a.notify(
            body=body, title=title, notify_type=notification_type, tag=tags,
            attach=attach)
    else:
        # Number of rows to assume in the terminal.  In future, maybe this can
        # be detected and made dynamic. The actual row count is 80, but 5
        # characters are already reserved for the counter on the left
        rows = 75

        # Initialize our URL response;  This is populated within the for/loop
        # below; but plays a factor at the end when we need to determine if
        # we iterated at least once in the loop.
        url = None

        for idx, server in enumerate(a.find(tag=tags)):
            url = server.url(privacy=True)
            click.echo("{: 3d}. {}".format(
                idx + 1,
                url if len(url) <= rows else '{}...'.format(url[:rows - 3])))
            if server.tags:
                click.echo("{} - {}".format(' ' * 5, ', '.join(server.tags)))

        # Initialize a default response of nothing matched, otherwise
        # if we matched at least one entry, we can return True
        result = None if url is None else True

    if result is None:
        # There were no notifications set.  This is a result of just having
        # empty configuration files and/or being too restrictive when filtering
        # by specific tag(s)

        # Exit code 3 is used since Click uses exit code 2 if there is an
        # error with the parameters specified
        sys.exit(3)

    elif result is False:
        # At least 1 notification service failed to send
        sys.exit(1)

    # else:  We're good!
    sys.exit(0)
Example #18
REDIS = None
if APP.config['EVENTSOURCE_SOURCE']:
    POOL = redis.ConnectionPool(
        host=APP.config['REDIS_HOST'],
        port=APP.config['REDIS_PORT'],
        db=APP.config['REDIS_DB'])
    REDIS = redis.StrictRedis(connection_pool=POOL)

if not APP.debug:
    APP.logger.addHandler(pagure.mail_logging.get_mail_handler(
        smtp_server=APP.config.get('SMTP_SERVER', '127.0.0.1'),
        mail_admin=APP.config.get('MAIL_ADMIN', APP.config['EMAIL_ERROR'])
    ))

# Send classic logs into syslog
SHANDLER = logging.StreamHandler()
SHANDLER.setLevel(APP.config.get('log_level', 'INFO'))
APP.logger.addHandler(SHANDLER)

LOG = APP.logger


def authenticated():
    ''' Utility function checking if the current user is logged in or not.
    '''
    return hasattr(flask.g, 'fas_user') and flask.g.fas_user is not None


def api_authenticated():
    ''' Utility function checking if the current user is logged in or not
    in the API.
Example #19
import logging
import os
import platform
import random
import time

project_name = "performance"
wkdir = os.getcwd()
logging.basicConfig(
    level=logging.DEBUG,
    format=
    '%(asctime)s %(levelname)s | %(message)s--[%(filename)-5s:%(lineno)d]',
    datefmt='%y%m%d %H:%M:%S',
    filename='%s%s%s%slog%s%s.log' %
    (wkdir, os.sep, project_name, os.sep, os.sep,
     time.strftime("%Y%m%d %H-%M-%S")),
    filemode='w')
if True:
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s | %(message)s--[%(filename)-5s:%(lineno)d]')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)


class Config:

    # Configure package_name, adb_location, mail_host, mail_user, mail_pass
    package_name = ""
    adb_location = ''
    adb = 'adb'
    mail_host = ""  # 设置邮箱服务器
    mail_user = ""  # 邮箱用户名
Example #20
import logging
import sys
from django.contrib.auth.models import AnonymousUser
from django_testscenarios.ubertest import TestCase

from lava_common.decorators import nottest
from lava_scheduler_app.models import Device, DeviceType, TestJob
from lava_server.lavatable import LavaTable, LavaView
from lava_scheduler_app.tables import (
    JobTable,
    DeviceTable,
    visible_jobs_with_custom_sort,
)

LOGGER = logging.getLogger()
LOGGER.level = logging.INFO  # change to DEBUG to see *all* output
LOGGER.addHandler(logging.StreamHandler(sys.stdout))

# pylint does not like TestCaseWithFactory


@nottest
class TestTable(LavaTable):
    pass


@nottest
class TestLengthTable(LavaTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.length = 50
Example #21
if not args.useWebsocket and (not args.certificatePath
                              or not args.privateKeyPath):
    parser.error("Missing credentials for authentication.")
    exit(2)

# Port defaults
if args.useWebsocket and not args.port:  # When no port override for WebSocket, default to 443
    port = 443
if not args.useWebsocket and not args.port:  # When no port override for non-WebSocket, default to 8883
    port = 8883

# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)

# Init AWSIoTMQTTClient
myAWSIoTMQTTClient = None
if useWebsocket:
    myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True)
    myAWSIoTMQTTClient.configureEndpoint(host, port)
    myAWSIoTMQTTClient.configureCredentials(rootCAPath)
else:
    myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId)
    myAWSIoTMQTTClient.configureEndpoint(host, port)
    myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath,
Example #22
def add_repo(repo, rdir_in, branch=None):
    """
    This will add a git repo into the mongo modulestore.
    If branch is left as None, it will fetch the most recent
    version of the current branch.
    """
    # pylint: disable=too-many-statements

    git_repo_dir = getattr(settings, 'GIT_REPO_DIR', DEFAULT_GIT_REPO_DIR)
    git_import_static = getattr(settings, 'GIT_IMPORT_STATIC', True)
    git_import_python_lib = getattr(settings, 'GIT_IMPORT_PYTHON_LIB', True)
    python_lib_filename = getattr(settings, 'PYTHON_LIB_FILENAME', DEFAULT_PYTHON_LIB_FILENAME)

    # Set defaults even if MONGODB_LOG isn't defined in settings
    mongo_db = {
        'host': 'localhost',
        'port': 27017,
        'user': '',
        'password': '',
        'db': 'xlog',
    }

    # Allow overrides
    if hasattr(settings, 'MONGODB_LOG'):
        for config_item in ['host', 'user', 'password', 'db', 'port']:
            mongo_db[config_item] = settings.MONGODB_LOG.get(
                config_item, mongo_db[config_item])

    if not os.path.isdir(git_repo_dir):
        raise GitImportErrorNoDir(git_repo_dir)
    # pull from git
    if not (repo.endswith('.git') or
            repo.startswith(('http:', 'https:', 'git:', 'file:'))):
        raise GitImportErrorUrlBad()

    if rdir_in:
        rdir = os.path.basename(rdir_in)
    else:
        rdir = repo.rsplit('/', 1)[-1].rsplit('.git', 1)[0]
    log.debug(u'rdir = %s', rdir)

    rdirp = '{0}/{1}'.format(git_repo_dir, rdir)
    if os.path.exists(rdirp):
        log.info('directory already exists, doing a git pull instead '
                 'of git clone')
        cmd = ['git', 'pull', ]
        cwd = rdirp
    else:
        cmd = ['git', 'clone', repo, ]
        cwd = git_repo_dir

    cwd = os.path.abspath(cwd)
    try:
        ret_git = cmd_log(cmd, cwd=cwd)
    except subprocess.CalledProcessError as ex:
        log.exception(u'Error running git pull: %r', ex.output)
        raise GitImportErrorCannotPull()

    if branch:
        switch_branch(branch, rdirp)

    # get commit id
    cmd = ['git', 'log', '-1', '--format=%H', ]
    try:
        commit_id = cmd_log(cmd, cwd=rdirp)
    except subprocess.CalledProcessError as ex:
        log.exception(u'Unable to get git log: %r', ex.output)
        raise GitImportErrorBadRepo()

    ret_git += u'\nCommit ID: {0}'.format(commit_id)

    # get branch
    cmd = ['git', 'symbolic-ref', '--short', 'HEAD', ]
    try:
        branch = cmd_log(cmd, cwd=rdirp)
    except subprocess.CalledProcessError as ex:
        # I can't find a way to exercise this path, but git is complex,
        # so still logging and raising here just in case.
        log.exception(u'Unable to determine branch: %r', ex.output)
        raise GitImportErrorBadRepo()

    ret_git += u'{0}Branch: {1}'.format('   \n', branch)

    # Get XML logging logger and capture debug to parse results
    output = StringIO.StringIO()
    import_log_handler = logging.StreamHandler(output)
    import_log_handler.setLevel(logging.DEBUG)

    logger_names = ['xmodule.modulestore.xml_importer', 'git_add_course',
                    'xmodule.modulestore.xml', 'xmodule.seq_module', ]
    loggers = []

    for logger_name in logger_names:
        logger = logging.getLogger(logger_name)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(import_log_handler)
        loggers.append(logger)

    try:
        management.call_command(
            'import', git_repo_dir, rdir,
            nostatic=not git_import_static, nopythonlib=not git_import_python_lib,
            python_lib_filename=python_lib_filename
        )
    except CommandError:
        raise GitImportErrorXmlImportFailed()
    except NotImplementedError:
        raise GitImportErrorUnsupportedStore()

    ret_import = output.getvalue()

    # Remove handler hijacks
    for logger in loggers:
        logger.setLevel(logging.NOTSET)
        logger.removeHandler(import_log_handler)
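
    # After the capture: setLevel(logging.NOTSET) makes each logger fall back
    # to its ancestors' effective level again, and removing the handler stops
    # further records from being written into the StringIO buffer.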

    course_key = None
    location = 'unknown'

    # extract course ID from output of import-command-run and make symlink
    # this is needed in order for custom course scripts to work
    match = re.search(r'(?ms)===> IMPORTING courselike (\S+)', ret_import)
    if match:
        course_id = match.group(1).split('/')
        # We need to transform the course key extracted from the logs into a
        # CourseLocator instance, because the split modulestore stores course
        # keys as CourseLocator objects; please see
        # common.lib.xmodule.xmodule.modulestore.split_mongo.split.SplitMongoModuleStore#make_course_key
        # Storing the course id in CourseImportLog as a CourseLocator keeps it
        # consistent with the rest of the split-modulestore environment.
        course_key = CourseLocator(*course_id)
        cdir = '{0}/{1}'.format(git_repo_dir, course_key.course)
        log.debug(u'Studio course dir = %s', cdir)

        if os.path.exists(cdir) and not os.path.islink(cdir):
            log.debug('   -> exists, but is not symlink')
            log.debug(subprocess.check_output(['ls', '-l', ],
                                              cwd=os.path.abspath(cdir)))
            try:
                os.rmdir(os.path.abspath(cdir))
            except OSError:
                log.exception('Failed to remove course directory')

        if not os.path.exists(cdir):
            log.debug(u'   -> creating symlink between %s and %s', rdirp, cdir)
            try:
                os.symlink(os.path.abspath(rdirp), os.path.abspath(cdir))
            except OSError:
                log.exception('Unable to create course symlink')
            log.debug(subprocess.check_output(['ls', '-l', ],
                                              cwd=os.path.abspath(cdir)))

    # store import-command-run output in mongo
    mongouri = 'mongodb://{user}:{password}@{host}:{port}/{db}'.format(**mongo_db)

    try:
        if mongo_db['user'] and mongo_db['password']:
            mdb = mongoengine.connect(mongo_db['db'], host=mongouri)
        else:
            mdb = mongoengine.connect(mongo_db['db'], host=mongo_db['host'], port=mongo_db['port'])
    except mongoengine.connection.ConnectionError:
        log.exception('Unable to connect to mongodb to save log, please '
                      'check MONGODB_LOG settings')
    cil = CourseImportLog(
        course_id=course_key,
        location=location,
        repo_dir=rdir,
        created=timezone.now(),
        import_log=ret_import,
        git_log=ret_git,
    )
    cil.save()

    log.debug(u'saved CourseImportLog for %s', cil.course_id)
    mdb.disconnect()
Example #23
0
import logging
import os
import re
import sys

CURDIR = os.getcwd()
CONFIG_FILES = 'pylama.ini', 'setup.cfg', 'tox.ini', 'pytest.ini'

#: The skip pattern
SKIP_PATTERN = re.compile(r'# *noqa\b', re.I).search

# Parse a modelines
MODELINE_RE = re.compile(
    r'^\s*#\s+(?:pylama:)\s*((?:[\w_]*=[^:\n\s]+:?)+)',
    re.I | re.M)

# Setup a logger
LOGGER = logging.getLogger('pylama')
LOGGER.propagate = False
STREAM = logging.StreamHandler(sys.stdout)
LOGGER.addHandler(STREAM)
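# With propagate=False, records from the 'pylama' logger (and its children)
# are handled only by the stdout StreamHandler added here and never reach the
# root logger's handlers, so a basicConfig() set up by the host application
# cannot print them a second time.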


class _Default(object):  # pylint: disable=too-few-public-methods

    def __init__(self, value=None):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return "<_Default [%s]>" % self.value

Example #24
0
def setup_logging(
    root_log_level: Optional[str] = None,
    module_log_levels: Optional[Mapping[str, Union[str, int]]] = None,
    add_console_handler: bool = True,
    console_handler_format: Optional[str] = None,
    exception_handler: Optional[Callable[[Type[BaseException], BaseException, TracebackType], None]] = default_exc_handler
):
    """Configure and install the log handlers for each application's namespace.

    :param root_log_level: The log level all loggers use by default. (Default: `"WARNING"`)
    :param module_log_levels: A mapping of module names to their desired log levels.
    :param add_console_handler: If `True`, enable logging to stdout. (Default: `True`).
    :param console_handler_format: Specifies the format of stdout logs. (Default: DEFAULT_LOG_FORMAT).
    :param exception_handler: Specifies the exception handler to use after setting up muselog.
        If `None`, do not install an exception handler.
        (Default: default_exc_handler)
    """
    if root_log_level is None:
        root_log_level = "WARNING"

    root_logger = logging.getLogger()
    root_logger.setLevel(root_log_level)

    if module_log_levels:
        for module_name, log_level in module_log_levels.items():
            logging.getLogger(module_name).setLevel(log_level)

    if add_console_handler:
        if os.environ.get("ENABLE_DATADOG_JSON_FORMATTER", "false").lower() == "true":
            from muselog.datadog import DatadogJSONFormatter
            if os.environ.get("DD_TRACE_ENABLED", "false").lower() == "true":
                trace_enabled = True
            elif os.environ.get("DATADOG_TRACE_ENABLED", "false").lower() == "true":
                trace_enabled = True
            else:
                trace_enabled = False
            formatter = DatadogJSONFormatter(trace_enabled=trace_enabled)
        else:
            formatter = logging.Formatter(fmt=console_handler_format or DEFAULT_LOG_FORMAT)
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        if root_logger.handlers:
            # The root logger may already have a default StreamHandler attached
            # (for example by an earlier basicConfig() call). Remove it here so
            # records are not emitted twice and so console output does not
            # bypass the JSON formatter that Datadog expects.
            root_logger.removeHandler(root_logger.handlers[0])
        root_logger.addHandler(console_handler)

    # Add datadog handler if log to datadog is enabled
    if "DATADOG_HOST" in os.environ:
        from muselog.datadog import DataDogUdpHandler
        opts = dict(
            host=os.environ["DATADOG_HOST"],
            port=int(os.environ.get("DATADOG_UDP_PORT", 10518))
        )

        datadog_handler = DataDogUdpHandler(**opts)
        root_logger.addHandler(datadog_handler)

    if exception_handler is not None:
        sys.excepthook = exception_handler
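
# A minimal usage sketch (illustrative; the module names below are made up):
#
#     setup_logging(
#         root_log_level="INFO",
#         module_log_levels={"botocore": "WARNING", "myapp.db": "DEBUG"},
#         add_console_handler=True,
#     )
#
# Everything WARNING and above from botocore, DEBUG and above from myapp.db,
# and INFO and above from all other loggers would then reach the console
# handler configured above.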
Example #25
0
import argparse
import logging
import sys
import numpy as np
from PIL import Image, ImageTk
import tkinter as tk

logger = logging.getLogger('tkcanvasimage')
logger.addHandler(logging.StreamHandler(sys.stdout))
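# No level is set on this logger, so it inherits the root logger's effective
# level (WARNING by default); the StreamHandler itself is NOTSET and emits
# whatever the logger passes. To see debug output one would additionally call
# logger.setLevel(logging.DEBUG).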


class TkDrawable:
    def __init__(self, scaling_factor=1.0, position=[0, 0]):
        self._canvas = None
        self._canvas_id = []
        self._scaling_factor = scaling_factor
        self._position = position

    def bind_canvas(self, canvas):
        self._canvas = canvas
        self.scaling_factor = canvas.scaling_factor

    @property
    def scaling_factor(self):
        return self._scaling_factor

    @scaling_factor.setter
    def scaling_factor(self, scaling_factor):
        self._scaling_factor = scaling_factor

    @property
Example #26
0
    def __init__(self):
        self.running = True
        self.analyzers: List[SignalAnalyzer] = []
        self.args = Runner.parser.parse_args()

        # logging levels increase in steps of 10, start with warning
        logging_level = max(0, logging.WARN - (self.args.verbose * 10))
        logging_stderr = logging.StreamHandler()
        logging_stderr.setLevel(logging_level)
        logging.basicConfig(level=logging.DEBUG, handlers=[logging_stderr])
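        # Worked out, the -v count maps to: 0 -> 30 (WARNING), 1 -> 20 (INFO),
        # 2 -> 10 (DEBUG), 3 or more -> 0, i.e. the handler passes everything
        # the DEBUG-level root logger emits.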

        signal.signal(signal.SIGINT, lambda sig, _: self.terminate(sig))
        signal.signal(signal.SIGTERM, lambda sig, _: self.terminate(sig))

        # initialize calibration parameter if unset
        if len(self.args.calibration) == 0:
            self.args.calibration = [0.0] * len(self.args.device)
            logger.info(f"No calibration values supplied, using {self.args.calibration}")
        elif len(self.args.calibration) != len(self.args.device):
            logger.critical(f"Calibration values {self.args.calibration} do not match devices {self.args.device}.")
            exit(1)

        # export configuration
        if self.args.export_config:
            path = f"{self.args.path}/{socket.gethostname()}/radiotracking"
            os.makedirs(path, exist_ok=True)

            ts = datetime.datetime.now()
            config_export_path = f"{path}/{self.args.station}_{ts:%Y-%m-%dT%H%M%S}.ini"
            with open(config_export_path, "w") as config_export_file:
                Runner.parser.write_config(self.args, config_export_file)

        # create process connector
        self.connector = ProcessConnector(**self.args.__dict__)

        # create signal matcher and add to connector queue
        self.matcher = SignalMatcher(signal_queue=self.connector.q, **self.args.__dict__)
        self.connector.consumers.append(self.matcher)

        # add vizualization consumer
        if self.args.dashboard:
            from radiotracking.present import Dashboard

            self.dashboard = Dashboard(**self.args.__dict__)
            self.connector.consumers.append(self.dashboard)
        else:
            self.dashboard = None

        self.schedule = []

        for entry in self.args.schedule:
            start, stop = entry.split("-")

            try:
                start_s = schedule.every().day.at(start)
                stop_s = schedule.every().day.at(stop)

                if start_s.at_time > stop_s.at_time:
                    raise schedule.ScheduleError("Schedule start is after stop")

                start_s.do(self.start_analyzers)
                stop_s.do(self.stop_analyzers)

                # check if there is an overlap with another schedule
                for other_start, other_stop in self.schedule:
                    # if they start before us and don't finish before us
                    if other_start < start_s.at_time and not other_stop < start_s.at_time:
                        raise schedule.ScheduleError(f"Scheduling overlaps with {other_start}-{other_stop}")

                    # if we start before them and do not finish before them
                    if start_s.at_time < other_start:
                        logger.debug("we start before them")
                        if not stop_s.at_time < other_start:
                            logger.debug("we don't finish before them")
                            raise schedule.ScheduleError(f"Scheduling overlaps with {other_start}-{other_stop}")

                self.schedule.append((start_s.at_time, stop_s.at_time))
                logger.debug(f"Added {start_s.at_time}-{stop_s.at_time} to schedule")

            except schedule.ScheduleError as error:
                logger.error(f"{error}, please check configuration '{entry}'.")
                exit(1)
Example #27
0
import logging
import time

import pytest
from helpers.cluster import ClickHouseCluster

logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())


@pytest.fixture(scope="module")
def cluster():
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance("node1",
                             main_configs=["configs/config.d/s3.xml"],
                             macros={'replica': '1'},
                             with_minio=True,
                             with_zookeeper=True)
        cluster.add_instance("node2",
                             main_configs=["configs/config.d/s3.xml"],
                             macros={'replica': '2'},
                             with_minio=True,
                             with_zookeeper=True)
        logging.info("Starting cluster...")
        cluster.start()
        logging.info("Cluster started")

        yield cluster
    finally:
        cluster.shutdown()
Example #28
0
import logging
import sys

from patterns.equation import Equation
from patterns.fading import Fading
from patterns.fire import Fire
from patterns.firework import FireWork
from patterns.meteor import Meteor
from patterns.music import Music
from patterns.off import Off
from patterns.pulse import Pulse
from patterns.rainbow import Rainbow
from patterns.snow import Snow
from patterns.steady import Steady

pattern_logger = logging.getLogger("pattern_logger")
pattern_logger.setLevel(logging.DEBUG)

handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s\n')
handler.setFormatter(formatter)
pattern_logger.addHandler(handler)
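# The trailing '\n' in the format string above adds a blank line after every
# record, purely a readability choice for the console output.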

Patterns = dict(
    ColorWipe=ColorWipe,
    Fading=Fading,
    Fire=Fire,
    FireWork=FireWork,
    Meteor=Meteor,
    Off=Off,
    Rainbow=Rainbow,
    Snow=Snow,
Example #29
0
def configure_logger(app):
    """Configure loggers."""
    handler = logging.StreamHandler(sys.stdout)
    if not app.logger.handlers:
        app.logger.addHandler(handler)
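    # The handlers check keeps configure_logger() idempotent: calling it more
    # than once (for example from an application factory or in tests) will not
    # stack duplicate StreamHandlers on app.logger.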
Example #30
0
def setup_logging():
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(logging.BASIC_FORMAT, None)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
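    # logging.BASIC_FORMAT is "%(levelname)s:%(name)s:%(message)s"; `logger`
    # is assumed to be a module-level logging.getLogger(...) defined elsewhere
    # in the original file, since it is not part of this excerpt.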