Example #1
0
File: cmd.py Project: mbr/githome
def shell(obj, username):
    """Authorize and exec an SSH command for *username*.

    Looks the user up in the githome instance stored in the context object,
    validates SSH_ORIGINAL_COMMAND against the user's permissions and then
    replaces the current process with the authorized command.
    """
    gh = obj['githome']

    # Bug fix: the logger used to be created only *after* get_user_by_name()
    # succeeded, so any exception from the lookup caused a NameError on `log`
    # inside the except-handler. Create it up front from the raw username.
    log = Logger('githome-shell [{}]'.format(username))

    try:
        # get user
        user = gh.get_user_by_name(username)

        # rebind using the canonical spelling from the user record
        log = Logger('githome-shell [{}]'.format(user.name))

        # we've got our user, now authorize him or not
        shell_cmd = shlex.split(os.environ.get('SSH_ORIGINAL_COMMAND', ''))
        log.debug('SSH_ORIGINAL_COMMAND {!r}'.format(shell_cmd))

        if not shell_cmd:
            log.critical('No shell command given')
            abort(1)

        cmd = gh.authorize_command(user, shell_cmd)

        log.debug('Executing {!r}', cmd)

        binary = cmd[0]  # we use path through execlp
    except Exception as e:
        log.error(str(e))
        abort(1)
    else:
        # only reached on success; execlp replaces the current process image
        os.execlp(binary, *cmd)
Example #2
0
def dispatch(args):
    """
    Create a sample JSON file for a run, and launch it using Popen.

    Validates the setup file, reference, and output directories, writes a
    per-sample config and an sbatch script, then (unless --dry-run) submits
    the script via ``sbatch``.
    """
    dispatch_log = Logger('dispatch')
    # validate that programs in setup file exist
    setup_params = json.load(args.setup)
    validate_setupfile(setup_params)

    # validate reference
    validate_reference(args.ref)

    # validate directories
    validate_directory(args.log, dispatch_log)
    validate_directory(args.stats, dispatch_log)
    validate_directory(args.bam_dir, dispatch_log)

    if (args.scythe and args.adapter is None) or (args.adapter is not None and not os.path.isfile(args.adapter)):
        # Bug fix: this used the undefined name `logger`; every other message
        # in this function goes through `dispatch_log`. Also fixed the
        # "no specified" typo in the message.
        dispatch_log.critical("adapter file for Scythe not specified, or does not exist.")
        sys.exit(1)

    # create sample config JSON file, starting off with global config passed through args
    global_sample_config = dict(reference=args.ref, adapters_file=args.adapter,
                                prior=str(args.prior), error=args.trim_error,
                                stats_dir=args.stats, nthreads=args.threads,
                                mem=args.mem, bam_dir=args.bam_dir)

    # which preprocess steps to use
    global_sample_config["preprocess-steps"] = list()
    for step in PREPROCESSING_STEPS:
        # getattr() is the idiomatic form of args.__getattribute__(step)
        if step in args and getattr(args, step):
            global_sample_config["preprocess-steps"].append(step)

    # Bug fix: dict(a.items() + b.items()) is Python-2-only (dict views do not
    # support +). Copy then update so setup-file values still win on clashes.
    global_params = dict(global_sample_config)
    global_params.update(setup_params)
    sample_config = "%s_samples.txt" % args.job
    samples = create_sample_config(args.samples, sample_config, global_params)

    # create batch script
    sbatch_params = {"log_dir":args.log, "jobname":args.job, "nthreads":args.threads,
                    "mem":args.mem, "nsamples":len(samples), "sample_dispatch_py":__file__,
                    "sample_config":sample_config, "partition":args.partition}
    batch_script = safe_templater(SLURM_BATCH, sbatch_params)
    batch_file = "%s_batch.sh" % args.job
    with open(batch_file, 'w') as f:
        f.write(batch_script)

    if not args.dry_run:
        # now, start the batch script
        dispatch_log.info("submitting sbatch script '%s'." % batch_file)
        sbatch_cmd = ["sbatch"]

        if args.email is not None:
            sbatch_cmd.extend(["--mail-type", "ALL"])
            sbatch_cmd.extend(["--mail-user", args.email])
        sbatch_cmd.append(batch_file)
        # NOTE(review): check_call() raises CalledProcessError on non-zero
        # exit, so retcode is always 0 here; the guard below is defensive.
        retcode = check_call(sbatch_cmd)
        if retcode != 0:
            dispatch_log.critical("submitting batch script '%s' exited abnormally with return code %d." % (batch_file, retcode))
            # Bug fix: retcode is a plain int; it has no .returncode attribute.
            sys.exit(retcode)
        dispatch_log.info("submitting sbatch script '%s' complete." % batch_file)
Example #3
0
class CustomLogger(object):
    """Thin wrapper around a logbook Logger that mirrors every record to
    stdout and to ``WindAdapter.log``."""

    def __init__(
        self,
        log_level=LogLevel.INFO,
        format_str='[{record.time:%Y-%m-%d %H:%M:%S}] - {record.channel} - {record.level_name} '
        '- {record.message}'):
        """Create the backend logger, install both handlers and set the level."""
        self.logger = Logger('WindAdapter')
        set_datetime_format('local')
        # push the console handler first, then the file handler
        StreamHandler(sys.stdout, format_string=format_str).push_application()
        FileHandler('WindAdapter.log', bubble=True,
                    format_string=format_str).push_application()
        self.set_level(log_level)

    def set_level(self, log_level):
        """Translate a LogLevel name into the matching logbook level.

        Unknown names leave the current level untouched.
        """
        translation = {
            LogLevel.INFO: logbook.INFO,
            LogLevel.WARNING: logbook.WARNING,
            LogLevel.CRITICAL: logbook.CRITICAL,
            LogLevel.NOTSET: logbook.NOTSET,
        }
        wanted = log_level.lower()
        if wanted in translation:
            self.logger.level = translation[wanted]

    def info(self, msg):
        """Emit *msg* at INFO severity."""
        self.logger.info(msg)

    def warning(self, msg):
        """Emit *msg* at WARNING severity."""
        self.logger.warning(msg)

    def critical(self, msg):
        """Emit *msg* at CRITICAL severity."""
        self.logger.critical(msg)
Example #4
0
def runner(args):
    """Run one sample through the NGS preprocessing/alignment pipeline.

    The per-sample parameters arrive as a single JSON line on stdin, or
    from the test/debug config file when ``args.config`` is supplied.
    Exits with the child's return code on failure.
    """
    # pick the parameter source: stdin in production, the config file in tests
    source = sys.stdin if args.config is None else args.config
    sample_params = json.loads(source.readline().rstrip())

    sample = sample_params["sample_id"]
    runner_log = Logger("%s logger" % sample)

    cmd = safe_templater(SAMPLE_PROCESS_CMD, sample_params)
    runner_log.info("%s starting preprocessing and alignment of sample." %
                    sample)
    if args.dry_run:
        runner_log.debug("%s command: %s" % (sample, cmd))
        return

    started = time.time()
    proc = Popen(cmd, shell=True, executable=find_bash())
    proc.wait()
    if proc.returncode != 0:
        # make this as loud as possible so Slurm can handle it
        runner_log.critical("%s exited abnormally with return code %d." %
                            (sample, proc.returncode))
        sys.exit(proc.returncode)
    elapsed = time.time() - started
    runner_log.info("%s completed preprocessing and alignment in %s seconds." %
                    (sample, str(round(elapsed, 5))))
Example #5
0
def runner(args):
    """Execute the preprocessing/alignment command for a single sample.

    Sample parameters are one JSON line read from stdin (production) or
    from the file given via ``args.config`` (testing/debugging).
    """
    if args.config is None:
        line = sys.stdin.readline()
    else:
        # read the first line from the test/debug config file
        line = args.config.readline()
    sample_params = json.loads(line.rstrip())

    sample = sample_params["sample_id"]
    runner_log = Logger("%s logger" % sample)

    cmd = safe_templater(SAMPLE_PROCESS_CMD, sample_params)
    runner_log.info("%s starting preprocessing and alignment of sample." % sample)
    if args.dry_run:
        runner_log.debug("%s command: %s" % (sample, cmd))
        return
    t0 = time.time()
    child = Popen(cmd, shell=True, executable=find_bash())
    child.wait()
    if child.returncode == 0:
        elapsed = time.time() - t0
        runner_log.info("%s completed preprocessing and alignment in %s seconds." % (sample, str(round(elapsed, 5))))
    else:
        # make this as loud as possible so Slurm can handle it
        runner_log.critical("%s exited abnormally with return code %d." % (sample, child.returncode))
        sys.exit(child.returncode)
Example #6
0
def test2():
    """Emit one record at every logbook severity on the Logbook-test-2 channel."""
    log = Logger('Logbook-test-2')
    # same severities and messages as before, driven by a table
    for emit, message in (
        (log.critical, "critical"),
        (log.error, "error"),
        (log.warn, "warn"),
        (log.notice, "notice"),
        (log.info, "test"),
        (log.debug, "debug"),
    ):
        emit(message)
Example #7
0
def dispatch(args):
    """
    Create a sample JSON file for a run, and launch it using Popen.

    Validates inputs, writes the per-sample config and sbatch script, then
    (unless --dry-run) submits the script with ``sbatch``.
    """
    dispatch_log = Logger("dispatch")
    # validate that programs in setup file exist
    setup_params = json.load(args.setup)
    validate_setupfile(setup_params)

    # validate reference
    validate_reference(args.ref)

    # validate directories
    validate_directory(args.log, dispatch_log)
    validate_directory(args.stats, dispatch_log)
    validate_directory(args.bam_dir, dispatch_log)

    # create sample config JSON file, starting off with global config passed through args
    global_sample_config = dict(
        reference=args.ref,
        adapters_file=args.adapter,
        prior=str(args.prior),
        error=args.trim_error,
        stats_dir=args.stats,
        nthreads=args.threads,
        mem=args.mem,
        bam_dir=args.bam_dir,
    )
    # Bug fix: dict(a.items() + b.items()) is Python-2-only (dict views do not
    # support +). Copy then update so setup-file values still win on clashes.
    global_params = dict(global_sample_config)
    global_params.update(setup_params)
    sample_config = "%s_samples.txt" % args.job
    samples = create_sample_config(args.samples, sample_config, global_params)

    # create batch script
    sbatch_params = {
        "log_dir": args.log,
        "jobname": args.job,
        "nthreads": args.threads,
        "mem": args.mem,
        "nsamples": len(samples),
        "sample_dispatch_py": __file__,
        "sample_config": sample_config,
        "partition": args.partition,
    }
    batch_script = safe_templater(SLURM_BATCH, sbatch_params)
    batch_file = "%s_batch.sh" % args.job
    with open(batch_file, "w") as f:
        f.write(batch_script)

    if not args.dry_run:
        # now, start the batch script
        dispatch_log.info("submitting sbatch script '%s'." % batch_file)
        # NOTE(review): check_call() raises CalledProcessError on non-zero
        # exit, so retcode is always 0 here; the guard below is defensive.
        retcode = check_call(["sbatch", batch_file])
        if retcode != 0:
            dispatch_log.critical(
                "submitting batch script '%s' exited abnormally with return code %d." % (batch_file, retcode)
            )
            # Bug fix: retcode is a plain int; it has no .returncode attribute.
            sys.exit(retcode)
        # Bug fix: a successful submission is an informational event, not a
        # critical one (matches the sibling implementation of this function).
        dispatch_log.info("submitting sbatch script '%s' complete." % batch_file)
Example #8
0
def dispatch(args):
    """
    Create a sample JSON file for a run, and launch it using Popen.

    Validates inputs, writes the per-sample config and sbatch script, then
    (unless --dry-run) submits the script with ``sbatch``.
    """
    dispatch_log = Logger('dispatch')
    # validate that programs in setup file exist
    setup_params = json.load(args.setup)
    validate_setupfile(setup_params)

    # validate reference
    validate_reference(args.ref)

    # validate directories
    validate_directory(args.log, dispatch_log)
    validate_directory(args.stats, dispatch_log)
    validate_directory(args.bam_dir, dispatch_log)

    # create sample config JSON file, starting off with global config passed through args
    global_sample_config = dict(reference=args.ref,
                                adapters_file=args.adapter,
                                prior=str(args.prior),
                                error=args.trim_error,
                                stats_dir=args.stats,
                                nthreads=args.threads,
                                mem=args.mem,
                                bam_dir=args.bam_dir)
    # Bug fix: dict(a.items() + b.items()) is Python-2-only (dict views do not
    # support +). Copy then update so setup-file values still win on clashes.
    global_params = dict(global_sample_config)
    global_params.update(setup_params)
    sample_config = "%s_samples.txt" % args.job
    samples = create_sample_config(args.samples, sample_config, global_params)

    # create batch script
    sbatch_params = {
        "log_dir": args.log,
        "jobname": args.job,
        "nthreads": args.threads,
        "mem": args.mem,
        "nsamples": len(samples),
        "sample_dispatch_py": __file__,
        "sample_config": sample_config,
        "partition": args.partition
    }
    batch_script = safe_templater(SLURM_BATCH, sbatch_params)
    batch_file = "%s_batch.sh" % args.job
    with open(batch_file, 'w') as f:
        f.write(batch_script)

    if not args.dry_run:
        # now, start the batch script
        dispatch_log.info("submitting sbatch script '%s'." % batch_file)
        # NOTE(review): check_call() raises CalledProcessError on non-zero
        # exit, so retcode is always 0 here; the guard below is defensive.
        retcode = check_call(["sbatch", batch_file])
        if retcode != 0:
            dispatch_log.critical(
                "submitting batch script '%s' exited abnormally with return code %d."
                % (batch_file, retcode))
            # Bug fix: retcode is a plain int; it has no .returncode attribute.
            sys.exit(retcode)
        # Bug fix: a successful submission is an informational event, not a
        # critical one (matches the sibling implementation of this function).
        dispatch_log.info("submitting sbatch script '%s' complete." %
                          batch_file)
Example #9
0
                            "name"] + " : " + channel_info["channel"][
                                "purpose"]["value"]
                    sc.api_call("chat.postMessage",
                                channel=room,
                                text=text.encode("utf-8"),
                                link_names="1",
                                as_user="******")
                    time.sleep(1)
                elif is_direct_message(
                        res,
                        json.loads(sc.api_call("im.list"))["ims"]):
                    channels = json.loads(
                        sc.api_call("channels.list", exclude_archived="1"))
                    text = convert_channels_to_text(channels)
                    sc.api_call("chat.postMessage",
                                channel=res["channel"],
                                text=text,
                                link_names="1",
                                as_user="******")
                    time.sleep(1)
                elif is_emoji_changed_event(res):
                    text = "New Stamp!`:{}:`:sparkles:\nOriginal: {}".format(
                        res["name"], res["value"])
                    sc.api_call("chat.postMessage",
                                channel=room,
                                text=text,
                                as_user="******")

else:
    logger.critical("Connection Failed, invalid token?")
Example #10
0
pyfalog.debug('Getting gamedata version')
# This should be moved elsewhere, maybe as an actual query. Current, without try-except, it breaks when making a new
# game db because we haven't reached gamedata_meta.create_all()
try:
    # read the client build and dump time out of the gamedata `metadata` table
    config.gamedata_version = gamedata_session.execute(
        "SELECT `field_value` FROM `metadata` WHERE `field_name` LIKE 'client_build'"
    ).fetchone()[0]
    config.gamedata_date = gamedata_session.execute(
        "SELECT `field_value` FROM `metadata` WHERE `field_name` LIKE 'dump_time'"
    ).fetchone()[0]
except (KeyboardInterrupt, SystemExit):
    # never swallow user/interpreter shutdown requests
    raise
except Exception as e:
    # best-effort: a freshly created database has no metadata rows yet, so
    # fall back to "unknown" instead of aborting startup
    pyfalog.warning("Missing gamedata version.")
    pyfalog.critical(e)
    config.gamedata_version = None
    config.gamedata_date = None

pyfalog.debug('Initializing saveddata')
saveddata_connectionstring = config.saveddata_connectionstring
if saveddata_connectionstring is not None:
    # the connection string may be either a factory callable or a plain URL
    if callable(saveddata_connectionstring):
        saveddata_engine = create_engine(creator=saveddata_connectionstring,
                                         echo=config.debug)
    else:
        saveddata_engine = create_engine(saveddata_connectionstring,
                                         echo=config.debug)

    saveddata_meta = MetaData()
    saveddata_meta.bind = saveddata_engine
Example #11
0
    logger.info('users loaded successfullly')
    #Attempt to load the webdriver
    try:
        logger.info('starting webdriver...')
        '''
            For windows users: Replace line 31 with 28 and 29
            Ensure that geckodriver is in path i.e 'C:\Users\USERNAME\geckodriver.exe'
            Edit to use chromedriver or any other webdriver available
            path = os.path.join(os.getenv('USERPROFILE'), 'geckdriver.exe')
            driver = webdriver.Firefox(executable_path=path)
        '''
        driver = webdriver.Firefox()
        logger.info('wedriver started successfully')

    except Exception as exception:
        logger.critical(type(exception).__name__+' Exception occured when loading webdriver')
        try:
            driver.quit()
        except NameError:
            pass
        exit(0)
    #Attempt to login users into url
    try:   
        for user in users:
            url = 'https://www.imlango.co.ke'
            logger.info('loading '+url)
            logger.info('Please wait this might take a while...')
            driver.get(url)
            email = driver.find_element_by_id('TxtUsername1')
            email.send_keys(user)
            password = driver.find_element_by_id('TxtPassword1')
Example #12
0
                return
            except urllib2.HTTPError, e:
                logger.error('unlock HTTPError: %s, key %r, retries left %s' %
                             (e, key, num_retries))
                if e.code == 417:  # paxos failed, retry
                    num_retries -= 1
                    time.sleep(random.random())
                else:
                    logger.critical('unlock failed because exception')
                    raise
            except urllib2.URLError, e:
                logger.error('lock URLError: %s, key %r, retries left %s' %
                             (e, key, num_retries))
                num_retries -= 1
                time.sleep(random.random())
        logger.critical('unlock failed after retries')
        raise


def test(servers):
    d = defaultdict(list)
    seed = int(time.time())
    print 'RANDOM SEED: %s' % seed
    random.seed(seed)

    def watch_on_progress(exit_event):
        done = len(d[0])
        total = NUM_ITERATIONS * NUM_WORKERS
        bar_length = 40
        previous_progress = 0
        time_since_progress = time.time()
Example #13
0
class PmLogHandler(log.CementLogHandler):  
    """
    PmLogHandler - override CementLogHandler to use logbook.

    This class uses the same configuration options as 
    :ref:`LoggingLogHandler <cement.ext.ext_logging>`
    """
    
    class Meta:
        interface = log.ILog
        """The interface that this class implements."""

        label = 'pmlog'
        """The string identifier of this handler."""

        namespace = "pm"
        """
        The logging namespace.  
        
        Note: Although Meta.namespace defaults to None, Cement will set this 
        to the application label (CementApp.Meta.label) if not set during
        setup.
        """

        file_format = "{record.time} ({record.level_name}) {record.channel} : {record.message}"
        """The logging format for the file logger."""

        console_format = "{record.time:%Y-%m-%d %H:%M} ({record.level_name}): {record.message}"
        """The logging format for the consoler logger."""

        debug_format = "{record.time} ({record.level_name}) {record.channel} : {record.message}"
        """The logging format for both file and console if ``debug==True``."""

        log_setup = None
        """Nested log setup placeholder"""

        level = 0
        """Global level for handlers"""

        clear_loggers = True
        """Whether of not to clear previous loggers first."""
        # These are the default config values, overridden by any '[log]' 
        # section in parsed config files.
        config_section = 'log'
        """
        The section of the application configuration that holds this handlers
        configuration.
        """
        
        config_defaults = dict(
            file=None,
            level='INFO',
            to_console=True,
            rotate=False,
            max_bytes=512000,
            max_files=4,
            )
        """
        The default configuration dictionary to populate the ``log`` section.
        """
            
    levels = ['INFO', 'WARN', 'ERROR', 'DEBUG', 'FATAL']


    def __init__(self, *args, **kw):
        # ``self.app`` is attached later by Cement through _setup(); until
        # then this handler has no backend logger.
        super(PmLogHandler, self).__init__(*args, **kw)
        self.app = None
        
    def _setup(self, app_obj):
        """Bind this handler to *app_obj* and build the logbook backend.

        Creates the backend ``Logger``, applies the configured level, then
        installs console/file/null handlers in that order and wraps them in
        a ``NestedSetup``.
        """
        super(PmLogHandler, self)._setup(app_obj)
        if self._meta.namespace is None:
            # fall back to the application label as the logging namespace
            self._meta.namespace = self.app._meta.label

        self.backend = Logger(self._meta.namespace)

        # hack for application debugging
        if is_true(self.app._meta.debug):
            self.app.config.set('log', 'level', 'DEBUG')
            
        # Mainly for backwards compatibility since Logger level should
        # be NOTSET (level 0). Output level is controlled by handlers
        self.set_level(self.app.config.get('log', 'level'))
        
        # clear loggers?
        if is_true(self._meta.clear_loggers):
            self.clear_loggers()
            
        # console
        if is_true(self.app.config.get('log', 'to_console')):
            self._setup_console_log()
        
        # file
        if self.app.config.get('log', 'file'):
            self._setup_file_log()
        # nested setup
        self.backend.handlers.append(logbook.NullHandler(bubble=False))
        self.log_setup = logbook.NestedSetup(self.backend.handlers)
        # NOTE(review): ``_console_handler`` is only assigned in
        # _setup_console_log(); this line assumes 'to_console' was enabled.
        # Verify the config default always holds here.
        with self._console_handler.applicationbound():
            self.debug("logging initialized for '%s' using PmLogHandler" % \
                           self._meta.namespace)


    def set_level(self, level):
        """
        Set the log level.  Must be one of the log levels configured in 
        self.levels which are ``['INFO', 'WARN', 'ERROR', 'DEBUG', 'FATAL']``.
        
        :param level: The log level to set.
        
        """
        level = level.upper()
        if level not in self.levels:
            # unknown names fall back to INFO rather than raising
            level = 'INFO'
        # NOTE: level is already upper-cased above; the second .upper() is
        # redundant but harmless.
        level = logbook.lookup_level(level.upper())
        self.level = level
        
    def get_level(self):
        """Returns a string representation of the current log level."""
        return logbook.get_level_name(self.level)

    def _setup_console_log(self):
        """Add a console log handler."""
        # at DEBUG level use the more verbose debug format
        if logbook.lookup_level(self.get_level()) == logbook.DEBUG:
            fmt_string = self._meta.debug_format
        else:
            fmt_string = self._meta.console_format
        console_handler = logbook.StderrHandler(
            format_string=fmt_string,
            level = logbook.lookup_level(self.get_level()),
            bubble = True)
        self._console_handler = console_handler
        self.backend.handlers.append(console_handler)

    def _setup_file_log(self):
        """Add a file log handler."""
        file_path = os.path.expandvars(fs.abspath(self.app.config.get('log', 'file')))
        log_dir = os.path.dirname(file_path)
        # create the log directory on demand
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        if logbook.lookup_level(self.get_level()) == logbook.DEBUG:
            fmt_string = self._meta.debug_format
        else:
            fmt_string = self._meta.file_format

        # choose between a size-rotating handler and a plain file handler
        if self.app.config.get('log', 'rotate'):
            from logbook import RotatingFileHandler
            file_handler = RotatingFileHandler(
                file_path, 
                max_size=int(self.app.config.get('log', 'max_bytes')), 
                backup_count=int(self.app.config.get('log', 'max_files')),
                format_string=fmt_string,
                level = logbook.lookup_level(self.get_level()),
                bubble = True,
                )
        else:
            from logbook import FileHandler
            file_handler = FileHandler(file_path,
                                       format_string=fmt_string,
                                       level = logbook.lookup_level(self.get_level()),
                                       bubble = True,
                                       )
        
        self._file_handler = file_handler
        self.backend.handlers.append(file_handler)

    def _get_logging_kwargs(self, namespace, **kw):
        """Ensure ``kw['extra']['namespace']`` is set, defaulting to the
        Meta namespace, without clobbering a caller-supplied value."""
        if namespace is None:
            namespace = self._meta.namespace
        if 'extra' in kw.keys() and 'namespace' in kw['extra'].keys():
            pass
        elif 'extra' in kw.keys() and 'namespace' not in kw['extra'].keys():
            kw['extra']['namespace'] = namespace
        else:
            kw['extra'] = dict(namespace=namespace)
        
        return kw

    def info(self, msg, namespace=None, **kw):
        """
        Log to the INFO facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
            
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.info(msg, **kwargs)

    def debug(self, msg, namespace=None, **kw):
        """
        Log to the DEBUG facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.debug(msg, **kwargs)

    def warn(self, msg, namespace=None, **kw):
        """
        Log to the WARN facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.warn(msg, **kwargs)

    def critical(self, msg, namespace=None, **kw):
        """
        Log to the CRITICAL facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.critical(msg, **kwargs)

    def fatal(self, msg, namespace=None, **kw):
        """
        Log to the FATAL facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.fatal(msg, **kwargs)

    def error(self, msg, namespace=None, **kw):
        """
        Log to the ERROR facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
            """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.error(msg, **kwargs)
    
    ## NOTE: do we even need this for logbook?
    def clear_loggers(self):
        """Clear any previously configured logging namespaces.
        """
        if not self._meta.namespace:
            # _setup() probably wasn't run
            return
        # NOTE(review): this resets ``self.handlers``, but records are emitted
        # through ``self.backend.handlers`` (populated in _setup) — confirm
        # which list was actually meant to be cleared.
        self.handlers = []
Example #14
0
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        RotatingFileHandler(logfile,
                            mode='a',
                            encoding='utf-8',
                            level=file_level,
                            format_string=format_string,
                            delay=False,
                            max_size=max_size,
                            backup_count=backup_count,
                            filter=None,
                            bubble=True).push_application()

    return None


if __name__ == '__main__':
    # smoke-test the logger setup by emitting one record per severity
    from ziyan.utils.util import get_conf
    from logbook import Logger

    conf = get_conf('../text_file/ziyan-main-conf.toml')['log_configuration']
    setup_logger(conf)
    log = Logger('test')
    log.debug(conf)
    for emit, text in [
        (log.debug, 'debug:test'),
        (log.info, 'info:test'),
        (log.notice, 'notice:test'),
        (log.warn, 'warning:test'),
        (log.error, 'error:test'),
        (log.critical, 'critical:test'),
    ]:
        emit(text)
Example #15
0
class PmLogHandler(log.CementLogHandler):
    """
    PmLogHandler - override CementLogHandler to use logbook.

    This class uses the same configuration options as 
    :ref:`LoggingLogHandler <cement.ext.ext_logging>`
    """
    class Meta:
        interface = log.ILog
        """The interface that this class implements."""

        label = 'pmlog'
        """The string identifier of this handler."""

        namespace = "pm"
        """
        The logging namespace.  
        
        Note: Although Meta.namespace defaults to None, Cement will set this 
        to the application label (CementApp.Meta.label) if not set during
        setup.
        """

        file_format = "{record.time} ({record.level_name}) {record.channel} : {record.message}"
        """The logging format for the file logger."""

        console_format = "{record.time:%Y-%m-%d %H:%M} ({record.level_name}): {record.message}"
        """The logging format for the consoler logger."""

        debug_format = "{record.time} ({record.level_name}) {record.channel} : {record.message}"
        """The logging format for both file and console if ``debug==True``."""

        log_setup = None
        """Nested log setup placeholder"""

        level = 0
        """Global level for handlers"""

        clear_loggers = True
        """Whether of not to clear previous loggers first."""
        # These are the default config values, overridden by any '[log]'
        # section in parsed config files.
        config_section = 'log'
        """
        The section of the application configuration that holds this handlers
        configuration.
        """

        config_defaults = dict(
            file=None,
            level='INFO',
            to_console=True,
            rotate=False,
            max_bytes=512000,
            max_files=4,
        )
        """
        The default configuration dictionary to populate the ``log`` section.
        """

    levels = ['INFO', 'WARN', 'ERROR', 'DEBUG', 'FATAL']

    def __init__(self, *args, **kw):
        super(PmLogHandler, self).__init__(*args, **kw)
        self.app = None

    def _setup(self, app_obj):
        super(PmLogHandler, self)._setup(app_obj)
        if self._meta.namespace is None:
            self._meta.namespace = self.app._meta.label

        self.backend = Logger(self._meta.namespace)

        # hack for application debugging
        if is_true(self.app._meta.debug):
            self.app.config.set('log', 'level', 'DEBUG')

        # Mainly for backwards compatibility since Logger level should
        # be NOTSET (level 0). Output level is controlled by handlers
        self.set_level(self.app.config.get('log', 'level'))

        # clear loggers?
        if is_true(self._meta.clear_loggers):
            self.clear_loggers()

        # console
        if is_true(self.app.config.get('log', 'to_console')):
            self._setup_console_log()

        # file
        if self.app.config.get('log', 'file'):
            self._setup_file_log()
        # nested setup
        self.backend.handlers.append(logbook.NullHandler(bubble=False))
        self.log_setup = logbook.NestedSetup(self.backend.handlers)
        with self._console_handler.applicationbound():
            self.debug("logging initialized for '%s' using PmLogHandler" % \
                           self._meta.namespace)

    def set_level(self, level):
        """
        Set the log level.  Must be one of the log levels configured in 
        self.levels which are ``['INFO', 'WARN', 'ERROR', 'DEBUG', 'FATAL']``.
        
        :param level: The log level to set.
        
        """
        level = level.upper()
        if level not in self.levels:
            level = 'INFO'
        level = logbook.lookup_level(level.upper())
        self.level = level

    def get_level(self):
        """Returns a string representation of the current log level."""
        return logbook.get_level_name(self.level)

    def _setup_console_log(self):
        """Add a console log handler."""
        if logbook.lookup_level(self.get_level()) == logbook.DEBUG:
            fmt_string = self._meta.debug_format
        else:
            fmt_string = self._meta.console_format
        console_handler = logbook.StderrHandler(format_string=fmt_string,
                                                level=logbook.lookup_level(
                                                    self.get_level()),
                                                bubble=True)
        self._console_handler = console_handler
        self.backend.handlers.append(console_handler)

    def _setup_file_log(self):
        """Add a file log handler."""
        file_path = os.path.expandvars(
            fs.abspath(self.app.config.get('log', 'file')))
        log_dir = os.path.dirname(file_path)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        if logbook.lookup_level(self.get_level()) == logbook.DEBUG:
            fmt_string = self._meta.debug_format
        else:
            fmt_string = self._meta.file_format

        if self.app.config.get('log', 'rotate'):
            from logbook import RotatingFileHandler
            file_handler = RotatingFileHandler(
                file_path,
                max_size=int(self.app.config.get('log', 'max_bytes')),
                backup_count=int(self.app.config.get('log', 'max_files')),
                format_string=fmt_string,
                level=logbook.lookup_level(self.get_level()),
                bubble=True,
            )
        else:
            from logbook import FileHandler
            file_handler = FileHandler(
                file_path,
                format_string=fmt_string,
                level=logbook.lookup_level(self.get_level()),
                bubble=True,
            )

        self._file_handler = file_handler
        self.backend.handlers.append(file_handler)

    def _get_logging_kwargs(self, namespace, **kw):
        if namespace is None:
            namespace = self._meta.namespace
        if 'extra' in kw.keys() and 'namespace' in kw['extra'].keys():
            pass
        elif 'extra' in kw.keys() and 'namespace' not in kw['extra'].keys():
            kw['extra']['namespace'] = namespace
        else:
            kw['extra'] = dict(namespace=namespace)

        return kw

    def info(self, msg, namespace=None, **kw):
        """
        Log to the INFO facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
            
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.info(msg, **kwargs)

    def debug(self, msg, namespace=None, **kw):
        """
        Log to the DEBUG facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.debug(msg, **kwargs)

    def warn(self, msg, namespace=None, **kw):
        """
        Log to the WARN facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.warn(msg, **kwargs)

    def critical(self, msg, namespace=None, **kw):
        """
        Log to the CRITICAL facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.critical(msg, **kwargs)

    def fatal(self, msg, namespace=None, **kw):
        """
        Log to the FATAL facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
        """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.fatal(msg, **kwargs)

    def error(self, msg, namespace=None, **kw):
        """
        Log to the ERROR facility.
        
        :param msg: The message the log.
        :param namespace: A log prefix, generally the module ``__name__`` that 
            the log is coming from.  Will default to self._meta.namespace if 
            None is passed.  For debugging, it can be useful to set this to 
            ``__file__``, though ``__name__`` is much less verbose.
        :keyword kw: Keyword arguments are passed on to the backend logging 
            system.
                        
            """
        kwargs = self._get_logging_kwargs(namespace, **kw)
        self.backend.error(msg, **kwargs)

    ## NOTE: do we even need this for logbook?
    def clear_loggers(self):
        """Clear any previously configured logging namespaces.
        """
        if not self._meta.namespace:
            # _setup() probably wasn't run
            return
        self.handlers = []
Example #16
0
File: cmd.py Project: mbr/scotch
def main_scotch_deploy():
    """Command-line entry point for scotch-deploy.

    Parses arguments, configures logbook handlers (quiet mode unless
    ``--debug``), loads the :class:`Site`, sets the site umask and then
    dispatches to one of the subcommands (``list``, ``deploy``, ``dump``)
    via a ``locals()`` lookup on the chosen action name.
    """
    log = Logger('main')

    parser = ArgumentParser()
    parser.add_argument('-c', '--configuration-file',
                        action='append', default=[],
                        help='Configuration files to search. Can be given '
                             'multiple times, default is {!r}'
                             .format(Site.DEFAULT_CONFIGURATION_PATHS))
    parser.add_argument('-d', '--debug', default=False, action='store_true')
    subparsers = parser.add_subparsers(dest='action',
                                       help='Action to perform')

    cmd_list = subparsers.add_parser('list', help='List available apps')

    cmd_deploy = subparsers.add_parser('deploy', help='Deploy app')
    cmd_deploy.add_argument('app_name', nargs='+')
    cmd_dump = subparsers.add_parser('dump', help='Dump app configuration')
    cmd_dump.add_argument('app_name', nargs='+')

    args = parser.parse_args()

    # set up logging handlers: in non-debug mode swallow DEBUG records and
    # print bare INFO messages to stderr.
    if not args.debug:
        NullHandler(level=logbook.DEBUG).push_application()
        handler = StderrHandler(level=logbook.INFO)
        handler.format_string = '{record.message}'
        handler.push_application()


    wd = Site(args)

    # set site-umask (config value is an octal string)
    umask = int(wd.config['site']['umask'], 8)
    log.debug('Setting umask to {:04o}'.format(umask))
    os.umask(umask)

    def _header(s):
        print(s)
        print('=' * len(s))

    # commands: each local function name below must match a subparser action
    # name, because dispatch happens via locals()[args.action]().  'list'
    # deliberately shadows the builtin for that reason.
    def list():
        wd.load_apps()

        for name, app in sorted(wd.apps.items()):
            print(name)
            for domain in sorted(app.domains):
                print('  {}{}'.format(domain, app.url_prefix))

    def deploy():
        for name in args.app_name:
            app = wd.load_app(name)
            app.deploy()

    def dump():
        for name in args.app_name:
            app = wd.load_app(name)
            app.config['app']['instance_id'] = '(INSTANCE_ID)'

            # dump config
            _header('App configuration for {}'.format(name))
            for section_name, section in sorted(app.config.items()):
                for key, value in sorted(section.items()):
                    print('{}:{} = {!r}'.format(section_name, key,  value))
                # FIX: was a bare `print` expression, which is a no-op in
                # Python 3 -- the intent was a blank line between sections.
                print()

    # call appropriate command
    try:
        locals()[args.action]()
    except subprocess.CalledProcessError as e:
        log.critical('Command failed: {}'.format(' '.join(e.cmd)))
Example #17
0
class Pixie(Bot):
    """Discord bot built on discord.py's ``Bot``.

    Configuration (command prefix, token, log level) comes from the
    module-level ``setup_file`` mapping; plugins listed in the module-level
    ``plugins`` iterable are loaded in :meth:`run`.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but not forwarded to
        # super().__init__(); prefix and description are fixed here.
        super().__init__(command_prefix=when_mentioned_or(setup_file["discord"]["command_prefix"]),
                         description="A bot for weebs programmed by Recchan")

        # Set a custom user agent for Pixie
        self.http.user_agent = user_agent

        # Logging setup: route stdlib logging through logbook and write to
        # stderr; level name comes from setup_file, defaulting to INFO.
        redirect_logging()
        StreamHandler(sys.stderr).push_application()
        self.logger = Logger("Pixie")
        self.logger.level = getattr(logbook, setup_file.get("log_level", "INFO"), logbook.INFO)
        logging.root.setLevel(self.logger.level)

    async def on_ready(self):
        # May fire more than once per process (reconnects), hence plugins
        # are loaded in run() instead of here.
        self.logger.info("Logged in as Bot Name: {0.user.name} Bot ID: {0.user.id}".format(self))

    async def on_command_error(self, exception, ctx):
        # Unknown commands are ignored; permission failures get a reply.
        print(exception)
        if isinstance(exception, commands.errors.CommandNotFound):
            return
        if isinstance(exception, commands.errors.CheckFailure):
            await self.send_message(ctx.message.channel, "You don't have the required permissions to run this command.")
            return
        # if is_owner(ctx):
        #     try:
        #         # Get a string of the traceback
        #         trace = "".join(traceback.format_tb(exception.__traceback__))
        #         # Send that string as the data to hastebin
        #         msg = await hastebin(trace)
        #         # Send the link of the hastebin to discord
        #         await self.send_message(ctx.message.channel, msg)
        #     # Error raised when the hastebin fails
        #     except FailedHaste:
        #         await self.send_message(ctx.message.channel, "Failed to make hastebin.")

    async def on_member_join(self, member):
        # Auto roles people in the Mahouka (Onii-sama) server with the role "Member"
        # NOTE(review): uses a module-level `bot`, not `self` -- confirm such
        # a global exists; otherwise these calls raise NameError at runtime.
        if member.server.id == '209121677148160000':
            await bot.say("Hey {0.name}, welcome to {0.server.name}".format(member))
            role = discord.utils.get(member.server.roles, name="Member")
            await bot.add_roles(member, role)

    async def on_voice_state_update(self, before, after):
        # If nothing changes just exit out of the function
        if before.voice.voice_channel == after.voice.voice_channel:
            return
        # Exit on channel being None as it errors if Pixie isn't in a voice channel
        if not after.server.me.voice_channel:
            return
        # Checks the length of the list of members in the voice channel;
        # if only the bot remains, leave the channel.
        if len(after.server.me.voice.voice_channel.voice_members) == 1:
            # Get the VoiceClient object
            voice = self.voice_client_in(after.server)
            # Disconnect the VoiceClient and close the stream
            await voice.disconnect()

    def run(self):
        # We load plugins in run rather than on_ready due to on_ready being able to be called multiple times
        for plugin in plugins:
            # We try to load the extension, and we account for if it fails
            try:
                self.load_extension(plugin)
                self.logger.info("{0} has been loaded".format(plugin))
            # Except discord.ClientException so it doesn't fail to load all cogs when a cog doesn't have a setup function
            except discord.ClientException:
                self.logger.critical("{0} does not have a setup function!".format(plugin))
            # Except import error (importlib raises this) so bot doesn't crash when it's raised
            except ImportError as IE:
                self.logger.critical(IE)
        # We check if discord.opus is loaded, despite it not having a reason to be
        if not discord.opus.is_loaded():
            # Load discord.opus so we can use voice
            discord.opus.load_opus()
            self.logger.info("Opus has been loaded")
        super().run(setup_file["discord"]["token"])
Example #18
0
    logger.info("channel-bot is up")

    while True:
        response = sc.rtm_read()
        for res in response:
            logger.info(res)
            if "type" in res:
                if is_channels_message(res, json.loads(sc.api_call("channels.list", exclude_archived="1"))["channels"]):
                    channels = json.loads(sc.api_call("channels.list", exclude_archived="1"))
                    text = convert_channels_to_text(channels)
                    sc.api_call("chat.postMessage", channel=room, text=text, link_names="1", as_user="******")
                    time.sleep(1)
                elif is_channel_created_event(res):
                    channel = res["channel"]
                    time.sleep(2)
                    channel_info = json.loads(sc.api_call("channels.info", channel=channel["id"]))
                    text = "New Channel!:sparkles:\n" + "#" + channel["name"] + " : " + channel["name"] + " : " + channel_info["channel"]["purpose"]["value"]
                    sc.api_call("chat.postMessage", channel=room, text=text.encode("utf-8"), link_names="1", as_user="******")
                    time.sleep(1)
                elif is_direct_message(res, json.loads(sc.api_call("im.list"))["ims"]):
                    channels = json.loads(sc.api_call("channels.list", exclude_archived="1"))
                    text = convert_channels_to_text(channels)
                    sc.api_call("chat.postMessage", channel=res["channel"], text=text, link_names="1", as_user="******")
                    time.sleep(1)
                elif is_emoji_changed_event(res):
                    text = "New Stamp!`:{}:`:sparkles:\nOriginal: {}".format(res["name"], res["value"])
                    sc.api_call("chat.postMessage", channel=room, text=text, as_user="******")

else:
    logger.critical("Connection Failed, invalid token?")
def solve(infile: str, outfile: str, level=ERROR):
    """Run the multi-robot motion planner on one scene file.

    Reads robots/obstacles from *infile*, simulates stepwise movement
    (with "explode" heuristics that disperse chains of robots blocking a
    stuck robot), writes the resulting step list to *outfile*, and returns
    a statistics dict.

    :param infile: path of the scene file to read.
    :param outfile: path the solution is written to.
    :param level: logbook level for the per-scene logger (default ERROR).
    :return: dict with keys 'succeed', 'total_time', 'number_of_steps',
        'number_of_moves', 'remained_distance', 'start_distance'.
    """
    global log
    global lock_explode_v2
    # Per-scene logger named after the input file.
    log = Logger(os.path.split(infile)[1], level=level)
    start_time = time.time()

    # Build the world model: occupied cells, grid, and distance graph.
    robots, obstacles, name = utils.read_scene(infile)
    invalid_positions = load_occupied_positions(robots, obstacles)
    grid = create_grid(robots, invalid_positions)
    graph = create_graph(grid, invalid_positions)
    remained_distance = update_robots_distances(robots, graph)
    start_distance = remained_distance
    log.info(f'Started! {remained_distance} distance')
    robots = sort_robots(robots)
    robots_dsts = list(map(lambda robot: robot.target_pos, robots))
    steps = []  # a data structure to hold all the moves for each robot
    step_number = 0
    total_moves = 0
    while is_not_finished(robots) and is_not_stuck(
            steps):  # while not all robots finished
        steps.append(
            dict())  # each step holds dictionary <robot_index,next_direction>
        stuck_robots = []

        for robot in robots:
            # 'condition' is true when the robot is 3 cells from its target
            # and, in all four directions, the two cells beyond the target
            # are occupied or obstacles -- i.e. the target looks sealed off.
            condition = (
                abs_distance(robot.target_pos, robot.current_pos) == 3 and
                (utils.calc_next_pos(robot.target_pos, RIGHT)
                 in invalid_positions and utils.calc_next_pos(
                     utils.calc_next_pos(robot.target_pos, RIGHT),
                     RIGHT) in invalid_positions
                 or utils.calc_next_pos(robot.target_pos, RIGHT) in obstacles
                 or utils.calc_next_pos(
                     utils.calc_next_pos(robot.target_pos, RIGHT),
                     RIGHT) in obstacles) and
                (utils.calc_next_pos(robot.target_pos, LEFT)
                 in invalid_positions and utils.calc_next_pos(
                     utils.calc_next_pos(robot.target_pos, LEFT),
                     LEFT) in invalid_positions
                 or utils.calc_next_pos(robot.target_pos, LEFT) in obstacles
                 or utils.calc_next_pos(
                     utils.calc_next_pos(robot.target_pos, LEFT),
                     LEFT) in obstacles) and
                (utils.calc_next_pos(robot.target_pos, DOWN)
                 in invalid_positions and utils.calc_next_pos(
                     utils.calc_next_pos(robot.target_pos, DOWN),
                     DOWN) in invalid_positions
                 or utils.calc_next_pos(robot.target_pos, DOWN) in obstacles
                 or utils.calc_next_pos(
                     utils.calc_next_pos(robot.target_pos, DOWN),
                     DOWN) in obstacles) and
                (utils.calc_next_pos(robot.target_pos, UP) in invalid_positions
                 and utils.calc_next_pos(
                     utils.calc_next_pos(robot.target_pos, UP),
                     UP) in invalid_positions
                 or utils.calc_next_pos(robot.target_pos, UP) in obstacles or
                 utils.calc_next_pos(utils.calc_next_pos(robot.target_pos, UP),
                                     UP) in obstacles))
            if condition:
                log.critical(f'CONDITION {robot.index}')

            # Count how many of the target's four neighbors are PERMANENT_OCCUPIED.
            blocked_count = sum([
                utils.calc_next_pos(robot.target_pos, RIGHT)
                in invalid_positions and invalid_positions[utils.calc_next_pos(
                    robot.target_pos,
                    RIGHT)].occupied_type == PERMANENT_OCCUPIED,
                utils.calc_next_pos(robot.target_pos, LEFT)
                in invalid_positions and invalid_positions[utils.calc_next_pos(
                    robot.target_pos,
                    LEFT)].occupied_type == PERMANENT_OCCUPIED,
                utils.calc_next_pos(robot.target_pos, UP) in invalid_positions
                and invalid_positions[utils.calc_next_pos(
                    robot.target_pos, UP)].occupied_type == PERMANENT_OCCUPIED,
                utils.calc_next_pos(robot.target_pos, DOWN)
                in invalid_positions and invalid_positions[utils.calc_next_pos(
                    robot.target_pos,
                    DOWN)].occupied_type == PERMANENT_OCCUPIED
            ])
            # stuck_count == 777 is a sentinel meaning "explode this robot":
            # it is handled below by dispersing the chains around its target.
            if blocked_count == 4 and (abs_distance(
                    robot.current_pos, robot.target_pos) == 2 or condition):
                log.critical(f'EXPLODE id={robot.index}')
                robot.stuck_count = 777
            elif (abs_distance(robot.current_pos, robot.target_pos) == 2 or condition) and \
                    blocked_count == 3 and lock_explode_v2 is None \
                    and not calc_sp(step_number, robot, invalid_positions)[1]:
                # V2 explode is only allowed for one robot at a time
                # (guarded by the lock_explode_v2 global).
                log.critical(f'EXPLODE V2 id={robot.index}')
                robot.stuck_count = 777
                lock_explode_v2 = robot.index

        # Handle at most one "hard stuck" robot per step (stuck > 10 turns
        # or flagged with the 777 explode sentinel).
        stuck_hard_robots = [
            robot for robot in robots if robot.stuck_count > 10
        ][:1]
        right_robots, up_robots, down_robots, left_robots = [], [], [], []
        for robot in stuck_hard_robots:
            log.critical(
                f'STUCK HARD ROBOT {robot.index}, count={robot.stuck_count}!!!'
            )
            robot_pos = robot.current_pos if robot.stuck_count != 777 else robot.target_pos

            # Collect the contiguous chain of robots to the RIGHT of robot_pos.
            right_robots = []
            start_pos = robot_pos
            next_right = [
                robot for robot in robots
                if robot.current_pos == utils.calc_next_pos(start_pos, RIGHT)
            ]
            while len(next_right) > 0:
                right_robots.extend(next_right)
                start_pos = next_right[0].current_pos
                next_right = [
                    robot for robot in robots
                    if robot.current_pos == utils.calc_next_pos(
                        start_pos, RIGHT)
                ]
            # Same chain collection to the LEFT.
            left_robots = []
            start_pos = robot_pos
            next_left = [
                robot for robot in robots
                if robot.current_pos == utils.calc_next_pos(start_pos, LEFT)
            ]
            while len(next_left) > 0:
                left_robots.extend(next_left)
                start_pos = next_left[0].current_pos
                next_left = [
                    robot for robot in robots
                    if robot.current_pos == utils.calc_next_pos(
                        start_pos, LEFT)
                ]
            # Same chain collection UPWARD.
            up_robots = []
            start_pos = robot_pos
            next_up = [
                robot for robot in robots
                if robot.current_pos == utils.calc_next_pos(start_pos, UP)
            ]
            while len(next_up) > 0:
                up_robots.extend(next_up)
                start_pos = next_up[0].current_pos
                next_up = [
                    robot for robot in robots
                    if robot.current_pos == utils.calc_next_pos(start_pos, UP)
                ]
            # Same chain collection DOWNWARD.
            down_robots = []
            start_pos = robot_pos
            next_down = [
                robot for robot in robots
                if robot.current_pos == utils.calc_next_pos(start_pos, DOWN)
            ]
            while len(next_down) > 0:
                down_robots.extend(next_down)
                start_pos = next_down[0].current_pos
                next_down = [
                    robot for robot in robots
                    if robot.current_pos == utils.calc_next_pos(
                        start_pos, DOWN)
                ]
            # The stuck robot itself must not be part of any chain.
            right_robots = [r for r in right_robots if r != robot]
            up_robots = [r for r in up_robots if r != robot]
            down_robots = [r for r in down_robots if r != robot]
            left_robots = [r for r in left_robots if r != robot]
            # Disperse the chains outward (farthest robot first) so the
            # stuck robot gains room to move.
            total_moves = explode_position(robot_pos, right_robots[::-1],
                                           left_robots[::-1], up_robots[::-1],
                                           down_robots[::-1],
                                           invalid_positions, steps,
                                           step_number, total_moves,
                                           stuck_robots, robots_dsts, robots)
            # Reset the robot's path-memory so it replans from scratch.
            robot.prev_pos = None
            robot.way_blocked = []
            robot.self_block = []
            if robot.current_pos != robot.target_pos:
                total_moves, _ = turn(robot, invalid_positions, steps,
                                      step_number, total_moves, stuck_robots,
                                      True, robots_dsts)
        # Everyone not involved in the explosion takes a normal turn.
        turn_robots = [
            robot for robot in robots
            if robot not in (stuck_hard_robots + right_robots + up_robots +
                             down_robots + left_robots)
        ]
        for robot in turn_robots:  # move each robot accordingly to its priority
            if robot.current_pos != robot.target_pos:
                total_moves, _ = turn(robot, invalid_positions, steps,
                                      step_number, total_moves, stuck_robots,
                                      False, robots_dsts)
        for robot in stuck_robots:  # move each robot accordingly to its priority
            if robot.current_pos != robot.target_pos:
                total_moves, _ = turn(robot, invalid_positions, steps,
                                      step_number, total_moves, stuck_robots,
                                      True, robots_dsts)
        # Re-prioritize: chain robots first, then the hard-stuck one, then
        # the rest, with this step's stuck robots last.
        sides_robots = [
            r for r in right_robots + up_robots + down_robots + left_robots
        ]
        robots = sides_robots + [r for r in stuck_hard_robots] + \
                 [r for r in robots if r not in stuck_robots and r not in stuck_hard_robots and r not in sides_robots] + \
                 [r for r in stuck_robots if r not in stuck_hard_robots and r not in sides_robots]
        clean_invalid_position(invalid_positions)
        step_number += 1

    # after the algorithm finished, we should write the moves data structure to json file.
    utils.write_solution(steps, name, outfile)

    remained_distance = update_robots_distances(robots, graph)
    total_time = time.time() - start_time
    if not is_not_finished(robots):
        log.info(
            f'Finished! {total_time}s, {step_number} steps, {total_moves} moves, {remained_distance} distance'
        )
        root_log.warn('Success!')
        return {
            'succeed': True,
            'total_time': total_time,
            'number_of_steps': step_number,
            'number_of_moves': total_moves,
            'remained_distance': remained_distance,
            'start_distance': start_distance
        }
    else:
        log.info(
            f'Stuck! {total_time}s, {step_number} steps, {total_moves} moves, {remained_distance} distance'
        )
        return {
            'succeed': False,
            'total_time': total_time,
            'number_of_steps': step_number,
            'number_of_moves': total_moves,
            'remained_distance': remained_distance,
            'start_distance': start_distance
        }
Example #20
0
        tb = traceback.format_exc()

        pyfa = wx.App(False)
        ErrorFrame(e, tb)
        pyfa.MainLoop()
        sys.exit()

    with logging_setup.threadbound():
        # Don't redirect if frozen
        if not hasattr(sys, 'frozen'):
            # Output all stdout (print) messages as warnings
            try:
                sys.stdout = LoggerWriter(pyfalog.warning)
            except ValueError, Exception:
                pyfalog.critical("Cannot access log file.  Continuing without writing stdout to log.")

            if not options.debug:
                # Output all stderr (stacktrace) messages as critical
                try:
                    sys.stderr = LoggerWriter(pyfalog.critical)
                except ValueError, Exception:
                    pyfalog.critical("Cannot access log file.  Continuing without writing stderr to log.")

        pyfalog.info("Starting Pyfa")
        pyfalog.info("Running in logging mode: {0}", logging_mode)

        if hasattr(sys, 'frozen') and options.debug:
            pyfalog.critical("Running in frozen mode with debug turned on. Forcing all output to be written to log.")

        from gui.mainFrame import MainFrame
Example #21
0
class Pixie(Bot):
    """Discord bot built on discord.py's ``Bot``.

    Reads its configuration from the module-level ``setup_file`` mapping and
    loads the module-level ``plugins`` list as extensions in :meth:`run`.
    """

    def __init__(self, *args, **kwargs):
        # Respond to mentions or the configured command prefix.
        # NOTE(review): *args/**kwargs are accepted but never forwarded to
        # super().__init__ — confirm this is intentional.
        super().__init__(command_prefix=when_mentioned_or(
            setup_file["discord"]["command_prefix"]),
                         description="A bot for weebs programmed by Recchan")

        # Set a custom user agent for Pixie
        self.http.user_agent = user_agent

        # Logging setup: route stdlib logging through logbook, log to stderr,
        # and resolve the configured level name (default INFO) via getattr.
        redirect_logging()
        StreamHandler(sys.stderr).push_application()
        self.logger = Logger("Pixie")
        self.logger.level = getattr(logbook,
                                    setup_file.get("log_level",
                                                   "INFO"), logbook.INFO)
        logging.root.setLevel(self.logger.level)

    async def on_ready(self):
        # Announce the logged-in identity once the gateway session is ready.
        self.logger.info(
            "Logged in as Bot Name: {0.user.name} Bot ID: {0.user.id}".format(
                self))

    async def on_command_error(self, exception, ctx):
        # Echo every command error to stdout, then silently ignore the two
        # expected categories (unknown command, failed permission check).
        print(exception)
        if isinstance(exception, commands.errors.CommandNotFound):
            return
        if isinstance(exception, commands.errors.CheckFailure):
            await self.send_message(
                ctx.message.channel,
                "You don't have the required permissions to run this command.")
            return
        # if is_owner(ctx):
        #     try:
        #         # Get a string of the traceback
        #         trace = "".join(traceback.format_tb(exception.__traceback__))
        #         # Send that string as the data to hastebin
        #         msg = await hastebin(trace)
        #         # Send the link of the hastebin to discord
        #         await self.send_message(ctx.message.channel, msg)
        #     # Error raised when the hastebin fails
        #     except FailedHaste:
        #         await self.send_message(ctx.message.channel, "Failed to make hastebin.")

    async def on_member_join(self, member):
        # Auto roles people in the Mahouka (Onii-sama) server with the role "Member"
        # NOTE(review): this uses the global `bot`, not `self`, and `bot.say`
        # normally requires a command context — confirm this path actually works.
        if member.server.id == '209121677148160000':
            await bot.say(
                "Hey {0.name}, welcome to {0.server.name}".format(member))
            role = discord.utils.get(member.server.roles, name="Member")
            await bot.add_roles(member, role)

    async def on_voice_state_update(self, before, after):
        # Auto-disconnect from voice when Pixie is the only member left.
        # If nothing changes just exit out of the function
        if before.voice.voice_channel == after.voice.voice_channel:
            return
        # Exit on channel being None as it errors if Pixie isn't in a voice channel
        if not after.server.me.voice_channel:
            return
        # Checks the length of the list of members in the voice channel
        if len(after.server.me.voice.voice_channel.voice_members) == 1:
            # Get the VoiceClient object
            voice = self.voice_client_in(after.server)
            # Disconnect the VoiceClient and close the stream
            await voice.disconnect()

    def run(self):
        """Load all plugins, ensure opus is loaded, then start the bot."""
        # We load plugins in run rather than on_ready due to on_ready being able to be called multiple times
        for plugin in plugins:
            # We try to load the extension, and we account for if it fails
            try:
                self.load_extension(plugin)
                self.logger.info("{0} has been loaded".format(plugin))
            # Except discord.ClientException so it doesn't fail to load all cogs when a cog doesn't have a setup function
            except discord.ClientException:
                self.logger.critical(
                    "{0} does not have a setup function!".format(plugin))
            # Except import error (importlib raises this) so bot doesn't crash when it's raised
            except ImportError as IE:
                self.logger.critical(IE)
        # We check if discord.opus is loaded, despite it not having a reason to be
        if not discord.opus.is_loaded():
            # Load discord.opus so we can use voice
            discord.opus.load_opus()
            self.logger.info("Opus has been loaded")
        super().run(setup_file["discord"]["token"])
Example #22
0
    from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
    from matplotlib.figure import Figure

    graphFrame_enabled = True
    mplImported = True
except ImportError as e:
    pyfalog.warning("Matplotlib failed to import.  Likely missing or incompatible version.")
    mpl_version = -1
    Patch = mpl = Canvas = Figure = None
    graphFrame_enabled = False
    mplImported = False
except Exception:
    # We can get exceptions deep within matplotlib. Catch those.  See GH #1046
    tb = traceback.format_exc()
    pyfalog.critical("Exception when importing Matplotlib. Continuing without importing.")
    pyfalog.critical(tb)
    mpl_version = -1
    Patch = mpl = Canvas = Figure = None
    graphFrame_enabled = False
    mplImported = False


class GraphFrame(wx.Frame):
    def __init__(self, parent, style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE | wx.FRAME_FLOAT_ON_PARENT):
        global graphFrame_enabled
        global mplImported
        global mpl_version

        self.legendFix = False
Example #23
0
                logger.debug('unlocked %r' % (key,))
                return
            except urllib2.HTTPError, e:
                logger.error('unlock HTTPError: %s, key %r, retries left %s' % (e, key, num_retries))
                if e.code == 417: # paxos failed, retry
                    num_retries -= 1
                    time.sleep(random.random())
                else:
                    logger.critical('unlock failed because exception')
                    raise
            except urllib2.URLError, e:
                logger.error('lock URLError: %s, key %r, retries left %s' % (e, key, num_retries))
                num_retries -= 1
                time.sleep(random.random())
        logger.critical('unlock failed after retries')
        raise


def test(servers):
    d = defaultdict(list)
    seed = int(time.time())
    print 'RANDOM SEED: %s' % seed
    random.seed(seed)

    def watch_on_progress(exit_event):
        done = len(d[0])
        total = NUM_ITERATIONS * NUM_WORKERS
        bar_length = 40
        previous_progress = 0
        time_since_progress = time.time()
Example #24
0
            )
        ])

    with logging_setup.threadbound():
        pyfalog.info("Starting Pyfa")

        pyfalog.info("Logbook version: {0}", logbook_version)

        pyfalog.info("Running in logging mode: {0}", logging_mode)
        pyfalog.info("Writing log file to: {0}", config.logPath)

        # Output all stdout (print) messages as warnings.
        # Narrowed from a bare `except:` — bare except also swallows
        # SystemExit/KeyboardInterrupt, which must propagate.
        try:
            sys.stdout = LoggerWriter(pyfalog.warning)
        except Exception:
            pyfalog.critical("Cannot redirect.  Continuing without writing stdout to log.")

        # Output all stderr (stacktrace) messages as critical.
        try:
            sys.stderr = LoggerWriter(pyfalog.critical)
        except Exception:
            pyfalog.critical("Cannot redirect.  Continuing without writing stderr to log.")

        pyfalog.info("OS version: {0}", platform.platform())

        pyfalog.info("Python version: {0}", sys.version)
        if sys.version_info < (2, 7) or sys.version_info > (3, 0):
            exit_message = "Pyfa requires python 2.x branch ( >= 2.7 )."
            raise PreCheckException(exit_message)

        if hasattr(sys, 'frozen'):
Example #25
0
from logbook import Logger, StreamHandler, set_datetime_format
from sys import path, stdout
import numpy as np
import pickle, json, yaml, os.path
import multiprocessing as mp
import redis

# Log to stdout via logbook, timestamped in local time.
StreamHandler(stdout).push_application()
logger = Logger(__name__)
set_datetime_format("local")

# Load the YAML configuration; abort on parse errors or a missing file.
try:
  with open('config.yaml', 'r') as stream:
    # safe_load instead of load: yaml.load without an explicit Loader can
    # construct arbitrary Python objects and is deprecated since PyYAML 5.1.
    config = yaml.safe_load(stream)
except yaml.YAMLError as e:
  logger.critical(e)
  exit()
except IOError as e:
  logger.critical(e)
  exit()

# NOTE(review): rebinding `redis` shadows the imported module; kept as-is
# because later code may rely on this module-level name.
redis = redis.StrictRedis(config['sys_settings']['redis_ip'])

# Make the vendor library importable from the configured location.
path.append(config['sys_settings']['albula_path'])
import dectris.albula
series = dectris.albula.DImageSeries()

def sanity_check():
  """Assert that the values loaded from config.yaml are internally consistent."""
  start_frame = config['in']['start']
  assert start_frame >= 0
  # Disabled check kept from the original source:
  # assert config['in']['start'] <= config['in']['end']
  filter_cfg = config['filter']
  assert filter_cfg['low'] <= filter_cfg['high']
Example #26
0
# Defaults, overwritten from config.ini below when the file exists.
commandPrefix = ""
isBot = True
token = ""
path = ""
minLength = 0

#loading config
if os.path.exists("config.ini"):
    config.read("config.ini")

    #commandPrefix
    # A missing key means the config file is malformed: log, pause so the
    # operator can read the message, then exit with a failure status.
    try:
        commandPrefix = config['Config']['commandPrefix']
    except KeyError:
        logger.critical(
            "No commandPrefix found in config, please ensure that the config formatting is correct"
        )
        time.sleep(5)
        exit(1)

    # Abort when the config supplied an empty commandPrefix.
    if commandPrefix == "":
        logger.critical("No commandPrefix set! Exiting")
        # Pause so the message stays visible in a console that closes on exit.
        time.sleep(5)
        exit(1)  # removed a duplicated, unreachable second exit(1)

    #isBot
    try:
        isBot = config.getboolean('Config', 'bot')
    except KeyError:
        logger.critical(
Example #27
0
File: pyfa.py Project: w9jds/Pyfa
            StreamHandler(sys.stdout, bubble=False)
        ])

    with logging_setup.threadbound():
        pyfalog.info("Starting Pyfa")

        pyfalog.info("Logbook version: {0}", logbook_version)

        pyfalog.info("Running in logging mode: {0}", logging_mode)
        pyfalog.info("Writing log file to: {0}", config.logPath)

        # Output all stdout (print) messages as warnings.
        # Narrowed from a bare `except:` — bare except also swallows
        # SystemExit/KeyboardInterrupt, which must propagate.
        try:
            sys.stdout = LoggerWriter(pyfalog.warning)
        except Exception:
            pyfalog.critical(
                "Cannot redirect.  Continuing without writing stdout to log.")

        # Output all stderr (stacktrace) messages as critical.
        try:
            sys.stderr = LoggerWriter(pyfalog.critical)
        except Exception:
            pyfalog.critical(
                "Cannot redirect.  Continuing without writing stderr to log.")

        pyfalog.info("OS version: {0}", platform.platform())

        pyfalog.info("Python version: {0}", sys.version)
        if sys.version_info < (2, 7) or sys.version_info > (3, 0):
            exit_message = "Pyfa requires python 2.x branch ( >= 2.7 )."
            raise PreCheckException(exit_message)
Example #28
0
# Default contents presumably written when no config.ini exists — TODO confirm
# against the code that consumes default_config.
default_config = "[Config]\ntoken = \nsnip = "

config = configparser.ConfigParser()

# Values populated from config.ini below.
token = ""
snip = ""

if os.path.exists("config.ini"):
    config.read("config.ini")

    # A missing key means the config file is malformed: log, pause so the
    # operator can read the message, then exit with a failure status.
    try:
        token = config['Config']['token']
    except KeyError:
        logger.critical(
            "No token found in config, please ensure that the config formatting is correct"
        )
        time.sleep(5)
        exit(1)

    # The key may exist but be empty; that is also fatal.
    if token == "":
        logger.critical("No token set! Exiting")
        time.sleep(5)
        exit(1)

    try:
        snip = config['Config']['snip']
    except KeyError:
        logger.critical(
            "No path to snip found in config, please ensure that the config formatting is correct"
        )
Example #29
0
    from matplotlib.figure import Figure

    graphFrame_enabled = True
    mplImported = True
except ImportError as e:
    pyfalog.warning(
        "Matplotlib failed to import.  Likely missing or incompatible version."
    )
    mpl_version = -1
    Patch = mpl = Canvas = Figure = None
    graphFrame_enabled = False
    mplImported = False
except Exception:
    # We can get exceptions deep within matplotlib. Catch those.  See GH #1046
    tb = traceback.format_exc()
    pyfalog.critical(
        "Exception when importing Matplotlib. Continuing without importing.")
    pyfalog.critical(tb)
    mpl_version = -1
    Patch = mpl = Canvas = Figure = None
    graphFrame_enabled = False
    mplImported = False


class GraphFrame(wx.Frame):
    def __init__(self,
                 parent,
                 style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE
                 | wx.FRAME_FLOAT_ON_PARENT):
        global graphFrame_enabled
        global mplImported
        global mpl_version
Example #30
0
class Casca(Bot):
    """Discord bot built on discord.py's ``Bot``.

    Configuration is read from ``config.yaml`` in the current working
    directory; extensions come from the module-level ``extensions`` list and
    messages are filtered against the module-level whitelists.
    """

    def __init__(self, *args, **kwargs):
        config_file = os.path.join(os.getcwd(), "config.yaml")

        with open(config_file) as f:
            # safe_load instead of load: yaml.load without an explicit Loader
            # can construct arbitrary Python objects and is deprecated since
            # PyYAML 5.1; the config only needs plain mappings/scalars.
            self.config = yaml.safe_load(f)

        super().__init__(*args, **kwargs)

        # Define the logging set up: route stdlib logging through logbook,
        # log to stderr, and resolve the configured level name (default INFO).
        redirect_logging()
        StreamHandler(sys.stderr).push_application()

        self.logger = Logger("Casca_Best_Bot")
        self.logger.level = getattr(logbook,
                                    self.config.get("log_level", "INFO"),
                                    logbook.INFO)

        # Set the root logger level, too.
        logging.root.setLevel(self.logger.level)

        # Guards against on_ready running its one-time setup twice.
        self._loaded = False

    async def on_ready(self):
        """Load all extensions once, the first time the gateway is ready."""
        if self._loaded:
            return

        self.logger.info(
            "LOADED Casca | LOGGED IN AS: {0.user.name}#{0.user.discriminator}.\n----------------------------------------------------------------------------------------------------"
            .format(self))

        # Load every cog; a single broken cog must not stop the others.
        for cog in extensions:
            try:
                self.load_extension(cog)
            except Exception as e:
                self.logger.critical(
                    "Could not load extension `{}` -> `{}`".format(cog, e))
                self.logger.exception()
            else:
                self.logger.info("Loaded extension {}.".format(cog))

        self._loaded = True

    async def on_message(self, message):
        """Log and dispatch messages from whitelisted servers/channels only."""
        if not message.server:
            return

        if message.server.id not in Whitelisted_Servers:
            return

        if message.channel.id not in Whitelisted_Channels:
            return

        self.logger.info("MESSAGE: {message.content}".format(
            message=message, bot=" [BOT]" if message.author.bot else ""))
        self.logger.info("FROM: {message.author.name}".format(message=message))

        if message.server is not None:
            self.logger.info(
                "CHANNEL: {message.channel.name}".format(message=message))
            self.logger.info(
                "SERVER: {0.server.name}\n----------------------------------------------------------------------------------------------------"
                .format(message))

        # Hand off to discord.py's command processing.
        await super().on_message(message)

    async def on_command_error(self, e, ctx):
        """Report argument errors back to the channel; ignore everything else."""
        if isinstance(e, (commands.errors.BadArgument,
                          commands.errors.MissingRequiredArgument)):
            await self.send_message(ctx.message.channel,
                                    "```ERROR: {}```".format(' '.join(e.args)))
            return

    async def on_command(self, command, ctx):
        # Delete the invoking message to keep channels tidy.
        await self.delete_message(ctx.message)

    def run(self):
        """Start the bot; exit with status 2 on a bad token."""
        try:
            super().run(self.config["bot"]["token"], bot=True)
        except discord.errors.LoginFailure as e:
            self.logger.error("LOGIN FAILURE: {}".format(e.args[0]))
            sys.exit(2)
Example #31
0
      #blah = pool.map_async(evaluator, [(word, letters, results) for word in words])
      #log.info('Done: {}'.format([word for word in blah.get() if word is not None]))
      pool.close()

      done = False
      try:
        while not done:
          if len(best_words) > 1 and results.empty() and complete.empty():
            done = True

          time.sleep(0.01)
      except KeyboardInterrupt:
        log.warn('Exiting application')

      pool.terminate()

      best_words = reversed(best_words)

      if preferred is not None:
        preferred = list(preferred)
        log.info('Sorting for words containing {}'.format(', '.join(preferred)))
        best_words = sorted(best_words, key=lambda word: 1 in [l in preferred for l in word], reverse=True)

      log.info('Best words: {}'.format(', '.join(best_words)))

      end = datetime.now()

      log.info('Took {}s to locate {} words from {} possible'.format(end-start, len(best_words), len(words)))
  else:
    log.critical('No letters supplied')
Example #32
0
else:
    gamedata_engine = create_engine(gamedata_connectionstring, echo=config.debug)

gamedata_meta = MetaData()
gamedata_meta.bind = gamedata_engine
gamedata_session = sessionmaker(bind=gamedata_engine, autoflush=False, expire_on_commit=False)()

# This should be moved elsewhere, maybe as an actual query. Current, without try-except, it breaks when making a new
# game db because we haven't reached gamedata_meta.create_all()
try:
    config.gamedata_version = gamedata_session.execute(
            "SELECT `field_value` FROM `metadata` WHERE `field_name` LIKE 'client_build'"
    ).fetchone()[0]
except Exception as e:
    pyfalog.warning("Missing gamedata version.")
    pyfalog.critical(e)
    config.gamedata_version = None

saveddata_connectionstring = config.saveddata_connectionstring
if saveddata_connectionstring is not None:
    if callable(saveddata_connectionstring):
        saveddata_engine = create_engine(creator=saveddata_connectionstring, echo=config.debug)
    else:
        saveddata_engine = create_engine(saveddata_connectionstring, echo=config.debug)

    saveddata_meta = MetaData()
    saveddata_meta.bind = saveddata_engine
    saveddata_session = sessionmaker(bind=saveddata_engine, autoflush=False, expire_on_commit=False)()
else:
    saveddata_meta = None