def run_enum(filename, maxlength=3, **other):
    from .reader import load_data
    from .claudette import EmptyClause
    language, instances, engine = load_data(filename)

    rule = EmptyClause(language)
    rules = list(rule.refine())
    count = 0
    get_logger('claudette').log(9, '%% ---- Length 1 ----')
    for rule in rules:
        print(rule)
        count += 1
    level = 1
    while (maxlength is None or level < maxlength) and rules:
        new_rules = []
        print('%% ---- Length %s ----' % (level + 1))
        for rule in rules:
            for rule1 in rule.refine():
                new_rules.append(rule1)
                print(str(rule1))
                count += 1
        rules = new_rules
        level += 1
    print('Number of rules:', count)
Example No. 2
def getLogger(name=None, level=DEBUG, add_monitor_pass=None):
    if name is None:
        logger = get_logger()
    else:
        logger = get_logger(name)
    logger.setLevel(level)
    toggle = {"stdout": True, "monitor": True}

    formatter = StdoutFormatter()

    for each in logger.root.handlers:
        if hasattr(each, "name"):
            toggle[each.name] = False

    for (key, value) in toggle.items():
        if value:
            if key == "monitor" and add_monitor_pass is not None:
                handler = SocketHandler(name="monitor",
                                        password=add_monitor_pass)
            elif key == "stdout":
                handler = StreamHandler(name="stdout", stream=stdout)
            else:
                continue

            handler.setLevel(DEBUG)
            handler.setFormatter(formatter)
            logger.root.addHandler(handler)

    return logger
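A minimal stdlib-only sketch of the same idempotent-handler idea (the StdoutFormatter and SocketHandler classes above are project-specific, so standard-library equivalents stand in; all names here are illustrative):

import logging
import sys

def ensure_stdout_handler(name="app", level=logging.DEBUG):
    logger = logging.getLogger(name)
    # Skip handlers that a previous call already installed, mirroring
    # the toggle dict above.
    installed = {h.get_name() for h in logger.root.handlers}
    if "stdout" not in installed:
        handler = logging.StreamHandler(stream=sys.stdout)
        handler.set_name("stdout")
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
        logger.root.addHandler(handler)
    logger.setLevel(level)
    return logger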
Example No. 3
    def next2(self, iteration=None):
        # Refine each current candidate; accepted refinements become new
        # clauses, the rest are kept as candidates for the next iteration.
        new_clauses = []
        new_candidates = []
        num_candidates = 0
        self.candidates.sort()
        for i, candidate in enumerate(self.candidates):
            get_logger('claudien').log(9, 'REFINING CANDIDATE %s', candidate)
            refs = self.refine(candidate)
            get_logger('claudien').log(9, 'REFINEMENTS %s', refs)
            cands = filter(self.prune_refine_verbose, refs)
            for cand in cands:
                get_logger('claudien').log(9, '\tNEW CANDIDATE %s', cand)
                if self.is_valid(cand) and not self._prune_contrib(cand):
                    if self.prune_clause(cand):
                        new_clauses.append(cand)
                        self.clauses_store.append(cand)
                        self.clauses.append(cand)
                        get_logger('claudien').info('NEW CLAUSE %s', cand)
                else:
                    new_candidates.append(cand)
                    num_candidates += 1

            msg = '--- ITERATION: %d --- PROCESSED: %d/%d --- NUM CANDIDATES: %d --- NUM CLAUSES: %d ---'
            get_logger('claudien').info(msg, iteration, i + 1,
                                        len(self.candidates), num_candidates,
                                        len(self.clauses))
        get_logger('claudien').info('====== COMPLETED ITERATION ======')
        self.candidates = new_candidates
        return new_clauses
Example No. 4
 def get_instance(cls, ref: int):
     try:
         return cls._instances_[ref]
     except (KeyError, IndexError):
         logging.get_logger("pyocf").error(
             "OcfSharedObject corruption. wanted: {} instances: {}".format(
                 ref, cls._instances_))
         return None
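For context, the registry this lookup assumes could be as small as a class-level dict keyed by an integer handle; this layout is an assumption for illustration, not pyocf's actual structure:

class OcfSharedObject(object):
    _instances_ = {}  # ref (int) -> live object (assumed layout)

    @classmethod
    def register(cls, ref, obj):
        cls._instances_[ref] = obj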
Example No. 5
    def prune_clause(self, clause):
        if not clause.verify(self.data, self.engine):
            return False
        subsuming = self.clauses_store.find_subclause(clause)
        if subsuming:
            get_logger('claudien').debug('\tPRUNING CLAUSE %s', clause)
            get_logger('claudien').debug('\t\tSUBSUMED BY LEARNED %s', subsuming)
            return False

        # if self._prune_implied_by_background(clause) : return False
        return True
Example No. 6
def isroman(literal):
    literal = literal.strip()
    roman_pattern = re.compile(""" # matches numbers up to 154 (largest sonnet)
        ^                   # beginning of string
        (C{0,1})            # hundreds - 100 (C), optional
        (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
                            #        or 50-80 (L, followed by 0 to 3 X's)
        (IX|IV|V?I{0,3})    # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
                            #        or 5-8 (V, followed by 0 to 3 I's)
        $                   # end of string
    """, re.VERBOSE)        # re.VERBOSE lets the pattern carry these comments
    match = roman_pattern.search(literal)
    if match:
        logging.get_logger(__name__).warning('{} is a roman literal.'.format(literal))
    return match
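A quick usage sketch of the function above (assuming the re.VERBOSE flag added there, plus import re and a logging setup in the enclosing module):

for literal in ('IX', 'XLII', 'CLIV', 'hello'):
    print(literal, bool(isroman(literal)))  # True, True, True, False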
Example No. 7
 def init(cls, default, levels):
     """
     Set the default and levels and initialize the log manager.
     :param cls: Log class.
     :param default: default log level
     :param levels: log levels
     """
     verboselogs.install()
     coloredlogs.install()
     cls.default = default
     cls.levels = {module: level for module, level in levels}
     logging.get_logger = logging.getLogger
     for module, level in levels:
         logging.get_logger(module).setLevel(level)
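A hypothetical call to the init above (the Log class name comes from its docstring; the module names and levels are made up for illustration):

import logging

Log.init(default=logging.INFO,
         levels=[('myapp.db', logging.DEBUG), ('myapp.http', logging.WARNING)])
logging.get_logger('myapp.db').debug('now visible')  # alias installed by init()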
Example No. 8
def get_server():
    global _server
    with _lock:
        options = dict(port=random.randrange(2**15, 2**16))
        if _server is None:
            for i in range(3):
                try:
                    _server = Server(path=resources.BROWSERMOB_EXE,
                                     options=options)
                    break
                except Exception as e:
                    logging.get_logger(__name__).error(repr(e))
            if _server is None:
                raise RuntimeError('could not create server after 3 attempts')
        _server.start(options=options)
    return _server
Example No. 9
 def __init__(self):
     """Constructor."""
     super(Syncer, self).__init__(target=self._run)
     self._log = logging.get_logger(self)
     self.queue = gruvi.Queue()
     self.neighbors = {}
     self.last_sync = {}
Example No. 10
class LocalPath(object):
    _logger = get_logger('tasker.utils.LocalPath')

    class CATEGORY(Enum):
        CACHE = 'cache'
        LOG = 'log'
        OUTPUT = 'output'
        STORAGE = 'storage'
        TEMP = 'temp'
        CONFIG = 'config'

    @classmethod
    def create(cls, category, *name: Text):
        assert isinstance(category, cls.CATEGORY)
        if not name:
            cls._logger.warning(
                'You are operating in a root category folder! Please re-check it.'
            )

        absolute_root = path.abspath(path.curdir)
        absolute_path = path.join(absolute_root, category.value, *name)

        if not path.exists(absolute_path):
            makedirs(absolute_path)

        return absolute_path
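Hypothetical usage of the class above (the category and folder name are illustrative): this would create ./cache/models under the current working directory and return its absolute path.

model_dir = LocalPath.create(LocalPath.CATEGORY.CACHE, 'models')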
Example No. 11
def factory(prefix, controller):
	"""This is the function that FSO's frameworkd will call to start this subsystem"""
	from logging import getLogger as get_logger
	log = get_logger('opimd')
	
	# Claim the bus name
	# TODO Check for exceptions
	SystemBus().request_name(DBUS_BUS_NAME_FSO)
	
	import os

	from backend_manager import BackendManager
	from domain_manager import DomainManager
	
	# Load plugins
	DomainManager.init(os.getcwdu())
	backend_manager = BackendManager(os.getcwdu())
	
	dbus_objects = []
	
	# Create a list of all d-bus objects to make frameworkd happy
	for dbus_obj in DomainManager.enumerate_dbus_objects():
		dbus_objects.append(dbus_obj)

	dbus_objects.append(backend_manager)
	
	log.info('opimd subsystem loaded')
	
	return dbus_objects
Example No. 12
def run_job(job_id):
    setup_logger(job_id)
    logger = logging.get_logger(__name__)
    logger.info('Starting crawl for job: ' + str(job_id))
    job = database.get_job(job_id)
    store = database.get_store(job)
    for base_url in job.queries:
        crawler.crawl(base_url, job.hoods, job.to_email, store)
Example No. 13
 def __init__(self, pwd_path: str):
     """Create a new :class:`.DAL` object."""
     super(DAL, self).__init__()
     self.logger = get_logger("nlaunch.dal")
     with open(pwd_path, "r") as f:
         self.passwords = json.loads(f.read())
         self.logger.info("Loaded passwords from '{path}'".format(
             path=pwd_path))
Example No. 14
 def _setup_logger(self):
     self._logger = get_logger(self.name)
     # rate limit our own messages to not spam around in case of temporary network errors, etc
     rate_limit_setting = constants.ERROR_LOG_RATE_LIMIT
     if rate_limit_setting:
         self._rate_limit_storage = MemoryStorage()
         self._rate_limit_strategy = FixedWindowRateLimiter(self._rate_limit_storage)
         self._rate_limit_item = parse_rate_limit(rate_limit_setting)
Example No. 15
 def __init__(self, name, aggregate_class=None,
              join_transaction=True, autocommit=False):
     if aggregate_class is None:
         aggregate_class = NoSqlAggregate
     Repository.__init__(self, name, aggregate_class,
                         join_transaction=join_transaction,
                         autocommit=autocommit)
     self.__db = None
     self.__logger = get_logger('everest.repositories')
Example No. 16
    def _init_logger(self, level="INFO", file_logging=True, **kwargs):
        """Initialize logger."""

        self.logger = get_logger("CactusBot")
        self.logger.propagate = False

        self.logger.setLevel("DEBUG")

        if isinstance(level, bool):
            # bools must be handled before calling string methods on level
            level = "DEBUG" if level else "WARNING"
        elif hasattr(level, "lower") and level.lower() == "true":
            level = "DEBUG"
        elif hasattr(level, "lower") and level.lower() == "false":
            level = "WARNING"
        elif hasattr(level, "upper"):
            level = level.upper()

        format = kwargs.get(
            "format",
            "%(asctime)s %(name)s %(levelname)-8s %(message)s"
        )

        formatter = Formatter(format, datefmt='%Y-%m-%d %H:%M:%S')

        try:
            from coloredlogs import ColoredFormatter
            colored_formatter = ColoredFormatter(format)
        except ImportError:
            colored_formatter = formatter
            self.logger.warning(
                "Module 'coloredlogs' unavailable; using ugly logging.")

        stream_handler = StreamHandler()
        stream_handler.setLevel(level)
        stream_handler.setFormatter(colored_formatter)
        self.logger.addHandler(stream_handler)

        if file_logging:
            file_handler = FileHandler("latest.log")
            file_handler.setLevel("DEBUG")
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)

        get_logger("requests").setLevel(get_level_name("WARNING"))

        self.logger.info("Logger initialized with level '{}'.".format(level))
Example No. 17
def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--archive', help="The archive to crack; currently only zip is supported.")
    parser.add_argument('-w', '--wordlist', help="The wordlist to use for cracking")
    parser.add_argument('-v', '--verbose', help='Increase verbosity of output', action='store_true')
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    return (args.archive, args.wordlist)
Example No. 18
def send_email_task(message, **kwargs):
    conn = get_connection(backend=BACKEND)

    logger = logging.get_logger(**kwargs)

    try:
        conn.send_messages([message])
        if settings.DEBUG:
            logger.debug("Successfully sent email message: %s" % (message.message().as_string(),))
    except Exception:
        logger.error("Failed to send email")
Example No. 19
def log_with_stacktrace(message, level=logging.INFO, logger='hiicart.audit'):
    client = get_client()
    if client is None:
        logger = logging.get_logger()
        logger.warning("Could not save stack trace for message: %s" % message)
        return
    kwargs = dict(level=level, logger=logger)
    stack = inspect.stack()[1:]
    tb = FakeTraceback(stack)
    exc_info = (AuditingStacktrace, AuditingStacktrace(message), tb)
    client.create_from_exception(exc_info, **kwargs)
Example No. 20
    def __init__(self, bucket=None, options=None):
        self.log = logging.get_logger("log")

        self.options = options if options is not None else {}  #: holds pycurl options
        self.bucket = bucket

        self.cj = None  #: needs to be set later
        self._size = 0

        self.renew_HTTP_request()
        self.dl = None
Example No. 22
def get_config(path):

    default_config = dict(
        socket='/var/run/mrbeam_ledstrips.sock',
        led_count=46,  # Number of LED pixels.
        gpio_pin=18,  # SPI:10, PWM: 18
        led_freq_hz=800000,  # LED signal frequency in Hz (usually 800kHz)
        # led_freq_hz = 1200000, # for spreading on SPI pin....
        led_dma=10,  # DMA channel to use for generating signal. This produced a problem after changing
        # to a newer kernel version (https://github.com/jgarff/rpi_ws281x/issues/208). Changing it
        # from the previous 5 to channel 10 solved it.
        led_brigthness=255,  # 0..255 / Dim if too much power is used.
        led_invert=False,  # True to invert the signal (when using NPN transistor level shift)

        # spread spectrum settings (only effective if gpio_pin is set to 10 (SPI))
        spread_spectrum_enabled=True,
        spread_spectrum_random=True,
        spread_spectrum_bandwidth=200000,
        spread_spectrum_channel_width=9000,
        spread_spectrum_hopping_delay_ms=50,

        # default frames per second
        frames_per_second=28,

        # max png file size 30 kB
        max_png_size=30 * 1024)

    import os
    if os.path.exists(path):
        try:
            with open(path, "r") as f:
                file_config = yaml.safe_load(f)
        except Exception:
            logging.get_logger(__name__).warning("error loading config file")
            return default_config
        else:
            return merge_config(default_config, file_config)
    else:
        return default_config
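merge_config is referenced above but not shown; a plausible sketch, assuming a shallow merge in which file values override the defaults (not necessarily the project's actual implementation):

def merge_config(defaults, overrides):
    merged = dict(defaults)
    merged.update(overrides or {})
    return merged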
Example No. 23
def main():
    global serializer, name

    name = 'fetcher' + str(randint(100, 9999))

    with open('/etc/fetch/fetch.cfg') as cfg_file:
        cfg = load(cfg_file)

    server_addr = cfg['server_addr']

    serializer = url_serializer(cfg['key'])

    get_logger().name = name
    get_logger().setLevel(cfg['log_level'])

    info('starting up')

    fg = feed_getter(server_addr, name, serializer)
    fg.start()
    fg.join()

    warn('not sure why we\'re here')
Example No. 25
    def __init__(self, dal, com):
        """Initialize a new :class:`.BaseHandler`.

        :param dal: The data-access-layer used by the handler.
        :type  dal: misc.DAL
        :param com: The object used to communicate with the client.
        :type  com: communication.NLaunchCommFacade
        """
        super(BaseHandler, self).__init__()
        self.logger = get_logger("nlaunch.handler")
        self.dal = dal
        self.com = com
Example No. 26
 def execute(idx: Text):
     task = task_cls()
     self._register_task(task)
     task_storage = self.STORAGE_VIEW_CLASS(storage=shared.storage(),
                                            task=task,
                                            mirror=idx)
     task_logger_name = f'{profile.reference}[{hex(hash(task))}]@{hex(hash(self))}'
     task_logger = get_logger(task_logger_name)
     return self._invoke_check(task, task_profile, task_storage,
                               task_logger), task, (task_profile,
                                                    task_storage,
                                                    task_logger)
Example No. 27
 def __init__(self, source_proxy, target_proxy):
     self._src_prx = source_proxy
     self._tgt_prx = target_proxy
     self.__trv_path = TraversalPath()
     self.__root_is_sequence = (
         (source_proxy is not None and
          source_proxy.proxy_for == RESOURCE_KINDS.COLLECTION)
         or (target_proxy is not None and
             target_proxy.proxy_for == RESOURCE_KINDS.COLLECTION))
     if __debug__:
         self.__logger = get_logger('everest.traversal')
     else:
         self.__logger = None
Example No. 29
  def __init__(self, fd, ssh_passphrase_file=None, daemon=False, logger=None):
    """Constructor

    Args:
      fd: The file descriptor to read from (and write to).
      ssh_passphrase_file: Contains passphrase to inject.
          If no passphrases are expected (e.g. ssh-agent is running)
          then this can be None.
      daemon: If true then the injector should never terminate.
          Otherwise, it will terminate once there is no more input.
      logger: The logger to use if other than the default.
    """
    self.__fd = fd
    self.__ssh_passphrase_file = ssh_passphrase_file
    self.__daemon = daemon
    self.__logger = logger or logging.get_logger(__name__)
Example No. 30
 def execute(meta):
     task_cls = import_reference(profile.reference)
     task = task_cls()
     self._register_task(task)
     task_logger_name = f'{profile.reference}[{hex(hash(task))}]@{hex(hash(self))}'
     task_logger = get_logger(task_logger_name)
     task_shared = ForkStorageView(storage=shared.storage(), task=task)
     if meta.include:
         task_profile = Profile.from_toml(filename=meta.path)
     else:
         task_profile = profile[meta.profile]
     state = self._invoke_check(task, task_profile, task_shared,
                                task_logger)
      return (state,
              task,
              (task_profile, task_shared, task_logger))
Example No. 31
    def get(cls, name):
        """
        Return the initialized logger with the module name.

        :param cls: Log class.
        :param name: module name
        :returns: logger instance
        """
        level = cls.levels.get(name, cls.default)
        logger = logging.get_logger(name)

        def __exception(exception):
            logger.error(to_str(exception))

        logger.exception = __exception

        logger.setLevel(level)
        return logger
Example No. 32
    def __init__(self):
        configParser = ConfigParser()
        self.log = logging.get_logger(__name__, config=configParser)
        self.rabbitConfig = configParser.app_cfg["rabbitmq"]

        self.credentials = pika.PlainCredentials(
            self.rabbitConfig["user"], self.rabbitConfig["passwd"]
        )

        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=self.rabbitConfig["host"],
                port=self.rabbitConfig["port"],
                credentials=self.credentials,
            )
        )

        self.channel = self.connection.channel()
Example No. 33
    def __init__(self, token=None, log_level=logging.INFO, exec_policy=ExecPolicy.ADAPTIVE,
                 confirm_charges=False, all_local_cpus=False):
        self.token = token
        self.exec_policy = exec_policy
        self.all_local_cpus = all_local_cpus
        self.kb_avg = self.ping()
        self.log = logging.get_logger('fastmap')
        self.confirm_charges = confirm_charges

        try:
            self.num_threads = len(psutil.Process().cpu_affinity())
        except AttributeError:
            self.num_threads = psutil.cpu_count()
        if not self.all_local_cpus:
            self.num_threads -= 1

        if confirm_charges:
            logging.warning("")
Example No. 34
def save_transcription(item,
                       clips,
                       speakers=None,
                       engine=None,
                       raw_files=None,
                       logger=None):
    """
    Save an automatically-generated transcript for `item`.

    `clips`: Array of Clip objects

    `speakers`: (optional): Array of Speaker objects

    `engine`: FIXME

    `raw_files`: FIXME Any raw files resulting from the transcription.
    Each file is represented by a dict with keys `content_type`,
    `file_name` and `body`.
    """
    if not speakers:
        speakers = []
    if not logger:
        logger = logging.get_logger(__name__)

    logger.info(u"Saving transcript with %d speakers, %d clips for item %s",
                len(speakers), len(clips), item)

    with transaction.commit_on_success():
        track = Track(item=item,
                      kind='captions',
                      name='Automatic transcription')
        track.save()

        for speaker in speakers:
            speaker.track = track
            speaker.save()

        for clip in clips:
            # Despite appearances, the following line is actually
            # necessary to make the speaker_id foreign key update
            # correctly. Yuck.
            clip.speaker = clip.speaker  # Necessary!
            clip.track = track

        Clip.objects.bulk_create(clips)
Example No. 35
                def __getattribute__(self, name):
                    try:
                        method = super().__getattribute__(name)
                    except AttributeError:
                        raise  # propagate missing attributes instead of returning None
                    else:

                        print('this should run whenever a method is called')
                        logger = logging.get_logger(name)

                        # this function gets wrapped around decorators
                        def method_with_logger(*args, **kwargs):

                            print('getattr')
                            print('Logger', logging.loggers)
                            result = method(logger, *args, **kwargs)
                            print('after')
                            return result

                        return method_with_logger
Example No. 36
def clean_exit(status, should_rem_pid=True):
    """

    A function that - when called - will cleanly exit the AdaHub application. On a clean exit the application will
    remove the saved PID file, and report its status.

    Args:
        status (int): Either 1 or 0. 0 means everything went as expected. 1 means an error or exception occurred.

        should_rem_pid (bool): Should the program attempt to remove the file PID located in the program's 'run' directory? (Defaults to True)

    Returns:
        None

    """
    from .lib.helpers.pid import remove_pid

    log_name = f'{m_name}.clean_exit'
    log = get_logger(log_name)
    log.debug(f'Started logger for {log_name}')

    log.info("User's desire to exit has been noted.")

    log.debug(f'Exit status: {status}')
    log.debug(f'Intent to delete PID file? {should_rem_pid}')

    if status == 1:
        log.setLevel(logging.DEBUG)
        log.debug('Received clean-exit call after a failure. Please check the logs above.')
    elif status == 0:
        log.debug('Exit is expected. No errors.')

    if should_rem_pid:
        log.debug('Removing PID file!')
        remove_pid()
        log.debug('PID file removed, exiting...')
    else:
        log.debug('Was instructed not to remove PID file.')
        log.debug('Exiting...')

    exit(status)
Example No. 37
def get_logger(mode):
    ## Get_Logger Function
    ## Description:     Determines which log configuration to use based on optional arguments
    ## Arguments:       mode - string - any profile names passed to script as arguments
    ## Return:          loghandler object
    # create logger
    log_file_path = path.join(path.dirname(path.abspath(__file__)),
                              'logging.config')
    logging.config.fileConfig(log_file_path)
    # Fetch the right handler for our purposes
    if mode == "debug":
        logger = logging.getLogger('debug')
        logger_test(logger)
    if mode == "test":
        logger = logging.getLogger('test')
        logger_test(logger)
    else:
        logger = logging.get_logger('default')
        logger_test(logger)
    return logger
Example No. 39
    def __init__(self, cookies=None, options=None):
        self.c = pycurl.Curl()
        self.rep = cStringIO.StringIO()

        self.cj = cookies  #: cookiejar

        self.lastURL = None
        self.lastEffectiveURL = None
        self.abort = False
        self.code = 0  #: last http code

        self.header = ""

        self.headers = []  #: temporary request header

        self.init_handle()
        self.set_interface(options)

        self.c.setopt(pycurl.WRITEFUNCTION, self.write)
        self.c.setopt(pycurl.HEADERFUNCTION, self.writeHeader)

        self.log = logging.get_logger("log")
Example No. 40
    def __init__(self,
                 reader1,
                 writer1,
                 reader2,
                 writer2,
                 name='[AioTCPProxy]',
                 logger=None,
                 timeout=60):
        self.reader1 = reader1
        self.writer1 = writer1
        self.addrs1 = '%s:%s' % self.writer1.get_extra_info('peername')
        self.reader2 = reader2
        self.writer2 = writer2
        self.addrs2 = '%s:%s' % self.writer2.get_extra_info('peername')
        self.proxy_closed = asyncio.Event()
        self.timeout = timeout
        self.name = name
        self.logger = logger
        self.log_data = True

        if not self.logger:
            self.logger = logging.get_logger()
Example No. 41
    def __init__(self, title, template='default'):
        '''Initializes Html output manager and loads the template.

        Parameters
        ----------
        title : str
            HTML Title
        template : str
            Template to be loaded, it can either be the ``name`` or
            ``absolute_path/template``
        '''
        self.logger = get_logger('dotapatch.model')
        self._title = title

        if template != 'default':
            self.logger.info("{} using '{}' template.".format(title, template))

        self._template_dictionary = self._read_template(template)

        self._bg_style = False
        self._content = self._template_dictionary['OPEN_HTML'] \
            .format(title=self._title)
Example No. 42
def crawl(base_url, hoods, to_email, history):
    logger = logging.get_logger(__name__)
    logger.info('Crawling ' + base_url)
    scraper = _infer_scraper(base_url)
    ad_urls = collect_ad_urls(base_url, scraper)
    unvisited_urls = filter_new_urls(ad_urls, history)
    logger.info('Found %i new ads' % len(unvisited_urls))
    ads = collect_ads(unvisited_urls, scraper)
    for ad in ads:
        if hoods:
            geo = ad['geo']
            lat, lng = geo['lat'], geo['lng']
            hood = match_hood(lat, lng, hoods)
            if not hood:
                mark_visited(ad, history)
                continue

            ad['hood'] = hood

        send_ad(ad)
        mark_visited(ad, history)
Example No. 43
    def __init__(self, url, filename, get=None, post=None, referer=None, cj=None, bucket=None,
                 options=None, progress=None, disposition=False):
        self.url = urllib.unquote(encode(url).strip())
        self.filename = filename.strip()  #: complete file destination, not only name
        self.get = get or {}
        self.post = post or {}
        self.referer = referer
        self.cj = cj  #: cookiejar if cookies are needed
        self.bucket = bucket
        self.options = options or {}
        self.disposition = disposition
        # all arguments

        self.abort = False
        self.size = 0
        self.nameDisposition = None  #: will be parsed from content disposition

        self.chunks = []

        self.log = logging.get_logger("log")

        try:
            self.info = ChunkInfo.load(filename)
            self.info.resume = True  #: resume is only possible with valid info file
            self.size = self.info.size
            self.infoSaved = True
        except IOError:
            self.info = ChunkInfo(filename)
            self.infoSaved = False

        self.chunkSupport = True
        self.manager = pycurl.CurlMulti()

        # needed for speed calculation
        self.lastArrived = []
        self.speeds = []
        self.lastSpeeds = [0, 0]

        self.progress = progress
Example No. 45
# -*- coding: utf-8 -*-
"""
Created on Sat Mar  5 10:03:43 2016

@author: ddboline
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
from traceback import format_exc
from flask import Flask, jsonify
from socket import gethostbyname
from logging import getLogger as get_logger
from subprocess import check_output, call
from six.moves.urllib.parse import unquote

app = Flask(__name__)
log = get_logger()


# straight from flask documentation
class Error(Exception):

    def __init__(self, message, status_code, payload=None):
        Exception.__init__(self)
        self.message = message
        self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        rv = dict(self.payload or ())
        rv['msg'] = self.message
        return rv
Example No. 46
def _create_child(tag, parent, attributes):
    """
    Create child element.
    """
    get_logger("lxmlbind.base").debug("Creating element '{}' for '{}'".format(tag, parent.tag))
    return etree.SubElement(parent, tag, attrib=attributes)
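An illustrative call (assumes lxml is installed and get_logger is bound in the module; the tag and attribute values are made up):

from lxml import etree

parent = etree.Element('parent')
child = _create_child('child', parent, {'id': '42'})
print(etree.tostring(parent))  # b'<parent><child id="42"/></parent>'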
Example No. 47
 def __init__(self, receiver):
     u"""Create a new :class:`.NLaunchCommFacade`."""
     super(NLaunchCommFacade, self).__init__()
     self.logger = get_logger("nlaunch.manager")
     self.receiver = receiver
Example No. 48
# -*- coding: utf-8 -*-

from django.contrib.sites.models import Site
from django.core.mail import mail_admins
from django.template import loader, Context
from django.utils.timezone import now
from logging import getLogger as get_logger
from socket import gethostbyaddr, error as SocketError

logger = get_logger(__name__)


#----------------------------------------------------------------------
def notify(access_attempt, **kwargs):
    site = Site.objects.get_current()
    ip_address = access_attempt.ip_address
    fqdn = _resolve_ip_address_to_fqdn(ip_address)

    if access_attempt.failures == 0:
        login_success_msg = 'successful'
        login_success = True
    else:
        login_success_msg = 'failed'
        login_success = False

    context = dict(
        current_time=now(),
        attempt_time=access_attempt.attempt_time,
        failures=access_attempt.failures,
        fqdn=fqdn,
        site_domain=site.domain,
Example No. 49
from splparser import parse as splparse
from .query import *
from logging import getLogger as get_logger
import re

ESCAPED_SLASH_STANDIN = "~#$slash$#~"
ESCAPED_SQUOTE_STANDIN = "~#$squote$#~"
ESCAPED_DQUOTE_STANDIN = "~#$dquote$#~"
SQID_MARKER = "~#$sqid$#~"
DQID_MARKER = "~#$dqid$#~"

logger = get_logger("queryutils")

def extract_schema(query):
    parsetree = parse_query(query)
    if parsetree is not None:
        return parsetree.schema()

def extract_template(query):
    parsetree = parse_query(query)
    if parsetree is not None:
        return parsetree.template()

def tag_parseable(query):
    query.parseable = False
    query.parsetree = parse_query(query)
    if query.parsetree is not None:
        query.parseable = True

def parse_query(query):
Example No. 50
#coding=utf-8

import logging

class Logging(object):
    def __init__(self):
        # Create a logger
        self.logger = logging.getLogger('mylogger')
        self.logger.setLevel(logging.DEBUG)

        # Create a handler that writes to a log file
        self.fh = logging.FileHandler('error.log')
        self.fh.setLevel(logging.DEBUG)

        # Define the handler's output format
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.fh.setFormatter(formatter)

        # Attach the handler to the logger
        self.logger.addHandler(self.fh)

    def get_logger(self):
        return self.logger


if __name__ == "__main__":
    log_factory = Logging()  # renamed so the instance does not shadow the logging module
    logger = log_factory.get_logger()
    logger.info("for test")
Example No. 51
import zmq
import msgpack

from logging import getLogger as get_logger


_LOG = get_logger(__name__)
_HALT_MSG = {'operation': 'halt'}
_DEFAULT_TTL = 120


class CacheClient(object):

    def __init__(self, cfg):
        self._cfg = cfg
        self._context = zmq.Context()
        self._socket = self._context.socket(zmq.REQ)
        self._socket.connect(self._cfg.connection.cache_uri)

    def _send(self, msg):
        self._socket.send(msgpack.packb(msg))

    def _request(self, msg):
        try:
            self._send(msg)

            breply = self._socket.recv()
            return msgpack.unpackb(breply)
        except Exception as ex:
            _LOG.exception(ex)
        return None
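A toy REP-side counterpart for exercising CacheClient._request locally (the URI and reply shape are assumptions; the real cache server is not part of this snippet):

import msgpack
import zmq

def serve_once(uri='tcp://*:5555'):
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind(uri)
    request = msgpack.unpackb(socket.recv())  # one request ...
    socket.send(msgpack.packb({'ok': True, 'echo': request}))  # ... one reply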
Example No. 52
def error(*args, **kwargs):
    global logger  # assumes a module-level "logger = None" sentinel
    if logger is None:
        import logging
        logger = logging.getLogger(__package__)
    logger.error(*args, **kwargs)
Example No. 53
import pickle
import socket
import struct

from logging import getLogger as get_logger  # assumed stdlib alias; get_logger is used below

from .errnos import EBADF, EPIPE, ECONNRESET
from ..profiler import Profiler


__all__ = ['LOGGER', 'LOG', 'INTERVAL', 'PICKLE_PROTOCOL',
           'SIZE_STRUCT_FORMAT', 'pack_stats', 'recv_stats', 'fmt_connected',
           'fmt_disconnected', 'fmt_profiler_started', 'fmt_profiler_stopped',
           'BaseProfilingServer']


#: The standard logger.
LOGGER = get_logger('Profiling')

#: The standard log function.
LOG = LOGGER.debug

#: The default profiling interval.
INTERVAL = 5

#: The default Pickle protocol.
PICKLE_PROTOCOL = getattr(pickle, 'DEFAULT_PROTOCOL', pickle.HIGHEST_PROTOCOL)

#: The struct format to pack packet size. (uint32)
SIZE_STRUCT_FORMAT = '!I'


def pack_stats(profiler, pickle_protocol=PICKLE_PROTOCOL):
Example No. 54
	def handle_sim_error(self, error):
		log = get_logger('opimd')
		log.error("%s hit an error, scheduling retry. Reason: %s" % (self.name, error))
		timeout_add(_OGSMD_POLL_INTERVAL, self.load_entries)
Example No. 55
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""

import sys
import os
import boto
import logging
import errno

from boto.s3.key import Key

log = logging.getLogger()
formatter = logging.Formatter("[%(levelname)s] [%(module)s:%(lineno)d] %(message)s")
handler_stream = logging.StreamHandler()
handler_stream.setFormatter(formatter)
log.addHandler(handler_stream)

# -------------------------
def get_bucket(bucket_name, secrets):

    aws_id = secrets.get("AWS_ACCESS_KEY_ID", None)
    aws_key = secrets.get("AWS_SECRET_ACCESS_KEY", None)

    assert aws_id is not None, "No AWS ID given"
    assert aws_key is not None, "No AWS key given"

    try:
Example No. 56
 def __init__(self, pwd_path):
     super(NLaunchReceiver, self).__init__()
     self.logger = get_logger("nlaunch.receiver")
     self.dal = DAL(pwd_path)
     self.facade = NLaunchCommFacade(self)
     self.handler = InitialHandler(self.dal, self.facade)
Example No. 57
from emitters import Emitter
from handler import typemapper
from doc import HandlerMethod
from authentication import NoAuthentication
from utils import coerce_put_post, FormValidationError, HttpStatusCode
from utils import rc, format_error, translate_mime, MimerDataException

CHALLENGE = object()

try:
    import structlog
    logger = structlog.get_logger()
except ImportError:
    import logging
    logger = logging.getLogger(__name__)


class Resource(object):
    """
    Resource. Create one for your URL mappings, just
    like you would with Django. Takes one argument,
    the handler. The second argument is optional, and
    is an authentication handler. If not specified,
    `NoAuthentication` will be used by default.
    """
    callmap = {'GET': 'read', 'POST': 'create',
                'PUT': 'update', 'DELETE': 'delete'}

    def __init__(self, handler, authentication=None):
        if not callable(handler):
Example No. 58
import os.path
from smb.SMBConnection import SMBConnection
import logging

logger = logging.getLogger(__name__)


class Handler:
    def __init__(self,
                 system_name,
                 user,
                 domain,
                 password,
                 service_name,
                 path=None):
        self.conn = SMBConnection(user, password, 'raspberry', system_name,
                                  domain)
        assert self.conn.connect(system_name)
        self.service_name = service_name
        self.path = path if path else '/'

    def upload_file(self, local_path, samba_path=''):
        file_name = os.path.basename(samba_path)
        if not file_name:  # if no filename it copies the local filename
            file_name = os.path.basename(local_path)
            samba_path = os.path.join(samba_path, file_name)

        samba_path = os.path.join(self.path, samba_path)
        with open(local_path, 'rb') as f:
            logger.debug(samba_path)
            self.conn.storeFile(self.service_name, samba_path, f)
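Hypothetical usage of the handler above (host, credentials, and share name are placeholders):

handler = Handler('NAS', 'user', 'WORKGROUP', 'secret', 'share')
handler.upload_file('/tmp/report.pdf', 'reports/report.pdf')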
Example No. 59
def main(start=True, argv=None):
    try:
        import twisted
    except ImportError:
        print "Orbited requires Twisted, which is not installed. See http://twistedmatrix.com/trac/ for installation instructions."
        sys.exit(1)

    #################
    # This corrects a bug in Twisted 8.2.0 for certain Python 2.6 builds on Windows
    #   Twisted ticket: http://twistedmatrix.com/trac/ticket/3868
    #     -mario
    try:
        from twisted.python import lockfile
    except ImportError:
        from orbited import __path__ as orbited_path
        sys.path.append(os.path.join(orbited_path[0],"hotfixes","win32api"))
        from twisted.python import lockfile
        lockfile.kill = None
    #################
      
  
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option(
        "-c",
        "--config",
        dest="config",
        default=None,
        help="path to configuration file"
    )
    parser.add_option(
        "-v",
        "--version",
        dest="version",
        action="store_true",
        default=False,
        help="print Orbited version"
    )
    parser.add_option(
        "-p",
        "--profile",
        dest="profile",
        action="store_true",
        default=False,
        help="run Orbited with a profiler"
    )
    parser.add_option(
        "-q",
        "--quickstart",
        dest="quickstart",
        action="store_true",
        default=False,
        help="run Orbited on port 8000 and MorbidQ on port 61613"
    )
    if argv is None:
        argv = sys.argv[1:]
    (options, args) = parser.parse_args(argv)
    if args:
        print 'the "orbited" command does not accept positional arguments. type "orbited -h" for options.'
        sys.exit(1)

    if options.version:
        print "Orbited version: %s" % (version,)
        sys.exit(0)

    if options.quickstart:
        config.map['[listen]'].append('http://:8000')
        config.map['[listen]'].append('stomp://:61613')
        config.map['[access]'][('localhost',61613)] = ['*']
        print "Quickstarting Orbited"
    else:
        # load configuration from configuration
        # file and from command line arguments.
        config.setup(options=options)

    logging.setup(config.map)

    # we can now safely get loggers.
    global logger; logger = logging.get_logger('orbited.start')
  
    ############
    # This crude garbage corrects a bug in twisted
    #   Orbited ticket: http://orbited.org/ticket/111
    #   Twisted ticket: http://twistedmatrix.com/trac/ticket/2447
    # XXX : do we still need this?
    #       -mcarter 9/24/09
#    import twisted.web.http
#    twisted.web.http.HTTPChannel.setTimeout = lambda self, arg: None
#    twisted.web.http.HTTPChannel.resetTimeout = lambda self: None
    ############

        
    # NB: we need to install the reactor before using twisted.
    reactor_name = config.map['[global]'].get('reactor')
    if reactor_name:
        install = _import('twisted.internet.%sreactor.install' % reactor_name)
        install()
        logger.info('using %s reactor' % reactor_name)
        
        
    from twisted.internet import reactor
    from twisted.web import resource
    from twisted.web import server
    from twisted.web import static
#    import orbited.system
        
        
    if 'INDEX' in config.map['[static]']:
        root = static.File(config.map['[static]']['INDEX'])
    else:
        root = resource.Resource()
    static_files = static.File(os.path.join(os.path.dirname(__file__), 'static'))
    root.putChild('static', static_files)
    # Note: hard coding timeout to 120. 
    site = server.Site(root, timeout=120)
    from proxy import ProxyFactory
    from csp_twisted import CometPort

    reactor.listenWith(CometPort, factory=ProxyFactory(), resource=root, childName='csp')
    _setup_static(root, config.map)
    start_listening(site, config.map, logger)
    
    
    # switch uid and gid to configured user and group.
    if os.name == 'posix' and os.getuid() == 0:
        user = config.map['[global]'].get('user')
        group = config.map['[global]'].get('group')
        if user:
            import pwd
            import grp
            try:
                pw = pwd.getpwnam(user)
                uid = pw.pw_uid
                if group:
                    gr = grp.getgrnam(group)
                    gid = gr.gr_gid
                else:
                    gid = pw.pw_gid
                    gr = grp.getgrgid(gid)
                    group = gr.gr_name
            except Exception, e:
                logger.error('Aborting; Unknown user or group: %s' % e)
                sys.exit(1)
            logger.info('switching to user %s (uid=%d) and group %s (gid=%d)' % (user, uid, group, gid))
            os.setgid(gid)
            os.setuid(uid)
        else:
            logger.error('Aborting; You must define a user (and optionally a group) in the configuration file.')
            sys.exit(1)