Example #1
def main():
    warnings.simplefilter('error', UserWarning)
    warnings.simplefilter('error', Warning)

    level='WARNING'
    from logbook import FileHandler, StreamHandler

    log_handler1 = FileHandler('application.log')
    log_handler1.push_application()

    test4()
    pylab.show()
Example #2
def setup_logging(config):
    log_file = os.path.join(config['daemon']['app_path'],
                            config['daemon']['log']['file'])
    # if running in debug mode, disable log rotation because it makes
    # things confusing
    if config['daemon']['debug']:
        log_handler = FileHandler(log_file)
    else:
        max_size = config['daemon']['log']['rotate_size']
        backup_count = config['daemon']['log']['rotate_count']
        log_handler = RotatingFileHandler(log_file, max_size=max_size,
                                          backup_count=backup_count)
    log_handler.push_application()
    log = Logger('edgy_crits')
    return log
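
The comment above explains the switch: rotation is skipped in debug mode so all output stays in a single file. A minimal sketch of the same choice, using a plain dict in place of the example's config structure (the dict and its values are illustrative assumptions):

from logbook import FileHandler, RotatingFileHandler, Logger

cfg = {'debug': False, 'file': 'application.log',
       'rotate_size': 1024 * 1024, 'rotate_count': 5}   # illustrative values

if cfg['debug']:
    handler = FileHandler(cfg['file'])                   # single file, no rotation
else:
    handler = RotatingFileHandler(cfg['file'],
                                  max_size=cfg['rotate_size'],
                                  backup_count=cfg['rotate_count'])
handler.push_application()                               # subsequent records land here
log = Logger('edgy_crits')
log.info('logging configured')
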
Example #3
File: app.py Project: mrtopf/We-Rate-It
 def __init__(self, settings={}, prefix=""):
     """initialize the Application with a settings dictionary and an optional
     ``prefix`` if this is a sub application"""
     self.settings = settings
     self.mapper = routes.Mapper()
     self.setup_handlers(self.mapper)
     self.loghandler = FileHandler(self.logfilename)
Example #4
File: pipeline.py Project: LabAdvComp/dish
    def start(self):
        """Initialize workdir, logging, etc. in preparation for running jobs.
        """

        # make a working directory for each job
        for job in self.jobs:
            job["workdir"] = os.path.join(self.workdir, job["description"])
            fs.maybe_mkdir(job["workdir"])
        # temporary ipython profile directory
        self.ipythondir = os.path.join(self.workdir, ".ipython")
        fs.maybe_mkdir(self.ipythondir)
        # log dir
        self.logdir = os.path.join(self.workdir, "log")
        fs.maybe_mkdir(self.logdir)

        # determine which IP we are going to listen on for logging
        try:
            self.listen_ip = localinterfaces.public_ips()[0]
        except:
            raise ValueError("This machine appears not to have"
                             " any publicly visible IP addresses")

        # setup ZMQ logging
        self.handler = FileHandler(os.path.join(self.logdir, "dish.log"))
        self.listen_port = str(randint(5000, 10000))
        self.subscriber = ZeroMQPullSubscriber("tcp://" + self.listen_ip +
                                               ":" + self.listen_port)
        self.controller = self.subscriber.dispatch_in_background(self.handler)
        self.logger = Logger("dish_master")
Example #5
    def __init__(self, filament):

        self.logger = Logger(Fibratus.__name__)
        self.file_handler = FileHandler(os.path.join(os.path.abspath(__file__), '..', '..', '..', 'fibratus.log'),
                                        mode='w+')
        self.kevt_streamc = KEventStreamCollector(etw.KERNEL_LOGGER_NAME.encode())
        self.kcontroller = KTraceController()
        self.ktrace_props = KTraceProps()
        self.ktrace_props.enable_kflags()
        self.ktrace_props.logger_name = etw.KERNEL_LOGGER_NAME

        self.handle_repository = HandleRepository()
        self._handles = []
        # query for handles on the
        # start of kernel trace
        with self.file_handler.applicationbound():
            self.logger.info('Starting fibratus...')
            self.logger.info('Enumerating system handles...')
            self._handles = self.handle_repository.query_handles()
            self.logger.info('%s handles found' % len(self._handles))
            self.handle_repository.free_buffers()
        self.thread_registry = ThreadRegistry(self.handle_repository, self._handles)

        self.kevent = KEvent(self.thread_registry)

        self._filament = filament

        self.fsio = FsIO(self.kevent, self._handles)
        self.hive_parser = HiveParser(self.kevent, self.thread_registry)
        self.tcpip_parser = TcpIpParser(self.kevent)
        self.dll_repository = DllRepository(self.kevent)

        self.requires_render = {}
        self.filters_count = 0
Example #6
def main():
    """
    The main routine which kicks everything off
    :return:
    """

    # Setup the command line arguments
    flags = argparse.ArgumentParser(description="Tool to validate and fix errors in CSV files for TADC imports")
    flags.add_argument('csv_file', type=str, help="Path to a CSV file to validate")
    flags.add_argument('header_rows', type=str, help="Number of header rows")
    flags.add_argument('--fix-missing', '-f', action='store_true', help="Fix missing fields by inserting the value 'unknown'")
    flags.add_argument('--output-dir', '-o', type=str, help='Where to put output files', default=os.getcwd())
    flags.add_argument('--log-dir', '-l', type=str, help='Where to put log files', default='/tmp')
    flags.add_argument('--log-level', type=str, help='Choose a log level', default='INFO')
    flags.add_argument('--old-date-format', type=str, help="the format of dates that will be fixed", default='%d/%m/%Y')
    args = flags.parse_args()

    log_filename = os.path.join(
            args.log_dir,
            'tadc_import_validator_{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
        )

    # register some logging handlers
    log_handler = FileHandler(
        log_filename,
        mode='w',
        level=args.log_level,
        bubble=True
    )
    stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)

    with stdout_handler.applicationbound():
        with log_handler.applicationbound():
            log.info("Arguments: {}".format(args))
            start = time.time()
            log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))

            with CSVFileValidator(
                    csv_file=args.csv_file,
                    header_rows=args.header_rows,
                    output_dir=args.output_dir,
                    old_date_format=args.old_date_format,
                    fix_missing=args.fix_missing) as validator:
                validator.validate_file()
                log.info("Running time: {}".format(str(datetime.timedelta(seconds=(round(time.time() - start, 3))))))
                log.info("Log written to {}:".format(log_filename))
                log.info("Fixed data is in: {}".format(validator.get_fixed_filename()))
Example #7
File: tracer.py Project: ThisGuyCodes/qdb
    def __init__(self, config=None, merge=False, **kwargs):
        """
        See qdb.config for more information about the configuration of
        qdb.
        merge denotes how config and kwargs should be merged.
        QdbConfig.kwargs_first says config will trample kwargs,
        QdbConfig.config_first says kwargs will trample config.
        Otherwise, kwargs and config cannot both be passed.
        """
        super(Qdb, self).__init__()
        if config and kwargs:
            if merge == QdbConfig.kwargs_first:
                first = kwargs
                second = config
            elif merge == QdbConfig.config_first:
                first = config
                second = kwargs
            else:
                raise TypeError('Cannot pass config and kwargs')
            config = first.merge(second)
        else:
            config = QdbConfig.get_config(config or kwargs)

        self.address = config.host, config.port
        self.set_default_file(config.default_file)
        self.default_namespace = config.default_namespace or {}
        self.exception_serializer = config.exception_serializer or \
            default_exception_serializer
        self.eval_fn = config.eval_fn or default_eval_fn
        self.green = config.green
        self._file_cache = {}
        self.redirect_output = config.redirect_output
        self.retry_attepts = config.retry_attepts
        self.repr_fn = config.repr_fn
        self.skip_fn = config.skip_fn or (lambda _: False)
        self.pause_signal = config.pause_signal \
            if config.pause_signal else signal.SIGUSR2
        self.uuid = str(config.uuid or uuid4())
        self.watchlist = {}
        self.execution_timeout = config.execution_timeout
        # We need to be able to send stdout back to the user debugging the
        # program. We hold a handle to this in case the program resets stdout.
        if self.redirect_output:
            self._old_stdout = sys.stdout
            self._old_stderr = sys.stderr
            self.stdout = StringIO()
            self.stderr = StringIO()
            sys.stdout = self.stdout
            sys.stderr = self.stderr
        self.forget()
        self.log_handler = None
        if config.log_file:
            self.log_handler = FileHandler(config.log_file)
            self.log_handler.push_application()
        self.cmd_manager = (config.cmd_manager or RemoteCommandManager)(self)
        self.cmd_manager.start(config.auth_msg)
Example #8
File: runner.py Project: tulanthoar/pygit
def main():
    """watch a specific directory, logging changes and
    running python scripts when they are written to disk"""
    home_dir = Path(environ.get('HOME'))
    run_logfile = home_dir / 'pyrun.log'
    watchdog_logfile = home_dir / 'pydir.log'
    run_log = FileHandler(str(run_logfile), level='NOTICE', bubble=True, mode='w', delay=True)
    file_log = FileHandler(str(watchdog_logfile), level='INFO', bubble=True)
    with run_log.applicationbound():
        with file_log.applicationbound():
            watched_dir = home_dir / 'code' / 'pyrep' / 'coderunner' / 'snippets'
            handler = MyEventHandler(run_logfile, run_log)
            obs = InotifyObserver()
            obs.schedule(handler, str(watched_dir), False)
            obs.start()
            try:
                while True:
                    sleep(1)
            except: #  pylint: disable=bare-except
                obs.stop()
            obs.join()
Example #9
 def log(self, message):
     try:
         if self._logger is None:
             FileHandler(self.args.log,
                         format_string=
                         "{record.time:%Y-%m-%d %H:%M:%S} {record.message}"
                         ).push_application()
             if self.args.verbose > 1:
                 StderrHandler(bubble=True).push_application()
             self._logger = Logger("tupa")
         self._logger.warn(message)
     except OSError:
         pass
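
The format_string above controls how each record is rendered in the file. A small sketch (not from the tupa project) of the same placeholder syntax on a StreamHandler, so the rendered line is visible on stdout:

import sys
from logbook import Logger, StreamHandler

# {record.*} fields are substituted per log record, just as in the FileHandler above
fmt = '{record.time:%Y-%m-%d %H:%M:%S} {record.level_name} {record.channel}: {record.message}'
with StreamHandler(sys.stdout, format_string=fmt).applicationbound():
    Logger('tupa').warn('something worth logging')
    # prints a line like: 2014-12-05 14:12:49 WARNING tupa: something worth logging
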
Example #10
def logging_context(path=None, level=None):
    from logbook import StderrHandler, FileHandler
    from logbook.compat import redirected_logging
    with StderrHandler(level=level or 'INFO').applicationbound():
        if path:
            if not os.path.isdir(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            with FileHandler(path, bubble=True).applicationbound():
                with redirected_logging():
                    yield
        else:
            with redirected_logging():
                yield
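
The bare yield statements suggest logging_context is wrapped with contextlib.contextmanager in the full module (the decorator is not shown in this excerpt). Under that assumption, usage would look roughly like this, with standard-library logging redirected into the logbook handlers for the duration of the block:

import logging

# assumes logging_context carries a @contextmanager decorator in the original module
with logging_context(path='logs/run.log', level='DEBUG'):
    logging.getLogger('myapp').info('captured by the StderrHandler and the FileHandler')
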
Example #11
def test_formatter_with_file_handler(monkeypatch):
    r_fixture = {
        'channel': 'formatter.test',
        'level': 2,
        'msg': 'My test log message',
        'args': None,
        'kwargs': None,
        'exc_info': None,
        'extra': None,
        'frame': None,
        'dispatcher': None
    }
    fh_fixture = {
        'filename': '/tmp/bogus.log',
        'mode': 'a',
        'encoding': 'utf-8',
        'level': 0,
        'format_string': None,
        'delay': False,
        'filter': None,
        'bubble': False
    }
    # This date fixture translates to '2014-12-05T14:12:49.303830Z'
    timestamp_fixture = FROZEN_DATETIME.strftime('%Y-%m-%dT%H:%M:%S.%fZ')

    monkeypatch.setattr(datetime, 'datetime', MockedDate)

    lf = LogstashFormatter()
    json_msg = lf(
        record=LogRecord(**r_fixture),
        handler=FileHandler(**fh_fixture)
    )

    expected_json = {
        '@fields': {
            'extra': {}, 'level': r_fixture['level'], 'process': None,
            'frame': None, 'args': [], 'kwargs': {},
            '_dispatcher': r_fixture['dispatcher'],
            'channel': r_fixture['channel']
        },
        '@handler': {
            'level': fh_fixture['level'],
            '_filename': fh_fixture['filename'], '_mode': fh_fixture['mode'],
            'filter': fh_fixture['filter'], 'bubble': fh_fixture['bubble'],
            'encoding': fh_fixture['encoding']
        },
        '@timestamp': timestamp_fixture,
        '@source_host': 'localhost',
        '@message': r_fixture['msg']
    }
    assert json.loads(json_msg) == expected_json
Example #12
    def __init__(self, filament):

        self.logger = Logger(Fibratus.__name__)
        self.file_handler = FileHandler(os.path.join(os.path.abspath(__file__),
                                                     '..', '..', '..',
                                                     'fibratus.log'),
                                        mode='w+')
        self.kevt_streamc = KEventStreamCollector(
            etw.KERNEL_LOGGER_NAME.encode())
        self.kcontroller = KTraceController()
        self.ktrace_props = KTraceProps()
        self.ktrace_props.enable_kflags()
        self.ktrace_props.logger_name = etw.KERNEL_LOGGER_NAME

        self.handle_repository = HandleRepository()
        self._handles = []
        # query for handles on the
        # start of kernel trace
        with self.file_handler.applicationbound():
            self.logger.info('Starting fibratus...')
            self.logger.info('Enumerating system handles...')
            self._handles = self.handle_repository.query_handles()
            self.logger.info('%s handles found' % len(self._handles))
            self.handle_repository.free_buffers()
        self.thread_registry = ThreadRegistry(self.handle_repository,
                                              self._handles)

        self.kevent = KEvent(self.thread_registry)

        self._filament = filament

        self.fsio = FsIO(self.kevent, self._handles)
        self.hive_parser = HiveParser(self.kevent, self.thread_registry)
        self.tcpip_parser = TcpIpParser(self.kevent)
        self.dll_repository = DllRepository(self.kevent)

        self.requires_render = {}
        self.filters_count = 0
Example #13
File: logger.py Project: t1user/ib_tools
def logger(name: str,
           stream_level=DEBUG,
           file_level=DEBUG,
           folder: str = default_path('logs')) -> Logger:
    set_datetime_format('local')
    StreamHandler(sys.stdout, level=stream_level,
                  bubble=True).push_application()
    filename = __file__.split('/')[-1][:-3]
    FileHandler(
        f'{folder}/{name}_{datetime.today().strftime("%Y-%m-%d_%H-%M")}.log',
        bubble=True,
        level=file_level,
        delay=True).push_application()
    return Logger(name)
Example #14
def __init():
    driver = config.get('app.log.driver', 'stderr')
    level = config.get('app.log.level', 'DEBUG').upper()
    global __handler
    global __loggers
    if driver == 'stderr':
        __handler = StreamHandler(sys.stderr, level=level)
    elif driver == 'stdout':
        __handler = StreamHandler(sys.stdout, level=level)
    elif driver == 'file':
        __handler = FileHandler(filename=__get_log_file(), level=level)
    else:
        raise Exception('Invalid driver for log')
    __handler.push_application()
    __loggers['core'] = Logger('Core')
    container.register('logger', __loggers['core'])
Example #15
def setup_logging():
    try:

        print("setting up log file")
        #get current folder
        current_dir_path = os.path.dirname(os.path.realpath(__file__))
        # join the log folder to the current path; if you use \log it will be created one level up
        log_path=os.path.join(current_dir_path,"log")
        log.info('log path:' + log_path)
        # check if the log folder exists
        if not os.path.exists(log_path):
            try:
                print("creating log folder")
                os.makedirs(log_path)
            except OSError as exc:  # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise
        else:
            log.info("Log folder exist")

        log.info("create log file inside log folder")
        log_filename = os.path.join(
            log_path,
            'google-drive-to-s3-{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
        )
        print(log_filename)
        """#test to create log file on disk
        if not os.path.exists(log_filename):
            print("create log file")
            try:
                # double check against OS race
                filehandle = open(log_filename, 'r')
            except IOError:
                # if file does not exist, create it
                filehandle = open(log_filename, "w")
        """
        # register some logging handlers
        log_handler = FileHandler(
            log_filename,
            mode='w',
            level=args.loglevel,
            bubble=True
        )
        return log_handler
    except (Exception) as e:
        log.error('Error in creating log handler' + str(e))
Example #16
def main(config_file, **kwargs):
    with open(config_file) as fh:
        config = yaml.load(fh)

    try:
        rmq_settings = config["rabbitmq_logging"]
    except KeyError:
        print("RabbitMQ logging not configured in {}".format(config_file))
        sys.exit()

    handlers = [NullHandler()]
    if not kwargs["quiet"]:
        handlers.append(StderrHandler(bubble=True))

    if kwargs["filename"]:
        handlers.append(FileHandler(kwargs["filename"], bubble=True))

    if kwargs["log_db"]:
        try:
            cdb_settings = config["couchdb_logging"]
        except KeyError:
            print("CouchDB logging not configured in {}".format(config_file))
            sys.exit()

        db_handler = DatabaseHandler(cdb_settings["couchdb_url"],
                                     backend=CouchDBBackend,
                                     db=cdb_settings["database"],
                                     bubble=True)
        handlers.append(db_handler)

    setup = NestedSetup(handlers)

    print("Now waiting for log messages")
    with setup:
        subscriber = RabbitMQSubscriber(rmq_settings["url"],
                                        queue=rmq_settings["log_queue"])
        try:
            subscriber.dispatch_forever()

        except KeyboardInterrupt:
            print("\nLog subscriber shutting down")
            subscriber.close()

        except Exception:
            print("Log subscriber quit (unexpectedly)")
Example #17
    def __init__(self,
                 name='default',
                 log_type='stdout',
                 filepath='default.log',
                 loglevel='DEBUG'):
        """

        :param name:
        :param log_type: 'stdout' writes to the screen, 'file' writes to a file
        :param filepath:
        :param loglevel: log level, one of ['CRITICAL', 'ERROR', 'WARNING', 'NOTICE', 'INFO', 'DEBUG', 'TRACE', 'NOTSET']
        """
        self.log = Logger(name)
        if log_type == 'stdout':
            StreamHandler(sys.stdout, level=loglevel).push_application()
        elif log_type == 'file':
            if os.path.isdir(filepath) and not os.path.exists(filepath):
                os.makedirs(os.path.dirname(filepath))
            file_hander = FileHandler(filepath, level=loglevel)
            self.log.handlers.append(file_hander)
Example #18
 def __init__(self,
              name='default',
              log_type='stdout',
              filepath='default.log',
              loglevel='DEBUG'):
     """Log对象
     :param name: log 名字
     :param :logtype: 'stdout' 输出到屏幕, 'file' 输出到指定文件
     :param :filename: log 文件名
     :param :loglevel: 设定log等级 ['CRITICAL', 'ERROR', 'WARNING', 'NOTICE', 'INFO', 'DEBUG', 'TRACE', 'NOTSET']
     :return log handler object
     """
     self.log = Logger(name)
     if log_type == 'stdout':
         StreamHandler(sys.stdout, level=loglevel).push_application()
     if log_type == 'file':
         if os.path.isdir(filepath) and not os.path.exists(filepath):
             os.makedirs(os.path.dirname(filepath))
         file_handler = FileHandler(filepath, level=loglevel)
         self.log.handlers.append(file_handler)
Example #19
 def __init__(self, name='default', log_type='stdout', file_path=default_log_path, loglevel='DEBUG'):
     """Log对象
     :param name: log 名字
     :param :logtype: 'stdout' 输出到屏幕, 'file' 输出到指定文件
     :param :filename: log 文件名
     :param :loglevel: 设定log等级 ['CRITICAL', 'ERROR', 'WARNING', 'NOTICE', 'INFO', 'DEBUG', 'TRACE', 'NOTSET']
     :param :dir_path: 文件夹路径
     :return log handler object
     """
     self.log = Logger(name)
     if log_type == 'stdout':
         StreamHandler(sys.stdout, level=loglevel).push_application()
     if log_type == 'file':
         date = time.strftime("%Y%m%d", time.localtime())
          # one folder per day
         file_path = "%s%s/" % (file_path, date)
         file_path_final = "%s%s-%s.log" % (file_path, date, name)
         print(file_path)
         if file_path[-1] == '/' and not os.path.exists(file_path):
             os.makedirs(os.path.dirname(file_path))
         file_handler = FileHandler(file_path_final, level=loglevel)
         self.log.handlers.append(file_handler)
Example #20
File: app.py Project: mrtopf/We-Rate-It
class Application(object):
    """a base class for dispatching WSGI requests"""
    
    def __init__(self, settings={}, prefix=""):
        """initialize the Application with a settings dictionary and an optional
        ``prefix`` if this is a sub application"""
        self.settings = settings
        self.mapper = routes.Mapper()
        self.setup_handlers(self.mapper)
        self.loghandler = FileHandler(self.logfilename)

    def __call__(self, environ, start_response):
        with self.loghandler.threadbound():
            request = werkzeug.Request(environ)
            m = self.mapper.match(environ = environ)
            if m is not None:
                handler = m['handler'](app=self, request=request, settings=self.settings)
                try:
                    return handler.handle(**m)(environ, start_response)
                except werkzeug.exceptions.HTTPException, e:
                    return e(environ, start_response)
            # no view found => 404
            return werkzeug.exceptions.NotFound()(environ, start_response)
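
Note that __call__ binds the FileHandler with threadbound() rather than applicationbound(), so the handler applies only to the thread serving the current request. A small sketch of the difference (the names here are illustrative, not from the project):

from logbook import FileHandler, Logger

log = Logger('web')
handler = FileHandler('requests.log')

def handle_request():
    # bound to the current thread only; other threads keep their own handler stacks
    with handler.threadbound():
        log.info('handled one request')

# applicationbound() would instead make the handler active for every thread:
# with handler.applicationbound():
#     log.info('seen by all threads in the process')
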
Example #21
        p.oplossing = data
    elif soort == 'event':
        p.events.append((dt.datetime.today().isoformat(' ')[:19], data))
    elif soort == 'statuscode':
        p.status = data
    elif soort == 'arch':
        p.set_arch(data)
    list(p)
    if update:
        p.write()
    return p


if __name__ == "__main__":
    fnm = "afrift"
    log_handler = FileHandler('get_acties_sql_1.log', mode='w')
    with log_handler.applicationbound():
        test_get_acties(fnm, {}, "")
        test_get_acties(fnm, {"idlt": "2010"}, "")
        test_get_acties(fnm, {
            "idlt": "2010",
            "id": "and",
            "idgt": "2007-0003"
        }, "")
        test_get_acties(fnm, {
            "idgt": "2010",
            "id": "or",
            "idlt": "2007-0003"
        }, "")
        test_get_acties(fnm, {"idgt": "2007-0003"}, "")
        test_get_acties(fnm, {"status": ["1"]}, "")
Example #22
File: browser.py Project: Nikola-K/RESTool
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import shutil
from uuid import uuid4
from time import strftime
import psutil
from types import NoneType

from logbook import FileHandler, Logger, CRITICAL

log = Logger("Browser")
if os.path.exists("application.log"):
    log_handler = FileHandler('application.log')
    log_handler.push_application()
else:
    log.level = CRITICAL


class Browser(object):
    def _expand(self, path):
        log.debug("Expanding: {}".format(path))

        if self.os == "linux" or self.os == "darwin":
            return os.path.expanduser(path)
        elif self.os == "windows" and platform.release() != "XP":
            return os.path.expandvars(path)
        else:
            log.error("Unsupported OS: {} - expanding failed.".format(self.os))
Example #23
        if not os.path.exists(restore_path):
            print "Backup file at {} could not be found.".format(restore_path)
            return

        backup_browser = os.path.basename(restore_path)[0].lower()
        if backup_browser == browser.name:
            browser.restore_from_self(restore_path)
        else:
            restore_browser = self.all_browsers[backup_browser]
            restore_data = restore_browser.get_data(restore_path)
            browser.set_data(restore_data)


if __name__ == '__main__':
    if os.path.exists("application.log"):
        log_handler = FileHandler('application.log')
        log_handler.push_application()
    else:
        log.level = CRITICAL

    parser = argparse.ArgumentParser(prog='RESTool Command Line Interface. '
                                     'See github for usage examples.')
    parser.add_argument(
        '-l',
        '--list',
        action='store_true',
        help=
        "List all available browsers and profiles to use for other commands.")

    parser.add_argument(
        '-b',
Example #24
# -*- coding: utf-8 -*-

from logbook import Logger, FileHandler
import logbook
import sys

handler = FileHandler('app.log')
handler.push_application()
log = Logger('test')


def main():
    log.info('something logging')


if __name__ == '__main__':
    main()
Example #25
# ptvsd.wait_for_attach()


# I use cspell plugin for spell checking, and I want it to ignore few words
# cspell:ignore reican lzma isdir

# max file size in lines (10M by default)
MAX_LINES_TO_READ = 10000000
# max file size in megabytes
MAX_FILE_SIZE = "50M"
# where to write application log
LOG_FILE_NAME = "reican.log"

TIMESTAMP_FORMAT = "YYYY-MM-DD HH:mm:ss"

log_handler = FileHandler(LOG_FILE_NAME)
log_handler.push_application()
log = Logger("Reican")
log.info("Logging started")


def func_log(function_name):
    """Decorator for logging and timing function execution."""

    def log_it(*args, **kwargs):
        """Log function and its args, execute the function and return the result."""
        t_start = time.time()
        result = function_name(*args, **kwargs)
        t_end = time.time() - t_start
        msg = "Function call: {}".format(function_name.__name__)
        if args:
Example #26
def main():
    parser = argparse.ArgumentParser(description='Extract features')
    parser.add_argument(
        '-i',
        '--input',
        required=True,
        help='Raw data input dir'
    )
    parser.add_argument(
        '-o',
        '--output',
        required=True,
        help='Output dir'
    )
    parser.add_argument(
        '--filter',
        default='lowpass',
        help='Filtering Type'
    )
    parser.add_argument(
        '--window',
        type=int,
        required=True,
        help='Window length'
    )
    parser.add_argument(
        '--stride',
        type=int,
        required=True,
        help='Stride length'
    )
    parser.add_argument(
        '-f',
        '--featurelist',
        nargs='+',
        help='Features to extact',
        required=True
    )
    parser.add_argument(
        '--downsample',
        type=int,
        default=1,
        help='Downsample step, default takes no downsample'
    )
    parser.add_argument(
        '--log',
        default='info',
        choices=['debug', 'warning', 'info', 'error'],
        help='Logging level, default info'
    )
    parser.add_argument(
        '--dataset',
        choices=['ninapro-db1', 'ninapro-db2', 'ninapro-db3', 'ninapro-db4',
                 'ninapro-db5', 'ninapro-db6', 'ninapro-db7', 'biopatrec-db1',
                 'biopatrec-db2', 'biopatrec-db3', 'biopatrec-db4'],
        help='Dataset choices',
        required=True
    )

    args = parser.parse_args()

    with NullHandler().applicationbound():
        with StderrHandler(level=args.log.upper()).applicationbound():
            with FileHandler(
                os.path.join(ensure_dir(args.output), 'log'),
                level=args.log.upper(),
                bubble=True
            ).applicationbound():
                try:
                    return run(args)
                except:
                    log.exception('Failed')
Example #27
if __name__ == '__main__':
    '''Usage: python tk_maintain.py --log hehe.log
    '''

    from logbook import FileHandler
    from logbook import Logger
    from argparse import ArgumentParser
    import sys
    parser = ArgumentParser()
    logpath = './log/'
    parser.add_argument('--log', nargs=1, help='log path')
    parser.add_argument('--version', nargs=1, help='maintain version')
    args = parser.parse_args(sys.argv[1:])
    logfilepath = logpath + args.log[0]
    maintain_version = args.version[0]
    log_handler = FileHandler(logfilepath)
    logbk = Logger('Token Maintain')

    with log_handler.applicationbound():
        logbk.info('maintain prepare')

        at_least = AT_LEAST_TOKEN_COUNT
        max_tokens_redis_limit = MAX_TOKENS_IN_REDIS

        logbk.info('maintain begin')

        # authenticate new users and add their access_token to MongoDB; Redis imports the new tokens from MongoDB without resetting the req_count of existing tokens
        if maintain_version == 'addatoken':
            print 'generate new token, write to mongo, push to redis without reset request count'
            generate_api_access_token(logbk)
            add_without_reset_req_count(max_tokens_redis_limit, logbk)
Example #28
import requests
import time
import json
import zmq
from multiprocessing import Process
from logbook import FileHandler, catch_exceptions
from sachintweets.models import connect, MongoException
from twitter import username, password
from os import remove, getpid

####################################### Log book setup #########################
log_handler = FileHandler('recv.log')
log_handler.push_application()

###################################### Constants ###############################
LOCK_FILE = 'recv.lock'

###################################### Mongodb connection ######################
try:
    db =  connect() 
    if db:
       tweet  = db.tweet
except Exception as e:
    log_handler.write(e.message)

#################################### ZERO MQ PULLER ############################
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://*:6789")
socket.setsockopt(zmq.SUBSCRIBE, "")
Example #29
# - * - coding: utf-8 - * -

import logbook
import sys
from logbook import Logger, FileHandler, StreamHandler

# log file
logFile = "rk-bd.log"

# switch timestamps from UTC to local time
logbook.set_datetime_format("local")

# log to standard output, bound for the whole application
StreamHandler(
    sys.stdout,
    level=logbook.DEBUG,
    format_string=
    "[{record.time:%Y-%m-%d %H:%M:%S}] {record.level_name}: {record.filename}:{record.lineno} {record.message}",
    encoding="utf-8").push_application()

# log to a file, bound for the whole application
FileHandler(
    logFile,
    level=logbook.INFO,
    format_string=
    "[{record.time:%Y-%m-%d %H:%M:%S}] {record.level_name}: {record.filename}:{record.lineno} {record.message}",
    encoding="utf-8",
    bubble=True).push_application()

log = Logger(__name__)
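
Because both handlers are pushed application-wide with bubble=True, an INFO-or-higher record is written to the file and still bubbles on to the DEBUG-level stdout handler, while a DEBUG record falls below the file handler's threshold and reaches stdout only. A minimal sketch of that interaction:

import sys
import logbook
from logbook import FileHandler, Logger, StreamHandler

StreamHandler(sys.stdout, level=logbook.DEBUG, bubble=True).push_application()
FileHandler('rk-bd.log', level=logbook.INFO, bubble=True).push_application()

log = Logger(__name__)
log.debug('stdout only: below the file handler INFO threshold')
log.info('written to rk-bd.log, then bubbles on to stdout as well')
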
Example #30
        file_handle = kwargs.get("file_handle")
        logger.debug(
            f"Unlocking the file {file_name} on host {host}, owner={owner}, client={client_name},"
            f" kwargs = {kwargs}")
        file_handle = self._get_file_handle(
            host, export, file_name) if not file_handle else file_handle
        nlm_client = NLMClient(host)
        unlock_arguments = get_packer_arguments("UNLOCK",
                                                caller_name=client_name,
                                                owner=owner,
                                                fh=file_handle,
                                                l_offset=offset,
                                                l_len=length)
        status = nlm_client.unlock(unlock_arguments)
        return NLM4_Stats(status).name

    def _get_file_handle(self, host, export, file_name):
        file_handle = self.exposed_lookup_file(host, export, file_name)
        if not file_handle:
            raise FileNotFound(
                f"{file_name} cannot be found and file_handle was not specified"
            )
        return file_handle


if __name__ == "__main__":
    FileHandler("nfs_client.log").push_application()
    t = ThreadedServer(NFSClientWrapper, port=9999)
    t.daemon = True
    logger.notice("Starting server on port 9999")
    t.start()
Example #31
    servers = [Server(i) for i in range(1, NUM_SERVERS + 1)]

    start_servers(servers)

    time.sleep(10)
    try:
        return test(servers)
    except Exception, e:
        logger.exception('Test failed: %s' % e)
        return 1
    finally:
        logger.info('Stopping')
        stop(servers)


if __name__ == '__main__':
    format = '[{record.time}] {record.level_name:>5} [{record.extra[worker_id]}] {record.message}'

    logging_setup = NestedSetup([
        NullHandler(),
        FileHandler(
            filename=os.path.join(os.path.dirname(__file__), 'log/client.log'),
            format_string=format,
            bubble=True,
        ),
        StderrHandler(level=logbook.INFO, format_string=format, bubble=True),
    ])

    with logging_setup.applicationbound():
        sys.exit(main())
Example #32
        for k,v in headers.iteritems():
            self.req.add_header(k,v)

    def set_req(self):
        self.req = urllib2.Request(self.url, urllib.urlencode(self.data))
        #self.req = urllib2.Request(self.url)

    def send(self):
        self.set_req()
        return urllib2.urlopen(self.req)



if __name__ == "__main__":

    logger = Logger("TicketchangeToInfluxdb")
    logfile = "ticketchangetoinfluxdb.log"
    fh = FileHandler(logfile,"a")
    fh.applicationbound()
    fh.push_application()

    client = Client()
    client.test()
    adapter = Adapter()
    client.set_adapter(adapter)
    a =client.get_adapter()
    a.test()

    print("This is just a test.")
    logger.info("Testing logging.")
Example #33
def init(log_path):
    global logger
    log_hander = FileHandler(log_path)
    log_hander.formatter = log_type
    log_hander.push_application()
    logger = Logger('simulate_tcm')
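
The assignment to log_hander.formatter implies log_type is a formatter callable. A hedged sketch of what such a callable could look like: logbook invokes it with the record and the handler and writes the returned string to the file (the layout below is illustrative, not the simulate_tcm project's actual format):

from logbook import FileHandler, Logger

def log_type(record, handler):
    # a formatter is any callable that returns the final text for a record
    return '[{}] {} {}: {}'.format(
        record.time.strftime('%Y-%m-%d %H:%M:%S'),
        record.level_name,
        record.channel,
        record.message,
    )

handler = FileHandler('simulate_tcm.log')
handler.formatter = log_type
handler.push_application()
Logger('simulate_tcm').info('custom-formatted record')
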
Example #34
     collection = Collection(url_api_collection)
 except Exception, e:
     msg = "Exception in Collection {}, init {}".format(url_api_collection, str(e))
     logbook.error(msg)
     raise e
 if not (collection["harvest_type"] in HARVEST_TYPES):
     msg = "Collection {} wrong type {} for harvesting. Harvest type {} \
             is not in {}".format(
         url_api_collection, collection["harvest_type"], collection["harvest_type"], HARVEST_TYPES.keys()
     )
     logbook.error(msg)
     raise ValueError(msg)
 mail_handler.subject = "Error during harvest of " + collection.url
 my_log_handler = None
 if not log_handler:  # can't init until have collection
     my_log_handler = FileHandler(get_log_file_path(collection.slug))
     my_log_handler.push_application()
 logger = logbook.Logger("HarvestMain")
 msg = "Init harvester next. Collection:{}".format(collection.url)
 logger.info(msg)
 # email directly
 mimetext = create_mimetext_msg(
     EMAIL_RETURN_ADDRESS, user_email, " ".join(("Starting harvest for ", collection.slug)), msg
 )
 try:  # TODO: request more emails from AWS
     mail_handler.deliver(mimetext, "*****@*****.**")
 except:
     pass
 logger.info("Create DPLA profile document")
 if not profile_path:
     profile_path = os.path.abspath(os.path.join(dir_profile, collection.id + ".pjs"))
Example #35
import requests
import time
import json
import zmq
from logbook import FileHandler, catch_exceptions
from sachintweets.models import connect, MongoException

####################################### Log book setup #########################
log_handler = FileHandler('fetch_user_details.log')
log_handler.push_application()

###################################### Constants ###############################
LOCK_FILE = 'recv.lock'

###################################### Mongodb connection ######################
try:
    db =  connect() 
    if db:
       tweet  = db.tweet
except Exception as e:
    log_handler.write(e.message)


def fetch_user_details():
    tweets = tweet.find({})
    for t in tweets:
        if not 'screen_name' in t:
            r = requests.get("http://api.twitter.com/1/statuses/show/%d.json"\
            %(t['tid']))
            d = json.loads(r.content)
            if 'user' in d:
Example #36
    dh = User(usernaam, password, flag)
    if not flag:
        if dh.paswdok:
            logger.info('    {}'.format(dh.__dict__))
        else:
            logger.info("    password not ok")
    else:
        if dh.session_ok:
            logger.info('    {}'.format(dh.__dict__))
        else:
            logger.info("    session not ok")
    return dh


if __name__ == '__main__':
    log_handler = FileHandler('test_user.log', mode="w")
    with log_handler.applicationbound():
        test_userlijst()
        test_user('woefdram')
        test_user('vader', "begin")
        test_user('vader', "begin", False)
        test_user('vader', "20015533280430", True)

        usernaam, passwd = 'leerkracht', 'begin'
        test_user(usernaam)
        dh = test_user(usernaam, passwd)
        login = dh.login
        test_user(usernaam, passwd, False)
        test_user(usernaam, "20015533280430", True)
        test_user(usernaam, dh.login, True)
        dh = test_user(usernaam, passwd)
Example #37
class Fibratus():

    """Fibratus entrypoint.

    Setup the core components including the kernel
    event stream collector and the tracing controller.
    At this point the system handles are also being
    enumerated.

    """
    def __init__(self, filament):

        self.logger = Logger(Fibratus.__name__)
        self.file_handler = FileHandler(os.path.join(os.path.abspath(__file__), '..', '..', '..', 'fibratus.log'),
                                        mode='w+')
        self.kevt_streamc = KEventStreamCollector(etw.KERNEL_LOGGER_NAME.encode())
        self.kcontroller = KTraceController()
        self.ktrace_props = KTraceProps()
        self.ktrace_props.enable_kflags()
        self.ktrace_props.logger_name = etw.KERNEL_LOGGER_NAME

        self.handle_repository = HandleRepository()
        self._handles = []
        # query for handles on the
        # start of kernel trace
        with self.file_handler.applicationbound():
            self.logger.info('Starting fibratus...')
            self.logger.info('Enumerating system handles...')
            self._handles = self.handle_repository.query_handles()
            self.logger.info('%s handles found' % len(self._handles))
            self.handle_repository.free_buffers()
        self.thread_registry = ThreadRegistry(self.handle_repository, self._handles)

        self.kevent = KEvent(self.thread_registry)

        self._filament = filament

        self.fsio = FsIO(self.kevent, self._handles)
        self.hive_parser = HiveParser(self.kevent, self.thread_registry)
        self.tcpip_parser = TcpIpParser(self.kevent)
        self.dll_repository = DllRepository(self.kevent)

        self.requires_render = {}
        self.filters_count = 0

    def run(self):

        @atexit.register
        def _exit():
            self.stop_ktrace()

        self.kcontroller.start_ktrace(etw.KERNEL_LOGGER_NAME, self.ktrace_props)

        def on_kstream_open():
            if self._filament is None:
                IO.write_console('Done!                               ')
        self.kevt_streamc.set_kstream_open_callback(on_kstream_open)
        self._open_kstream()

    def _open_kstream(self):
        try:
            self.kevt_streamc.open_kstream(self._on_next_kevent)
        except Exception as e:
            with self.file_handler.applicationbound():
                self.logger.error(e)
        except KeyboardInterrupt:
            self.stop_ktrace()

    def stop_ktrace(self):
        IO.write_console('Stopping fibratus...')
        if self._filament:
            self._filament.close()
        self.kcontroller.stop_ktrace(self.ktrace_props)
        self.kevt_streamc.close_kstream()

    def add_filters(self, kevent_filters):
        if len(kevent_filters) > 0:
            self.filters_count = len(kevent_filters)
            # include the basic filters
            # that are essential to the
            # rest of kernel events
            self.kevt_streamc.add_kevent_filter(ENUM_PROCESS)
            self.kevt_streamc.add_kevent_filter(ENUM_THREAD)
            self.kevt_streamc.add_kevent_filter(ENUM_IMAGE)
            self.kevt_streamc.add_kevent_filter(REG_CREATE_KCB)
            self.kevt_streamc.add_kevent_filter(REG_DELETE_KCB)

            # these kevents are necessary for consistent state
            # of the trace. If the user doesn't include them
            # in a filter list, then we do the job but set the
            # kernel event type as not eligible for rendering
            if not KEvents.CREATE_PROCESS in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_PROCESS)
                self.requires_render[CREATE_PROCESS] = False
            else:
                self.requires_render[CREATE_PROCESS] = True

            if not KEvents.CREATE_THREAD in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_THREAD)
                self.requires_render[CREATE_THREAD] = False
            else:
                self.requires_render[CREATE_THREAD] = True

            if not KEvents.CREATE_FILE in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_FILE)
                self.requires_render[CREATE_FILE] = False
            else:
                self.requires_render[CREATE_FILE] = True

            for kevent_filter in kevent_filters:
                ktuple = kname_to_tuple(kevent_filter)
                if isinstance(ktuple, list):
                    for kt in ktuple:
                        self.kevt_streamc.add_kevent_filter(kt)
                        if not kt in self.requires_render:
                            self.requires_render[kt] = True
                else:
                    self.kevt_streamc.add_kevent_filter(ktuple)
                    if not ktuple in self.requires_render:
                        self.requires_render[ktuple] = True

    def _on_next_kevent(self, ktype, cpuid, ts, kparams):
        """Callback which fires when new kernel event arrives.

        This callback is invoked for every new kernel event
        forwarded from the kernel stream collector.

        Parameters
        ----------

        ktype: tuple
            Kernel event type.
        cpuid: int
            Identifies the CPU core where the event
            has been captured.
        ts: str
            Temporal reference of the kernel event.
        kparams: dict
            Kernel event's parameters.
        """

        # initialize kernel event properties
        self.kevent.ts = ts
        self.kevent.cpuid = cpuid
        self.kevent.name = ktuple_to_name(ktype)
        kparams = ddict(kparams)
        # thread / process kernel events
        if ktype in [CREATE_PROCESS,
                     CREATE_THREAD,
                     ENUM_PROCESS,
                     ENUM_THREAD]:
            self.thread_registry.add_thread(ktype, kparams)
            if ktype in [CREATE_PROCESS, CREATE_THREAD]:
                self.thread_registry.init_thread_kevent(self.kevent,
                                                        ktype,
                                                        kparams)
                self._render(ktype)
        elif ktype in [TERMINATE_PROCESS, TERMINATE_THREAD]:
            self.thread_registry.init_thread_kevent(self.kevent,
                                                    ktype,
                                                    kparams)
            self._render(ktype)
            self.thread_registry.remove_thread(ktype, kparams)

        # file system/disk kernel events
        elif ktype in [CREATE_FILE,
                       DELETE_FILE,
                       CLOSE_FILE,
                       READ_FILE,
                       WRITE_FILE]:
            self.fsio.parse_fsio(ktype, kparams)
            self._render(ktype)

        # dll kernel events
        elif ktype in [LOAD_IMAGE, ENUM_IMAGE]:
            self.dll_repository.register_dll(kparams)
            if ktype == LOAD_IMAGE:
                self._render(ktype)
        elif ktype == UNLOAD_IMAGE:
            self.dll_repository.unregister_dll(kparams)
            self._render(ktype)

        # registry kernel events
        elif ktype == REG_CREATE_KCB:
            self.hive_parser.add_kcb(kparams)
        elif ktype == REG_DELETE_KCB:
            self.hive_parser.remove_kcb(kparams.key_handle)

        elif ktype in [REG_CREATE_KEY,
                       REG_DELETE_KEY,
                       REG_OPEN_KEY,
                       REG_QUERY_KEY,
                       REG_SET_VALUE,
                       REG_DELETE_VALUE,
                       REG_QUERY_VALUE]:
            self.hive_parser.parse_hive(ktype, kparams)
            self._render(ktype)

        # network kernel events
        elif ktype in [SEND_SOCKET_TCPV4,
                       SEND_SOCKET_UDPV4,
                       RECV_SOCKET_TCPV4,
                       RECV_SOCKET_UDPV4,
                       ACCEPT_SOCKET_TCPV4,
                       CONNECT_SOCKET_TCPV4,
                       DISCONNECT_SOCKET_TCPV4,
                       RECONNECT_SOCKET_TCPV4]:
            self.tcpip_parser.parse_tcpip(ktype, kparams)
            self._render(ktype)

        if self._filament:
            # call filament method
            # to process the next
            # kernel event from the stream
            if ktype not in [ENUM_PROCESS,
                             ENUM_THREAD, ENUM_IMAGE]:
                if self.kevent.name:
                    self._filament.process(self.kevent)

    def _render(self, ktype):
        """Renders the kevent to the standard output stream.

        Parameters
        ----------

        ktype: tuple
            Identifier of the kernel event
        """
        if not self._filament:
            if ktype in self.requires_render:
                rr = self.requires_render[ktype]
                if rr:
                    self.kevent.render()
            elif self.filters_count == 0:
                self.kevent.render()
Example #38
File: tracer.py Project: fuyuanwu/qdb
class Qdb(Bdb, object):
    """
    The Quantopian Remote Debugger.
    """
    _instance = None

    def __new__(cls, *args, **kwargs):
        """
        Qdb objects are singletons that persist until their disable method is
        called.
        """
        if not cls._instance:
            cls._instance = super(Qdb, cls).__new__(cls)
            cls._instance._init(*args, **kwargs)
        return cls._instance

    def __init__(self, *args, **kwargs):
        pass

    def _init(self, config=None, merge=False, **kwargs):
        """
        See qdb.config for more information about the configuration of
        qdb.
        merge denotes how config and kwargs should be merged.
        QdbConfig.kwargs_first says config will trample kwargs,
        QdbConfig.config_first says kwargs will trample config.
        Otherwise, kwargs and config cannot both be passed.
        """
        self.super_ = super(Qdb, self)
        self.super_.__init__()
        self.reset()
        if config and kwargs:
            if merge == QdbConfig.kwargs_first:
                first = kwargs
                second = config
            elif merge == QdbConfig.config_first:
                first = config
                second = kwargs
            else:
                raise TypeError('Cannot pass config and kwargs')
            config = first.merge(second)
        else:
            config = QdbConfig.get_config(config or kwargs)

        self.address = config.host, config.port
        self.set_default_file(config.default_file)
        self.default_namespace = config.default_namespace or {}
        self.exception_serializer = config.exception_serializer or \
            default_exception_serializer
        self.eval_fn = config.eval_fn or default_eval_fn
        self.green = config.green
        self._file_cache = {}
        self.redirect_output = config.redirect_output
        self.retry_attepts = config.retry_attepts
        self.repr_fn = config.repr_fn
        self._skip_fn = config.skip_fn or (lambda _: False)
        self.pause_signal = config.pause_signal \
            if config.pause_signal else signal.SIGUSR2
        self.uuid = str(config.uuid or uuid4())
        self.watchlist = {}
        self.execution_timeout = config.execution_timeout
        self.reset()
        self.log_handler = None
        if config.log_file:
            self.log_handler = FileHandler(config.log_file)
            self.log_handler.push_application()

        # The timing between these lines might matter depending on the
        # cmd_manager. Don't separate them.
        self.cmd_manager = (config.cmd_manager or RemoteCommandManager)(self)
        self.cmd_manager.start(config.auth_msg)

        # We need to be able to send stdout back to the user debugging the
        # program. We hold a handle to this in case the program resets stdout.
        if self.redirect_output:
            self._old_stdout = sys.stdout
            self._old_stderr = sys.stderr
            sys.stdout = OutputTee(
                sys.stdout,
                RemoteOutput(self.cmd_manager, '<stdout>'),
            )
            sys.stderr = OutputTee(
                sys.stderr,
                RemoteOutput(self.cmd_manager, '<stderr>'),
            )

    def skip_fn(self, path):
        return self._skip_fn(self.canonic(path))

    def restore_output_streams(self):
        """
        Restores the original output streams.
        """
        if self.redirect_output:
            sys.stdout = self._old_stdout
            sys.stderr = self._old_stderr

    def _new_execution_timeout(self, src):
        """
        Return a new execution timeout context manager.
        If no execution timeout is in place, returns ExitStack()
        """
        # We use green=False because this could be cpu bound. This will
        # still throw to the proper greenlet if this is gevented.
        return (
            Timeout(
                self.execution_timeout,
                QdbExecutionTimeout(src, self.execution_timeout),
                green=False
            ) if self.execution_timeout else ExitStack()
        )

    def set_default_file(self, filename):
        """
        Safely sets the new default file.
        """
        self.default_file = self.canonic(filename) if filename else None

    def get_line(self, filename, line):
        """
        Checks for any user cached files before deferring to the linecache.
        """
        # The line - 1 is so that querying line 1 gives us the first line in
        # the file.
        try:
            return self._get_file_lines(filename)[line - 1]
        except IndexError:
            return 'No source available for this line.'

    def get_file(self, filename):
        """
        Retrieves a file out of cache or opens and caches it.
        """
        return '\n'.join(self._get_file_lines(filename))

    def _get_file_lines(self, filename):
        """
        Retrieves the file from the file cache as a list of lines.
        If the file does not exist in the cache, it is cached from
        disk.
        """
        canonic_name = self.canonic(filename)
        try:
            return self._file_cache[canonic_name]
        except KeyError:
            if not self.cache_file(canonic_name):
                return []
            return self._file_cache.get(canonic_name)

    def cache_file(self, filename, contents=None):
        """
        Caches filename from disk into memory.
        This overrides whatever was cached for filename previously.
        If contents is provided, it allows the user to cache a filename to a
        string.
        Returns True if the file caching succeeded, otherwise returns false.
        """
        canonic_name = self.canonic(filename)
        if contents:
            self._file_cache[canonic_name] = contents.splitlines()
            return True
        try:
            with open(canonic_name, 'r') as f:
                self._file_cache[canonic_name] = map(
                    lambda l: l[:-1] if l.endswith('\n') else l,
                    f.readlines()
                )
                return True
        except IOError:
            # The caching operation failed.
            return False

    def set_break(self, filename, lineno, temporary=False, cond=None,
                  funcname=None, **kwargs):
        """
        Sets a breakpoint. This is overridden to account for the filecache
        and for unreachable lines.
        **kwargs are ignored. This is to work with payloads that pass extra
        fields to the set_break payload.
        """
        filename = self.canonic(filename) if filename else self.default_file
        try:
            self.get_line(filename, lineno)
        except IndexError:
            raise QdbUnreachableBreakpoint({
                'file': filename,
                'line': lineno,
                'temp': temporary,
                'cond': cond,
                'func': funcname,
            })

        blist = self.breaks.setdefault(filename, [])
        if lineno not in blist:
            blist.append(lineno)
        Breakpoint(filename, lineno, temporary, cond, funcname)

    def clear_break(self, filename, lineno, *args, **kwargs):
        """
        Wrapper to make the breakpoint json standardized for setting
        and removing of breakpoints.
        This means that the same json data that was used to set a break point
        may be fed into this function with the extra values ignored.
        """
        self.super_.clear_break(filename, lineno)

    def canonic(self, filename):
        canonic_filename = self.super_.canonic(filename)
        if canonic_filename.endswith('pyc'):
            return canonic_filename[:-1]
        return canonic_filename

    def reset(self):
        self.botframe = None
        self._set_stopinfo(None, None)
        self.forget()

    def forget(self):
        self.lineno = None
        self.stack = []
        self.curindex = 0
        self.curframe = None

    def setup_stack(self, stackframe, traceback):
        """
        Sets up the state of the debugger object for this frame.
        """
        self.forget()
        self.stack, self.curindex = self.get_stack(stackframe, traceback)
        self.curframe = self.stack[self.curindex][0]
        self.curframe_locals = self.curframe.f_locals
        self.update_watchlist()

    def extend_watchlist(self, *args):
        """
        Adds every arg to the watchlist and updates.
        """
        for expr in args:
            self.watchlist[expr] = (False, '')

        self.update_watchlist()

    def update_watchlist(self):
        """
        Updates the watchlist by evaluating all the watched expressions in
        our current frame.
        """
        id_ = lambda n: n  # Why is this NOT a builtin?
        for expr in self.watchlist:
            try:
                with self._new_execution_timeout(expr), \
                        self.inject_default_namespace() as stackframe:
                    self.watchlist[expr] = (
                        None,
                        (self.repr_fn or id_)(
                            self.eval_fn(expr, stackframe)
                        )
                    )
            except Exception as e:
                self.watchlist[expr] = (
                    type(e).__name__,
                    self.exception_serializer(e)
                )

    def effective(self, file, line, stackframe):
        """
        Finds the effective breakpoint for this line; called only
        when we know that there is a breakpoint here.

        returns the breakpoint paired with a flag denoting if we should
        remove this breakpoint or not.
        """
        for breakpoint in Breakpoint.bplist[file, line]:
            if breakpoint.enabled == 0:
                continue
            if not checkfuncname(breakpoint, stackframe):
                continue
            # Count every hit when breakpoint is enabled
            breakpoint.hits = breakpoint.hits + 1
            if not breakpoint.cond:
                # If unconditional, and ignoring go on to next, else break
                if breakpoint.ignore > 0:
                    breakpoint.ignore = breakpoint.ignore - 1
                    continue
                else:
                    return breakpoint, True
            else:
                # Conditional breakpoint
                # Ignore count applies only to those bpt hits where the
                # condition evaluates to true.
                try:
                    with self._new_execution_timeout(breakpoint.cond), \
                            self.inject_default_namespace(stackframe) as frame:
                        val = self.eval_fn(
                            breakpoint.cond,
                            frame,
                            'eval'
                        )
                except Exception as e:
                    # Send back a message to let the user know there was an
                    # issue with their breakpoint.
                    self.cmd_manager.send_error(
                        'condition', {
                            'cond': breakpoint.cond,
                            'line': line,
                            'exc': type(e).__name__,
                            'output': self.exception_serializer(e),
                        }
                    )
                    # Return this breakpoint to be safe. The user will be
                    # stopped here so that they can fix the breakpoint.
                    return breakpoint, False

                if val:
                    if breakpoint.ignore > 0:
                        breakpoint.ignore = breakpoint.ignore - 1
                    else:
                        return breakpoint, True
        return None, False

    def break_here(self, stackframe):
        """
        Checks if we should break execution in this stackframe.
        This function handles the cleanup and ignore counts for breakpoints.
        Returns True iff we should stop in the stackframe, False otherwise.
        """
        filename = self.canonic(stackframe.f_code.co_filename)
        if filename not in self.breaks:
            return False
        lineno = stackframe.f_lineno
        if lineno not in self.breaks[filename]:
            # The line itself has no breakpoint, but maybe the line is the
            # first line of a function with breakpoint set by function name.
            lineno = stackframe.f_code.co_firstlineno
            if lineno not in self.breaks[filename]:
                return False

        # flag says ok to delete temporary breakpoints.
        breakpoint, flag = self.effective(filename, lineno, stackframe)
        if breakpoint:
            self.currentbp = breakpoint.number
            if flag and breakpoint.temporary:
                self.do_clear(breakpoint.number)
            return True
        else:
            return False

    def trace_dispatch(self, stackframe, event, arg):
        """
        Trace function that does some preliminary checks and then defers to
        the event handler for each type of event.
        """
        if self.quitting:
            # We were told to quit by the user, bubble this up to their code.
            return

        if self.skip_fn(stackframe.f_code.co_filename):
            # We want to skip this, don't stop but keep tracing.
            return self.trace_dispatch

        try:
            return self.super_.trace_dispatch(stackframe, event, arg)
        except BdbQuit:
            raise QdbQuit()  # Rewrap as a QdbError object.

    def user_call(self, stackframe, arg):
        if self.break_here(stackframe):
            self.user_line(stackframe)

    def user_line(self, stackframe):
        self.setup_stack(stackframe, None)
        self.cmd_manager.send_watchlist()
        self.cmd_manager.send_stack()
        self.cmd_manager.next_command()

    def user_return(self, stackframe, return_value):
        stackframe.f_locals['__return__'] = return_value
        self.setup_stack(stackframe, None)
        self.cmd_manager.send_watchlist()
        self.cmd_manager.send_stack()
        msg = fmt_msg('return', str(return_value), serial=pickle.dumps)
        self.cmd_manager.next_command(msg)

    def user_exception(self, stackframe, exc_info):
        exc_type, exc_value, exc_traceback = exc_info
        stackframe.f_locals['__exception__'] = exc_type, exc_value
        self.setup_stack(stackframe, exc_traceback)
        self.cmd_manager.send_watchlist()
        self.cmd_manager.send_stack()
        msg = fmt_msg(
            'exception', {
                'type': exc_type.__name__,
                'value': str(exc_value),
                'traceback': traceback.format_tb(exc_traceback)
            },
            serial=pickle.dumps,
        )
        self.cmd_manager.next_command(msg)

    def do_clear(self, bpnum):
        """
        Handles deletion of temporary breakpoints.
        """
        if not (0 <= bpnum < len(Breakpoint.bpbynumber)):
            return
        self.clear_bpbynumber(bpnum)

    def set_quit(self):
        """
        Sets the quitting state and restores the program state.
        """
        self.quitting = True

    def disable(self, mode='soft'):
        """
        Stops tracing.
        """
        try:
            if mode == 'soft':
                self.clear_all_breaks()
                self.set_continue()
                # Remove this instance so that new ones may be created.
                self.__class__._instance = None
            elif mode == 'hard':
                sys.exit(1)
            else:
                raise ValueError("mode must be 'hard' or 'soft'")
        finally:
            self.restore_output_streams()
            if self.log_handler:
                self.log_handler.pop_application()
            self.cmd_manager.stop()
            if sys.gettrace() is self.trace_dispatch:
                sys.settrace(None)

    def __enter__(self):
        self.set_trace(sys._getframe().f_back, stop=False)
        return self

    def __exit__(self, type, value, traceback):
        self.disable('soft')

    def set_trace(self, stackframe=None, stop=True):
        """
        Starts debugging in stackframe or in the caller's frame.
        If stop is True, begin stepping from here, otherwise, wait for
        the first breakpoint or exception.
        """
        # We need to look back 1 frame to get our caller.
        stackframe = stackframe or sys._getframe().f_back
        self.reset()
        while stackframe:
            stackframe.f_trace = self.trace_dispatch
            self.botframe = stackframe
            stackframe = stackframe.f_back
        if stop:
            self.set_step()
        else:
            self.set_continue()
        sys.settrace(self.trace_dispatch)

    @contextmanager
    def inject_default_namespace(self, stackframe=None):
        """
        Adds the default namespace to the frame, or if no frame is provided,
        self.curframe is used.
        """
        stackframe = stackframe or self.curframe
        to_remove = set()
        for k, v in self.default_namespace.iteritems():
            if k not in stackframe.f_globals:
                # Only add the default things if the name is unbound.
                stackframe.f_globals[k] = v
                to_remove.add(k)

        try:
            yield stackframe
        finally:
            for k in to_remove:
                try:
                    del stackframe.f_globals[k]
                except KeyError:
                    # The body of this manager might have del'd this.
                    pass

            # Prevent exceptions from generating ref cycles.
            del stackframe
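
# A hedged usage sketch of the tracer above (not part of the original source).
# The `from qdb import Qdb` import path and the server address are assumptions
# for illustration, and a qdb command server is expected to be listening there.
from qdb import Qdb  # assumed import path

# Construct a tracer (see the __init__ signature later in this listing) and use
# the context-manager protocol defined above: __enter__ calls
# set_trace(stop=False), __exit__ calls disable('soft').
debugger = Qdb(host='localhost', port=8001, uuid='demo-session')
debugger.extend_watchlist('x', 'x * 2')  # evaluated against the current frame


def compute(x):
    return x * 2


with debugger:
    compute(21)  # execution pauses only at breakpoints or exceptions
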
예제 #39
0
파일: pipeline.py 프로젝트: LabAdvComp/dish
class Pipeline(object):
    """Represents the abstraction of a pipeline of jobs to be run
    distributed over machines
    """

    def __init__(self, workdir, jobs, total_cores, scheduler=None, queue=None,
                 local=False, retries=None):
        """Initialize a pipeline.

        :param workdir: Name of a directory to use for scratch space
        and results. This needs to be visible to all nodes over NFS or
        similar.

        :param jobs: A list of jobs, which are just dicts. The only
        required key for now is "description", which will be used for
        the directory that holds all this job's output.

        :param total_cores: The total number of cores you want to use
        for processing.

        :returns: A Pipeline object, which has methods that invoke
        various kinds of distributed work.

        """

        # validate things
        for job in jobs:
            if type(job) is not dict:
                raise ValueError("job is not a dict: {}".format(job))
            if not job.get("description"):
                raise ValueError("job {} has not description".format(job))
        workdir = os.path.abspath(os.path.expanduser(workdir))
        if not os.path.exists(workdir):
            raise ValueError(
                "workdir: {} appears not to exist".format(workdir))
        self.workdir = workdir
        self.jobs = jobs
        self.total_cores = total_cores
        self.scheduler = scheduler
        self.queue = queue
        self.local = local
        self.retries = retries
        # setup default cluster_view
        self._cluster_view = cluster_view

    def start(self):
        """Initialize workdir, logging, etc. in preparation for running jobs.
        """

        # make a working directory for each job
        for job in self.jobs:
            job["workdir"] = os.path.join(self.workdir, job["description"])
            fs.maybe_mkdir(job["workdir"])
        # temporary ipython profile directory
        self.ipythondir = os.path.join(self.workdir, ".ipython")
        fs.maybe_mkdir(self.ipythondir)
        # log dir
        self.logdir = os.path.join(self.workdir, "log")
        fs.maybe_mkdir(self.logdir)

        # determine which IP we are going to listen on for logging
        try:
            self.listen_ip = localinterfaces.public_ips()[0]
        except:
            raise ValueError("This machine appears not to have"
                             " any publicly visible IP addresses")

        # setup ZMQ logging
        self.handler = FileHandler(os.path.join(self.logdir, "dish.log"))
        self.listen_port = str(randint(5000, 10000))
        self.subscriber = ZeroMQPullSubscriber("tcp://" + self.listen_ip +
                                               ":" + self.listen_port)
        self.controller = self.subscriber.dispatch_in_background(self.handler)
        self.logger = Logger("dish_master")

    def stop(self):
        """Gracefully shutdown the Pipeline, cleaning up threads, sockets,
        etc.  Leaves working directory intact so everything can in
        principle be picked up again where we left off.

        """
        self.controller.stop()
        self.subscriber.close()

    def _compute_resources(self, cores_per_engine, mem_per_engine,
                           max_engines):
        if cores_per_engine > self.total_cores:
            raise ValueError("A job requested {0} but only {1}"
                             " are available.".format(cores_per_engine,
                                                      self.total_cores))
        num_engines = self.total_cores // cores_per_engine
        if len(self.jobs) < num_engines:
            # we don't even need this many engines
            num_engines = len(self.jobs)
        if max_engines:
            num_engines = min(num_engines, max_engines)
        # TODO in the future, should maybe validate that requested
        # cores and memory are actually going to be available. This
        # would unfortunately have to be specialized for each
        # scheduler probably.
        return num_engines, cores_per_engine, mem_per_engine

    @contextmanager
    def group(self, cores=1, mem="0.1", max=None):
        """Context manager for "grouping" a set of pipeline operations. A
        group of operations is run on the same ipython cluster and has
        its resources specified in the group as opposed to in each
        individual job. This is useful if there is some small amount
        of setup work that isn't worth spinning up a new cluster for
        but which needs to be done before a resource intensive task.

        For example::

            with p.group(cores=8, mem=12):
               p.run("setup.sh . . .")  # do some data munging or other setup
               p.run("main_work -n 8 . . .")  # call an expensive program

        """
        # TODO this duplicates some code from p.map and is a bit
        # clunky, there is probably a better abstraction here
        engines, cores, mem = self._compute_resources(cores, mem, max)
        extra_params = {"run_local": self.local,
                        "mem": mem}
        old_view_factory = self._cluster_view
        cm = self._cluster_view(self.scheduler, self.queue,
                                engines, profile=self.ipythondir,
                                cores_per_job=cores,
                                extra_params=extra_params,
                                retries=self.retries)
        view = cm.gen.next()

        @contextmanager
        def reuse_view(*args, **kwargs):
            yield view

        # everything done in the block will use the view we just made
        self._cluster_view = reuse_view
        try:
            yield
        finally:
            # restore the normal cluster_view context manager on exit
            self._cluster_view = old_view_factory
            try:
                cm.gen.next()  # clean up the view we've been using
            except StopIteration:
                pass

    def _transaction_filter(self, targets):
        """Filter the `jobs` appropriately based on whether `targets` is a
        function, str, or list of str"""
        # TODO there has got to be a better way to do this -____-
        to_run = []
        dont_run = []
        if callable(targets):
            f = targets
            for job in self.jobs:
                if f(job):
                    dont_run.append(job)
                else:
                    to_run.append(job)
            return to_run, dont_run
        elif isinstance(targets, str):
            targets = [targets]
        elif not isinstance(targets, list):
            raise TypeError("transaction targets must be list, str, or callable")
        for job in self.jobs:
            canonical_targets = fs.canonicalize(job, targets)
            if all((os.path.exists(target)
                    for target in canonical_targets)):
                info = ("Skipping transaction for job {} targets {} "
                        "already present")
                with self.handler.applicationbound():
                    self.logger.info(info.format(job["description"],
                                                 canonical_targets))
                dont_run.append(job)
            else:
                # targets not present for this job
                to_run.append(job)
        return to_run, dont_run

    @contextmanager
    def transaction(self, targets):
        """Do some work "transacationally", in the sense that nothing done
        inside a ``transaction`` block will be "commited" to the
        workdir unless it all succeeds without error. The work done
        inside a transaction is also idempotent in that you must
        specify a ``target`` file or files for the tranasaction and it
        will not be run if the target exists already. This is perhaps
        best illustrated by a simple example::

            with p.transaction("{workdir}/example.txt"):
                p.run("{tmpdir}/touch example.txt")

        This will result in a file ``example.txt`` in each job's
        ``workdir``. The creation of this file will be skipped if the
        code is run again and the file already exists. This is
        obviously a silly example, but the code inside the `with`
        block can be any arbitrarily complex series of operations
        which produces a set of target output files at the end. This
        is a powerful feature in that it allows pipelines to be
        restartable: if a pipeline crashes for some reason but you
        have its major sections wrapped in ``transaction`` blocks,
        you can simply run it again and pick up where you left off
        without redoing any work. The transaction blocks guarantee
        that the ``workdir`` for each job is never in an inconsistent
        state and that work that's already been completed isn't
        redone.

        Inside a transaction, each job has a special ``tmpdir`` key,
        whose value is the path to a unique temporary directory for
        the job. You can do work that produces files inside the
        ``tmpdir`` and expect everything in it to be moved to the
        job's ``workdir`` if the transaction completes without error.
        The ``tmpdir`` will be removed at the end of the transaction
        regardless of whether or not it succeeds. We change
        directories to the ``tmpdir`` before doing anything else and
        implicitly consider targets to be relative to a job's
        ``workdir``, so the above example could also be written::

            with p.transaction("example.txt"):
                p.run("touch example.txt")

        which sacrifices explicitness for brevity.

        :param targets: a string or list of strings describing files
        that must exist in order for the transaction to be skipped.

        """
        to_run, dont_run = self._transaction_filter(targets)
        for job in to_run:
            job["tmpdir"] = tempfile.mkdtemp(dir=job["workdir"])
        self.jobs = to_run
        try:
            yield
        finally:
            for job in self.jobs:
                if not os.path.exists(os.path.join(job["tmpdir"], ".error")):
                    fs.liftdir(job["tmpdir"], job["workdir"])
                shutil.rmtree(job["tmpdir"])
                del job["tmpdir"]
            self.jobs = dont_run + self.jobs

    def localmap(self, f):
        """Just like ``map``, but work locally rather than launching an ipython
        cluster.  This is useful for tasks where the cluster launch
        overhead would swamp the cost of the actual work to be done.

        :params f: function of ``(job, logger)`` to be mapped over all jobs.

        """
        self.jobs = map(logging_wrapper, self.jobs,
                        (f for j in self.jobs),
                        (self.listen_ip for j in self.jobs),
                        (self.listen_port for j in self.jobs))

    def map(self, f, cores=1, mem="0.1", max=None):
        """Map the function ``f`` over all of the ``jobs`` in this
        pipeline. ``f`` must be a function of two arguments, the job
        and a logger. It should modify the job it is passed, which
        will then be returned over the wire. A silly example::

            def f(job, logger):
                job["capitalized_description"] = job["description"].toupper()
            p.map(f)

        Will give each ``job`` in the pipeline a ``capitalized_description``
        attribute, which can then be used in future pipeline operations.

        ``cores`` and ``mem`` are used to specify the cores and memory
        required by this step; they will be passed to the underlying
        scheduler. ``max`` can be used as a hard limit on the number of
        jobs to run. This is useful if, for example, a particular task
        puts pressure on some sort of storage system (a distributed
        file system, object store, etc.) that you know will fail under
        too much load.

        :param f: function of ``(job, logger)`` to be mapped over all jobs.
        :param cores: cores required by this call.
        :param mem: memory required by this call.
        :param max: maximum number of jobs to submit.

        """
        if not self.jobs:
            # this looks very odd; it's necessary because sometimes
            # being in a transaction causes self.jobs to be empty, and
            # IPython throws errors if you try to map over the empty
            # list. It might be cleaner to catch the error after
            # letting IPython do the map; will have to think about it.
            return
        engines, cores, mem = self._compute_resources(cores, mem, max)
        extra_params = {"run_local": self.local,
                        "mem": mem}
        with self._cluster_view(self.scheduler, self.queue,
                                engines, profile=self.ipythondir,
                                cores_per_job=cores,
                                extra_params=extra_params,
                                retries=self.retries) as view:
            # using cloudpickle allows us to serialize all sorts of things
            # we wouldn't otherwise be able to
            dview = view.client.direct_view()
            use_cloudpickle()
            dview.apply(use_cloudpickle)
            self.jobs = view.map_sync(logging_wrapper, self.jobs,
                                      (f for j in self.jobs),
                                      (self.listen_ip for j in self.jobs),
                                      (self.listen_port for j in self.jobs))

    def run(self, template, capture_in=None, **kwargs):
        """Run the ``template`` formatted with the contents of each
        job. Example::

            p.run("touch {workdir}/example.txt")

        will make an example.txt file in each job's workdir.

        ``cores`` and ``mem`` mean the same thing they do in the
        ``map`` method.

        If a string is passed for ``capture_in``, the stdout of the command
        will be captured in ``job[capture_in]`` for each job.

        """
        runner = cmdrunner(template, capture_in)
        self.map(runner, **kwargs)
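
# A hedged end-to-end sketch tying the Pipeline methods above together (not
# part of the dish source); the import path, workdir, and job descriptions are
# made up for illustration.
from dish.pipeline import Pipeline  # assumed import path, per the 파일/프로젝트 header

jobs = [{"description": "sample1"}, {"description": "sample2"}]
p = Pipeline("/shared/scratch", jobs, total_cores=16, local=True)
p.start()
try:
    # skipped entirely if example.txt already exists in a job's workdir
    with p.transaction("example.txt"):
        p.run("touch {tmpdir}/example.txt")

    def annotate(job, logger):
        logger.info("annotating " + job["description"])
        job["upper_description"] = job["description"].upper()

    p.map(annotate, cores=1)  # runs f(job, logger) for every job on the cluster
finally:
    p.stop()
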
예제 #40
0
import sys
import time
import calendar as cal
from io import BytesIO
from datetime import datetime
import requests
from requests_html import HTMLSession
import pandas as pd
from logbook import Logger, FileHandler, StreamHandler


stream_handler = StreamHandler(
    sys.stdout,
    format_string=" | {record.message}", bubble=True)
file_handler = FileHandler(
    'expiration_downloader_errors_{}.log'.format(datetime.today().strftime("%Y-%m-%d_%H-%M")),
    format_string=" | {record.message}", bubble=True, delay=True)
log = Logger(__name__)
#stream_handler.push_application()
#file_handler.push_application()


class ExpirationDownloader:
    """
    Download contract expiry dates from CME website.
    Uses URLs from Quandl-provided metadata.
    Parameters:
    df: dataframe read from the csv file downloaded from quandl
    download: False - use file from disk, True - download file from CME
    show_progress: zipline variable to be passed by caller (or not)
예제 #41
0
def start_logging(filename):
    this_file = os.path.basename(filename)
    log_file = '/var/log/' + remove_extn(this_file) + '.log'

    log_handler = FileHandler(log_file, bubble=True)
    log_handler.push_application()
예제 #42
0
def main(user_email,
         url_api_collection,
         log_handler=None,
         mail_handler=None,
         dir_profile='profiles',
         profile_path=None,
         config_file=None,
         **kwargs):
    '''Executes a harvest with given parameters.
    Returns the ingest_doc_id, the number of records, the directory the
    harvest was saved to, and the harvester instance.
    '''
    if not config_file:
        config_file = os.environ.get('DPLA_CONFIG_FILE', 'akara.ini')
    num_recs = -1
    my_mail_handler = None
    if not mail_handler:
        my_mail_handler = logbook.MailHandler(
            EMAIL_RETURN_ADDRESS, user_email, level='ERROR', bubble=True)
        my_mail_handler.push_application()
        mail_handler = my_mail_handler
    try:
        collection = Collection(url_api_collection)
    except Exception as e:
        msg = 'Exception in Collection {}, init {}'.format(url_api_collection,
                                                           str(e))
        logbook.error(msg)
        raise e
    if not (collection['harvest_type'] in HARVEST_TYPES):
        msg = 'Collection {} wrong type {} for harvesting. Harvest type {} \
                is not in {}'.format(url_api_collection,
                                     collection['harvest_type'],
                                     collection['harvest_type'],
                                     HARVEST_TYPES.keys())
        logbook.error(msg)
        raise ValueError(msg)
    mail_handler.subject = "Error during harvest of " + collection.url
    my_log_handler = None
    if not log_handler:  # can't init until have collection
        my_log_handler = FileHandler(get_log_file_path(collection.slug))
        my_log_handler.push_application()
    logger = logbook.Logger('HarvestMain')
    msg = 'Init harvester next. Collection:{}'.format(collection.url)
    logger.info(msg)
    # email directly
    mimetext = create_mimetext_msg(EMAIL_RETURN_ADDRESS, user_email, ' '.join(
        ('Starting harvest for ', collection.slug)), msg)
    try:  # TODO: request more emails from AWS
        mail_handler.deliver(mimetext, '*****@*****.**')
    except:
        pass
    logger.info('Create DPLA profile document')
    if not profile_path:
        profile_path = os.path.abspath(
            os.path.join(dir_profile, collection.id + '.pjs'))
    with codecs.open(profile_path, 'w', 'utf8') as pfoo:
        pfoo.write(collection.dpla_profile)
    logger.info('DPLA profile document : ' + profile_path)
    harvester = None
    try:
        harvester = HarvestController(
            user_email,
            collection,
            profile_path=profile_path,
            config_file=config_file,
            **kwargs)
    except Exception as e:
        import traceback
        msg = 'Exception in harvester init: type: {} TRACE:\n{}'.format(
            type(e), traceback.format_exc())
        logger.error(msg)
        raise e
    logger.info('Create ingest doc in couch')
    ingest_doc_id = harvester.create_ingest_doc()
    logger.info('Ingest DOC ID: ' + ingest_doc_id)
    logger.info('Start harvesting next')
    try:
        num_recs = harvester.harvest()
        msg = ''.join(('Finished harvest of ', collection.slug, '. ',
                       str(num_recs), ' records harvested.'))
        harvester.update_ingest_doc('complete', items=num_recs, num_coll=1)
        logger.info(msg)
        # email directly
        mimetext = create_mimetext_msg(
            EMAIL_RETURN_ADDRESS, user_email, ' '.join(
                ('Finished harvest of raw records '
                 'for ', collection.slug, ' enriching next')), msg)
        try:
            mail_handler.deliver(mimetext, '*****@*****.**')
        except:
            pass
    except Exception as e:
        import traceback
        error_msg = ''.join(("Error while harvesting: type-> ", str(type(e)),
                             " TRACE:\n" + str(traceback.format_exc())))
        logger.error(error_msg)
        harvester.update_ingest_doc(
            'error', error_msg=error_msg, items=num_recs)
        raise e
    if my_log_handler:
        my_log_handler.pop_application()
    if my_mail_handler:
        my_mail_handler.pop_application()
    return ingest_doc_id, num_recs, harvester.dir_save, harvester
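
# A minimal standalone sketch of the handler push/pop pattern used above
# (addresses and the file name are made up). Records below the MailHandler's
# ERROR level only reach the FileHandler; ERROR records would also be mailed.
import logbook
from logbook import FileHandler

mail_handler = logbook.MailHandler('harvester@example.org', ['admin@example.org'],
                                   level='ERROR', bubble=True)
log_handler = FileHandler('harvest.log')

mail_handler.push_application()
log_handler.push_application()
try:
    logbook.Logger('HarvestMain').info('recorded in harvest.log only')
    # an ERROR record emitted here would additionally be delivered by mail_handler
finally:
    log_handler.pop_application()
    mail_handler.pop_application()
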
예제 #43
0
def main():
    """Copy files from a Google Drive folder to an S3 bucket.

    Creates a Google Drive API service object, downloads each matching file
    in the specified folder, and uploads it to the configured S3 bucket.
    """

    log_filename = os.path.join(
        args.log_dir,
        'google-drive-to-s3-{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
    )

    # register some logging handlers
    log_handler = FileHandler(
        log_filename,
        mode='w',
        level=args.log_level,
        bubble=True
    )
    stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)

    with stdout_handler.applicationbound():
        with log_handler.applicationbound():
            log.info("Arguments: {}".format(args))
            start = time.time()
            log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))

            credentials = get_credentials()
            http = credentials.authorize(httplib2.Http())
            drive_service = discovery.build('drive', 'v3', http=http)

            s3 = boto3.resource('s3')

            # load up a match file if we have one.
            if args.match_file:
                with open(args.match_file, 'r') as f:
                    match_filenames = f.read().splitlines()
            else:
                match_filenames = None

            # get the files in the specified folder.
            files = drive_service.files()
            request = files.list(
                pageSize=args.page_size,
                q="'{}' in parents".format(args.folder_id),
                fields="nextPageToken, files(id, name)"
            )

            # make sure our S3 Key prefix has a trailing slash
            key_prefix = ensure_trailing_slash(args.key_prefix)

            page_counter = 0
            file_counter = 0
            while request is not None:
                file_page = request.execute(http=http)
                page_counter += 1
                page_file_counter = 0  # reset the paging file counter

                # determine the page at which to start processing.
                if page_counter >= args.start_page:
                    log.info(u"######## Page {} ########".format(page_counter))

                    for this_file in file_page['files']:
                        file_counter += 1
                        page_file_counter += 1
                        if we_should_process_this_file(this_file['name'], match_filenames):
                            log.info(u"#== Processing {} file number {} on page {}. {} files processed.".format(
                                this_file['name'],
                                page_file_counter,
                                page_counter,
                                file_counter
                            ))

                            # download the file
                            download_request = drive_service.files().get_media(fileId=this_file['id'])
                            fh = io.BytesIO()  # Using an in memory stream location
                            downloader = MediaIoBaseDownload(fh, download_request)
                            done = False
                            pbar = InitBar(this_file['name'])
                            while done is False:
                                status, done = downloader.next_chunk()
                                pbar(int(status.progress()*100))
                                # print("\rDownload {}%".format(int(status.progress() * 100)))
                            del pbar

                            # upload to bucket
                            log.info(u"Uploading to S3")
                            s3.Bucket(args.bucket).put_object(
                                Key="{}{}".format(key_prefix, this_file['name']),
                                Body=fh.getvalue(),
                                ACL='public-read'
                            )
                            log.info(u"Uploaded to S3")
                            fh.close()  # close the file handle to release memory
                        else:
                            log.info(u"Do not need to process {}".format(this_file['name']))

                # stop if we have come to the last user specified page
                if args.end_page and page_counter == args.end_page:
                    log.info(u"Finished paging at page {}".format(page_counter))
                    break
                # request the next page of files
                request = files.list_next(request, file_page)

            log.info("Running time: {}".format(str(datetime.timedelta(seconds=(round(time.time() - start, 3))))))
            log.info("Log written to {}:".format(log_filename))
예제 #44
0
    def __init__(self, filament, **kwargs):

        self._start = datetime.now()
        try:
            log_path = os.path.join(os.path.expanduser('~'), '.fibratus',
                                    'fibratus.log')
            FileHandler(log_path, mode='w+').push_application()
            StreamHandler(sys.stdout, bubble=True).push_application()
        except PermissionError:
            panic(
                "ERROR - Unable to open log file for writing due to permission error"
            )

        self.logger = Logger(Fibratus.__name__)

        self._config = YamlConfig()

        self.logger.info('Starting Fibratus...')

        enable_cswitch = kwargs.pop('cswitch', False)

        self.kcontroller = KTraceController()
        self.ktrace_props = KTraceProps()
        self.ktrace_props.enable_kflags(cswitch=enable_cswitch)
        self.ktrace_props.logger_name = etw.KERNEL_LOGGER_NAME

        enum_handles = kwargs.pop('enum_handles', True)

        self.handle_repository = HandleRepository()
        self._handles = []
        # query for handles on the
        # start of the kernel trace
        if enum_handles:
            self.logger.info('Enumerating system handles...')
            self._handles = self.handle_repository.query_handles()
            self.logger.info('%s handles found' % len(self._handles))
            self.handle_repository.free_buffers()

        image_meta_config = self._config.image_meta
        self.image_meta_registry = ImageMetaRegistry(
            image_meta_config.enabled, image_meta_config.imports,
            image_meta_config.file_info)

        self.thread_registry = ThreadRegistry(self.handle_repository,
                                              self._handles,
                                              self.image_meta_registry)

        self.kevt_streamc = KEventStreamCollector(
            etw.KERNEL_LOGGER_NAME.encode())
        skips = self._config.skips
        image_skips = skips.images if 'images' in skips else []
        if len(image_skips) > 0:
            self.logger.info("Adding skips for images %s" % image_skips)
            for skip in image_skips:
                self.kevt_streamc.add_skip(skip)

        self.kevent = KEvent(self.thread_registry)

        self._output_classes = dict(console=ConsoleOutput,
                                    amqp=AmqpOutput,
                                    smtp=SmtpOutput,
                                    elasticsearch=ElasticsearchOutput)
        self._outputs = self._construct_outputs()
        self.output_aggregator = OutputAggregator(self._outputs)

        self._binding_classes = dict(yara=YaraBinding)
        self._bindings = self._construct_bindings()

        if filament:
            filament.logger = self.logger
            filament.do_output_accessors(self._outputs)
        self._filament = filament

        self.fsio = FsIO(self.kevent, self._handles)
        self.hive_parser = HiveParser(self.kevent, self.thread_registry)
        self.tcpip_parser = TcpIpParser(self.kevent)
        self.dll_repository = DllRepository(self.kevent)
        self.context_switch_registry = ContextSwitchRegistry(
            self.thread_registry, self.kevent)

        self.output_kevents = {}
        self.filters_count = 0
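
# Not how Fibratus wires its handlers above, just a hedged alternative:
# logbook's NestedSetup can push several handlers as a single unit, so one pop
# restores the previous state.
import sys
from logbook import FileHandler, NestedSetup, StreamHandler

setup = NestedSetup([
    FileHandler('fibratus.log', mode='w+', bubble=True),
    StreamHandler(sys.stdout, bubble=True),
])
setup.push_application()
# ... later, a single call undoes both handlers:
# setup.pop_application()
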
예제 #45
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from spider163.utils import config
from logbook import FileHandler, Logger
from terminaltables import AsciiTable
from colorama import Fore
from colorama import init

path = config.get_path()
log_handler = FileHandler(filename=path + '/spider163.log')
log_handler.push_application()
log = Logger("")

init(autoreset=True)


def Log(msg):
    log.warn(msg)


def Table(tb):
    print(AsciiTable(tb).table)


def Blue(msg):
    return Fore.BLUE + msg
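
# A hedged usage sketch of the helpers above, continuing the same module
# (the messages and table rows are made up).
Log("starting crawl of playlist 12345")       # warning record written to spider163.log
Table([["id", "title"], ["1", "some song"]])  # prints an ASCII table
print(Blue("done"))                           # blue-colored terminal output
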
예제 #46
0
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, nested_scopes

import os

import logbook
from logbook import FileHandler
from logbook import Logger

log = Logger('scraper')

# Create a logs direcory if not exist
if not os.path.exists('logs'):
    os.makedirs('logs')
file_handler = FileHandler('logs/app.log', level=logbook.DEBUG)
file_handler.push_application()
예제 #47
0
import itertools
from collections import defaultdict
import glob
import math
import logging
import os

from logbook import FileHandler, Logger, NullHandler
# Assumed: `config` below is pyechonest's config module (where
# ECHO_NEST_API_KEY is set); the original snippet omitted these imports.
from pyechonest import config


capdir = os.getcwd()
directory = os.path.join(capdir, "songs")

echonestkey = os.environ.get('ECHO_NEST_API_KEY')
config.ECHO_NEST_API_KEY=echonestkey

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
log = Logger('Logbook')
file_handler = FileHandler("keymixlog.log")
#error_handler = SyslogHandler('logbook example', level='ERROR')

#this seems to work though :)
null_handler = NullHandler()

harmonic_mixing_dict = {80:[10,80,30,101],
30:[80,30,110,61],
110:[30,110,50,11],
50:[110,50,0,81],
0:[50,0,70,31],
70:[0,70,20,91],
20:[70,20,90,51],
90:[20,90,40,1],
40:[90,40,100,71],
100:[40,100,60,21],
예제 #48
0
def setup_logger(test, path='test.log'):
    test.log_handler = FileHandler(path)
    test.log_handler.push_application()
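
# A possible companion teardown (an assumption, not part of the original
# snippet): pop and close the handler when the test finishes so later tests
# do not keep writing to it.
def teardown_logger(test):
    test.log_handler.pop_application()
    test.log_handler.close()
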
예제 #49
0
"""
@version: 1.0
@ __author__: longbai 
@ __file__: processortest.py
@ __mtime__: 2016/8/12 18:38

Log processor (logbook Processor demo)

"""

import os
import sys
from logbook import Processor, StreamHandler, DEBUG, Logger, FileHandler

my_handler = FileHandler("test.log", encoding="utf-8", level=DEBUG)
# my_handler = StreamHandler(sys.stdout, level=DEBUG)


def log_other_info(record):
    """
    a) 通过 with.processor可以让在其中的日志拥有共同的逻辑,相当于一个切面注入
    比如这里的例子是 在每条日志中记录一些额外的信息(额外的信息是通过在日志对象(logRecord)的extra(字典对象)属性中添加
    一些其他的信息),这样每条日志都会有这里添加的额外的信息。
    b) 有个疑问就是,这些额外的信息怎么运用呢,比如这些信息如何能和日志一块记录在文件中呢
    c) 关于日志的属性,见 logrecord.py
    """
    record.extra['myname'] = 'kute'
    record.extra['mycwd'] = os.getcwd()
    # update myname propertiy
    record.extra.update(myname="lisa")
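
# A hedged continuation of the snippet above (not part of the original): it
# wires log_other_info into a logbook Processor so every record emitted inside
# the block carries the extra fields, and it includes those fields in the
# handler's format string, which answers question b) in the docstring.
my_handler.format_string = (
    "{record.time} [{record.level_name}] {record.message} "
    "myname={record.extra[myname]} mycwd={record.extra[mycwd]}")

logger = Logger("processor-demo")
with my_handler.applicationbound():
    with Processor(log_other_info).applicationbound():
        logger.debug("record enriched with extra info")
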
예제 #50
0
파일: log.py 프로젝트: vincent-lg/bui
"""Logger for wxPython."""

from logbook import FileHandler, Logger

logger = Logger()
file_handler = FileHandler("wx.log",
                           encoding="utf-8",
                           level="DEBUG",
                           delay=True)
file_handler.format_string = (
    "{record.time:%Y-%m-%d %H:%M:%S.%f%z} [{record.level_name}] "
    "{record.message}")
# Uncomment this to log to wx.log
#logger.handlers.append(file_handler)
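
# A hedged usage sketch, continuing the module above: appending the file
# handler to this specific logger (rather than pushing it application-wide)
# sends only this logger's records to wx.log.
logger.handlers.append(file_handler)
logger.debug("wx frontend initialized")  # written to wx.log with the format above
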
예제 #51
0
def main():
    """
    Copy a folder from Source to Target

    """

    log_filename = os.path.join(
        args.log_dir,
        'copy-google-drive-folder-{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
    )

    # register some logging handlers
    log_handler = FileHandler(
        log_filename,
        mode='w',
        level=args.log_level,
        bubble=True
    )
    stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)

    with stdout_handler.applicationbound():
        with log_handler.applicationbound():
            log.info("Arguments: {}".format(args))
            start = time.time()
            log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))

            credentials = get_credentials()
            http = credentials.authorize(httplib2.Http())
            drive_service = discovery.build('drive', 'v3', http=http)

            # get the files in the specified folder.
            files = drive_service.files()
            request = files.list(
                pageSize=args.page_size,
                q="'{}' in parents".format(args.source_folder_id),
                fields="nextPageToken, files(id, name, mimeType)"
            )

            page_counter = 0
            file_counter = 0
            while request is not None:
                file_page = request.execute(http=http)
                page_counter += 1
                page_file_counter = 0  # reset the paging file counter

                # determine the page at which to start processing.
                if page_counter >= args.start_page:
                    log.info(u"######## Page {} ########".format(page_counter))

                    for this_file in file_page['files']:
                        file_counter += 1
                        page_file_counter += 1
                        log.info(u"#== Processing {} {} file number {} on page {}. {} files processed.".format(
                            this_file['mimeType'],
                            this_file['name'],
                            page_file_counter,
                            page_counter,
                            file_counter
                        ))

                        # if not a folder
                        if this_file['mimeType'] != 'application/vnd.google-apps.folder':
                            # Copy the file
                            new_file = {'title': this_file['name']}
                            copied_file = drive_service.files().copy(fileId=this_file['id'], body=new_file).execute()
                            # move it to its new location
                            drive_service.files().update(
                                fileId=copied_file['id'],
                                addParents=args.target_folder_id,
                                removeParents=args.source_folder_id
                            ).execute()
                        else:
                            log.info(u"Skipped Folder")

                else:
                    log.info(u"Skipping Page {}".format(page_counter))

                # stop if we have come to the last user specified page
                if args.end_page and page_counter == args.end_page:
                    log.info(u"Finished paging at page {}".format(page_counter))
                    break

                # request the next page of files
                request = files.list_next(request, file_page)

            log.info("Running time: {}".format(str(datetime.timedelta(seconds=(round(time.time() - start, 3))))))
            log.info("Log written to {}:".format(log_filename))
예제 #52
0
class Fibratus():
    """Fibratus entrypoint.

    Setup the core components including the kernel
    event stream collector and the tracing controller.
    At this point the system handles are also being
    enumerated.

    """
    def __init__(self, filament):

        self.logger = Logger(Fibratus.__name__)
        self.file_handler = FileHandler(os.path.join(os.path.abspath(__file__),
                                                     '..', '..', '..',
                                                     'fibratus.log'),
                                        mode='w+')
        self.kevt_streamc = KEventStreamCollector(
            etw.KERNEL_LOGGER_NAME.encode())
        self.kcontroller = KTraceController()
        self.ktrace_props = KTraceProps()
        self.ktrace_props.enable_kflags()
        self.ktrace_props.logger_name = etw.KERNEL_LOGGER_NAME

        self.handle_repository = HandleRepository()
        self._handles = []
        # query for handles on the
        # start of kernel trace
        with self.file_handler.applicationbound():
            self.logger.info('Starting fibratus...')
            self.logger.info('Enumerating system handles...')
            self._handles = self.handle_repository.query_handles()
            self.logger.info('%s handles found' % len(self._handles))
            self.handle_repository.free_buffers()
        self.thread_registry = ThreadRegistry(self.handle_repository,
                                              self._handles)

        self.kevent = KEvent(self.thread_registry)

        self._filament = filament

        self.fsio = FsIO(self.kevent, self._handles)
        self.hive_parser = HiveParser(self.kevent, self.thread_registry)
        self.tcpip_parser = TcpIpParser(self.kevent)
        self.dll_repository = DllRepository(self.kevent)

        self.requires_render = {}
        self.filters_count = 0

    def run(self):
        @atexit.register
        def _exit():
            self.stop_ktrace()

        self.kcontroller.start_ktrace(etw.KERNEL_LOGGER_NAME,
                                      self.ktrace_props)

        def on_kstream_open():
            if self._filament is None:
                IO.write_console('Done!                               ')

        self.kevt_streamc.set_kstream_open_callback(on_kstream_open)
        self._open_kstream()

    def _open_kstream(self):
        try:
            self.kevt_streamc.open_kstream(self._on_next_kevent)
        except Exception as e:
            with self.file_handler.applicationbound():
                self.logger.error(e)
        except KeyboardInterrupt:
            self.stop_ktrace()

    def stop_ktrace(self):
        IO.write_console('Stopping fibratus...')
        if self._filament:
            self._filament.close()
        self.kcontroller.stop_ktrace(self.ktrace_props)
        self.kevt_streamc.close_kstream()

    def add_filters(self, kevent_filters):
        if len(kevent_filters) > 0:
            self.filters_count = len(kevent_filters)
            # include the basic filters
            # that are essential to the
            # rest of kernel events
            self.kevt_streamc.add_kevent_filter(ENUM_PROCESS)
            self.kevt_streamc.add_kevent_filter(ENUM_THREAD)
            self.kevt_streamc.add_kevent_filter(ENUM_IMAGE)
            self.kevt_streamc.add_kevent_filter(REG_CREATE_KCB)
            self.kevt_streamc.add_kevent_filter(REG_DELETE_KCB)

            # these kevents are necessary for consistent state
            # of the trace. If the user doesn't include them
            # in a filter list, then we do the job but set the
            # kernel event type as not eligible for rendering
            if not KEvents.CREATE_PROCESS in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_PROCESS)
                self.requires_render[CREATE_PROCESS] = False
            else:
                self.requires_render[CREATE_PROCESS] = True

            if not KEvents.CREATE_THREAD in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_THREAD)
                self.requires_render[CREATE_THREAD] = False
            else:
                self.requires_render[CREATE_THREAD] = True

            if not KEvents.CREATE_FILE in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_FILE)
                self.requires_render[CREATE_FILE] = False
            else:
                self.requires_render[CREATE_FILE] = True

            for kevent_filter in kevent_filters:
                ktuple = kname_to_tuple(kevent_filter)
                if isinstance(ktuple, list):
                    for kt in ktuple:
                        self.kevt_streamc.add_kevent_filter(kt)
                        if not kt in self.requires_render:
                            self.requires_render[kt] = True
                else:
                    self.kevt_streamc.add_kevent_filter(ktuple)
                    if not ktuple in self.requires_render:
                        self.requires_render[ktuple] = True

    def _on_next_kevent(self, ktype, cpuid, ts, kparams):
        """Callback which fires when new kernel event arrives.

        This callback is invoked for every new kernel event
        forwarded from the kernel stream collector.

        Parameters
        ----------

        ktype: tuple
            Kernel event type.
        cpuid: int
            Identifies the CPU core where the event
            has been captured.
        ts: str
            Temporal reference of the kernel event.
        kparams: dict
            Kernel event's parameters.
        """

        # initialize kernel event properties
        self.kevent.ts = ts
        self.kevent.cpuid = cpuid
        self.kevent.name = ktuple_to_name(ktype)
        kparams = ddict(kparams)
        # thread / process kernel events
        if ktype in [CREATE_PROCESS, CREATE_THREAD, ENUM_PROCESS, ENUM_THREAD]:
            self.thread_registry.add_thread(ktype, kparams)
            if ktype in [CREATE_PROCESS, CREATE_THREAD]:
                self.thread_registry.init_thread_kevent(
                    self.kevent, ktype, kparams)
                self._render(ktype)
        elif ktype in [TERMINATE_PROCESS, TERMINATE_THREAD]:
            self.thread_registry.init_thread_kevent(self.kevent, ktype,
                                                    kparams)
            self._render(ktype)
            self.thread_registry.remove_thread(ktype, kparams)

        # file system/disk kernel events
        elif ktype in [
                CREATE_FILE, DELETE_FILE, CLOSE_FILE, READ_FILE, WRITE_FILE
        ]:
            self.fsio.parse_fsio(ktype, kparams)
            self._render(ktype)

        # dll kernel events
        elif ktype in [LOAD_IMAGE, ENUM_IMAGE]:
            self.dll_repository.register_dll(kparams)
            if ktype == LOAD_IMAGE:
                self._render(ktype)
        elif ktype == UNLOAD_IMAGE:
            self.dll_repository.unregister_dll(kparams)
            self._render(ktype)

        # registry kernel events
        elif ktype == REG_CREATE_KCB:
            self.hive_parser.add_kcb(kparams)
        elif ktype == REG_DELETE_KCB:
            self.hive_parser.remove_kcb(kparams.key_handle)

        elif ktype in [
                REG_CREATE_KEY, REG_DELETE_KEY, REG_OPEN_KEY, REG_QUERY_KEY,
                REG_SET_VALUE, REG_DELETE_VALUE, REG_QUERY_VALUE
        ]:
            self.hive_parser.parse_hive(ktype, kparams)
            self._render(ktype)

        # network kernel events
        elif ktype in [
                SEND_SOCKET_TCPV4, SEND_SOCKET_UDPV4, RECV_SOCKET_TCPV4,
                RECV_SOCKET_UDPV4, ACCEPT_SOCKET_TCPV4, CONNECT_SOCKET_TCPV4,
                DISCONNECT_SOCKET_TCPV4, RECONNECT_SOCKET_TCPV4
        ]:
            self.tcpip_parser.parse_tcpip(ktype, kparams)
            self._render(ktype)

        if self._filament:
            # call filament method
            # to process the next
            # kernel event from the stream
            if ktype not in [ENUM_PROCESS, ENUM_THREAD, ENUM_IMAGE]:
                if self.kevent.name:
                    self._filament.process(self.kevent)

    def _render(self, ktype):
        """Renders the kevent to the standard output stream.

        Parameters
        ----------

        ktype: tuple
            Identifier of the kernel event
        """
        if not self._filament:
            if ktype in self.requires_render:
                rr = self.requires_render[ktype]
                if rr:
                    self.kevent.render()
            elif self.filters_count == 0:
                self.kevent.render()
예제 #53
0
파일: tracer.py 프로젝트: gitter-badger/qdb
 def __init__(self,
              host='localhost',
              port=8001,
              auth_msg='',
              default_file=None,
              default_namespace=None,
              eval_fn=None,
              exception_serializer=None,
              skip_fn=None,
              pause_signal=None,
              redirect_output=True,
              retry_attepts=10,
              uuid=None,
              cmd_manager=None,
              green=False,
              repr_fn=None,
              log_file=None,
              execution_timeout=None):
     """
     Host and port define the address to connect to.
     The auth_msg is a message that will be sent with the start event to the
     server. This can be used to do server/tracer authentication.
     The default_file is a file to use if the file field is omitted from
     payloads.
     eval_fn is the function to eval code where the user may provide it,
     for example in a conditional breakpoint, or in the repl.
     skip_fn is similar to the skip list feature of Bdb, except that
     it should be a function that takes a filename and returns True iff
     the debugger should skip this file. These files will be suppressed from
     stack traces.
     The pause_signal is the signal to raise in this program to trigger a pause
     command. If this is none, this will default to SIGUSR2.
     retry_attempts is the number of times to attempt to connect to the
     server before raising a QdbFailedToConnect error.
     The repr_fn is a function to use to convert objects to strings to send
     them back to the server. By default, this wraps repr by catching
     exceptions and reporting them to the user.
     The uuid is the identifier on the server for this session. If none is
     provided, it will generate a uuid4.
     cmd_manager should be a callable that takes a Qdb instance and manages
     commands by implementing a next_command method. If none, a new, default
     manager will be created that reads commands from the server at
     (host, port).
     If green is True, this will use gevent safe timeouts, otherwise this
     will use signal based timeouts.
     repr_fn is the repr function to use when displaying results. If None,
     use the builtin repr.
     execution_timeout is the amount of time user code has to execute before
     being cut short. This is applied to the repl, watchlist and conditional
     breakpoints. If None, no timeout is applied.
     """
     super(Qdb, self).__init__()
     self.address = host, port
     self.set_default_file(default_file)
     self.default_namespace = default_namespace or {}
     self.exception_serializer = exception_serializer or \
         default_exception_serializer
     self.eval_fn = eval_fn or default_eval_fn
     self.green = green
     self._file_cache = {}
     self.redirect_output = redirect_output
     self.retry_attepts = retry_attepts
     self.repr_fn = repr_fn
     self.skip_fn = skip_fn or (lambda _: False)
     self.pause_signal = pause_signal if pause_signal else signal.SIGUSR2
     self.uuid = str(uuid or uuid4())
     self.watchlist = {}
     self.execution_timeout = execution_timeout
     # We need to be able to send stdout back to the user debugging the
     # program. We hold a handle to this in case the program resets stdout.
     if self.redirect_output:
         self._old_stdout = sys.stdout
         self._old_stderr = sys.stderr
         self.stdout = StringIO()
         self.stderr = StringIO()
         sys.stdout = self.stdout
         sys.stderr = self.stderr
     self.forget()
     self.log_handler = None
     if log_file:
         self.log_handler = FileHandler(log_file)
         self.log_handler.push_application()
     if not cmd_manager:
         cmd_manager = RemoteCommandManager
     self.cmd_manager = cmd_manager(self)
     self.cmd_manager.start(auth_msg)