def agent_main(collector):
    to_file(sys.stdout)
    startLogging(sys.stdout)
    return react(
        run_agent, [
            environ.get(
                "FLOCKER_CONFIGURATION_PATH",
                "/etc/flocker",
            ).decode("ascii"),
            environ.get(
                "CATALOG_FIREHOSE_PROTOCOL",
                DEFAULT_FIREHOSE_PROTOCOL,
            ).decode("ascii"),
            environ.get(
                "CATALOG_FIREHOSE_HOSTNAME",
                DEFAULT_FIREHOSE_HOSTNAME,
            ).decode("ascii"),
            int(
                environ.get(
                    "CATALOG_FIREHOSE_PORT",
                    unicode(DEFAULT_FIREHOSE_PORT).encode("ascii"),
                ).decode("ascii")
            ),
            # Base64 encoded
            environ["CATALOG_FIREHOSE_SECRET"].decode("ascii"),
            collector,
        ],
    )
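This snippet is Python 2: the `environ` values are byte strings, hence the `.decode("ascii")` calls and the `unicode` builtin. As a hedged sketch only, the same environment handling in Python 3 can drop the decoding entirely, because `os.environ` already yields `str`; the `DEFAULT_FIREHOSE_*` constants below are placeholders standing in for the originals.

import os

# Placeholder defaults: the real DEFAULT_FIREHOSE_* constants live in the
# original module and are assumed here only to keep the sketch runnable.
DEFAULT_FIREHOSE_PROTOCOL = "wss"
DEFAULT_FIREHOSE_HOSTNAME = "firehose.example.com"
DEFAULT_FIREHOSE_PORT = 443


def read_firehose_config(environ=os.environ):
    # In Python 3, os.environ values are already str, so no .decode() needed.
    return [
        environ.get("FLOCKER_CONFIGURATION_PATH", "/etc/flocker"),
        environ.get("CATALOG_FIREHOSE_PROTOCOL", DEFAULT_FIREHOSE_PROTOCOL),
        environ.get("CATALOG_FIREHOSE_HOSTNAME", DEFAULT_FIREHOSE_HOSTNAME),
        int(environ.get("CATALOG_FIREHOSE_PORT", str(DEFAULT_FIREHOSE_PORT))),
        environ["CATALOG_FIREHOSE_SECRET"],  # Base64 encoded
    ]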
Example #2
def bg(wait, command, raweliot, targets):
    """Run the default oam operation on targets"""
    if raweliot:
        eliot.to_file(sys.stdout)
    else:
        # eliottree.render_tasks(sys.stdout.write, tasks, colorize=True) #py3
        eliot.add_destination(render_stdout)
    procs = []
    if not targets:
        targets = ['localhost']
    with eliot.start_task(action_type='run_ops', targets=targets):
        with eliot.start_action(action_type='start_ops', targets=targets):
            for server in targets:
                if wait:
                    cmd = FG_CMD.format(server, command)
                else:
                    cmd = BG_CMD.format(server, SESSION_NAME, command)
                logging.debug('%s start, cmd: %s', server, cmd)
                with eliot.start_action(action_type='start_process', target=server, cmd=cmd):
                    procs.append(subprocess.Popen(cmd, shell=True))
        # Track which processes have already been seen finishing, so a
        # finished process is only counted (and logged) once.
        finished = set()
        with eliot.start_action(action_type='wait_finishes', targets=targets):
            while len(finished) != len(procs):
                for index, proc in enumerate(procs):
                    logging.debug('looping at %s %d', targets[index], len(finished))
                    if index not in finished and proc.poll() is not None:
                        eliot.Message.log(message_type='finish', target=targets[index])
                        finished.add(index)
                time.sleep(1)
        with eliot.start_action(action_type='wait_terminations', targets=targets):
            for index, proc in enumerate(procs):
                with eliot.start_action(action_type='wait_process', target=targets[index]):
                    proc.wait()
                    logging.debug('%s finish, returncode=%d', targets[index], proc.returncode)
Example #3
    def main(self, top_level=None, base_path=None):
        """
        Check command line arguments and run the build steps.

        :param FilePath top_level: The top-level of the flocker repository.
        :param base_path: ignored.
        """
        to_file(self.sys_module.stderr)

        options = DockerBuildOptions()

        try:
            options.parseOptions(self.sys_module.argv[1:])
        except usage.UsageError as e:
            self.sys_module.stderr.write("%s\n" % (options,))
            self.sys_module.stderr.write("%s\n" % (e,))
            raise SystemExit(1)

        # Currently we add system control files for both EL and Debian-based
        # systems.  We should probably be more specific.  See FLOC-1736.
        self.build_command(
            distribution=CURRENT_DISTRIBUTION,
            destination_path=options['destination-path'],
            package_uri=options['package-uri'],
            package_files=top_level.descendant(['admin', 'package-files']),
        ).run()
Example #5
    def __init__(self, cache_manager):
        to_file(open("mesos-viewer.log", "w"))
        self.cache_manager = cache_manager
        self.already_build = False
        self.on_comments = False
        self.which = "frameworks"
        self.total_frameworks = 0

        self.config = Config()
        self.poller = Poller(
            self,
            delay=int(self.config.parser.get('settings', 'refresh_interval')))
        self.palette = self.config.get_palette()

        self.sort_on = "name"
        self.sort_reverse = False
        self.in_search = False

        self.sort_asc = "▲"
        self.sort_desc = "▼"
        self.col_name = "FRAMEWORKS"
        self.col_memory = "MEM"
        self.col_cpus = "CPUs"
        self.col_uptime = "UPTIME"
        self.col_upsince = "UP SINCE"
        self.TEXT_CAPTION = " >> "
        self.widgetEdit = urwid.Edit(self.TEXT_CAPTION, "", multiline=False)

        self.frameworks = []
        self.search_string = ""
Example #6
    def main(self, top_level=None, base_path=None):
        """
        Check command line arguments and run the build steps.

        :param top_level: The path to the root of the checked out flocker
            directory.
        :param base_path: ignored.
        """
        to_file(self.sys_module.stderr)

        options = BuildOptions()

        try:
            options.parseOptions(self.sys_module.argv[1:])
        except usage.UsageError as e:
            self.sys_module.stderr.write("%s\n" % (options,))
            self.sys_module.stderr.write("%s\n" % (e,))
            raise SystemExit(1)

        self.build_command(
            destination_path=options['destination-path'],
            package_uri=options['package-uri'],
            top_level=top_level,
            distribution=options['distribution'],
        ).run()
Example #7
    def main(self, top_level=None, base_path=None):
        """
        Check command line arguments and run the build steps.

        :param top_level: The path to the root of the checked out flocker
            directory.
        :param base_path: ignored.
        """
        to_file(self.sys_module.stderr)
        distributions = available_distributions(top_level)

        options = BuildOptions(distributions)

        try:
            options.parseOptions(self.sys_module.argv[1:])
        except usage.UsageError as e:
            self.sys_module.stderr.write("%s\n" % (options, ))
            self.sys_module.stderr.write("%s\n" % (e, ))
            raise SystemExit(1)

        self.build_command(
            destination_path=options['destination-path'],
            package_uri=options['package-uri'],
            top_level=top_level,
            distribution=options['distribution'],
        ).run()
Example #8
def set_file_destination():
    test_name = os.environ.get('TEST_NAME')

    if not test_name:
        now = datetime.now().strftime("%y-%m-%d_%H:%M:%S")
        test_name = f"apollo_run_{now}"

    logs_dir = '../../build/tests/apollo/logs/'
    test_dir = f'{logs_dir}{test_name}'
    test_log = f'{test_dir}/{test_name}.log'

    if not os.path.isdir(logs_dir):
        # Create the logs directory if it does not exist
        os.mkdir(logs_dir)

    if not os.path.isdir(test_dir):
        # Create directory for the test logs
        os.mkdir(test_dir)

    if os.path.isfile(test_log):
        # Truncate the log file if it already exists
        open(test_log, "w").close()

    # Set the log file path
    to_file(open(test_log, "a"))
Example #9
def _configure_logging(dest: Path_T) -> None:
    root = logging.getLogger()  # ISSUE: ambient
    # Add Eliot Handler to root Logger.
    root.addHandler(EliotHandler())
    # and to luigi
    logging.getLogger('luigi').addHandler(EliotHandler())
    logging.getLogger('luigi-interface').addHandler(EliotHandler())
    el.to_file(dest.open(mode='ab'))
Example #10
def _run_task(rule_name, master_id):
    del Logger._destinations._destinations[:]
    to_file(open(os.path.join(eliot_log_path, master_id), "ab"))

    with start_task(action_type="invenio_checker:supervisor:_run_task",
                    master_id=master_id) as eliot_task:
        from .models import CheckerRule
        # cleanup_failed_runs()

        redis_master = None

        def cleanup_session():
            print 'Cleaning up'
            if redis_master is not None:
                redis_master.zap()

        def sigint_hook(rcv_signal, frame):
            cleanup_session()

        def except_hook(type_, value, tback):
            cleanup_session()
            reraise(type_, value, tback)

        signal.signal(signal.SIGINT, sigint_hook)
        signal.signal(signal.SIGTERM, sigint_hook)
        sys.excepthook = except_hook

        with start_action(action_type='create master'):
            eliot_task_id = eliot_task.serialize_task_id()
            redis_master = RedisMaster(master_id, eliot_task_id, rule_name)

        with start_action(action_type='create subtasks'):
            rules = CheckerRule.from_ids((rule_name,))
            bundles = rules_to_bundles(rules, redis_master.all_recids)

            subtasks = []
            errback = handle_error.s()
            for rule, rule_chunks in bundles.iteritems():
                for chunk in rule_chunks:
                    task_id = uuid()
                    redis_master.workers_append(task_id)
                    eliot_task_id = eliot_task.serialize_task_id()
                    RedisWorker(task_id, eliot_task_id, chunk)
                    subtasks.append(run_test.subtask(args=(rule.filepath,
                                                           redis_master.master_id,
                                                           task_id),
                                                     task_id=task_id,
                                                     link_error=[errback]))

            Message.log(message_type='registered subtasks', value=str(redis_master.workers))

        with start_action(action_type='run chord'):
            redis_master.status = StatusMaster.running
            header = subtasks
            callback = handle_results.subtask(link_error=[handle_errors.s(redis_master.master_id)])
            my_chord = chord(header)
            result = my_chord(callback)
            redis_master.status = StatusMaster.running
Example #11
def _pytest_sessionstart(session, worker):
    """Initialize session-wide variables for record management and caching."""
    session.invenio_records = {'original': {}, 'modified': {}, 'temporary': {}}
    global Session  # pylint: disable=global-statement
    Session = session

    # Set the eliot log path
    eliot.to_file(get_eliot_log_file(worker_id=worker.uuid))
    session.invenio_eliot_action = eliot.start_action(action_type=u"pytest worker")
Example #12
def with_eliot(action_type, master_id=None, worker_id=None):
    assert master_id or worker_id
    if worker_id:
        # print "WITH {}".format(worker_id)
        master_id = RedisWorker(worker_id).master.master_id
    master = RedisMaster(master_id)
    eliot_task_id = master.eliot_task_id
    del Logger._destinations._destinations[:]
    to_file(open(eliot_log_path + master_id, "ab"))
    with Action.continue_task(task_id=eliot_task_id):
        return start_action(action_type=action_type)
Example #14
def elioterize(action_type, master_id=None, worker_id=None):
    """Eliot action continuer that can log to either worker or master."""
    clear_logger_destinations(Logger)
    if worker_id:
        to_file(get_eliot_log_file(worker_id=worker_id))
        client = RedisWorker(worker_id)
    elif master_id:
        to_file(get_eliot_log_file(master_id=master_id))
        client = RedisMaster(master_id)

    with Action.continue_task(task_id=client.eliot_task_id):
        return start_action(action_type=action_type)
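Several of these examples share the same cross-process pattern: a parent serializes its Eliot task ID, and a worker continues the same task tree elsewhere. A minimal self-contained sketch of that round trip (the log file name is a placeholder, not from the source):

from eliot import Action, Message, start_action, to_file

to_file(open("cross-process.log", "ab"))

# Parent: open an action and capture its identity for another process.
with start_action(action_type="parent") as action:
    task_id = action.serialize_task_id()  # opaque token to hand over

# Worker (typically another process): resume the same task tree, so the
# child's messages appear nested under the parent action.
with Action.continue_task(task_id=task_id):
    Message.log(message_type="child_work")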
Example #16
def _setup_eliot():
    # type: () -> bool
    """
    Set up the `eliot` package so that it outputs to a log file.
    """

    global _eliot_configured

    if not _eliot_configured:
        eliot.to_file(LOG_FILE)
        _eliot_configured = True

    return _eliot_configured
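The `_eliot_configured` guard matters because each `to_file` call adds another destination rather than replacing the previous one; configure twice and every message is written twice. That is also why several examples here clear `Logger._destinations` before calling `to_file`. A small sketch of the failure mode the flag prevents (assumed, not from the source):

import sys

import eliot

eliot.to_file(sys.stdout)
eliot.to_file(sys.stdout)  # adds a second destination, does not replace

# This single call is now emitted twice, once per registered destination.
eliot.Message.log(message_type="demo")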
Example #17
def _entry():
    """
    Implement the *magic-folder* console script declared in ``setup.py``.

    :return: ``None``
    """
    from eliot import to_file
    from os import getpid
    to_file(open("magic-folder-cli.{}.eliot".format(getpid()), "w"))

    def main(reactor):
        return dispatch_magic_folder_command(sys.argv[1:])
    return react(main)
Example #18
def init_logging():
    eliot.register_exception_extractor(Exception,
                                       _add_exception_data_and_traceback)

    root_logger = logging.getLogger()
    root_logger.addHandler(EliotHandler())
    root_logger.setLevel(logging.DEBUG)
    logging.getLogger("morepath.directive").setLevel(logging.INFO)
    logging.getLogger("passlib.registry").setLevel(logging.INFO)
    logging.getLogger("passlib.utils.compat").setLevel(logging.INFO)
    logging.getLogger("parso").setLevel(logging.WARN)

    eliot.to_file(sys.stderr, encoder=EkklesiaLogEncoder)

    logging.captureWarnings(True)
Example #19
        def wrapper(*args, **kwargs):
            redis_worker = Session.session.config.option.redis_worker
            eliot_task_id = redis_worker.eliot_task_id
            # print "~{} {}".format(eliot_task_id, action_type)

            del Logger._destinations._destinations[:]
            to_file(open(os.path.join(eliot_log_path, redis_worker.master.uuid + '.' + redis_worker.task_id), "ab"))

            eliot_task = Action.continue_task(task_id=eliot_task_id)
            with eliot_task:
                with start_action(action_type=action_type,
                                  worker_id=redis_worker.task_id,
                                  **dec_kwargs):
                    func(*args, **kwargs)
                redis_worker.eliot_task_id = eliot_task.serialize_task_id()
Example #20
def make_verbose():
    # type: () -> bool
    """
    Make the logging verbose by outputting the full `eliot` output.
    """
    global _only_eliot

    if not _only_eliot:
        # Change the attribute (which will mute normal standard error msgs)
        _only_eliot = True

        # Add a standard output stream to eliot
        eliot.to_file(_sys.stdout)

    return _only_eliot
Example #21
def main():
    eliot.to_file(sys.stdout)
    allowed_signals = {signal.SIGINT, signal.SIGTERM}
    for sig in range(1, signal.NSIG):
        if sig in allowed_signals:
            continue
        try:
            signal.siginterrupt(sig, False)
        except RuntimeError as e:
            if e.args[0] != errno.EINVAL:
                raise

    proc = SocketPassProcessor.from_path(sys.argv[1])
    app = cinje_app
    while True:
        proc.handle_request(app)
Example #22
def run():
    """
    Implement the *magic-folder* console script declared in ``setup.py``.

    :return: ``None``
    """
    from eliot import to_file
    from os import getpid
    to_file(open("magic-folder-cli.{}.eliot".format(getpid()), "w"))

    console_scripts = importlib_metadata.entry_points()["console_scripts"]
    magic_folder = list(script for script in console_scripts
                        if script.name == "twist")[0]
    argv = ["twist", "--log-level=debug", "--log-format=text", "magic_folder"
            ] + sys.argv[1:]
    magic_folder.load()(argv)
Example #23
def main():
    eliot.to_file(sys.stdout)
    allowed_signals = {signal.SIGINT, signal.SIGTERM}
    for sig in range(1, signal.NSIG):
        if sig in allowed_signals:
            continue
        try:
            signal.siginterrupt(sig, False)
        except RuntimeError as e:
            if e.args[0] != errno.EINVAL:
                raise

    from paste import lint
    proc = SocketPassProcessor.from_path(sys.argv[1])
    app = lint.middleware(test_app)
    while True:
        proc.handle_request(app)
Example #24
    def _setup_logger(self, level=logging.WARNING, override_root=False):
        # In Python 3.8 we could use force=True to force the root logger
        #  to override existing defaults
        #
        # Don't eliot-log this or load_from_environment or
        #  create_derived_settings, because we initialize an LSSTConfig when
        #  we import jupyterhubutils, and we don't necessarily want to
        #  spam everyone about it
        # Add our formats
        fstr = "[%(levelname).1s %(asctime)s.%(msecs).03d"
        fstr += " %(module)s:%(funcName)s:%(lineno)d] %(message)s"
        dstr = "%Y-%m-%d %H:%M:%S"
        self.log_format = fstr
        self.log_datefmt = dstr
        self.log_level = level
        to_file(sys.stderr)
        if override_root:
            self.eliotify_root_logger(level=level)
        self.log = make_logger(level=level)
Example #26
    def __init__(self, **azure_config):
        """
        :param ServiceManagement azure_client: an instance of the azure
        service management api client.
        :param String service_name: The name of the cloud service
        :param
            names of Azure volumes to identify cluster
        :returns: A ``BlockDeviceVolume``.
        """
        self._instance_id = self.compute_instance_id()
        self._azure_service_client = ServiceManagementService(
            azure_config['subscription_id'],
            azure_config['management_certificate_path'])
        self._service_name = azure_config['service_name']
        self._azure_storage_client = BlobService(
            azure_config['storage_account_name'],
            azure_config['storage_account_key'])
        self._storage_account_name = azure_config['storage_account_name']
        self._disk_container_name = azure_config['disk_container_name']

        if azure_config['debug']:
            to_file(sys.stdout)
Example #28
def dd(cwd, config, skip, pretend, interactive):
    eliot.to_file(open(f"{cwd}/logs/eliot.txt", 'w'))
    # eliot.to_file(open(f"{cwd}/logs/eliot-{datetime.now().strftime('%y-%m-%d-%H:%M:%S')}.txt",'wb'))

    with eliot.start_action(action_type='DockerDriver',
                            cwd=str(cwd),
                            config=config):
        config_path = pathlib.Path(config)
        config = hjson.load(open(cwd.joinpath(config_path), 'r'))
        name = config_path.parts[-1].split('.')[0]
        config = DockerDriver(cwd, name, config)

    if pretend:
        for p in filter(lambda x: x.exec, config.plugins):
            p.skip = True

    # use cli --skip to set certain exec plugins to skip=True
    for p in filter(lambda x: getattr(x, 'name') in skip, config.plugins):
        p.skip = True

    # try to find initial image or create it
    if not pretend:
        with eliot.start_action(action_type='initialize'):
            config.initialize()

    if interactive:
        config.interact('initial')

    # Start the sequence of operations

    with eliot.start_action(action_type='start'):
        config.start()
Example #29
def set_file_destination():
    storage_type = os.environ.get('STORAGE_TYPE')
    tests_names = [m for m in sys.modules.keys() if "test_" in m]

    if len(tests_names) > 1:
        # Multiple Apollo tests modules loaded, test name unknown.
        now = datetime.now().strftime("%y-%m-%d_%H:%M:%S")
        test_name = f"apollo_run_{now}"
    else:
        # Single Apollo module loaded, test name known.
        test_name = f"{tests_names.pop()}_{storage_type}"

    # Create the logs directory if it does not exist
    if not os.path.isdir("logs"):
        os.mkdir("logs")

    test_name = f"logs/{test_name}.log"

    if os.path.isfile(test_name):
        # Truncate the log file if it already exists
        open(test_name, "w").close()

    # Set the log file path
    to_file(open(test_name, "a"))
Example #30
#!/usr/bin/env python
"""
Example of an Eliot action context spanning multiple threads.
"""

from __future__ import unicode_literals

from threading import Thread
from sys import stdout

from eliot import to_file, preserve_context, start_action
to_file(stdout)


def add_in_thread(x, y):
    with start_action(action_type="in_thread", x=x, y=y) as context:
        context.add_success_fields(result=x + y)


with start_action(action_type="main_thread"):
    # Preserve Eliot context and restore in new thread:
    thread = Thread(target=preserve_context(add_in_thread),
                    kwargs={
                        "x": 3,
                        "y": 4
                    })
    thread.start()
    # Wait for the thread to exit:
    thread.join()
Example #31
import sys

import yaml

from eliot import to_file

from twisted.internet.task import react
from twisted.python.filepath import FilePath
from twisted.python.usage import Options, UsageError

from flocker import __version__ as flocker_client_version

from benchmark import metrics, operations, scenarios
from benchmark.cluster import BenchmarkCluster
from benchmark._driver import driver


to_file(sys.stderr)

# If modifying scenarios, operations, or metrics, please update
# docs/gettinginvolved/benchmarking.rst

_SCENARIOS = {
    'no-load': scenarios.NoLoadScenario,
}

_OPERATIONS = {
    'no-op': operations.NoOperation,
    'read-request': operations.ReadRequest,
    'wait': operations.Wait,
}

_METRICS = {
Example #32
import sys
from eliot import start_action, to_file
import requests
to_file(sys.stdout)


def check_links(urls):
    with start_action(action_type="check_links", urls=urls):
        for url in urls:
            try:
                with start_action(action_type="download", url=url):
                    response = requests.get(url)
                    response.raise_for_status()
            except Exception as e:
                raise ValueError(str(e))

check_links(["http://google.com"], ["http://nosuchurl"])
Example #33
from eliot import start_action, to_file
import trio

to_file(open("trio.log", "w"))


async def say(message, delay):
    with start_action(action_type="say", message=message):
        await trio.sleep(delay)

async def main():
    with start_action(action_type="main"):
        async with trio.open_nursery() as nursery:
            nursery.start_soon(say, "hello", 1)
            nursery.start_soon(say, "world", 2)

trio.run(main)
Example #34
def eliot_logging():
    with open("integration.eliot.json", "w") as f:
        to_file(f)
        yield
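This generator reads like the body of a pytest fixture: the destination file stays open while the test runs and is closed by the `with` block afterwards. A sketch of how it is presumably wired up (the decorator and the test below are assumptions, not from the source):

import pytest
from eliot import start_action, to_file


@pytest.fixture
def eliot_logging():
    with open("integration.eliot.json", "w") as f:
        to_file(f)
        yield


def test_example(eliot_logging):
    # Anything logged here ends up in integration.eliot.json.
    with start_action(action_type="integration_test"):
        pass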
Example #35
"""
Benchmark of message serialization.

The goal here is to focus mostly on the performance of serialization, in a
vaguely realistic manner. That is, messages are logged in the context of an
action with a small number of fields.
"""

from __future__ import unicode_literals

import time

from eliot import Message, start_action, to_file

# Ensure JSON serialization is part of benchmark:
to_file(open("/dev/null", "w"))

N = 10000


def run():
    start = time.time()
    for i in range(N):
        with start_action(action_type="my_action"):
            with start_action(action_type="my_action2") as ctx:
                ctx.log(
                    message_type="my_message",
                    integer=3,
                    string="abcdeft",
                    string2="dgsjdlkgjdsl",
                    list=[1, 2, 3, 4],
Example #37
def main(argv, environ, react=react):
    options = BenchmarkOptions()

    try:
        options.parseOptions(argv[1:])
    except UsageError as e:
        usage(options, e.args[0])

    if options['log-file'] is not None:
        try:
            log_file = open(options['log-file'], 'a')
        except EnvironmentError as e:
            usage(
                options, 'Can not open the log file {}.\n{}: {}.'.format(
                    options['log-file'], e.filename, e.strerror))
    else:
        log_file = sys.stderr
    to_file(log_file)

    cluster = get_cluster(options, environ)

    with open(options['config'], 'rt') as f:
        config = yaml.safe_load(f)

    validate_configuration(config)

    scenario_name = options['scenario']
    scenario_config = get_config_by_name(config['scenarios'], scenario_name)
    if scenario_config is None:
        usage(options, 'Invalid scenario name: {!r}'.format(scenario_name))
    scenario_factory = create_factory_from_config(_SCENARIOS, scenario_config)
    if scenario_factory is None:
        usage(options,
              'Invalid scenario type: {!r}'.format(scenario_config['type']))

    operation_name = options['operation']
    operation_config = get_config_by_name(config['operations'], operation_name)
    if operation_config is None:
        usage(options, 'Invalid operation name: {!r}'.format(operation_name))
    operation_factory = create_factory_from_config(_OPERATIONS,
                                                   operation_config)
    if operation_factory is None:
        usage(options,
              'Invalid operation type: {!r}'.format(operation_config['type']))

    metric_name = options['metric']
    metric_config = get_config_by_name(config['metrics'], metric_name)
    if metric_config is None:
        usage(options, 'Invalid metric name: {!r}'.format(metric_name))
    metric_factory = create_factory_from_config(_METRICS, metric_config)
    if metric_factory is None:
        usage(options,
              'Invalid metric type: {!r}'.format(metric_config['type']))

    try:
        num_samples = int(options['samples'])
    except ValueError:
        usage(options, 'Invalid sample count: {!r}'.format(options['samples']))

    timestamp = datetime.now().isoformat()

    result = dict(
        version=OUTPUT_VERSION,
        timestamp=timestamp,
        client=dict(
            flocker_version=flocker_client_version,
            working_directory=os.getcwd(),
            username=environ[b"USER"],
            nodename=node(),
            platform=platform(),
        ),
        scenario=scenario_config,
        operation=operation_config,
        metric=metric_config,
    )

    userdata = parse_userdata(options)
    if userdata:
        result['userdata'] = userdata

    react(driver,
          (cluster, scenario_factory, operation_factory, metric_factory,
           num_samples, result, partial(json.dump, fp=sys.stdout, indent=2)))
Example #39
"""
Benchmark of message serialization.

The goal here is to focus mostly on the performance of serialization, in a
vaguely realistic manner. That is, messages are logged in the context of an
action with a small number of fields.
"""

from __future__ import unicode_literals

import time

from eliot import Message, start_action, to_file

# Ensure JSON serialization is part of benchmark:
to_file(open("/dev/null", "w"))

N = 10000


def run():
    start = time.time()
    for i in range(N):
        with start_action(action_type="my_action"):
            with start_action(action_type="my_action2"):
                Message.log(
                    message_type="my_message",
                    integer=3,
                    string=b"abcdeft",
                    string2="dgsjdlkgjdsl",
                    list=[1, 2, 3, 4],
Example #40
async def configure_logging(app):
    print('configuring logging')
    use_asyncio_context()

    logfile = app['config'].get('logfile', 'log.json')
    to_file(open(logfile, 'w'))
Example #41
def _run_task(rule_name, master_id):

    # # If you find yourself debugging celery crashes:
    # redis_master = None
    # def cleanup_session():
    #     if redis_master is not None:
    #         redis_master.zap()
    # def sigint_hook(rcv_signal, frame):
    #     cleanup_session()
    # def except_hook(type_, value, tback):
    #     cleanup_session()
    #     reraise(type_, value, tback)
    # signal.signal(signal.SIGINT, sigint_hook)
    # signal.signal(signal.SIGTERM, sigint_hook)
    # sys.excepthook = except_hook

    clear_logger_destinations(Logger)
    to_file(get_eliot_log_file(master_id=master_id))
    with start_task(action_type="invenio_checker:supervisor:_run_task",
                    master_id=master_id) as eliot_task:
        eliot_task_id = eliot_task.serialize_task_id()

        # Have the master initialize its presence in redis.
        Message.log(message_type='creating master')
        redis_master = RedisMaster.create(master_id, eliot_task_id, rule_name)

        # Load the rule from its name. `run_task` has already checked that it's
        # there.
        rule = CheckerRule.query.filter(CheckerRule.name == rule_name).one()
        Message.log(message_type='loaded rule', rule_name=rule.name)

        # Create workers to attach to this master. `record_centric` means that
        # the task uses the `record` fixture, which causes pytest to loop over
        # it len(chunk_recids) times. This is important to know now so that we
        # will spawn multiple workers.
        subtasks = []
        record_centric = _get_record_fixture_presence(rule.filepath)

        if record_centric:
            # We wish to spawn multiple workers to split the load.
            if rule.allow_chunking:
                recid_chunks = tuple(chunk_recids(rule.modified_requested_recids))
            else:
                recid_chunks = (rule.modified_requested_recids,)
            Message.log(message_type='creating subtasks', count=len(recid_chunks),
                        mode='record_centric', recid_count=len(rule.modified_requested_recids))
        else:
            # We wish to spawn just one worker that will run the check function
            # once.
            recid_chunks = (set(),)
            Message.log(message_type='creating subtasks', count=1,
                        mode='not_record_centric')

        # Create the subtasks based on the decisions taken above and inform the
        # master of its associations with these new workers/tasks.
        for chunk in recid_chunks:
            task_id = uuid()
            redis_master.workers_append(task_id)
            subtasks.append(create_celery_task(task_id, redis_master.master_id,
                                               rule, chunk, eliot_task))

        if not subtasks:
            # Note that if `record-centric` is True, there's the chance that no
            # records matched our query. This does not imply a problem.
            redis_master.status = StatusMaster.completed
        else:
            redis_master.status = StatusMaster.running
            # FIXME: handle_all_completion should be called after the callbacks
            # of all workers have completed.
            callback = handle_all_completion.subtask()
            chord(subtasks)(callback)
Example #42
from sys import stdout
from eliot import start_action, start_task, to_file
to_file(stdout)


class Place(object):
    def __init__(self, name, contained=()):
        self.name = name
        self.contained = contained

    def visited(self, people):
        # No need to repetitively log people, since caller will:
        with start_action(action_type="visited", place=self.name):
            for thing in self.contained:
                thing.visited(people)


def honeymoon(family, destination):
    with start_task(action_type="honeymoon", people=family):
        destination.visited(family)


honeymoon(["Mrs. Casaubon", "Mr. Casaubon"],
          Place("Rome, Italy",
                [Place("Vatican Museum",
                       [Place("Statue #1"), Place("Statue #2")])]))
Example #43
import asyncio
import aiohttp
from eliot import start_action, to_file

to_file(open("linkcheck.log", "w"))


async def check_links(urls):
    session = aiohttp.ClientSession()
    with start_action(action_type="check_links", urls=urls):
        for url in urls:
            try:
                with start_action(action_type="download", url=url):
                    async with session.get(url) as response:
                        response.raise_for_status()
            except Exception as e:
                raise ValueError(str(e))


try:
    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        check_links(["http://eliot.readthedocs.io", "http://nosuchurl"]))
except ValueError:
    print("Not all links were valid.")
Example #44
def _start_logging():
    # Name the log file based on PID, so different processes don't stomp on
    # each other's logfiles:
    to_file(open("{}.log".format(getpid()), "a"))
Example #45
import requests
from eliot import start_action, to_file
to_file(open("linkcheck.log", "w"))


def check_links(urls):
    with start_action(action_type="check_links", urls=urls):
        for url in urls:
            try:
                with start_action(action_type="download", url=url):
                    response = requests.get(url)
                    response.raise_for_status()
            except Exception as e:
                raise ValueError(str(e))

try:
    check_links(["http://eliot.readthedocs.io", "http://nosuchurl"])
except ValueError:
    print("Not all links were valid.")
Example #46
# On Windows, Hangul encoding errors may occur.
# If you run into such an encoding error with
# Message.log(message_type="info", msg="데이터를 저장했습니다.")
# then rewrite the msg in that line in English.

import json
import sys

from eliot import Message, start_action, to_file, write_traceback
import requests

# Send log output to standard output (print to the terminal)
to_file(sys.stdout)
# List of URLs to crawl
PAGE_URL_LIST = [
    'https://eliot.readthedocs.io/en/1.0.0/',
    'https://eliot.readthedocs.io/en/1.0.0/generating/index.html',
    'https://example.com/notfound.html',
]
def fetch_pages():
    """페이지의 내용을 추출합니다."""
    # 어떤 처리의 로그인지는 action_type으로 지정
    with start_action(action_type="fetch_pages"):
        page_contents = {}
        for page_url in PAGE_URL_LIST:
            # Record which operation this is via action_type
            with start_action(action_type="download", url=page_url):
                try:
                    r = requests.get(page_url, timeout=30)
                    r.raise_for_status()
                except requests.exceptions.RequestException as e:
Example #47
import eliot
import logging
import sys
from eliot.stdlib import EliotHandler

logging.getLogger().addHandler(EliotHandler())
logging.getLogger().setLevel(logging.DEBUG)
eliot.to_file(sys.stdout)
logging.captureWarnings(True)

logg = logging.getLogger(__name__)

logging.getLogger("parso").setLevel(logging.WARN)

logg.info("init")
Example #48
from eliot import log_call, to_file

to_file(open("out.log", "a"))

@log_call
def add(a, b):
    return a + b

@log_call
def multiply(a, b):
    return 0 * b  # note: always returns 0, so the print below yields 0, not 12

@log_call
def multiplysum(a, b, c):
    return multiply(add(a, b), c)

print(multiplysum(1, 2, 4)) # should print 12
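Because `multiply` always returns 0, this prints 0 rather than 12; the point of `@log_call` is that the log records every call's arguments and result, so the bad intermediate value is easy to spot. The log file is newline-delimited JSON, one message per line, so a few lines of Python suffice to inspect it (field names follow Eliot's standard message format):

import json

with open("out.log") as f:
    for line in f:
        msg = json.loads(line)
        # @log_call records the wrapped function as action_type, along with
        # its arguments and result; task_uuid/task_level tie the messages
        # into a call tree.
        print(msg["task_uuid"], msg["task_level"],
              msg.get("action_type"), msg.get("result"))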
Example #49
# Tested on: Python 3.7.3 (MacOS)
import requests
import os.path
import schedule
import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from eliot import start_action, to_file

# Overwrites log file in every program iteration
# Replace "w+" with "a+", if you want to append
to_file(open("log", "w+"))


# Check urls.txt presence, exit otherwise
if not os.path.isfile('urls.txt'):
    print('[-] ERR: urls.txt missing\n')
    exit(-1)


def do_crawl():
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
    headers = {'User-Agent': user_agent}
    urls = []

    # Read urls.txt
    with open('urls.txt', 'r') as fObj:
        filecontents = fObj.readlines()
    
Example #50
import sys

from click import echo, group, option, pass_context
from eliot import start_task, to_file, log_call

from pycryptpad_tools.padapi import PadAPI


to_file(sys.stderr)


# base_url = "http://cryptpad-local:3000"
base_url = "https://cryptpad.piratenpartei.de"


def pad_api(ctx):
    return PadAPI(ctx.obj['BASE_URL'], ctx.obj['HEADLESS'])


@log_call
def read_infile(infile):
    if infile == "-":
        return sys.stdin.read()
    else:
        with open(infile) as f:
            return f.read()


@group()
@option('--base_url', default=base_url)
@option('--headless/--no-headless', default=True)