Example #1
File: core.py Project: x0rzkov/explo
def main():
    """ Get file list from args and execute """
    global args

    args = parser.parse_args()
    add_destination(log_stdout)

    for filename in args.filename:
        try:
            print('Loading {}'.format(color.cyan(filename)))

            if from_file(filename):
                result = color.green('Success.')
            else:
                result = color.red('No match.')

            print('==> {}'.format(result))

        except ParserException as exc:
            print(color.yellow('ERROR parsing file %s: %s' % (filename, exc)))
        except (ConnectionException, ProxyException) as exc:
            print(
                color.yellow('ERROR connecting to host in file %s: %s' %
                             (filename, exc)))
        except ExploException as exc:
            print(color.yellow('ERROR in file %s: %s' % (filename, exc)))
Example #2
    def startService(self):
        self.stdlib_cleanup = stdlib_logging_to_eliot_configuration(getLogger())
        self.twisted_observer = TwistedLoggerToEliotObserver()
        globalLogPublisher.addObserver(self.twisted_observer)

        for dest in self.destinations:
            add_destination(dest)
Example #3
File: go.py Project: lmiphay/gentoo-oam
def bg(wait, command, raweliot, targets):
    """Run the default oam operation on targets"""
    if raweliot:
        eliot.to_file(sys.stdout)
    else:
        # eliottree.render_tasks(sys.stdout.write, tasks, colorize=True) #py3
        eliot.add_destination(render_stdout)
    procs = []
    if len(targets) == 0:
        targets = ['localhost']
    with eliot.start_task(action_type='run_ops', targets=targets):
        with eliot.start_action(action_type='start_ops', targets=targets):
            for server in targets:
                if wait:
                    cmd = FG_CMD.format(server, command)
                else:
                    cmd = BG_CMD.format(server, SESSION_NAME, command)
                logging.debug('%s start, cmd: %s', server, cmd)
                with eliot.start_action(action_type='start_process', target=server, cmd=cmd):
                    procs.append(subprocess.Popen(cmd, shell=True))
        finished = 0
        with eliot.start_action(action_type='wait_finishes', targets=targets):
            while finished != len(procs):
                for index, server in enumerate(procs):
                    logging.debug('looping at %s %d', targets[index], finished)
                    if server.poll() is not None:
                        eliot.Message.log(message_type='finish', target=targets[index])
                        finished += 1
                time.sleep(1)
        with eliot.start_action(action_type='wait_terminations', targets=targets):
            for index, server in enumerate(procs):
                with eliot.start_action(action_type='wait_process', target=targets[index]):
                    server.wait()
                    logging.debug('%s finish, returncode=%d', targets[index], server.returncode)
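The else branch above registers a render_stdout destination for human-readable output. A minimal sketch of such a destination, assuming only standard eliot message fields (timestamp, action_type, message_type, action_status); this is not gentoo-oam's actual render_stdout:

import sys


def render_stdout(message):
    # Each eliot destination is called with the raw message dictionary.
    kind = message.get('action_type') or message.get('message_type', 'message')
    status = message.get('action_status', '')
    sys.stdout.write('{} {} {}\n'.format(message.get('timestamp', ''), kind, status))
    sys.stdout.flush()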
Example #4
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    try:
        nodes = yield runner.start_nodes(reactor, node_count=1)
        yield perform(
            make_dispatcher(reactor),
            install_cli(runner.package_source, nodes[0]))
        result = yield run_client_tests(reactor=reactor, node=nodes[0])
    except:
        result = 1
        raise
    finally:
        # Unless the tests failed, and the user asked to keep the nodes, we
        # delete them.
        if not (result != 0 and options['keep']):
            runner.stop_nodes(reactor)
        elif options['keep']:
            print "--keep specified, not destroying nodes."
    raise SystemExit(result)
Example #5
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    def cluster_cleanup():
        print ("stopping cluster")
        return runner.stop_cluster(reactor)

    cleanup_trigger_id = reactor.addSystemEventTrigger("before", "shutdown", cluster_cleanup)

    from flocker.common.script import eliot_logging_service

    log_writer = eliot_logging_service(
        destination=FileDestination(file=open("%s.log" % (base_path.basename(),), "a")),
        reactor=reactor,
        capture_stdout=False,
    )
    log_writer.startService()
    reactor.addSystemEventTrigger("before", "shutdown", log_writer.stopService)

    yield runner.ensure_keys(reactor)
    cluster = yield runner.start_cluster(reactor)

    save_managed_config(options["cert-directory"], options["config"], cluster)
    managed_config_file = options["cert-directory"].child("managed.yaml")
    managed_config = create_managed_config(options["config"], cluster)
    managed_config_file.setContent(yaml.safe_dump(managed_config, default_flow_style=False))

    if options["distribution"] in ("centos-7",):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_journal(reactor, node.address, remote_logs_file).addErrback(write_failure)
    elif options["distribution"] in ("ubuntu-14.04", "ubuntu-15.10"):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_upstart(reactor, node.address, remote_logs_file).addErrback(write_failure)

    flocker_client = make_client(reactor, cluster)
    yield wait_for_nodes(reactor, flocker_client, len(cluster.agent_nodes))

    if options["no-keep"]:
        print ("not keeping cluster")
    else:
        save_environment(options["cert-directory"], cluster)
        reactor.removeSystemEventTrigger(cleanup_trigger_id)
Example #6
def setup_logging(logger_name, human=False):
    """Patch in the Eliot logger and twisted log interception"""
    global LOGGER, HUMAN
    LOGGER = "-".join([logger_name, pkg_resources.get_distribution("autopush").version])
    HUMAN = human
    add_destination(stdout)
    ellie = EliotObserver()
    ellie.start()
    return ellie
Example #7
def setup_logging(logger_name, human=False):
    """Patch in the Eliot logger and twisted log interception"""
    global LOGGER, HUMAN
    LOGGER = "-".join(
        [logger_name,
         pkg_resources.get_distribution("autopush").version])
    HUMAN = human
    add_destination(stdout)
    ellie = EliotObserver()
    ellie.start()
    return ellie
Example #8
 def _setUp(self):
     self.logs = []
     add_destination(self.logs.append)
     self.addCleanup(lambda: remove_destination(self.logs.append))
     self.addDetail(
         self.LOG_DETAIL_NAME,
         Content(
             UTF8_TEXT,
             # Safeguard the logs against _tearDown.  Capture the list
             # object in the lambda's defaults.
             lambda logs=self.logs: [_eliottree(logs)],
         ),
     )
Example #9
def main(reactor, args, base_path, top_level):
    try:
        options = TestBrewOptions()
        try:
            options.parseOptions(args)
        except UsageError as e:
            sys.stderr.write("Error: {error}.\n".format(error=str(e)))
            sys.exit(1)

        add_destination(eliot_output)

        recipe_url = options['recipe_url']
        options['vmpath'] = FilePath(options['vmpath'])
        # Open the recipe URL just to validate and verify that it exists.
        # We do not need to read its content.
        urllib2.urlopen(recipe_url)
        yield run(reactor, [
            "vmrun",
            "revertToSnapshot",
            options['vmpath'].path,
            options['vmsnapshot'],
        ])
        yield run(reactor, [
            "vmrun",
            "start",
            options['vmpath'].path,
            "nogui",
        ])
        yield perform(
            make_dispatcher(reactor),
            run_remotely(
                username=options['vmuser'],
                address=options['vmhost'],
                commands=sequence([
                    task_configure_brew_path(),
                    task_test_homebrew(recipe_url),
                ]),
            ),
        )
        yield run(reactor, [
            "vmrun",
            "stop",
            options['vmpath'].path,
            "hard",
        ])
        print "Done."
    except ProcessTerminated as e:
        sys.stderr.write(
            ("Error: Command terminated with exit status {code}.\n").format(
                code=e.exitCode))
        raise
Example #10
 def __init__(self, stream, tbformat='default', realtime=False,
              publisher=None, logger=None):
     # TODO: Trial has a pretty confusing set of expectations for
     # reporters. In particular, it's not clear what it needs to construct
     # a reporter. It's also not clear what it expects as public
     # properties. The IReporter interface and the tests for the reporter
     # interface cover somewhat different things.
     self._stream = stream
     self.tbformat = tbformat
     self.shouldStop = False
     self.testsRun = 0
     add_destination(self._write_message)
     self._current_test = None
     self._successful = True
     self._logger = logger
Example #11
def main(reactor, argv, environ):
    add_destination(eliot_output)

    try:
        options = ContainerOptions()
        options.parseOptions(argv[1:])
    except usage.UsageError as e:
        sys.stderr.write(e.args[0])
        sys.stderr.write('\n\n')
        sys.stderr.write(options.getSynopsis())
        sys.stderr.write('\n')
        sys.stderr.write(options.getUsage())
        raise SystemExit(1)
    container_deployment = ClusterContainerDeployment(reactor,
                                                      environ,
                                                      options)
    return container_deployment.deploy_and_wait_for_creation()
Example #12
def main(reactor, args, base_path, top_level):
    try:
        options = TestBrewOptions()
        try:
            options.parseOptions(args)
        except UsageError as e:
            sys.stderr.write("Error: {error}.\n".format(error=str(e)))
            sys.exit(1)

        add_destination(eliot_output)

        recipe_url = options['recipe_url']
        options['vmpath'] = FilePath(options['vmpath'])
        # Open the recipe URL just to validate and verify that it exists.
        # We do not need to read its content.
        urllib2.urlopen(recipe_url)
        yield run(reactor, [
            "vmrun", "revertToSnapshot",
            options['vmpath'].path, options['vmsnapshot'],
        ])
        yield run(reactor, [
            "vmrun", "start", options['vmpath'].path, "nogui",
        ])
        yield perform(
            make_dispatcher(reactor),
            run_remotely(
                username=options['vmuser'],
                address=options['vmhost'],
                commands=sequence([
                    task_configure_brew_path(),
                    task_test_homebrew(recipe_url),
                ]),
            ),
        )
        yield run(reactor, [
            "vmrun", "stop", options['vmpath'].path, "hard",
        ])
        print "Done."
    except ProcessTerminated as e:
        sys.stderr.write(
            (
                "Error: Command terminated with exit status {code}.\n"
            ).format(code=e.exitCode)
        )
        raise
Example #13
    def test_tree(self):
        """
        ``_eliottree`` returns a ``bytes`` string containing a rendered tree of
        Eliot actions and messages.
        """
        events = []
        add_destination(events.append)
        self.addCleanup(lambda: remove_destination(events.append))

        with start_action(action_type=u"foo"):
            pass

        # I don't know exactly what the tree rendering looks like.  That's why
        # I'm using eliot-tree!  So this assertion is sort of lame.
        self.assertThat(
            _eliottree(events).decode("utf-8"),
            Contains(u"foo@1/started"),
        )
Example #14
File: core.py Project: x0rzkov/explo
def from_content(content, log=None):
    """ Load, validate and process blocks """

    if not content:
        raise ExploException('no exploitation content')

    try:
        blocks = load_blocks(content)
    except yaml.YAMLError as err:
        raise ExploException('error parsing document: %s' % err)

    if not validate_blocks(blocks):
        raise ExploException('error parsing document: '
            'not all blocks specify the required fields %s' % FIELDS_REQUIRED)

    if log:
        add_destination(log)

    return process_blocks(blocks)
Example #15
File: client.py Project: sloblee/flocker
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_file = open("%s.log" % base_path.basename(), "a")
    log_writer = eliot_logging_service(log_file=log_file,
                                       reactor=reactor,
                                       capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger('before', 'shutdown', log_writer.stopService)

    try:
        nodes = yield runner.start_nodes(reactor, node_count=1)
        yield perform(make_dispatcher(reactor),
                      install_cli(runner.package_source, nodes[0]))
        result = yield run_client_tests(reactor=reactor, node=nodes[0])
    except:
        result = 1
        raise
    finally:
        # Unless the tests failed, and the user asked to keep the nodes, we
        # delete them.
        if not (result != 0 and options['keep']):
            runner.stop_nodes(reactor)
        elif options['keep']:
            print "--keep specified, not destroying nodes."
    raise SystemExit(result)
Example #16
def main(reactor, argv, environ):
    # Setup eliot to print better human-readable output to standard
    # output
    add_destination(eliot_output)

    try:
        options = ContainerOptions()
        options.parseOptions(argv[1:])
    except usage.UsageError as e:
        sys.stderr.write(e.args[0])
        sys.stderr.write("\n\n")
        sys.stderr.write(options.getSynopsis())
        sys.stderr.write("\n")
        sys.stderr.write(options.getUsage())
        raise SystemExit(1)

    container_deployment = ClusterContainerDeployment.from_options(reactor, options)

    def deploy_and_wait(cluster_container_deployment):
        return cluster_container_deployment.deploy_and_wait_for_creation()

    container_deployment.addCallback(deploy_and_wait)

    return container_deployment
Example #17
File: script.py Project: james-w/flocker
def eliot_to_stdout(message_formats, action_formats, stdout=sys.stdout):
    """
    Write pretty versions of eliot log messages to stdout.
    """
    def eliot_output(message):
        message_type = message.get('message_type')
        action_type = message.get('action_type')
        action_status = message.get('action_status')

        message_format = '%s'
        if message_type is not None:
            if message_type == 'twisted:log' and message.get('error'):
                message_format = '%(message)s'
            else:
                message_format = message_formats.get(message_type, '%s')
        elif action_type is not None:
            if action_status == 'started':
                message_format = action_formats.get('action_type', '%s')
            # We don't consider other status, since we
            # have no meaningful messages to write.
        stdout.write(message_format % message)
        stdout.flush()

    add_destination(eliot_output)
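For illustration only, a hypothetical call to eliot_to_stdout (the type names and format strings below are invented, not Flocker's): message_formats maps an eliot message_type to a printf-style format applied to the whole message dictionary, so %(field)s placeholders pull fields that were logged with the message.

eliot_to_stdout(
    message_formats={'myapp:progress': 'finished step %(step)s\n'},
    action_formats={},
)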
Example #18
File: script.py Project: ienliven/flocker
def eliot_to_stdout(message_formats, action_formats, stdout=sys.stdout):
    """
    Write pretty versions of eliot log messages to stdout.
    """
    def eliot_output(message):
        message_type = message.get('message_type')
        action_type = message.get('action_type')
        action_status = message.get('action_status')

        message_format = ''
        if message_type is not None:
            if message_type == 'twisted:log' and message.get('error'):
                message_format = '%(message)s'
            else:
                message_format = message_formats.get(message_type, '')
        elif action_type is not None:
            if action_status == 'started':
                message_format = action_formats.get('action_type', '')
            # We don't consider other status, since we
            # have no meaningful messages to write.
        stdout.write(message_format % message)
        stdout.flush()

    add_destination(eliot_output)
Example #19
File: logs.py Project: AMJ-dev/tahoe-lafs
 def onOpen(self):
     """
     WebSocket callback
     """
     eliot.add_destination(self._received_eliot_log)
Example #20
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    cluster = None
    results = []

    setup_succeeded = False
    reached_finally = False

    def cluster_cleanup():
        if not reached_finally:
            print "interrupted..."
        print "stopping cluster"
        return runner.stop_cluster(reactor)

    cleanup_trigger_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                                       cluster_cleanup)

    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)
        if options['distribution'] in ('centos-7',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(capture_journal(reactor,
                                               node.address,
                                               remote_logs_file)
                               )
        elif options['distribution'] in ('ubuntu-14.04',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(capture_upstart(reactor,
                                               node.address,
                                               remote_logs_file)
                               )
        gather_deferreds(results)

        if not options["no-pull"]:
            yield perform(
                make_dispatcher(reactor),
                parallel([
                    run_remotely(
                        username='******',
                        address=node.address,
                        commands=task_pull_docker_images()
                    ) for node in cluster.agent_nodes
                ]),
            )

        setup_succeeded = True
        result = yield run_tests(
            reactor=reactor,
            cluster=cluster,
            trial_args=options['trial-args'])

    finally:
        reached_finally = True
        # We delete the nodes if the user hasn't asked to keep them
        # or if we failed to provision the cluster.
        if not setup_succeeded:
            print "cluster provisioning failed"
        elif not options['keep']:
            print "not keeping cluster"
        else:
            print "--keep specified, not destroying nodes."
            print ("To run acceptance tests against these nodes, "
                   "set the following environment variables: ")

            environment_variables = get_trial_environment(cluster)

            for environment_variable in environment_variables:
                print "export {name}={value};".format(
                    name=environment_variable,
                    value=shell_quote(
                        environment_variables[environment_variable]),
                )
            reactor.removeSystemEventTrigger(cleanup_trigger_id)

    raise SystemExit(result)
Example #21
"""
Write some logs to journald.
"""

from __future__ import print_function

from eliot import Message, start_action, add_destination
from eliot.journald import JournaldDestination

add_destination(JournaldDestination())


def divide(a, b):
    with start_action(action_type="divide", a=a, b=b):
        return a / b


print(divide(10, 2))
Message.log(message_type="inbetween")
print(divide(10, 0))
Example #22
"""
Write some logs to journald.
"""

from __future__ import print_function

from eliot import Message, start_action, add_destination
from eliot.journald import JournaldDestination

add_destination(JournaldDestination())


def divide(a, b):
    with start_action(action_type="divide", a=a, b=b):
        return a / b

print(divide(10, 2))
Message.log(message_type="inbetween")
print(divide(10, 0))
Example #23
    parser.add_argument(
        'database',
        nargs='?',
        help='The directory in which to create or open the database. If not'
        ' supplied, a temporary database will be used')
    parser.add_argument('--debug',
                        action='store_true',
                        help='Enable pudb post mortem debugging')
    parser.add_argument('--port',
                        type=int,
                        default=8034,
                        help='Port to listen for connections on')
    parser.add_argument('--host', default='0.0.0.0', help='Host to bind to')
    args = parser.parse_args()

    eliot.add_destination(eliot.FileDestination(sys.stderr))
    dbdir = args.database or tempfile.mkdtemp()
    try:
        with Database(dbdir) as db:
            the_app = app(db)
            if args.debug:
                the_app = debug_wrapper(the_app)
            httpd = Server((args.host, args.port), the_app)
            print('Serving "{dbdir}" on {args.host}:{args.port}'.format(
                dbdir=dbdir, args=args))
            httpd.safe_start()
    finally:
        if not args.database:
            shutil.rmtree(dbdir)

Example #24
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    def cluster_cleanup():
        print("stopping cluster")
        return runner.stop_cluster(reactor)

    cleanup_trigger_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                                       cluster_cleanup)

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    yield runner.ensure_keys(reactor)
    cluster = yield runner.start_cluster(reactor)

    managed_config_file = options['cert-directory'].child("managed.yaml")
    managed_config = create_managed_config(options['config'], cluster)
    managed_config_file.setContent(
        yaml.safe_dump(managed_config, default_flow_style=False)
    )

    if options['distribution'] in ('centos-7',):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_journal(reactor, node.address,
                            remote_logs_file).addErrback(write_failure)
    elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_upstart(reactor, node.address,
                            remote_logs_file).addErrback(write_failure)

    flocker_client = _make_client(reactor, cluster)
    yield _wait_for_nodes(reactor, flocker_client, len(cluster.agent_nodes))

    if options['no-keep']:
        print("not keeping cluster")
    else:
        environment_variables = get_trial_environment(cluster)
        environment_strings = list()
        for environment_variable in environment_variables:
            environment_strings.append(
                "export {name}={value};\n".format(
                    name=environment_variable,
                    value=shell_quote(
                        environment_variables[environment_variable]
                    ),
                )
            )
        environment = ''.join(environment_strings)
        print("The following variables describe the cluster:")
        print(environment)
        env_file = options['cert-directory'].child("environment.env")
        env_file.setContent(environment)
        print("The variables are also saved in {}".format(
            env_file.path
        ))
        print("Be sure to preserve the required files.")

        reactor.removeSystemEventTrigger(cleanup_trigger_id)
Example #25
def eliot_to_twisted_logging():
    """
    Ship eliot logs to twisted.
    """
    add_destination(_destination)
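The _destination used here is the module's eliot-to-Twisted bridge, which is not shown. A minimal sketch of such a bridge, assuming twisted.logger and illustrative naming rather than the project's actual implementation:

from twisted.logger import Logger

_log = Logger()


def _destination(message):
    # eliot hands every destination the message as a plain dictionary.
    _log.info("eliot: {eliot_message}", eliot_message=message)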
Example #26
 def __enter__(self):
     eliot.add_destination(self)
     return self
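Registering self works because an eliot destination is simply a callable that receives each message dictionary. A fuller sketch of the pattern with illustrative names (the rest of the original class is not shown):

import eliot


class CapturingDestination(object):
    def __init__(self):
        self.messages = []

    def __call__(self, message):
        # eliot invokes every registered destination with the message dict.
        self.messages.append(message)

    def __enter__(self):
        eliot.add_destination(self)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        eliot.remove_destination(self)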
Example #27
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    cluster = None
    results = []
    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)
        if options['distribution'] in ('centos-7',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(capture_journal(reactor,
                                               node.address,
                                               remote_logs_file)
                               )
        elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(capture_upstart(reactor,
                                               node.address,
                                               remote_logs_file)
                               )
        gather_deferreds(results)

        if options['apps-per-node'] > 0:
            config = _build_config(cluster, options['template'],
                                   options['apps-per-node'])
            yield _configure(reactor, cluster, config)

        result = 0

    except BaseException:
        result = 1
        raise
    finally:
        if options['no-keep'] or result == 1:
            runner.stop_cluster(reactor)
        else:
            if cluster is None:
                print("Didn't finish creating the cluster.")
                runner.stop_cluster(reactor)
            else:
                print("The following variables describe the cluster:")
                environment_variables = get_trial_environment(cluster)
                for environment_variable in environment_variables:
                    print("export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    ))
                print("Be sure to preserve the required files.")

    raise SystemExit(result)
Example #28
import json
import sys

from eliot import add_destination

from sphinxmixcrypto import PacketReplayCacheDict, GroupCurve25519, SphinxParams, SECURITY_PARAMETER
from sphinxmixcrypto import IReader, IKeyState

from txmix.interfaces import IMixTransport
from txmix.mix import ThresholdMixNode
from txmix.client import MixClient, RandomRouteFactory
from txmix.utils import DummyPKI


# tell eliot to log a line of json for each message to stdout
def stdout(message):
    sys.stdout.write(json.dumps(message) + "\n")


add_destination(stdout)


def generate_node_id(rand_reader):
    idnum = rand_reader.read(4)
    node_id = b"\xff" + idnum + (b"\x00" * (SECURITY_PARAMETER - len(idnum) - 1))
    return node_id


def generate_node_keypair(rand_reader):
    group = GroupCurve25519()
    private_key = group.gensecret(rand_reader)
    public_key = group.expon(group.generator, private_key)
    return public_key, private_key

Example #29
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_file = open("%s.log" % base_path.basename(), "a")
    log_writer = eliot_logging_service(log_file=log_file,
                                       reactor=reactor,
                                       capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger('before', 'shutdown', log_writer.stopService)

    cluster = None
    try:
        cluster = yield runner.start_cluster(reactor)

        if options['distribution'] in ('centos-7', ):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                capture_journal(reactor, node.address, remote_logs_file)

        if not options["no-pull"]:
            yield perform(
                make_dispatcher(reactor),
                parallel([
                    run_remotely(username='******',
                                 address=node.address,
                                 commands=task_pull_docker_images())
                    for node in cluster.agent_nodes
                ]),
            )

        result = yield run_tests(reactor=reactor,
                                 cluster=cluster,
                                 trial_args=options['trial-args'])
    except:
        result = 1
        raise
    finally:
        # Unless the tests failed, and the user asked to keep the nodes, we
        # delete them.
        if not options['keep']:
            runner.stop_cluster(reactor)
        else:
            print "--keep specified, not destroying nodes."
            if cluster is None:
                print("Didn't finish creating the cluster.")
            else:
                print(
                    "To run acceptance tests against these nodes, "
                    "set the following environment variables: ")

                environment_variables = get_trial_environment(cluster)

                for environment_variable in environment_variables:
                    print "export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    )

    raise SystemExit(result)
Example #30
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    add_destination(eliot_output)
    options = RunOptions(top_level=top_level)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        sys.stderr.write("\n")
        sys.stderr.write(str(options))
        raise SystemExit(1)

    # Existing nodes must be described in a managed section
    # of the configuration.
    existing_nodes = make_managed_nodes(
        options['config']['managed']['addresses'],
        options['distribution'],
    )
    # The following code assumes that one of the managed nodes
    # is both a control node and an agent node.
    [control_node] = [
        node for node in existing_nodes
        if node.address == options['control-node']
    ]
    dataset_backend_config_file = save_backend_configuration(
        options.dataset_backend(),
        options.dataset_backend_configuration(),
    )
    cluster = Cluster(
        all_nodes=list(existing_nodes),
        control_node=control_node,
        agent_nodes=list(existing_nodes),
        dataset_backend=options.dataset_backend(),
        default_volume_size=get_default_volume_size(
            options.dataset_backend_configuration()
        ),
        certificates=Certificates(options['cert-directory']),
        dataset_backend_config_file=dataset_backend_config_file,
    )

    flocker_client = make_client(reactor, cluster)
    existing_count = len(existing_nodes)
    yield wait_for_nodes(reactor, flocker_client, existing_count)
    if options['starting-index'] is None:
        options['starting-index'] = existing_count

    print(
        "Adding {} node(s) to the cluster of {} nodes "
        "starting at index {}".format(
            options['number-of-nodes'],
            existing_count,
            options['starting-index'],
        )
    )

    runner = options.runner
    cleanup_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                               runner.stop_cluster, reactor)

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    control_node = options['control-node']
    if options['distribution'] in ('centos-7',):
        remote_logs_file = open("remote_logs.log", "a")
        capture_journal(reactor, control_node, remote_logs_file)
    elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
        remote_logs_file = open("remote_logs.log", "a")
        capture_upstart(reactor, control_node, remote_logs_file)

    yield runner.ensure_keys(reactor)

    deferreds = runner.extend_cluster(
        reactor,
        cluster,
        options['number-of-nodes'],
        options['tag'],
        options['starting-index'],
    )
    results = yield DeferredList(deferreds)

    failed_count = 0
    for (success, value) in results:
        if not success:
            failed_count = failed_count + 1
    if failed_count:
        print "Failed to create {} nodes, see logs.".format(failed_count)

    yield wait_for_nodes(
        reactor,
        flocker_client,
        len(cluster.agent_nodes),
    )

    save_managed_config(options['cert-directory'], options['config'], cluster)
    save_environment(options['cert-directory'], cluster)
    reactor.removeSystemEventTrigger(cleanup_id)
Example #31
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(destination=FileDestination(
        file=open("%s.log" % (base_path.basename(), ), "a")),
                                       reactor=reactor,
                                       capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger('before', 'shutdown', log_writer.stopService)

    cluster = None
    results = []
    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)
        if options['distribution'] in ('centos-7', ):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(
                    capture_journal(reactor, node.address, remote_logs_file))
        elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(
                    capture_upstart(reactor, node.address, remote_logs_file))
        gather_deferreds(results)

        if options['apps-per-node'] > 0:
            config = _build_config(cluster, options['template'],
                                   options['apps-per-node'])
            yield _configure(reactor, cluster, config)

        result = 0

    except BaseException:
        result = 1
        raise
    finally:
        if options['no-keep'] or result == 1:
            runner.stop_cluster(reactor)
        else:
            if cluster is None:
                print("Didn't finish creating the cluster.")
                runner.stop_cluster(reactor)
            else:
                print("The following variables describe the cluster:")
                environment_variables = get_trial_environment(cluster)
                for environment_variable in environment_variables:
                    print("export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    ))
                print("Be sure to preserve the required files.")

    raise SystemExit(result)
Example #32
 def onOpen(self):
     """
     WebSocket callback
     """
     eliot.add_destination(self._received_eliot_log)
Example #33
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    cluster = None
    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)

        if options['distribution'] in ('centos-7',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                capture_journal(reactor, node.address, remote_logs_file)

        if not options["no-pull"]:
            yield perform(
                make_dispatcher(reactor),
                parallel([
                    run_remotely(
                        username='******',
                        address=node.address,
                        commands=task_pull_docker_images()
                    ) for node in cluster.agent_nodes
                ]),
            )

        result = yield run_tests(
            reactor=reactor,
            cluster=cluster,
            trial_args=options['trial-args'])
    except:
        result = 1
        raise
    finally:
        # Unless the tests failed, and the user asked to keep the nodes, we
        # delete them.
        if not options['keep']:
            runner.stop_cluster(reactor)
        else:
            print "--keep specified, not destroying nodes."
            if cluster is None:
                print ("Didn't finish creating the cluster.")
            else:
                print ("To run acceptance tests against these nodes, "
                       "set the following environment variables: ")

                environment_variables = get_trial_environment(cluster)

                for environment_variable in environment_variables:
                    print "export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    )

    raise SystemExit(result)
Example #34
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    try:
        nodes = yield runner.start_nodes(reactor)

        ca_directory = FilePath(mkdtemp())
        print("Generating certificates in: {}".format(ca_directory.path))
        certificates = Certificates.generate(ca_directory, nodes[0].address,
                                             len(nodes))

        yield perform(
            make_dispatcher(reactor),
            parallel([
                run_remotely(
                    username='******',
                    address=node.address,
                    commands=task_pull_docker_images()
                ) for node in nodes
            ]),
        )

        control_node = nodes[0]
        dataset_backend = options.dataset_backend

        yield perform(
            make_dispatcher(reactor),
            configure_cluster(control_node=control_node, agent_nodes=nodes,
                              certificates=certificates,
                              dataset_backend=dataset_backend))

        result = yield run_tests(
            reactor=reactor,
            nodes=nodes,
            control_node=control_node,
            agent_nodes=nodes,
            dataset_backend=dataset_backend,
            trial_args=options['trial-args'],
            certificates_path=ca_directory)
    except:
        result = 1
        raise
    finally:
        # Unless the tests failed, and the user asked to keep the nodes, we
        # delete them.
        if not (result != 0 and options['keep']):
            runner.stop_nodes(reactor)
        elif options['keep']:
            print "--keep specified, not destroying nodes."
            print ("To run acceptance tests against these nodes, "
                   "set the following environment variables: ")

            environment_variables = {
                'FLOCKER_ACCEPTANCE_NODES':
                    ':'.join(node.address for node in nodes),
                'FLOCKER_ACCEPTANCE_CONTROL_NODE': control_node.address,
                'FLOCKER_ACCEPTANCE_AGENT_NODES':
                    ':'.join(node.address for node in nodes),
                'FLOCKER_ACCEPTANCE_VOLUME_BACKEND': dataset_backend.name,
                'FLOCKER_ACCEPTANCE_API_CERTIFICATES_PATH': ca_directory.path,
            }

            for environment_variable in environment_variables:
                print "export {name}={value};".format(
                    name=environment_variable,
                    value=environment_variables[environment_variable],
                )

    raise SystemExit(result)