Example #1
def test_stdout(self):
    """
    A ``file:`` description with a path of ``-`` causes logs to be written to
    stdout.
    """
    reactor = object()
    self.assertThat(
        _parse_destination_description("file:-")(reactor),
        Equals(FileDestination(stdout)),
    )

def _parse_file(self, kind, args):
    if args == "-":
        get_file = lambda: stdout
    else:
        path = FilePath(args)
        get_file = lambda: LogFile(
            path.basename(),
            path.dirname(),
            rotateLength=1024 * 1024 * 1024,
            maxRotatedFiles=10,
        )
    return lambda reactor: FileDestination(get_file())
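For orientation, a minimal sketch of wiring a FileDestination up directly, without the description parser or any Twisted service, using eliot's public add_destinations and Message APIs:

from sys import stdout

from eliot import FileDestination, Message, add_destinations

# Register a destination that writes one JSON-encoded message per line.
add_destinations(FileDestination(file=stdout))
Message.log(value="hello", another=1)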
Example #3
def main(reactor):
    print("Logging to example-eliot.log...")
    logWriter = ThreadedWriter(
        FileDestination(file=open("example-eliot.log", "ab")), reactor)

    # Manually start the service, which will add it as a
    # destination. Normally we'd register ThreadedWriter with the usual
    # Twisted Service/Application infrastructure.
    logWriter.startService()

    # Log a message:
    Message.log(value="hello", another=1)

    # Manually stop the service.
    done = logWriter.stopService()
    return done
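Both main(reactor) functions in this example are meant to be driven by Twisted's task.react, which passes in the running reactor and stops it once the returned Deferred fires; a minimal sketch:

from twisted.internet.task import react

if __name__ == "__main__":
    react(main, [])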
def main(reactor):
    logWriter = ThreadedWriter(
        FileDestination(file=open("emc_midrange_driver.log", "ab")),
        reactor)

    # Manually start the service, which will add it as a
    # destination. Normally we'd register ThreadedWriter with the usual
    # Twisted Service/Application infrastructure.
    logWriter.startService()

    parameter = {
        'cluster_id': uuid.uuid4(),
        'ip': '192.168.1.58',
        'user': '******',
        'password': '******',
        'storage_pools': 'Manila_Pool',
        'multipath': True,
        'proto': 'iSCSI',
        'host_ip': '192.168.21.237',
    }

    api = EMCVNXBlockAPI(**parameter)

    print(api.list_volumes())

    volume = api.create_volume(uuid.uuid4(), 80530636800)

    print(api.list_volumes())

    volume = api.attach_volume(volume.blockdevice_id, api.hostname)
    print(api.list_volumes())

    device_path = api.get_device_path(volume.blockdevice_id)
    print(device_path)

    api.detach_volume(volume.blockdevice_id)

    api.destroy_volume(volume.blockdevice_id)

    # Manually stop the service.
    done = logWriter.stopService()
    return done
Example #5
def postOptions(self):
    if self['journald']:
        destination = JournaldDestination()
    else:
        if self['logfile'] is None:
            logfile = self._sys_module.stdout
        else:
            logfilepath = FilePath(self['logfile'])
            logfilepath_directory = logfilepath.parent()
            if not logfilepath_directory.exists():
                logfilepath_directory.makedirs()
            # A twisted.python.logfile which has write and flush methods
            # but which also rotates the log file.
            logfile = LogFile.fromFullPath(
                logfilepath.path,
                rotateLength=LOGFILE_LENGTH,
                maxRotatedFiles=LOGFILE_COUNT,
            )
        destination = FileDestination(file=logfile)
    self.eliot_destination = destination
    original_postOptions(self)
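The journald branch above assumes systemd integration; a minimal sketch of registering that destination directly (JournaldDestination is provided by the eliot.journald module):

from eliot import add_destinations
from eliot.journald import JournaldDestination

# Route each eliot message to the systemd journal instead of a log file.
add_destinations(JournaldDestination())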
Example #6
    def _parse_file(self, kind, arg_text):
        # Reserve the possibility of an escape character in the future.  \ is
        # the standard choice but it's the path separator on Windows which
        # pretty much ruins it in this context.  Most other symbols already
        # have some shell-assigned meaning which makes them treacherous to use
        # in a CLI interface.  Eliminating all such dangerous symbols leaves
        # approximately @.
        if u"@" in arg_text:
            raise ValueError(
                u"Unsupported escape character (@) in destination text ({!r})."
                .format(arg_text), )
        arg_list = arg_text.split(u",")
        path_name = arg_list.pop(0)
        if path_name == "-":
            get_file = lambda: stdout
        else:
            path = FilePath(path_name)
            rotate_length = int(
                self._get_arg(
                    u"rotate_length",
                    1024 * 1024 * 1024,
                    arg_list,
                ))
            max_rotated_files = int(
                self._get_arg(
                    u"max_rotated_files",
                    10,
                    arg_list,
                ))

            def get_file():
                path.parent().makedirs(ignoreExistingDirectory=True)
                return LogFile(
                    path.basename(),
                    path.dirname(),
                    rotateLength=rotate_length,
                    maxRotatedFiles=max_rotated_files,
                )

        return lambda reactor: FileDestination(get_file())
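Assuming _get_arg pops "name=value" entries from arg_list (its definition is not shown here), the parser accepts a path followed by optional comma-separated options; hypothetical descriptions it would handle:

# "-" sends logs to stdout; any other path gets size-based rotation, with the
# defaults above (1 GiB per file, 10 rotated files) unless overridden.
_parse_destination_description(u"file:-")
_parse_destination_description(
    u"file:/var/log/app/app.log,rotate_length=1048576,max_rotated_files=5")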
Example #7
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    configure_eliot_logging_for_acceptance()
    options = RunOptions(top_level=top_level)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        sys.stderr.write("\n")
        sys.stderr.write(str(options))
        raise SystemExit(1)

    # Existing nodes must be described in a managed section
    # of the configuration.
    existing_nodes = make_managed_nodes(
        options['config']['managed']['addresses'],
        options['distribution'],
    )
    # The following code assumes that one of the managed nodes
    # is both a control node and an agent node.
    [control_node] = [
        node for node in existing_nodes
        if node.address == options['control-node']
    ]
    dataset_backend_config_file = save_backend_configuration(
        options.dataset_backend(),
        options.dataset_backend_configuration(),
    )
    cluster = Cluster(
        all_nodes=list(existing_nodes),
        control_node=control_node,
        agent_nodes=list(existing_nodes),
        dataset_backend=options.dataset_backend(),
        default_volume_size=get_default_volume_size(
            options.dataset_backend_configuration()),
        certificates=Certificates(options['cert-directory']),
        dataset_backend_config_file=dataset_backend_config_file,
    )

    flocker_client = make_client(reactor, cluster)
    existing_count = len(existing_nodes)
    yield wait_for_nodes(reactor, flocker_client, existing_count)
    if options['starting-index'] is None:
        options['starting-index'] = existing_count

    print(
        "Adding {} node(s) to the cluster of {} nodes "
        "starting at index {}".format(
            options['number-of-nodes'],
            existing_count,
            options['starting-index'],
        ))

    runner = options.runner
    cleanup_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                               runner.stop_cluster, reactor)

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(destination=FileDestination(
        file=open("%s.log" % (base_path.basename(), ), "a")),
                                       reactor=reactor,
                                       capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger('before', 'shutdown', log_writer.stopService)

    control_node = options['control-node']
    if options['distribution'] in ('centos-7', ):
        remote_logs_file = open("remote_logs.log", "a")
        capture_journal(reactor, control_node, remote_logs_file)
    elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
        remote_logs_file = open("remote_logs.log", "a")
        capture_upstart(reactor, control_node, remote_logs_file)

    yield runner.ensure_keys(reactor)

    deferreds = runner.extend_cluster(
        reactor,
        cluster,
        options['number-of-nodes'],
        options['tag'],
        options['starting-index'],
    )
    results = yield DeferredList(deferreds)

    failed_count = 0
    for (success, _) in results:
        if not success:
            failed_count += 1
    if failed_count:
        print "Failed to create {} nodes, see logs.".format(failed_count)

    yield wait_for_nodes(
        reactor,
        flocker_client,
        len(cluster.agent_nodes),
    )

    save_managed_config(options['cert-directory'], options['config'], cluster)
    save_environment(options['cert-directory'], cluster,
                     options.package_source())
    reactor.removeSystemEventTrigger(cleanup_id)
Example #8
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    cluster = None
    results = []

    setup_succeeded = False
    reached_finally = False

    def cluster_cleanup():
        if not reached_finally:
            print "interrupted..."
        print "stopping cluster"
        return runner.stop_cluster(reactor)

    cleanup_trigger_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                                       cluster_cleanup)

    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)
        if options['distribution'] in ('centos-7',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(capture_journal(reactor,
                                               node.address,
                                               remote_logs_file)
                               )
        elif options['distribution'] in ('ubuntu-14.04',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(capture_upstart(reactor,
                                               node.address,
                                               remote_logs_file)
                               )
        gather_deferreds(results)

        if not options["no-pull"]:
            yield perform(
                make_dispatcher(reactor),
                parallel([
                    run_remotely(
                        username='******',
                        address=node.address,
                        commands=task_pull_docker_images()
                    ) for node in cluster.agent_nodes
                ]),
            )

        setup_succeeded = True
        result = yield run_tests(
            reactor=reactor,
            cluster=cluster,
            trial_args=options['trial-args'])

    finally:
        reached_finally = True
        # We delete the nodes if the user hasn't asked to keep them
        # or if we failed to provision the cluster.
        if not setup_succeeded:
            print "cluster provisioning failed"
        elif not options['keep']:
            print "not keeping cluster"
        else:
            print "--keep specified, not destroying nodes."
            print ("To run acceptance tests against these nodes, "
                   "set the following environment variables: ")

            environment_variables = get_trial_environment(cluster)

            for environment_variable in environment_variables:
                print "export {name}={value};".format(
                    name=environment_variable,
                    value=shell_quote(
                        environment_variables[environment_variable]),
                )
            reactor.removeSystemEventTrigger(cleanup_trigger_id)

    raise SystemExit(result)
Example #9
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    configure_eliot_logging_for_acceptance()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    def cluster_cleanup():
        print("stopping cluster")
        return runner.stop_cluster(reactor)

    cleanup_trigger_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                                       cluster_cleanup)

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(destination=FileDestination(
        file=open("%s.log" % (base_path.basename(), ), "a")),
                                       reactor=reactor,
                                       capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger('before', 'shutdown', log_writer.stopService)

    yield runner.ensure_keys(reactor)
    cluster = yield runner.start_cluster(reactor)

    save_managed_config(options['cert-directory'], options['config'], cluster)
    managed_config_file = options['cert-directory'].child("managed.yaml")
    managed_config = create_managed_config(options['config'], cluster)
    managed_config_file.setContent(
        yaml.safe_dump(managed_config, default_flow_style=False))

    if options['distribution'] in ('centos-7', ):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_journal(reactor, node.address,
                            remote_logs_file).addErrback(write_failure)
    elif options['distribution'] in ('ubuntu-14.04', ):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_upstart(reactor, node.address,
                            remote_logs_file).addErrback(write_failure)

    flocker_client = make_client(reactor, cluster)
    yield wait_for_nodes(reactor, flocker_client, len(cluster.agent_nodes))

    if options['no-keep']:
        print("not keeping cluster")
    else:
        save_environment(options['cert-directory'], cluster,
                         options.package_source())
        reactor.removeSystemEventTrigger(cleanup_trigger_id)
Example #10
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(destination=FileDestination(
        file=open("%s.log" % (base_path.basename(), ), "a")),
                                       reactor=reactor,
                                       capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger('before', 'shutdown', log_writer.stopService)

    cluster = None
    results = []
    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)
        if options['distribution'] in ('centos-7', ):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(
                    capture_journal(reactor, node.address, remote_logs_file))
        elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                results.append(
                    capture_upstart(reactor, node.address, remote_logs_file))
        gather_deferreds(results)

        if options['apps-per-node'] > 0:
            config = _build_config(cluster, options['template'],
                                   options['apps-per-node'])
            yield _configure(reactor, cluster, config)

        result = 0

    except BaseException:
        result = 1
        raise
    finally:
        if options['no-keep'] or result == 1:
            runner.stop_cluster(reactor)
        else:
            if cluster is None:
                print("Didn't finish creating the cluster.")
                runner.stop_cluster(reactor)
            else:
                print("The following variables describe the cluster:")
                environment_variables = get_trial_environment(cluster)
                for environment_variable in environment_variables:
                    print("export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    ))
                print("Be sure to preserve the required files.")

    raise SystemExit(result)
def _parse_file(self, kind, args):
    if args == "-":
        get_file = lambda: stdout
    else:
        get_file = lambda: open(args, "a")
    return lambda reactor: FileDestination(get_file())
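This variant drops rotation entirely, in which case eliot's to_file helper, sketched here, does the same job in one call by building and registering the FileDestination itself:

from eliot import to_file

# Equivalent to add_destinations(FileDestination(file=open("app.log", "ab"))).
to_file(open("app.log", "ab"))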
Example #12
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    def cluster_cleanup():
        print("stopping cluster")
        return runner.stop_cluster(reactor)

    cleanup_trigger_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                                       cluster_cleanup)

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    yield runner.ensure_keys(reactor)
    cluster = yield runner.start_cluster(reactor)

    managed_config_file = options['cert-directory'].child("managed.yaml")
    managed_config = create_managed_config(options['config'], cluster)
    managed_config_file.setContent(
        yaml.safe_dump(managed_config, default_flow_style=False)
    )

    if options['distribution'] in ('centos-7',):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_journal(reactor, node.address,
                            remote_logs_file).addErrback(write_failure)
    elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
        remote_logs_file = open("remote_logs.log", "a")
        for node in cluster.all_nodes:
            capture_upstart(reactor, node.address,
                            remote_logs_file).addErrback(write_failure)

    flocker_client = _make_client(reactor, cluster)
    yield _wait_for_nodes(reactor, flocker_client, len(cluster.agent_nodes))

    if options['no-keep']:
        print("not keeping cluster")
    else:
        environment_variables = get_trial_environment(cluster)
        environment_strings = list()
        for environment_variable in environment_variables:
            environment_strings.append(
                "export {name}={value};\n".format(
                    name=environment_variable,
                    value=shell_quote(
                        environment_variables[environment_variable]
                    ),
                )
            )
        environment = ''.join(environment_strings)
        print("The following variables describe the cluster:")
        print(environment)
        env_file = options['cert-directory'].child("environment.env")
        env_file.setContent(environment)
        print("The variables are also saved in {}".format(
            env_file.path
        ))
        print("Be sure to preserve the required files.")

        reactor.removeSystemEventTrigger(cleanup_trigger_id)
Example #13
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)

    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")
        ),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    cluster = None
    try:
        yield runner.ensure_keys(reactor)
        cluster = yield runner.start_cluster(reactor)

        if options['distribution'] in ('centos-7',):
            remote_logs_file = open("remote_logs.log", "a")
            for node in cluster.all_nodes:
                capture_journal(reactor, node.address, remote_logs_file)

        if not options["no-pull"]:
            yield perform(
                make_dispatcher(reactor),
                parallel([
                    run_remotely(
                        username='******',
                        address=node.address,
                        commands=task_pull_docker_images()
                    ) for node in cluster.agent_nodes
                ]),
            )

        result = yield run_tests(
            reactor=reactor,
            cluster=cluster,
            trial_args=options['trial-args'])
    except BaseException:
        result = 1
        raise
    finally:
        # We delete the nodes unless the user asked to keep them.
        if not options['keep']:
            runner.stop_cluster(reactor)
        else:
            print "--keep specified, not destroying nodes."
            if cluster is None:
                print ("Didn't finish creating the cluster.")
            else:
                print ("To run acceptance tests against these nodes, "
                       "set the following environment variables: ")

                environment_variables = get_trial_environment(cluster)

                for environment_variable in environment_variables:
                    print "export {name}={value};".format(
                        name=environment_variable,
                        value=shell_quote(
                            environment_variables[environment_variable]),
                    )

    raise SystemExit(result)
def to_fd(reactor):
    f = stdio_fds.get(fd)
    if f is None:
        f = os.fdopen(fd, "w")
    return FileDestination(f)
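The closure above picks up fd and stdio_fds from its enclosing scope; a self-contained sketch of the same idea, with a hypothetical helper name and stdio mapping:

import os
import sys

from eliot import FileDestination

# Reuse the already-open standard streams where possible; otherwise wrap the
# raw descriptor in a fresh file object.
STDIO_FDS = {1: sys.stdout, 2: sys.stderr}

def destination_for_fd(fd):
    f = STDIO_FDS.get(fd)
    if f is None:
        f = os.fdopen(fd, "w")
    return FileDestination(f)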