Example #1
def get_png(script):
    gnuplot = subprocess.Popen(['gnuplot'], stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    output = gnuplot.communicate(script)
    if output[1]:
        log.error('gnuplot', "Failed to generate the graph. "
                  "Error returned by gnuplot: \n%s", output[1])
    return output[0]
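
A minimal usage sketch for get_png(), not taken from the project: the gnuplot
script below is purely illustrative and assumes the gnuplot binary is on the
PATH; with the png terminal and no "set output" file, gnuplot writes the image
to stdout, which communicate() captures and get_png() returns.

script = "set terminal png size 640,480\nplot sin(x)\n"
png_data = get_png(script)          # raw PNG data produced by gnuplot
with open('sin.png', 'wb') as fd:
    fd.write(png_data)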
Example #2
File: locate.py Project: f3at/feat
def body(connection):
    if len(args) < 1:
        log.error("script", "USAGE: locate.py <agent_id>")
        return
    agent_id = args[0]
    try:
        host = yield locate(connection, agent_id)
    except Exception as e:
        log.error("script", "ERROR: %r", e)
        return  # host is undefined if locate() failed
    log.info("script", "Agent runs at host: %r", host)
Example #3
def push_initial_data(connection):
    global _documents

    for doc in _documents:
        try:
            yield connection.save_document(doc)
        except ConflictError:
            log.error("script", "Document with id %s already exists!", doc.doc_id)

    design = view.generate_design_doc()
    yield connection.save_document(design)
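
The yield-based examples here read like Twisted inlineCallbacks-style
coroutines: each yield waits on a Deferred, and a failed Deferred is re-raised
inside the generator, which is why ConflictError can be caught with a plain
try/except. A minimal sketch of that pattern, assuming Twisted is available
and that connection.save_document() returns a Deferred (save_one is a
hypothetical helper, not part of feat):

from twisted.internet import defer

@defer.inlineCallbacks
def save_one(connection, doc):
    # yield suspends until the Deferred fires; a failure travels back into
    # the generator as an exception, so it can be handled like normal code.
    try:
        yield connection.save_document(doc)
    except ConflictError:
        log.error("script", "Document with id %s already exists!", doc.doc_id)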
Example #4
def unload(name):
    log.info("application", "Starting unloading application %r", name)
    r = get_application_registry()
    application = r.lookup(name)
    if not application:
        log.error("application", "Tried to unload application which is not " "loaded: %r", name)
        return
    try:
        application.unload()
    except Exception as e:
        error.handle_exception("application", e, "Problem while unloading " "application %r", name)
    log.info("application", "Unloading application %r complete", name)
Example #5
def get_png(script):
    gnuplot = subprocess.Popen(['gnuplot'],
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)

    output = gnuplot.communicate(script)
    if output[1]:
        log.error(
            'gnuplot', "Failed to generate the graph. "
            "Error returned by gnuplot: \n%s", output[1])
    return output[0]
Example #6
    def testDefaultLogging(self):
        keeper = DummyLogKeeper()
        log.set_default(keeper)

        log.log("foo", "1")
        log.debug("bar", "2", 42)
        log.info("spam", "3")
        log.warning("bacon", "4", 2, 3, 5)
        log.error("eggs", "4")

        self.assertEqual(keeper.entries,
                         [(LogLevel.log, None, 'foo', '1', (), 1),
                          (LogLevel.debug, None, 'bar', '2', (42, ), 1),
                          (LogLevel.info, None, 'spam', '3', (), 1),
                          (LogLevel.warning, None, 'bacon', '4', (2, 3, 5), 1),
                          (LogLevel.error, None, 'eggs', '4', (), 1)])
Example #7
def push_initial_data(connection, overwrite=False):
    documents = applications.get_initial_data_registry().itervalues()
    for doc in documents:
        try:
            yield connection.save_document(doc)
        except ConflictError:
            if not overwrite:
                log.error('script', 'Document with id %s already exists!',
                          doc.doc_id)
            else:
                yield _update_old(connection, doc)

    design_docs = view.generate_design_docs()
    for design_doc in design_docs:
        try:
            yield connection.save_document(design_doc)
        except ConflictError:
            yield _update_old(connection, design_doc)
Example #8
File: tools.py Project: f3at/feat
def migration_script(connection):
    log.info("script", "Running the migration script.")
    index = yield connection.query_view(view.DocumentByType,
                                        group_level=2, parse_result=False)
    try:
        for (type_name, version), count in index:
            restorator = serialization.lookup(type_name)
            if not restorator:
                log.error(
                    'script', "Failed to lookup the restorator for the "
                    "type name: %s. There are %d objects like this in the"
                    " database. They will not be migrated.", type_name,
                    count)

            if (IVersionAdapter.providedBy(restorator) and
                ((version is None and restorator.version > 1) or
                 (version is not None and version < restorator.version))):
                log.info('script', "I will migrate %d documents of the "
                          "type: %s from version %s to %d", count,
                          type_name, version, restorator.version)

                migrated = 0
                while migrated < count:
                    fetched = yield connection.query_view(
                        view.DocumentByType,
                        key=(type_name, version),
                        limit=15,
                        reduce=False,
                        include_docs=True)

                    migrated += len(fetched)
                    if not fetched:
                        break
                log.info("script", "Migrated %d documents of the type %s "
                         "from %s version to %s", migrated, type_name,
                         version, restorator.version)

    except Exception:
        error.handle_exception("script", None,
                               "Failed running migration script")
        raise
Example #9
File: driver.py Project: f3at/feat
def parse_response(response, tag):
    if response.status < 300:
        if (response.headers.get('content-type') == 'application/json'):
            try:
                return json.loads(response.body)
            except ValueError:
                log.error('couchdb',
                    "Could not parse json data from couchdb. Data: %r",
                    response.body)
                return failure.Failure(DatabaseError("Json parse error"))
        else:
            return response.body
    else:
        msg = ("%s gave %s status with body: %s"
               % (tag, response.status.name, response.body))
        if response.status == http.Status.NOT_FOUND:
            return failure.Failure(NotFoundError(msg))
        elif response.status == http.Status.CONFLICT:
            return failure.Failure(ConflictError(msg))
        else:
            return failure.Failure(DatabaseError(msg))
Example #10
    def testDefaultLogging(self):
        keeper = DummyLogKeeper()
        current = log.get_default()
        log.set_default(keeper)
        self.addCleanup(log.set_default, current)

        log.log("foo", "1")
        log.debug("bar", "2", 42)
        log.info("spam", "3")
        log.warning("bacon", "4", 2, 3, 5)
        log.error("eggs", "4")

        self.assertEqual(
            keeper.entries,
            [
                (LogLevel.log, None, "foo", "1", (), 1),
                (LogLevel.debug, None, "bar", "2", (42,), 1),
                (LogLevel.info, None, "spam", "3", (), 1),
                (LogLevel.warning, None, "bacon", "4", (2, 3, 5), 1),
                (LogLevel.error, None, "eggs", "4", (), 1),
            ],
        )
Example #11
def clean_all_descriptors(connection, dry_run=False):
    rows = yield connection.query_view(view.DocumentByType,
                                       group_level=1, parse_results=False)
    to_delete = list()
    for row in rows:
        type_name = row[0][0]
        restorator = serialization.lookup(type_name)
        if not restorator:
            log.info('cleanup',
                     'Could not lookup restorator for type name: %s. '
                     'There are %s documents of this type.',
                     type_name, row[1])
            continue
        if IDescriptor.implementedBy(restorator):
            log.info('cleanup',
                     'I will delete %s documents of type name: %s',
                     row[1], type_name)
            to_delete.append(type_name)

    if dry_run:
        log.info("cleanup",
                 "Not deleting anything, this is just a dry run.")
        return

    for type_name in to_delete:
        keys = view.DocumentByType.fetch(type_name)
        keys['include_docs'] = False
        rows = yield connection.query_view(
            view.DocumentByType, parse_results=False, **keys)

        for (key, value, doc_id) in rows:
            try:
                yield connection.update_document(doc_id, update.delete)
            except Exception as e:
                log.error("cleanup",
                          "Cannot delete the documents of type %s with ID: %s."
                          " Reason: %s", type_name, doc_id, e)
Example #12
def bootstrap(parser=None, args=None, descriptors=None):
    """Bootstrap a feat process, handling command line arguments.
    @param parser: the option parser to use; more options will be
        added to the parser; if not specified or None
        a new one will be created
    @type  parser: optparse.OptionParser or None
    @param args: the command line arguments to parse; if not specified
        or None, sys.argv[1:] will be used
    @type  args: [str()] or None
    @param descriptors: the descriptors of the agents to start in addition
        to the host agent; if not specified or None,
        no additional agents will be started
    @type  descriptors: [Descriptor()] or None

    @return: the deferred of the bootstrap chain
    @rtype:  defer.Deferred()"""

    tee = log.init()
    # The purpose of having log buffer here, is to be able to dump the
    # log lines to a journal after establishing connection with it.
    # This is done in stage_configure() of net agency Startup procedure.
    tee.add_keeper('buffer', log.LogBuffer(limit=10000))

    # use the resolver from twisted.names instead of the default
    # the reason for this is that ThreadedResolver behaves strangely
    # after the reconnection - raises the DNSLookupError for names
    # which have been resolved while there was no connection
    resolver.installResolver(reactor)

    if parser is None:
        parser = optparse.OptionParser()
        options.add_options(parser)
    try:
        opts, args = check_options(*parser.parse_args(args))
    except Exception as e:
        error.handle_exception('bootstrap', e, "Failed parsing config")
        sys.exit(1)

    if opts.standalone:
        cls = standalone.Agency
    else:
        cls = net_agency.Agency
    config = config_module.Config()
    config.load(os.environ, opts)
    agency = cls(config)

    applications.load('feat.agents.application', 'feat')
    applications.load('feat.gateway.application', 'featmodels')

    d = defer.Deferred()
    reactor.callWhenRunning(d.callback, None)

    if not opts.standalone:
        # specific to running normal agency

        hostdef = opts.hostdef

        if opts.hostres or opts.hostcat or opts.hostports:
            from feat.agents.common import host
            hostdef = host.HostDef()
            for resdef in opts.hostres:
                parts = resdef.split(":", 1)
                name = parts[0]
                value = 1
                if len(parts) > 1:
                    try:
                        value = int(parts[1])
                    except ValueError:
                        raise OptionError(
                            "Invalid host resource: %s" % resdef), \
                            None, sys.exc_info()[2]
                hostdef.resources[name] = value

            for catdef in opts.hostcat:
                name, value = check_category(catdef)
                hostdef.categories[name] = value

            if opts.hostports:
                hostdef.ports_ranges = dict()
            for ports in opts.hostports:
                group, start, stop = tuple(ports.split(":"))
                hostdef.ports_ranges[group] = (int(start), int(stop))

        agency.set_host_def(hostdef)

        d.addCallback(defer.drop_param, agency.initiate)
        for desc, kwargs, name in opts.agents:
            d.addCallback(defer.drop_param, agency.add_static_agent,
                          desc, kwargs, name)
    else:
        # standalone specific
        kwargs = opts.standalone_kwargs or dict()
        to_spawn = opts.agent_id or opts.agents[0][0]
        d.addCallback(defer.drop_param, agency.initiate)
        d.addCallback(defer.drop_param, agency.spawn_agent,
                      to_spawn, **kwargs)
    queue = None
    if opts.agency_daemonize:
        import multiprocessing
        queue = multiprocessing.Queue()

    d.addCallbacks(_bootstrap_success, _bootstrap_failure,
                   callbackArgs=(queue, ), errbackArgs=(agency, queue))

    if not opts.agency_daemonize:
        reactor.run()
    else:
        logname = "%s.%s.log" % ('feat', agency.agency_id)
        logfile = os.path.join(config.agency.logdir, logname)
        log.info("bootstrap", "Daemon processs will be logging to %s",
                 logfile)

        try:
            pid = os.fork()
        except OSError, e:
            sys.stderr.write("Failed to fork: (%d) %s\n" %
                             (e.errno, e.strerror))
            os._exit(1)

        if pid > 0:
            # original process waits for information about what status code
            # to use on exit
            log.info('bootstrap',
                     "Waiting for deamon process to intialize the agency")
            try:
                exit_code, reason = queue.get(timeout=20)
            except multiprocessing.queues.Empty:
                log.error('bootstrap',
                          "20 seconds timeout expires waiting for agency"
                          " in child process to initiate.")
                os._exit(1)
            else:
                log.info('bootstrap', "Process exiting with %d status",
                         exit_code)
                if exit_code:
                    log.info('bootstrap', 'Reason for failure: %s', reason)
                sys.exit(exit_code)
        else:
            # child process performs second fork
            try:
                pid = os.fork()
            except OSError, e:
                sys.stderr.write("Failed to fork: (%d) %s\n" %
                                 (e.errno, e.strerror))
                os._exit(1)
            if pid > 0:
                # child process just exits
                sys.exit(0)
            else:
                # grandchild runs the reactor and logs to an external log file
                log.FluLogKeeper.redirect_to(logfile, logfile)
                reactor.run()
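
A minimal sketch of calling bootstrap() with a custom parser, following the
docstring above; apart from optparse, options.add_options() and bootstrap()
itself, everything here (in particular the extra flag) is hypothetical:

import optparse

parser = optparse.OptionParser()
options.add_options(parser)             # feat's standard agency options
parser.add_option('--my-flag', dest='my_flag', action='store_true',
                  help='hypothetical application-specific flag')

# Parses sys.argv[1:], initiates the agency and runs the reactor.
bootstrap(parser=parser)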