Example #1
def main():

    debug = int(os.environ["GNT_DEBUG"])

    logname = pathutils.GetLogFilename("jobs")
    utils.SetupLogging(logname, "job-startup", debug=debug)

    (job_id, livelock_name) = _GetMasterInfo()

    utils.SetupLogging(logname, "job-%s" % (job_id, ), debug=debug)

    exit_code = 1
    try:
        logging.debug("Preparing the context and the configuration")
        context = masterd.GanetiContext(livelock_name)

        logging.debug("Registering a SIGTERM handler")

        cancel = [False]

        def _TermHandler(signum, _frame):
            logging.info("Killed by signal %d", signum)
            cancel[0] = True

        signal.signal(signal.SIGTERM, _TermHandler)

        logging.debug("Picking up job %d", job_id)
        context.jobqueue.PickupJob(job_id)

        # waiting for the job to finish
        time.sleep(1)
        while not context.jobqueue.HasJobBeenFinalized(job_id):
            if cancel[0]:
                logging.debug("Got cancel request, cancelling job %d", job_id)
                r = context.jobqueue.CancelJob(job_id)
                logging.debug("CancelJob result for job %d: %s", job_id, r)
                cancel[0] = False
            time.sleep(1)

        # wait until the queue finishes
        logging.debug("Waiting for the queue to finish")
        while context.jobqueue.PrepareShutdown():
            time.sleep(1)
        logging.debug("Shutting the queue down")
        context.jobqueue.Shutdown()
        exit_code = 0
    except Exception:  # pylint: disable=W0703
        logging.exception("Exception when trying to run job %d", job_id)
    finally:
        logging.debug("Job %d finalized", job_id)
        logging.debug("Removing livelock file %s", livelock_name.GetPath())
        os.remove(livelock_name.GetPath())

    sys.exit(exit_code)
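A detail worth calling out in the example above: since a Python signal handler may fire at any point, the handler and the polling loop communicate through a one-element list that the closure mutates in place. Below is a minimal, self-contained sketch of that cooperative-cancel pattern; run_until_done and its is_done stub are illustrative stand-ins, not part of the Ganeti API.

import signal
import time

def run_until_done(is_done, poll_interval=1.0):
    # Mutable cell: the closure below can set it, the loop can read it.
    cancel = [False]

    def _term_handler(signum, _frame):
        print("Killed by signal %d" % signum)
        cancel[0] = True

    signal.signal(signal.SIGTERM, _term_handler)

    while not is_done():
        if cancel[0]:
            print("Got cancel request, stopping")
            return False
        time.sleep(poll_interval)
    return True

if __name__ == "__main__":
    deadline = time.time() + 5
    run_until_done(lambda: time.time() > deadline)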
Example #2
def main():

    debug = int(os.environ["GNT_DEBUG"])

    logname = pathutils.GetLogFilename("jobs")
    utils.SetupLogging(logname, "job-post-hooks-startup", debug=debug)
    job_id = _GetMasterInfo()
    utils.SetupLogging(logname, "job-%s-post-hooks" % (job_id, ), debug=debug)

    try:
        job = JobQueue.SafeLoadJobFromDisk(None,
                                           job_id,
                                           try_archived=False,
                                           writable=False)
        assert job.id == job_id, "The job id received %d differs " % job_id + \
          "from the serialized one %d" % job.id

        target_op = None
        for op in job.ops:
            if op.start_timestamp is None:
                break
            target_op = op

        # We should run post hooks only if opcode execution has been started.
        # Note that currently the opcodes inside a job execute sequentially.
        if target_op is None:
            sys.exit(0)

        livelock_name = livelock.LiveLockName("post-hooks-executor-%d" %
                                              job_id)
        context = masterd.GanetiContext(livelock_name)
        cfg_tmp = context.GetConfig(job_id)
        # Get a static snapshot of the config and release it in order to prevent
        # further synchronizations.
        cfg = cfg_tmp.GetDetachedConfig()
        cfg_tmp.OutDate()

        hooksmaster.ExecGlobalPostHooks(
            target_op.input.OP_ID, cfg.GetMasterNodeName(),
            context.GetRpc(cfg).call_hooks_runner, logging.warning,
            cfg.GetClusterName(), cfg.GetMasterNode(), job_id,
            constants.POST_HOOKS_STATUS_DISAPPEARED)
    except Exception:  # pylint: disable=W0703
        logging.exception("Exception when trying to run post hooks of job %d",
                          job_id)
    finally:
        logging.debug("Post hooks exec for disappeared job %d finalized",
                      job_id)
        logging.debug("Removing livelock file %s", livelock_name.GetPath())
        os.remove(livelock_name.GetPath())

    sys.exit(0)
Example #3
    def testReopen(self):
        logfile = utils.PathJoin(self.tmpdir, "reopen.log")
        logfile2 = utils.PathJoin(self.tmpdir, "reopen.log.OLD")
        logger = logging.Logger("TestLogger")
        reopen_fn = utils.SetupLogging(logfile,
                                       "test",
                                       console_logging=False,
                                       syslog=constants.SYSLOG_NO,
                                       stderr_logging=False,
                                       multithreaded=False,
                                       root_logger=logger)
        self.assertTrue(callable(reopen_fn))

        self.assertEqual(utils.ReadFile(logfile), "")
        logger.error("This is a test")
        self.assertTrue(utils.ReadFile(logfile).endswith("This is a test\n"))

        os.rename(logfile, logfile2)
        assert not os.path.exists(logfile)

        # Notify logger to reopen on the next message
        reopen_fn()
        assert not os.path.exists(logfile)

        # Provoke actual reopen
        logger.error("First message")

        self.assertTrue(utils.ReadFile(logfile).endswith("First message\n"))
        self.assertTrue(utils.ReadFile(logfile2).endswith("This is a test\n"))
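The assertion chain above depends on SetupLogging returning a callable that makes the log file be reopened on the next message, which is exactly what logrotate-style rotation needs. The following stdlib-only sketch reproduces that behaviour (it is not Ganeti's implementation): a logging.FileHandler opened in the default append mode lazily reopens its path on the next emitted record after close().

import logging
import os
import tempfile

def setup_logging(path, name="reopen-sketch"):
    logger = logging.getLogger(name)
    logger.propagate = False
    handler = logging.FileHandler(path)  # append mode by default
    handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    logger.addHandler(handler)

    def reopen():
        # Drop the current stream; the next record reopens `path`,
        # recreating the file if it was renamed away.
        handler.close()

    return logger, reopen

tmpdir = tempfile.mkdtemp()
logfile = os.path.join(tmpdir, "reopen.log")
logger, reopen_fn = setup_logging(logfile)

logger.error("This is a test")
os.rename(logfile, logfile + ".OLD")
reopen_fn()
logger.error("First message")  # lands in a freshly created reopen.log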
Example #4
def Main():
    """Main function.

  """
    (options, _) = ParseOptions()

    utils.SetupLogging(pathutils.LOG_WATCHER,
                       sys.argv[0],
                       debug=options.debug,
                       stderr_logging=options.debug)

    if ShouldPause() and not options.ignore_pause:
        logging.debug("Pause has been set, exiting")
        return constants.EXIT_SUCCESS

    # Try to acquire global watcher lock in shared mode.
    # In case we are in the global watcher process, this lock will be held by all
    # child processes (one for each nodegroup) and will only be released when
    # all of them have finished running.
    lock = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
        lock.Shared(blocking=False)
    except (EnvironmentError, errors.LockError) as err:
        logging.error("Can't acquire lock on %s: %s",
                      pathutils.WATCHER_LOCK_FILE, err)
        return constants.EXIT_SUCCESS
Example #5
def Main():
  """Main function.

  """
  utils.SetupLogging(pathutils.LOG_BURNIN, sys.argv[0],
                     debug=False, stderr_logging=True)

  return Burner().BurninCluster()
Example #6
def Main():
    """Main function.

  """
    (options, _) = ParseOptions()

    utils.SetupLogging(pathutils.LOG_WATCHER,
                       sys.argv[0],
                       debug=options.debug,
                       stderr_logging=options.debug)

    if ShouldPause() and not options.ignore_pause:
        logging.debug("Pause has been set, exiting")
        return constants.EXIT_SUCCESS

    # Try to acquire global watcher lock in shared mode.
    # In case we are in the global watcher process, this lock will be held by all
    # child processes (one for each nodegroup) and will only be released when
    # all of them have finished running.
    lock = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
        lock.Shared(blocking=False)
    except (EnvironmentError, errors.LockError) as err:
        logging.error("Can't acquire lock on %s: %s",
                      pathutils.WATCHER_LOCK_FILE, err)
        return constants.EXIT_SUCCESS
    if options.nodegroup is None:
        fn = _GlobalWatcher
    else:
        # Per-nodegroup watcher
        fn = _GroupWatcher

    try:
        return fn(options)
    except (SystemExit, KeyboardInterrupt):
        raise
    except NotMasterError:
        logging.debug("Not master, exiting")
        return constants.EXIT_NOTMASTER
    except errors.ResolverError as err:
        logging.error("Cannot resolve hostname '%s', exiting", err.args[0])
        return constants.EXIT_NODESETUP_ERROR
    except errors.JobQueueFull:
        logging.error("Job queue is full, can't query cluster state")
    except errors.JobQueueDrainError:
        logging.error("Job queue is drained, can't maintain cluster state")
    except Exception as err:  # pylint: disable=W0703
        logging.exception(str(err))
        return constants.EXIT_FAILURE

    return constants.EXIT_SUCCESS
Example #7
  def testSimple(self):
    logfile = utils.PathJoin(self.tmpdir, "basic.log")
    logger = logging.Logger("TestLogger")
    self.assertTrue(callable(utils.SetupLogging(logfile, "test",
                                                console_logging=False,
                                                syslog=constants.SYSLOG_NO,
                                                stderr_logging=False,
                                                multithreaded=False,
                                                root_logger=logger)))
    self.assertEqual(utils.ReadFile(logfile), "")
    logger.error("This is a test")

    # Ensure SetupLogging used custom logger
    logging.error("This message should not show up in the test log file")

    self.assertTrue(utils.ReadFile(logfile).endswith("This is a test\n"))
Example #8
def Main():
  """Main function.

  """
  (options, _) = ParseOptions()

  utils.SetupLogging(pathutils.LOG_WATCHER, sys.argv[0],
                     debug=options.debug, stderr_logging=options.debug)

  if ShouldPause() and not options.ignore_pause:
    logging.debug("Pause has been set, exiting")
    return constants.EXIT_SUCCESS

  # Try to acquire global watcher lock in shared mode
  lock = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
  try:
    lock.Shared(blocking=False)
  except (EnvironmentError, errors.LockError) as err:
    logging.error("Can't acquire lock on %s: %s",
                  pathutils.WATCHER_LOCK_FILE, err)
    return constants.EXIT_SUCCESS
Example #9
def main():

  debug = int(os.environ["GNT_DEBUG"])

  logname = pathutils.GetLogFilename("jobs")
  utils.SetupLogging(logname, "job-startup", debug=debug)

  (job_id, llock, secret_params_serialized) = _SetupJob()

  secret_params = ""
  if secret_params_serialized:
    secret_params_json = serializer.LoadJson(secret_params_serialized)
    secret_params = RestorePrivateValueWrapping(secret_params_json)

  utils.SetupLogging(logname, "job-%s" % (job_id,), debug=debug)

  try:
    logging.debug("Preparing the context and the configuration")
    context = masterd.GanetiContext(llock)

    logging.debug("Registering signal handlers")

    cancel = [False]
    prio_change = [False]

    def _TermHandler(signum, _frame):
      logging.info("Killed by signal %d", signum)
      cancel[0] = True
    signal.signal(signal.SIGTERM, _TermHandler)

    def _HupHandler(signum, _frame):
      logging.debug("Received signal %d, old flag was %s, will set to True",
                    signum, mcpu.sighupReceived)
      mcpu.sighupReceived[0] = True
    signal.signal(signal.SIGHUP, _HupHandler)

    def _User1Handler(signum, _frame):
      logging.info("Received signal %d, indicating priority change", signum)
      prio_change[0] = True
    signal.signal(signal.SIGUSR1, _User1Handler)

    job = context.jobqueue.SafeLoadJobFromDisk(job_id, False)

    job.SetPid(os.getpid())

    if secret_params:
      for i in range(0, len(secret_params)):
        if hasattr(job.ops[i].input, "osparams_secret"):
          job.ops[i].input.osparams_secret = secret_params[i]

    execfun = mcpu.Processor(context, job_id, job_id).ExecOpCode
    proc = _JobProcessor(context.jobqueue, execfun, job)
    result = _JobProcessor.DEFER
    while result != _JobProcessor.FINISHED:
      result = proc()
      if result == _JobProcessor.WAITDEP and not cancel[0]:
        # Normally, the scheduler should avoid starting a job whose
        # dependencies are not yet finalized. So warn, but wait and continue.
        logging.warning("Got started despite a dependency not yet finished")
        time.sleep(5)
      if cancel[0]:
        logging.debug("Got cancel request, cancelling job %d", job_id)
        r = context.jobqueue.CancelJob(job_id)
        job = context.jobqueue.SafeLoadJobFromDisk(job_id, False)
        proc = _JobProcessor(context.jobqueue, execfun, job)
        logging.debug("CancelJob result for job %d: %s", job_id, r)
        cancel[0] = False
      if prio_change[0]:
        logging.debug("Received priority-change request")
        try:
          fname = os.path.join(pathutils.LUXID_MESSAGE_DIR, "%d.prio" % job_id)
          new_prio = int(utils.ReadFile(fname))
          utils.RemoveFile(fname)
          logging.debug("Changing priority of job %d to %d", job_id, new_prio)
          r = context.jobqueue.ChangeJobPriority(job_id, new_prio)
          job = context.jobqueue.SafeLoadJobFromDisk(job_id, False)
          proc = _JobProcessor(context.jobqueue, execfun, job)
          logging.debug("Result of changing priority of %d to %d: %s", job_id,
                        new_prio, r)
        except Exception: # pylint: disable=W0703
          logging.warning("Informed of priority change, but could not"
                          " read new priority")
        prio_change[0] = False

  except Exception: # pylint: disable=W0703
    logging.exception("Exception when trying to run job %d", job_id)
  finally:
    logging.debug("Job %d finalized", job_id)
    logging.debug("Removing livelock file %s", llock.GetPath())
    os.remove(llock.GetPath())

  sys.exit(0)
Example #10
def GenericMain(daemon_name,
                optionparser,
                check_fn,
                prepare_fn,
                exec_fn,
                multithreaded=False,
                console_logging=False,
                default_ssl_cert=None,
                default_ssl_key=None,
                warn_breach=False):
    """Shared main function for daemons.

  @type daemon_name: string
  @param daemon_name: daemon name
  @type optionparser: optparse.OptionParser
  @param optionparser: initialized optionparser with daemon-specific options
                       (common -f -d options will be handled by this module)
  @type check_fn: function which accepts (options, args)
  @param check_fn: function that checks start conditions and exits if they're
                   not met
  @type prepare_fn: function which accepts (options, args)
  @param prepare_fn: function that is run before forking, or None;
      its result will be passed as the third parameter to exec_fn, or
      if None was passed in, we will just pass None to exec_fn
  @type exec_fn: function which accepts (options, args, prepare_results)
  @param exec_fn: function that's executed with the daemon's pid file held, and
                  runs the daemon itself.
  @type multithreaded: bool
  @param multithreaded: Whether the daemon uses threads
  @type console_logging: boolean
  @param console_logging: if True, the daemon will fall back to the system
                          console if logging fails
  @type default_ssl_cert: string
  @param default_ssl_cert: Default SSL certificate path
  @type default_ssl_key: string
  @param default_ssl_key: Default SSL key path
  @type warn_breach: bool
  @param warn_breach: issue a warning at daemon launch time, before
      daemonizing, about the possibility of breaking parameter privacy
      invariants through the otherwise helpful debug logging.

  """
    optionparser.add_option("-f",
                            "--foreground",
                            dest="fork",
                            help="Don't detach from the current terminal",
                            default=True,
                            action="store_false")
    optionparser.add_option("-d",
                            "--debug",
                            dest="debug",
                            help="Enable some debug messages",
                            default=False,
                            action="store_true")
    optionparser.add_option("--syslog",
                            dest="syslog",
                            help="Enable logging to syslog (except debug"
                            " messages); one of 'no', 'yes' or 'only' [%s]" %
                            constants.SYSLOG_USAGE,
                            default=constants.SYSLOG_USAGE,
                            choices=["no", "yes", "only"])

    family = ssconf.SimpleStore().GetPrimaryIPFamily()
    # family will default to AF_INET if there is no ssconf file (e.g. when
    # upgrading a cluster from 2.2 -> 2.3). This is intended, as Ganeti clusters
    # <= 2.2 cannot be AF_INET6
    if daemon_name in constants.DAEMONS_PORTS:
        default_bind_address = constants.IP4_ADDRESS_ANY
        if family == netutils.IP6Address.family:
            default_bind_address = constants.IP6_ADDRESS_ANY

        default_port = netutils.GetDaemonPort(daemon_name)

        # For networked daemons we allow choosing the port and bind address
        optionparser.add_option("-p",
                                "--port",
                                dest="port",
                                help="Network port (default: %s)" %
                                default_port,
                                default=default_port,
                                type="int")
        optionparser.add_option("-b",
                                "--bind",
                                dest="bind_address",
                                help=("Bind address (default: '%s')" %
                                      default_bind_address),
                                default=default_bind_address,
                                metavar="ADDRESS")
        optionparser.add_option("-i",
                                "--interface",
                                dest="bind_interface",
                                help=("Bind interface"),
                                metavar="INTERFACE")

    if default_ssl_key is not None and default_ssl_cert is not None:
        optionparser.add_option("--no-ssl",
                                dest="ssl",
                                help="Do not secure HTTP protocol with SSL",
                                default=True,
                                action="store_false")
        optionparser.add_option("-K",
                                "--ssl-key",
                                dest="ssl_key",
                                help=("SSL key path (default: %s)" %
                                      default_ssl_key),
                                default=default_ssl_key,
                                type="string",
                                metavar="SSL_KEY_PATH")
        optionparser.add_option("-C",
                                "--ssl-cert",
                                dest="ssl_cert",
                                help=("SSL certificate path (default: %s)" %
                                      default_ssl_cert),
                                default=default_ssl_cert,
                                type="string",
                                metavar="SSL_CERT_PATH")

    # Disable the use of fork(2) if the daemon uses threads
    if multithreaded:
        utils.DisableFork()

    options, args = optionparser.parse_args()

    if getattr(options, "bind_interface", None) is not None:
        if options.bind_address != default_bind_address:
            msg = (
                "Can't specify both, bind address (%s) and bind interface (%s)"
                % (options.bind_address, options.bind_interface))
            print(msg, file=sys.stderr)
            sys.exit(constants.EXIT_FAILURE)
        interface_ip_addresses = \
          netutils.GetInterfaceIpAddresses(options.bind_interface)
        if family == netutils.IP6Address.family:
            if_addresses = interface_ip_addresses[constants.IP6_VERSION]
        else:
            if_addresses = interface_ip_addresses[constants.IP4_VERSION]
        if len(if_addresses) < 1:
            msg = "Failed to find IP for interface %s" % options.bind_interace
            print(msg, file=sys.stderr)
            sys.exit(constants.EXIT_FAILURE)
        options.bind_address = if_addresses[0]

    if getattr(options, "ssl", False):
        ssl_paths = {
            "certificate": options.ssl_cert,
            "key": options.ssl_key,
        }

        for name, path in ssl_paths.items():
            if not os.path.isfile(path):
                print("SSL %s file '%s' was not found" % (name, path),
                      file=sys.stderr)
                sys.exit(constants.EXIT_FAILURE)

        # TODO: By initiating http.HttpSslParams here we would only read the files
        # once and have a proper validation (isfile returns False on directories)
        # at the same time.

    result, running_uid, expected_uid = _VerifyDaemonUser(daemon_name)
    if not result:
        msg = ("%s started using wrong user ID (%d), expected %d" %
               (daemon_name, running_uid, expected_uid))
        print(msg, file=sys.stderr)
        sys.exit(constants.EXIT_FAILURE)

    if check_fn is not None:
        check_fn(options, args)

    log_filename = constants.DAEMONS_LOGFILES[daemon_name]

    # node-daemon logging in lib/http/server.py, _HandleServerRequestInner
    if options.debug and warn_breach:
        sys.stderr.write(constants.DEBUG_MODE_CONFIDENTIALITY_WARNING %
                         daemon_name)

    if options.fork:
        # Newer GnuTLS versions (>= 3.3.0) use a library constructor for
        # initialization and open /dev/urandom on library load time, way before we
        # fork(). Closing /dev/urandom causes subsequent ganeti.http.client
        # requests to fail and the process to receive a SIGABRT. As we cannot
        # reliably detect GnuTLS's socket, we work our way around this by keeping
        # all fds referring to /dev/urandom open.
        noclose_fds = []
        for fd in os.listdir("/proc/self/fd"):
            try:
                if os.readlink(os.path.join("/proc/self/fd",
                                            fd)) == "/dev/urandom":
                    noclose_fds.append(int(fd))
            except EnvironmentError:
                # The fd might have disappeared (although it shouldn't as we're running
                # single-threaded).
                continue

        utils.CloseFDs(noclose_fds=noclose_fds)
        (wpipe, stdio_reopen_fn) = utils.Daemonize(logfile=log_filename)
    else:
        (wpipe, stdio_reopen_fn) = (None, None)

    log_reopen_fn = \
      utils.SetupLogging(log_filename, daemon_name,
                         debug=options.debug,
                         stderr_logging=not options.fork,
                         multithreaded=multithreaded,
                         syslog=options.syslog,
                         console_logging=console_logging)

    # Reopen log file(s) on SIGHUP
    signal.signal(
        signal.SIGHUP,
        compat.partial(_HandleSigHup, [log_reopen_fn, stdio_reopen_fn]))

    try:
        utils.WritePidFile(utils.DaemonPidFileName(daemon_name))
    except errors.PidFileLockError as err:
        print("Error while locking PID file:\n%s" % err, file=sys.stderr)
        sys.exit(constants.EXIT_FAILURE)

    try:
        try:
            logging.info("%s daemon startup", daemon_name)
            if callable(prepare_fn):
                prep_results = prepare_fn(options, args)
            else:
                prep_results = None
        except Exception as err:
            utils.WriteErrorToFD(wpipe, _BeautifyError(err))
            raise

        if wpipe is not None:
            # we're done with the preparation phase, we close the pipe to
            # let the parent know it's safe to exit
            os.close(wpipe)

        exec_fn(options, args, prep_results)
    finally:
        utils.RemoveFile(utils.DaemonPidFileName(daemon_name))
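Since the docstring above spells out the contract between the four callbacks, a usage sketch follows naturally. Everything below is hypothetical: "ganeti-demod" is an invented daemon name that would have to be registered in constants.DAEMONS_LOGFILES (and constants.DAEMONS_PORTS, for a networked daemon) before GenericMain's lookups could succeed, and GenericMain is assumed to be importable from ganeti.daemon.

import optparse

from ganeti import daemon

def CheckDemod(options, args):
    # Verify start conditions; exit here if they are not met.
    pass

def PrepareDemod(options, args):
    # Runs before forking; the return value is handed to ExecDemod
    # as prep_results.
    return None

def ExecDemod(options, args, prep_results):
    # Runs with the daemon's PID file held: the actual main loop.
    pass

def main():
    parser = optparse.OptionParser(usage="%prog [-f] [-d]",
                                   description="Hypothetical demo daemon")
    daemon.GenericMain("ganeti-demod", parser,
                       CheckDemod, PrepareDemod, ExecDemod,
                       multithreaded=False, console_logging=True)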
Example #11
def main():

  debug = int(os.environ["GNT_DEBUG"])

  logname = pathutils.GetLogFilename("jobs")
  utils.SetupLogging(logname, "job-startup", debug=debug)

  (job_id, livelock_name) = _GetMasterInfo()

  utils.SetupLogging(logname, "job-%s" % (job_id,), debug=debug)

  exit_code = 1
  try:
    logging.debug("Preparing the context and the configuration")
    context = masterd.GanetiContext(livelock_name)

    logging.debug("Registering signal handlers")

    cancel = [False]
    prio_change = [False]

    def _TermHandler(signum, _frame):
      logging.info("Killed by signal %d", signum)
      cancel[0] = True
    signal.signal(signal.SIGTERM, _TermHandler)

    def _HupHandler(signum, _frame):
      logging.debug("Received signal %d, old flag was %s, will set to True",
                    signum, mcpu.sighupReceived)
      mcpu.sighupReceived[0] = True
    signal.signal(signal.SIGHUP, _HupHandler)

    def _User1Handler(signum, _frame):
      logging.info("Received signal %d, indicating priority change", signum)
      prio_change[0] = True
    signal.signal(signal.SIGUSR1, _User1Handler)

    logging.debug("Picking up job %d", job_id)
    context.jobqueue.PickupJob(job_id)

    # waiting for the job to finish
    time.sleep(1)
    while not context.jobqueue.HasJobBeenFinalized(job_id):
      if cancel[0]:
        logging.debug("Got cancel request, cancelling job %d", job_id)
        r = context.jobqueue.CancelJob(job_id)
        logging.debug("CancelJob result for job %d: %s", job_id, r)
        cancel[0] = False
      if prio_change[0]:
        logging.debug("Received priority-change request")
        try:
          fname = os.path.join(pathutils.LUXID_MESSAGE_DIR, "%d.prio" % job_id)
          new_prio = int(utils.ReadFile(fname))
          utils.RemoveFile(fname)
          logging.debug("Changing priority of job %d to %d", job_id, new_prio)
          r = context.jobqueue.ChangeJobPriority(job_id, new_prio)
          logging.debug("Result of changing priority of %d to %d: %s", job_id,
                        new_prio, r)
        except Exception: # pylint: disable=W0703
          logging.warning("Informed of priority change, but could not"
                          " read new priority")
        prio_change[0] = False
      time.sleep(1)

    # wait until the queue finishes
    logging.debug("Waiting for the queue to finish")
    while context.jobqueue.PrepareShutdown():
      time.sleep(1)
    logging.debug("Shutting the queue down")
    context.jobqueue.Shutdown()
    exit_code = 0
  except Exception: # pylint: disable=W0703
    logging.exception("Exception when trying to run job %d", job_id)
  finally:
    logging.debug("Job %d finalized", job_id)
    logging.debug("Removing livelock file %s", livelock_name.GetPath())
    os.remove(livelock_name.GetPath())

  sys.exit(exit_code)
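The priority-change branch above implies a small file-plus-signal protocol on the requester's side: write the new priority to <job_id>.prio under pathutils.LUXID_MESSAGE_DIR, then send SIGUSR1 to the job process so the flag-polling loop notices. A hedged sketch of that client side follows; the job's PID is taken as a parameter because Ganeti's own tooling resolves it elsewhere.

import os
import signal

def request_priority_change(job_id, job_pid, new_prio, message_dir):
    # Drop the new priority where the job's loop will look for it ...
    fname = os.path.join(message_dir, "%d.prio" % job_id)
    with open(fname, "w") as fh:
        fh.write("%d" % new_prio)
    # ... then wake the job process; its SIGUSR1 handler sets the
    # prio_change flag checked once per loop iteration.
    os.kill(job_pid, signal.SIGUSR1)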
Example #12
def GenericMain(daemon_name,
                optionparser,
                check_fn,
                prepare_fn,
                exec_fn,
                multithreaded=False,
                console_logging=False,
                default_ssl_cert=None,
                default_ssl_key=None):
    """Shared main function for daemons.

  @type daemon_name: string
  @param daemon_name: daemon name
  @type optionparser: optparse.OptionParser
  @param optionparser: initialized optionparser with daemon-specific options
                       (common -f -d options will be handled by this module)
  @type check_fn: function which accepts (options, args)
  @param check_fn: function that checks start conditions and exits if they're
                   not met
  @type prepare_fn: function which accepts (options, args)
  @param prepare_fn: function that is run before forking, or None;
      its result will be passed as the third parameter to exec_fn, or
      if None was passed in, we will just pass None to exec_fn
  @type exec_fn: function which accepts (options, args, prepare_results)
  @param exec_fn: function that's executed with the daemon's pid file held, and
                  runs the daemon itself.
  @type multithreaded: bool
  @param multithreaded: Whether the daemon uses threads
  @type console_logging: boolean
  @param console_logging: if True, the daemon will fall back to the system
                          console if logging fails
  @type default_ssl_cert: string
  @param default_ssl_cert: Default SSL certificate path
  @type default_ssl_key: string
  @param default_ssl_key: Default SSL key path

  """
    optionparser.add_option("-f",
                            "--foreground",
                            dest="fork",
                            help="Don't detach from the current terminal",
                            default=True,
                            action="store_false")
    optionparser.add_option("-d",
                            "--debug",
                            dest="debug",
                            help="Enable some debug messages",
                            default=False,
                            action="store_true")
    optionparser.add_option("--syslog",
                            dest="syslog",
                            help="Enable logging to syslog (except debug"
                            " messages); one of 'no', 'yes' or 'only' [%s]" %
                            constants.SYSLOG_USAGE,
                            default=constants.SYSLOG_USAGE,
                            choices=["no", "yes", "only"])

    family = ssconf.SimpleStore().GetPrimaryIPFamily()
    # family will default to AF_INET if there is no ssconf file (e.g. when
    # upgrading a cluster from 2.2 -> 2.3). This is intended, as Ganeti clusters
    # <= 2.2 cannot be AF_INET6
    if daemon_name in constants.DAEMONS_PORTS:
        default_bind_address = constants.IP4_ADDRESS_ANY
        if family == netutils.IP6Address.family:
            default_bind_address = constants.IP6_ADDRESS_ANY

        default_port = netutils.GetDaemonPort(daemon_name)

        # For networked daemons we allow choosing the port and bind address
        optionparser.add_option("-p",
                                "--port",
                                dest="port",
                                help="Network port (default: %s)" %
                                default_port,
                                default=default_port,
                                type="int")
        optionparser.add_option("-b",
                                "--bind",
                                dest="bind_address",
                                help=("Bind address (default: '%s')" %
                                      default_bind_address),
                                default=default_bind_address,
                                metavar="ADDRESS")
        optionparser.add_option("-i",
                                "--interface",
                                dest="bind_interface",
                                help=("Bind interface"),
                                metavar="INTERFACE")

    if default_ssl_key is not None and default_ssl_cert is not None:
        optionparser.add_option("--no-ssl",
                                dest="ssl",
                                help="Do not secure HTTP protocol with SSL",
                                default=True,
                                action="store_false")
        optionparser.add_option("-K",
                                "--ssl-key",
                                dest="ssl_key",
                                help=("SSL key path (default: %s)" %
                                      default_ssl_key),
                                default=default_ssl_key,
                                type="string",
                                metavar="SSL_KEY_PATH")
        optionparser.add_option("-C",
                                "--ssl-cert",
                                dest="ssl_cert",
                                help=("SSL certificate path (default: %s)" %
                                      default_ssl_cert),
                                default=default_ssl_cert,
                                type="string",
                                metavar="SSL_CERT_PATH")

    # Disable the use of fork(2) if the daemon uses threads
    if multithreaded:
        utils.DisableFork()

    options, args = optionparser.parse_args()

    if getattr(options, "bind_interface", None) is not None:
        if options.bind_address != default_bind_address:
            msg = (
                "Can't specify both, bind address (%s) and bind interface (%s)"
                % (options.bind_address, options.bind_interface))
            print(msg, file=sys.stderr)
            sys.exit(constants.EXIT_FAILURE)
        interface_ip_addresses = \
          netutils.GetInterfaceIpAddresses(options.bind_interface)
        if family == netutils.IP6Address.family:
            if_addresses = interface_ip_addresses[constants.IP6_VERSION]
        else:
            if_addresses = interface_ip_addresses[constants.IP4_VERSION]
        if len(if_addresses) < 1:
            msg = "Failed to find IP for interface %s" % options.bind_interace
            print >> sys.stderr, msg
            sys.exit(constants.EXIT_FAILURE)
        options.bind_address = if_addresses[0]

    if getattr(options, "ssl", False):
        ssl_paths = {
            "certificate": options.ssl_cert,
            "key": options.ssl_key,
        }

        for name, path in ssl_paths.items():
            if not os.path.isfile(path):
                print >> sys.stderr, "SSL %s file '%s' was not found" % (name,
                                                                         path)
                sys.exit(constants.EXIT_FAILURE)

        # TODO: By initiating http.HttpSslParams here we would only read the files
        # once and have a proper validation (isfile returns False on directories)
        # at the same time.

    result, running_uid, expected_uid = _VerifyDaemonUser(daemon_name)
    if not result:
        msg = ("%s started using wrong user ID (%d), expected %d" %
               (daemon_name, running_uid, expected_uid))
        print(msg, file=sys.stderr)
        sys.exit(constants.EXIT_FAILURE)

    if check_fn is not None:
        check_fn(options, args)

    log_filename = constants.DAEMONS_LOGFILES[daemon_name]

    if options.fork:
        utils.CloseFDs()
        (wpipe, stdio_reopen_fn) = utils.Daemonize(logfile=log_filename)
    else:
        (wpipe, stdio_reopen_fn) = (None, None)

    log_reopen_fn = \
      utils.SetupLogging(log_filename, daemon_name,
                         debug=options.debug,
                         stderr_logging=not options.fork,
                         multithreaded=multithreaded,
                         syslog=options.syslog,
                         console_logging=console_logging)

    # Reopen log file(s) on SIGHUP
    signal.signal(
        signal.SIGHUP,
        compat.partial(_HandleSigHup, [log_reopen_fn, stdio_reopen_fn]))

    try:
        utils.WritePidFile(utils.DaemonPidFileName(daemon_name))
    except errors.PidFileLockError as err:
        print("Error while locking PID file:\n%s" % err, file=sys.stderr)
        sys.exit(constants.EXIT_FAILURE)