Example #1
def _async_deploy(ctx, job_ctx, targets, role, extra_vars, partial):
    job = Job.load(job_ctx)
    job.bind(ctx.request.id)

    failures = []
    try:
        redis = _async_deploy.redis
        for return_data in _async_deploy.executor(targets).deploy(role, extra_vars, partial):
            target, retval = parse_exe_return(return_data)

            # recreate retval for deploy endpoint
            state, name, retval = parse_exe_retval(retval)
            retval = create_exe_retval(state, name, None)

            job.update(target, retval, redis)
            if isExeFailure(retval):
                failures.append(target)

        failed = bool(failures)
        for target in targets:
            job.update_done(redis, target, target in failures)
        job.done(redis, failed)

    except (ExecutorPrepareError, ExecutorDeployError, ExecutorNoMatchError):
        msg = "got executor error, {0}".format(excinst())
        LOG.error(msg)
        job.done(redis, failed=True, error=msg)
    except:
        msg = "got unexpected error, {0}".format(excinst())
        LOG.error(msg)
        job.done(redis, failed=True, error=msg)
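
The deploy task above leans on a handful of return-value helpers (parse_exe_return, parse_exe_retval, create_exe_retval, isExeFailure) whose implementations are not part of this listing. The sketch below only mirrors the shapes implied by the call sites, assuming each return item is a single-entry {target: retval} mapping and a retval is a (state, name, payload) triple; it is an illustration, not the project's actual code.

# Illustrative sketch only -- inferred from the call sites above.
EXE_FAILED = "FAILED"   # assumed failure marker

def parse_exe_return(return_data):
    """Split a single-entry {target: retval} mapping into (target, retval)."""
    target, retval = next(iter(return_data.items()))
    return target, retval

def parse_exe_retval(retval):
    """Unpack a retval triple into (state, name, payload)."""
    state, name, payload = retval
    return state, name, payload

def create_exe_retval(state, name, payload):
    """Build a retval triple; the deploy task above drops the payload."""
    return (state, name, payload)

def isExeFailure(retval):
    """Treat any retval whose state equals EXE_FAILED as a failure."""
    return parse_exe_retval(retval)[0] == EXE_FAILED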
Example #2
def _async_ping(ctx, job_ctx, targets):
    job = Job.load(job_ctx)
    job.bind(ctx.request.id)

    failed = False
    try:
        redis = _async_ping.redis
        for return_data in _async_ping.executor(targets).ping():
            target, retval = parse_exe_return(return_data)

            job.update(target, retval, redis)
            job.update_done(redis, target, isExeFailure(retval))

            if isExeFailure(retval):
                failed = True

        job.done(redis, failed)

    except (ExecutorPrepareError, ExecutorNoMatchError):
        msg = "got executor error, {0}".format(excinst())
        LOG.error(msg)
        job.done(redis, failed=True, error=msg)
    except:
        msg = "got unexpected error, {0}".format(excinst())
        LOG.error(msg)
        job.done(redis, failed=True, error=msg)
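
_async_ping (like _async_deploy above) reads a redis handle and an executor factory off the task object itself and binds ctx.request.id, which suggests these functions are registered as bound celery tasks on a custom task base class. The wiring below is a guess at that pattern, not the project's actual celery_init; the base-class name and its attributes are assumptions.

# Hypothetical wiring -- a custom base task carrying shared resources, so the
# task body can reach them as `_async_ping.redis` / `_async_ping.executor`.
from celery import Celery, Task
import redis as redis_lib

app = Celery("exe")

class RunnerTask(Task):
    redis = redis_lib.StrictRedis()   # shared connection (assumed)
    executor = None                   # set to the executor factory at startup

@app.task(bind=True, base=RunnerTask)
def _async_ping(ctx, job_ctx, targets):
    # with bind=True the first argument is the task instance, so
    # ctx.request.id is the celery task id that job.bind() records above
    pass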
Example #3
    def _run_tasks(self, play, reaper):
        """ Init TQM and run play. """
        tqm = TaskQueueManager(inventory=self._inventory,
                               variable_manager=self._varmanager,
                               loader=self._loader,
                               options=self._opts,
                               passwords=None,
                               stdout_callback=reaper)
        # With multiprocessing, the parent cannot handle an exception raised
        #   by the child process.
        # That means the try/except in `runner._async_deploy` cannot know
        #   what happened here, and the whole celery worker process would
        #   stop working without exiting.
        # Solution:
        #   1, handle ansible exceptions here (inside the executor).
        #   2, never raise another exception inside the `except` block,
        #       because this piece of code may run under another `fork()`.
        #   3, because of <2>, use `reaper` to tell the outside that
        #       something went wrong.
        try:
            tqm.run(play)
        except AnsibleError:
            reaper.reaper_exception(ExecutorPrepareError(str(excinst())))
        finally:
            tqm.cleanup()
            reaper.done()
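
The reaper passed in here doubles as the ansible stdout callback and as the channel that carries errors back out of the forked worker, as the comment above explains. A minimal shape for such a callback might look like the sketch below; only the method names reaper_exception and done come from this example, everything else (including the use of ansible 2.x's CallbackBase) is assumed.

# Minimal "reaper"-style callback sketch: record per-host results and any
# executor-side exception instead of letting it die inside the forked process.
from ansible.plugins.callback import CallbackBase

class Reaper(CallbackBase):

    def __init__(self):
        super(Reaper, self).__init__()
        self.results = []       # (host, result) pairs collected during the run
        self.exception = None   # set via reaper_exception() on AnsibleError
        self.finished = False

    def v2_runner_on_ok(self, result):
        self.results.append((result._host.get_name(), result._result))

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.results.append((result._host.get_name(), result._result))

    def reaper_exception(self, exc):
        self.exception = exc    # surfaced to the caller after the run

    def done(self):
        self.finished = True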
Example #4
    def executor(self, targets=[]):
        """ Give runner subclasses access to the executor plugin. """
        if self._executor_plugin is None:
            plugins = PluginLoader(ExecutorPrototype, self.cfg.modules).plugins
            plugins += EXECUTORS
            for EXECUTOR in plugins:
                if EXECUTOR.name() == self.cfg.executor:
                    self._executor_plugin = EXECUTOR
                    LOG.info("using executor: <{0}>".format(EXECUTOR.name()))
                    break
            if self._executor_plugin is None:
                raise ConfigError(
                    "executor plugin <{0}> could not be loaded".format(
                        self.cfg.executor))
        if self._executor_plugin_opts is None:
            try:
                self._executor_plugin_opts = CONF.module(self.cfg.executor)
                LOG.info("executor plugin opts of <{0}> loaded".format(
                    self.cfg.executor))
            except ConfigError:
                self._executor_plugin_opts = {}
                LOG.warning(
                    "no executor opts configuration found for plugin <{0}>".
                    format(self.cfg.executor))
        # CONF.module() returns an opts object exposing .dict_opts, while the
        # fallback above is a plain dict, so accept both
        opts = getattr(self._executor_plugin_opts, "dict_opts",
                       self._executor_plugin_opts)
        try:
            return self._executor_plugin(targets, **opts)
        except TypeError:
            raise ExecutorPrepareError(
                "{0} bad executor implementation.".format(excinst()))
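
From the loader above, an executor plugin is a subclass of ExecutorPrototype exposing a name() class method and a constructor taking (targets, **opts); the resulting instance provides operations such as ping() and deploy() that yield per-target return data (see Examples #1 and #2). A bare-bones plugin consistent with those call sites might look like this; everything beyond what the loader actually calls is an assumption, and the prototype class below is only a stand-in so the sketch is self-contained.

# Hypothetical executor plugin -- not the project's actual interface.
class ExecutorPrototype(object):
    """Stand-in for the project's real prototype class (not shown here)."""

class EchoExecutor(ExecutorPrototype):

    @classmethod
    def name(cls):
        return "echo"                     # matched against cfg.executor

    def __init__(self, targets, **opts):  # opts come from the plugin's CONF section
        self._targets = targets
        self._opts = opts

    def ping(self):
        # yield one {target: retval} mapping per host, as consumed by
        # parse_exe_return() in Examples #1 and #2
        for target in self._targets:
            yield {target: ("OK", "ping", None)}

    def deploy(self, role, extra_vars, partial):
        for target in self._targets:
            yield {target: ("OK", role, None)}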
Example #5
def exe_logger_init(logcf):
    try:
        logger_init(logcf.error_log, logcf.log_level)
        return open_logfile(logcf.access_log)
    except IOError:
        raise ConfigError("cannot open logfile for write, \"{0}\"".format(
            excinst()))
    except ValueError:
        raise ConfigError("invalid log level")
Example #6
    def handle(self, *args, **kwargs):
        """ Handle api request by invoke `runner.handle()`. 

        All exception raised by runner will be catched here, and convert them
        into cherrypy `HTTPError()` with corresponding status code and message.
        """
        try:
            return self._runner.handle(*args, **kwargs)
        except JobDeleteError:
            raise cherrypy.HTTPError(status.BAD_REQUEST, excinst().message)
        except JobConflictError:
            raise cherrypy.HTTPError(status.CONFLICT, excinst().message)
        except JobNotSupportedError:
            raise cherrypy.HTTPError(status.INTERNAL_SERVER_ERROR,
                                     excinst().message)
        except (JobNotExistsError, ExecutorNoMatchError):
            raise cherrypy.HTTPError(status.NOT_FOUND, excinst().message)
        except:
            cherrypy.log("error response 500", traceback=True)
            raise cherrypy.HTTPError(status.INTERNAL_SERVER_ERROR)
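
The handler above maps runner exceptions onto HTTP errors via cherrypy.HTTPError; status is evidently a module of numeric constants matching the standard codes (400, 404, 409, 500). How the class is mounted is not part of this listing, but a typical cherrypy setup would look roughly like the sketch below, with every name other than cherrypy's own being a placeholder.

# Placeholder resource mounted with stock cherrypy APIs; the real class would
# delegate to a runner via handle() as in the example above.
import cherrypy

class JobsAPI(object):

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def default(self, *args, **kwargs):
        return {"args": args, "kwargs": kwargs}

if __name__ == "__main__":
    cherrypy.quickstart(JobsAPI(), "/api")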
Example #7
    def __init__(self, worker, exe_conf="", **options):
        try:
            if not exe_conf:
                raise ConfigError(
                    "no config file given, need to parse the exe conf "
                    "before starting the celery worker.")
            cfgread(exe_conf)
            celery_init(worker.app)
        except ConfigError:
            LOG.error("error while trying to parse config file, {0}".format(
                excinst()))
            sys.exit(1)
Example #8
    def _run_pbs(self, playbooks, reaper):
        """ Init PBEX and run playbooks. """
        pbex = PlaybookExecutor(playbooks=playbooks,
                                inventory=self._inventory,
                                variable_manager=self._varmanager,
                                loader=self._loader,
                                options=self._opts,
                                passwords=None)
        pbex._tqm._stdout_callback = reaper
        # Same reason as in `self._run_tasks`: handle ansible errors here and
        #   report them back through `reaper`.
        try:
            pbex.run()
        except AnsibleError:
            reaper.reaper_exception(ExecutorPrepareError(str(excinst())))
        reaper.done()
Example #9
def exe_main():
    logger_bootstrap()
    
    try:
        args = exe_argparse()
        cf = exe_cfgparse(args.conf)
        logcf = exe_logger_cfgparse()
        access_log = exe_logger_init(logcf)

        celery_init(AsyncRunner)
        api = APIServer()
        api.set_access_log(access_log)

    except ConfigError:
        LOG.error("error while try to parse config file, {0}".format(excinst()))
        sys.exit(1)

    api.run(args.daemon)
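
exe_main only touches two attributes of the parsed arguments, args.conf and args.daemon. A minimal exe_argparse compatible with that usage could look like this; the flag names and help texts are assumptions.

# Assumed argument parser -- only the two attributes used by exe_main are known.
import argparse

def exe_argparse():
    parser = argparse.ArgumentParser(prog="exe")
    parser.add_argument("-c", "--conf", required=True,
                        help="path to the exe config file")
    parser.add_argument("-d", "--daemon", action="store_true",
                        help="run the API server as a daemon")
    return parser.parse_args()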
Example #10
def cfgread(config_file):
    """ ConfigFile reader, register sections to global `CONF` instance. """

    cfg = ConfigParser()
    if not hasattr(cfg, 'read_file'):
        cfg.read_file = cfg.readfp

    try:
        with open(config_file) as cfp:
            cfg.read_file(cfp)
    except:
        raise ConfigError("cannot open/read configfile, {0}".format(excinst()))

    for _cs in cfg.sections():
        CONF.regisiter_opts(_cs, {
            key: val.strip('\'').strip('"') for key, val in cfg.items(_cs)})

    return CONF
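
cfgread accepts a plain INI-style file: every section becomes a set of options registered on the global CONF object, with surrounding single or double quotes stripped from the values. A file along the following lines would parse cleanly; all section and option names here are made up for illustration, the only implied behaviour being that Example #4 later reads a section back by executor name via CONF.module().

# illustrative exe.conf -- section and option names are placeholders
[api]
listen = "0.0.0.0"
port = 8080

[ansible]
forks = "10"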
Example #11
    def _load_plugins(self):
        """ Load exe plugins from imported python modules. """
        _path = sys.path
        _modules = []

        sys.path = self._pymodule_path
        for mod in self._pymodule_name:
            try:
                m = importlib.import_module(mod)
            except ImportError:
                LOG.error("bad module <{0}>, <{1}>".format(mod, excinst()))
                continue  # skip modules that failed to import
            for attr, obj in vars(m).items():
                try:
                    if issubclass(obj,
                                  self._plugin_pt) and obj != self._plugin_pt:
                        _modules.append(obj)
                        LOG.info("module {0} loaded".format(obj))
                except TypeError:  # issubclass() arg 1 must be a class
                    continue
        sys.path = _path

        return _modules
Example #12
def _async_release(ctx, job_ctx, targets, appname, apptype, revision, rollback,
                   extra_opts):
    job = Job.load(job_ctx)
    job.bind(ctx.request.id)

    failures = []
    try:
        redis = _async_release.redis
        executor = _async_release.executor(targets)

        try:
            rh = _async_release.release_plugin(apptype)(targets, appname,
                                                        executor)
        except TypeError:
            raise ReleasePrepareError(
                "{0} bad release plugin implementation".format(excinst()))

        returner = None
        try:
            if rollback:
                LOG.info(
                    "rollback to <{0}> using <{1}> with args <{2}> on <{3}>".
                    format(revision, rh.hname(), extra_opts, rh.hosts))
                returner = rh.rollback(revision, **extra_opts)
            else:
                if revision == REVISION_QUERY:
                    LOG.info(
                        "query revision using <{0}> with args <{1}> on <{2}>".
                        format(rh.hname(), extra_opts, rh.hosts))
                    returner = rh.revision(**extra_opts)
                else:
                    LOG.info(
                        "release rev <{0}> using <{1}> with args <{2}> on <{3}>"
                        .format(revision, rh.hname(), extra_opts, rh.hosts))
                    returner = rh.release(revision, **extra_opts)
        except TypeError:
            raise ReleasePrepareError("{0} bad release plugin args".format(
                excinst()))

        for return_data in returner:
            target, retval = parse_exe_return(return_data)

            job.update(target, retval, redis)
            if isExeFailure(retval):
                LOG.error("F %s" % target)
                failures.append(target)

        failed = bool(failures)
        for target in targets:
            job.update_done(redis, target, target in failures)
        job.done(redis, failed)

    except (ExecutorPrepareError, ExecutorDeployError, ExecutorNoMatchError):
        msg = "got executor error, {0}".format(excinst())
        LOG.error(msg)
        job.done(redis, failed=True, error=msg)
    except ReleasePrepareError:
        msg = "got release plugin error, {0}".format(excinst())
        LOG.error(msg)
        job.done(redis, failed=True, error=msg)
    except ReleaseAbort:
        msg = "release aborted by plugin, {0}".format(excinst())
        LOG.error(msg)
        job.done(redis, failed=True, error=msg)
    except ReleaseError:
        msg = "release aborted by plugin, got error, {0}".format(excinst())
        LOG.error(msg)
        job.done(redis, failed=True, error=msg)
    except:
        msg = "release aborted, got unexpected error, {0}".format(excinst())
        LOG.error(msg)
        job.done(redis, failed=True, error=msg)
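
The release task resolves a release plugin by apptype, constructs it with (targets, appname, executor), and then calls release(), rollback(), or revision() depending on the request, iterating over the same per-target return data as the executor operations. A skeletal plugin consistent with those call sites might look like the sketch below; the base class and every detail beyond the calls made above are assumptions.

# Skeleton inferred from the call sites in _async_release; not the project's
# actual release-plugin interface.
class DummyReleasePlugin(object):   # the real base class is not shown here

    def __init__(self, targets, appname, executor):
        self._targets = targets
        self._appname = appname
        self._executor = executor   # executor instance from Example #4

    def hname(self):
        return "dummy"              # shown in the log messages above

    @property
    def hosts(self):
        return self._targets

    def release(self, revision, **extra_opts):
        # yield one {target: retval} mapping per host
        for target in self._targets:
            yield {target: ("OK", "release", None)}

    def rollback(self, revision, **extra_opts):
        for target in self._targets:
            yield {target: ("OK", "rollback", None)}

    def revision(self, **extra_opts):
        for target in self._targets:
            yield {target: ("OK", "revision", None)}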