Example #1
def main():
    magnum_service.prepare_service(sys.argv)

    # Enable oslo.reports Guru Meditation error reports for this process.
    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config', group='conductor')

    conductor_id = short_id.generate_id()
    # Each handler below is registered as an RPC endpoint on the conductor
    # topic; k8s_conductor.Handler() serves the Kubernetes-related calls.
    endpoints = [
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        x509keypair_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
    ]

    # Warn at startup if neither Kubernetes Heat template exists on disk.
    if (not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path)
            and not os.path.isfile(cfg.CONF.bay.k8s_coreos_template_path)):
        LOG.error(_LE("The Heat template cannot be found for either k8s "
                      "atomic %(atomic_template)s or coreos "
                      "%(coreos_template)s. Install a template first if you "
                      "want to create a bay.") %
                  {'atomic_template': cfg.CONF.bay.k8s_atomic_template_path,
                   'coreos_template': cfg.CONF.bay.k8s_coreos_template_path})

    server = rpc_service.Service.create(cfg.CONF.conductor.topic,
                                        conductor_id, endpoints)
    launcher = service.launch(cfg.CONF, server)
    launcher.wait()
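
Example #1 is the Magnum conductor's main(): it builds a list of handler objects, with k8s_conductor.Handler() among them, and hands them as RPC endpoints to the conductor service. As a rough illustration of that endpoints pattern, the sketch below builds a generic oslo.messaging RPC server from an endpoints list; EchoEndpoint and build_rpc_server are hypothetical names, and this is not Magnum's actual rpc_service wrapper.

# Minimal sketch, assuming plain oslo.messaging (not Magnum's rpc_service
# wrapper): each object in 'endpoints' exposes its public methods as RPC
# calls on the given topic, which is roughly what Service.create() does above.
from oslo_config import cfg
import oslo_messaging as messaging


class EchoEndpoint(object):
    # Hypothetical endpoint: any public method becomes callable over RPC.
    def echo(self, context, message):
        return message


def build_rpc_server(topic, server_name, endpoints):
    transport = messaging.get_transport(cfg.CONF)
    target = messaging.Target(topic=topic, server=server_name)
    return messaging.get_rpc_server(transport, target, endpoints)

The returned RPC server follows the usual start()/wait()/stop() lifecycle, analogous to what service.launch() drives for the conductor service in the example above.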
Example #2
    def setUp(self):
        super(TestK8sConductor, self).setUp()
        # Handler instance under test.
        self.kube_handler = k8s_conductor.Handler()