def test_pass_user_password(self):
        """Check that etcd credentials pass through into etcd_args."""
        # With no credentials configured, both fields default to None.
        config = FakeConfig('host', 1, None, None)
        factory = etcdutils.EtcdClientFactory(config)
        self.assertThat(factory.etcd_args['username'], matchers.Equals(None))
        self.assertThat(factory.etcd_args['password'], matchers.Equals(None))

        # Configured credentials are copied through verbatim.
        config = FakeConfig('host', 1, 'uuu', 'ppp')
        factory = etcdutils.EtcdClientFactory(config)
        self.assertThat(factory.etcd_args['username'], matchers.Equals('uuu'))
        self.assertThat(factory.etcd_args['password'], matchers.Equals('ppp'))
# Example #2
    def __init__(self):
        """Create the etcd client factory and the election/journal keys."""
        self.client_factory = etcdutils.EtcdClientFactory(cfg.CONF.ml2_vpp)

        # Key space used for leader election, plus the key written to
        # kick the journal worker awake.
        self.election_key_space = LEADIN + '/election'
        self.journal_kick_key = LEADIN + '/election/kick-journal'
        LOG.debug('Journal manager init complete')
    def __init__(self, service_plugin):
        """Wire the TaaS driver up to etcd and start its watcher threads."""
        LOG.debug("Loading TaasEtcdDriver.")
        super(TaasEtcdDriver, self).__init__(service_plugin)

        self.client_factory = etcdutils.EtcdClientFactory(cfg.CONF.ml2_vpp)

        # Make sure the shared TaaS state directory exists before anything
        # starts watching it.
        state_path = LEADIN + '/state_taas'
        helper = etcdutils.EtcdHelper(self.client_factory.client())
        helper.ensure_dir(state_path)

        self.taas_service = FeatureTaasService(
            service_plugin,
            self.client_factory.client(),
            'TaasService',
            state_path)
        self.taas_flow = FeatureTaasFlow(
            service_plugin,
            self.client_factory.client(),
            'TaasFlow',
            state_path)

        # Each feature watches etcd from its own green thread.
        eventlet.spawn(self.taas_service.watch_forever)
        eventlet.spawn(self.taas_flow.watch_forever)
# Example #4
    def setUp(self, mck):
        """Prepare a secured (JWT-signing) etcd client for the tests.

        The mock's crypto calls are routed to self.fake_crypto, and the
        factory is forced into a non-reconnecting, hostless mode so no
        real etcd is contacted.
        """
        super(EtcdClientSecuredTestCase, self).setUp()

        mck.side_effect = self.fake_crypto

        config_opts.register_vpp_opts(cfg.CONF)
        vpp_conf = cfg.CONF.ml2_vpp
        vpp_conf.jwt_controller_name_pattern = "etcd.*"
        vpp_conf.jwt_node_private_key = "jwt_private_key.pem"
        vpp_conf.jwt_node_cert = "jwt_node_cert.pem"
        vpp_conf.jwt_ca_cert = "jwt_ca_cert.pem"
        vpp_conf.jwt_signing = True
        vpp_conf.jwt_max_duration = 0

        # In-memory key store used by the fake crypto hooks.
        self.kdb = {}

        self.client_factory = etcdutils.EtcdClientFactory(vpp_conf)
        self.client_factory.etcd_args['allow_reconnect'] = False
        self.client_factory.etcd_args['host'] = ''
        raw_client = self.client_factory.client()
        self.client = etcdutils.SignedEtcdJSONWriter(raw_client)
    def parse_config_test_run(self, host, port, user=None, pw=None):
        """Build a client factory from a fake config; return its parsed host."""
        factory = etcdutils.EtcdClientFactory(FakeConfig(host, port, user, pw))
        return factory.etcd_args['host']
# Example #6
    def __init__(self, notify_bound):
        """Set up the etcd communicator shared between Neutron and agents.

        :param notify_bound: callback invoked once an agent reports, via
            etcd, that a port is completely attached on the VPP side.
        """
        super(EtcdAgentCommunicator, self).__init__()
        LOG.debug("Using etcd host:%s port:%s user:%s",
                  cfg.CONF.ml2_vpp.etcd_host, cfg.CONF.ml2_vpp.etcd_port,
                  cfg.CONF.ml2_vpp.etcd_user)

        # This is a function that is called when a port has been
        # notified from the agent via etcd as completely attached.

        # We call this when we're certain that the VPP on the far end
        # has definitely bound the port, and has dropped a vhost-user
        # socket where it can be found.

        # This is more important than it seems, becaus libvirt will
        # hang, because qemu ignores its monitor port, when qemu is
        # waiting for a partner to connect with on its vhost-user
        # interfaces.  It can't start the VM - that requires
        # information from its partner it can't guess at - but it
        # shouldn't hang the monitor - nevertheless...  So we notify
        # when the port is there and ready, and qemu is never put into
        # this state by Nova.
        self.notify_bound = notify_bound

        self.client_factory = etcdutils.EtcdClientFactory(cfg.CONF.ml2_vpp)

        # For Liberty support, we have to have a memory between notifications
        self.deleted_rule_secgroup_id = {}

        # We need certain directories to exist
        self.state_key_space = LEADIN + '/state'
        self.port_key_space = LEADIN + '/nodes'
        self.secgroup_key_space = LEADIN + '/global/secgroups'
        self.remote_group_key_space = LEADIN + '/global/remote_group'
        self.gpe_key_space = LEADIN + '/global/networks/gpe'
        self.election_key_space = LEADIN + '/election'
        self.journal_kick_key = self.election_key_space + '/kick-journal'

        etcd_client = self.client_factory.client()
        etcd_helper = etcdutils.EtcdHelper(etcd_client)
        etcd_helper.ensure_dir(self.state_key_space)
        etcd_helper.ensure_dir(self.port_key_space)
        etcd_helper.ensure_dir(self.secgroup_key_space)
        etcd_helper.ensure_dir(self.election_key_space)
        etcd_helper.ensure_dir(self.remote_group_key_space)

        self.secgroup_enabled = cfg.CONF.SECURITYGROUP.enable_security_group
        if self.secgroup_enabled:
            self.register_secgroup_event_handler()

        # TODO(ijw): .../state/<host> lists all known hosts, and they
        # heartbeat when they're functioning

        # From this point on, there are multiple threads: ensure that
        # we don't re-use the etcd_client from multiple threads
        # simultaneously
        etcd_helper = None
        etcd_client = None

        # Pick the registry event name available in this Neutron release.
        # A missing module attribute raises AttributeError, so catch only
        # that; the previous broad `except Exception` could silently mask
        # unrelated failures during attribute resolution.
        try:
            # Liberty, Mitaka
            ev = events.AFTER_INIT
        except AttributeError:
            # Newton and on
            ev = events.AFTER_CREATE

        registry.subscribe(self.start_threads, resources.PROCESS, ev)