Example 1

An earlier form of _call_orch_apply_nfs. Given a virtual_ip it deploys the NFS daemons on a fixed non-default port (12049) behind an ingress service listening on the default NFS port 2049; without one it deploys standalone NFS.
 def _call_orch_apply_nfs(self, placement, virtual_ip=None):
     if virtual_ip:
         # nfs + ingress
         # run NFS on non-standard port
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=self.cluster_id,
             pool=self.pool_name,
             namespace=self.pool_ns,
             placement=PlacementSpec.from_string(placement),
             # use non-default port so we don't conflict with ingress
             port=12049)
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
         ispec = IngressSpec(
             service_type='ingress',
             service_id='nfs.' + self.cluster_id,
             backend_service='nfs.' + self.cluster_id,
             frontend_port=2049,  # default nfs port
             monitor_port=9049,
             virtual_ip=virtual_ip)
         completion = self.mgr.apply_ingress(ispec)
         orchestrator.raise_if_exception(completion)
     else:
         # standalone nfs
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=self.cluster_id,
             pool=self.pool_name,
             namespace=self.pool_ns,
             placement=PlacementSpec.from_string(placement))
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
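This snippet (and the revision in Example 2) assumes a handful of orchestrator imports. A minimal sketch of them, based on where these classes live in the Ceph source tree; module paths can shift between releases, so treat this as an approximation:

    import logging
    from typing import Optional

    import orchestrator
    from ceph.deployment.service_spec import (
        IngressSpec, NFSServiceSpec, PlacementSpec)

    log = logging.getLogger(__name__)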
Example 2

A later revision of the same method: the cluster id, placement, and port are parameters, the pool/namespace arguments are gone, and the NFS backend and monitor ports are derived from the requested frontend port.
 def _call_orch_apply_nfs(
     self,
     cluster_id: str,
     placement: Optional[str],
     virtual_ip: Optional[str] = None,
     port: Optional[int] = None,
 ) -> None:
     if not port:
         port = 2049  # default nfs port
     if virtual_ip:
         # nfs + ingress
         # run NFS on non-standard port
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=cluster_id,
             placement=PlacementSpec.from_string(placement),
             # use non-default port so we don't conflict with ingress
             port=10000 + port)  # semi-arbitrary, fix me someday
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
         ispec = IngressSpec(
             service_type='ingress',
             service_id='nfs.' + cluster_id,
             backend_service='nfs.' + cluster_id,
             frontend_port=port,
             monitor_port=7000 + port,  # semi-arbitrary, fix me someday
             virtual_ip=virtual_ip)
         completion = self.mgr.apply_ingress(ispec)
         orchestrator.raise_if_exception(completion)
     else:
         # standalone nfs
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=cluster_id,
             placement=PlacementSpec.from_string(placement),
             port=port)
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
     log.debug(
         "Successfully deployed nfs daemons with cluster id %s and placement %s",
         cluster_id, placement)
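This revision derives all three ports from the single user-supplied frontend port. A small self-contained sketch of that layout, with the offsets lifted straight from the code above (the 10000/7000 shifts are the snippet's own "semi-arbitrary" choices, not a fixed Ceph convention):

    def nfs_ingress_ports(port: int = 2049) -> dict:
        """Port layout used when NFS sits behind ingress."""
        return {
            'frontend': port,          # what clients connect to
            'backend': 10000 + port,   # where the NFS daemons actually listen
            'monitor': 7000 + port,    # haproxy monitor/health endpoint
        }

    # With the default port this reproduces the constants hard-coded in
    # Example 1: backend 12049, monitor 9049.
    assert nfs_ingress_ports() == {
        'frontend': 2049, 'backend': 12049, 'monitor': 9049}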
Example 3

A cephadm unit test that deploys an RGW-backed ingress service and asserts, byte for byte, the keepalived.conf and haproxy.cfg that cephadm generates for the given IngressSpec.
    @patch("cephadm.serve.CephadmServe._run_cephadm")  # injects _run_cephadm; needs: from unittest.mock import patch
    def test_ingress_config(self, _run_cephadm,
                            cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            cephadm_module.cache.update_host_networks(
                'test', {'1.2.3.0/24': {
                    'if0': ['1.2.3.4/32']
                }})

            # the ingress backend
            s = RGWSpec(service_id="foo",
                        placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast')

            ispec = IngressSpec(service_type='ingress',
                                service_id='test',
                                backend_service='rgw.foo',
                                frontend_port=8089,
                                monitor_port=8999,
                                monitor_user='admin',
                                monitor_password='12345',
                                keepalived_password='12345',
                                virtual_interface_networks=['1.2.3.0/24'],
                                virtual_ip="1.2.3.4/32")
            with with_service(cephadm_module,
                              s) as _, with_service(cephadm_module,
                                                    ispec) as _:
                # generate the keepalived conf based on the specified spec
                keepalived_generated_conf = cephadm_module.cephadm_services[
                    'ingress'].keepalived_generate_config(
                        CephadmDaemonDeploySpec(
                            host='test',
                            daemon_id='ingress',
                            service_name=ispec.service_name()))

                keepalived_expected_conf = {
                    'files': {
                        'keepalived.conf':
                        '# This file is generated by cephadm.\n'
                        'vrrp_script check_backend {\n    '
                        'script "/usr/bin/curl http://localhost:8999/health"\n    '
                        'weight -20\n    '
                        'interval 2\n    '
                        'rise 2\n    '
                        'fall 2\n}\n\n'
                        'vrrp_instance VI_0 {\n  '
                        'state MASTER\n  '
                        'priority 100\n  '
                        'interface if0\n  '
                        'virtual_router_id 51\n  '
                        'advert_int 1\n  '
                        'authentication {\n      '
                        'auth_type PASS\n      '
                        'auth_pass 12345\n  '
                        '}\n  '
                        'unicast_src_ip 1::4\n  '
                        'unicast_peer {\n  '
                        '}\n  '
                        'virtual_ipaddress {\n    '
                        '1.2.3.4/32 dev if0\n  '
                        '}\n  '
                        'track_script {\n      '
                        'check_backend\n  }\n'
                        '}'
                    }
                }

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf

                # generate the haproxy conf based on the specified spec
                haproxy_generated_conf = cephadm_module.cephadm_services[
                    'ingress'].haproxy_generate_config(
                        CephadmDaemonDeploySpec(
                            host='test',
                            daemon_id='ingress',
                            service_name=ispec.service_name()))

                haproxy_expected_conf = {
                    'files': {
                        'haproxy.cfg':
                        '# This file is generated by cephadm.'
                        '\nglobal\n    log         '
                        '127.0.0.1 local2\n    '
                        'chroot      /var/lib/haproxy\n    '
                        'pidfile     /var/lib/haproxy/haproxy.pid\n    '
                        'maxconn     8000\n    '
                        'daemon\n    '
                        'stats socket /var/lib/haproxy/stats\n'
                        '\ndefaults\n    '
                        'mode                    http\n    '
                        'log                     global\n    '
                        'option                  httplog\n    '
                        'option                  dontlognull\n    '
                        'option http-server-close\n    '
                        'option forwardfor       except 127.0.0.0/8\n    '
                        'option                  redispatch\n    '
                        'retries                 3\n    '
                        'timeout queue           20s\n    '
                        'timeout connect         5s\n    '
                        'timeout http-request    1s\n    '
                        'timeout http-keep-alive 5s\n    '
                        'timeout client          1s\n    '
                        'timeout server          1s\n    '
                        'timeout check           5s\n    '
                        'maxconn                 8000\n'
                        '\nfrontend stats\n    '
                        'mode http\n    '
                        'bind 1.2.3.4:8999\n    '
                        'bind localhost:8999\n    '
                        'stats enable\n    '
                        'stats uri /stats\n    '
                        'stats refresh 10s\n    '
                        'stats auth admin:12345\n    '
                        'http-request use-service prometheus-exporter if { path /metrics }\n    '
                        'monitor-uri /health\n'
                        '\nfrontend frontend\n    '
                        'bind 1.2.3.4:8089\n    '
                        'default_backend backend\n\n'
                        'backend backend\n    '
                        'option forwardfor\n    '
                        'balance static-rr\n    '
                        'option httpchk HEAD / HTTP/1.0\n    '
                        'server ' + haproxy_generated_conf[1][0] +
                        ' 1::4:80 check weight 100\n'
                    }
                }

                assert haproxy_generated_conf[0] == haproxy_expected_conf
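Judging by how the test indexes the results ([0] for the config, [1][0] for the server name spliced into haproxy.cfg), both generate_config calls return a pair: the config-files dict first, then a list of backend daemon/server names. A hedged sketch of unpacking that pair; the helper name is illustrative, not part of the cephadm API:

    def unpack_generated(conf_and_deps):
        # conf_and_deps[0] is {'files': {...}}, as asserted above;
        # conf_and_deps[1] holds the names haproxy.cfg uses as server entries.
        config, deps = conf_and_deps
        return config['files'], deps

    # e.g.: files, deps = unpack_generated(haproxy_generated_conf)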
Example 4

An excerpt from a parametrized scheduling test: an IngressSpec pinned to the 10.0.0.0/8 network, together with the per-host networks that drive where the ingress daemons are placed.
         },
     }, [], [
         'rgw:host1(10.0.0.1:80)', 'rgw:host2(10.0.0.2:80)',
         'rgw:host1(10.0.0.1:81)', 'rgw:host2(10.0.0.2:81)',
         'rgw:host1(10.0.0.1:82)', 'rgw:host2(10.0.0.2:82)'
     ], [
         'rgw:host1(10.0.0.1:80)', 'rgw:host2(10.0.0.2:80)',
         'rgw:host1(10.0.0.1:81)', 'rgw:host2(10.0.0.2:81)',
         'rgw:host1(10.0.0.1:82)', 'rgw:host2(10.0.0.2:82)'
     ], []),
 NodeAssignmentTest4(
     IngressSpec(
         service_type='ingress',
         service_id='rgw.foo',
         frontend_port=443,
         monitor_port=8888,
         virtual_ip='10.0.0.20/8',
         backend_service='rgw.foo',
         placement=PlacementSpec(label='foo'),
         networks=['10.0.0.0/8'],
     ), {
         'host1': {
             '10.0.0.0/8': {
                 'eth0': ['10.0.0.1']
             }
         },
         'host2': {
             '10.0.0.0/8': {
                 'eth1': ['10.0.0.2']
             }
         },
         'host3': {