def prepare_nodes(nodes_count):
    """Create ``nodes_count`` nodes, their SDN twins and per-node librarians.

    :param nodes_count: number of node pairs to create.
    :returns: dict mapping resource name -> resource for everything created.
    """
    resources = cr.create('nodes', 'templates/nodes', {"count": nodes_count})
    nodes = resources.like('node')
    resources = cr.create('nodes_network', 'templates/nodes_network',
                          {"count": nodes_count})
    nodes_sdn = resources.like('node')
    r = {}

    for node, node_sdn in zip(nodes, nodes_sdn):
        r[node.name] = node
        r[node_sdn.name] = node_sdn

        # LIBRARIAN
        librarian = cr.create('librarian_{}'.format(node.name),
                              'resources/librarian', {})[0]
        r[librarian.name] = librarian
        node.connect(librarian, {})

        # NETWORKING
        # TODO(bogdando) node's IPs should be populated as br-mgmt IPs,
        # but now are hardcoded in templates
        # BUG FIX: the three calls below were duplicated verbatim in the
        # original, creating the same connections/deps twice per node.
        signals.connect(node, node_sdn)
        node_sdn.connect_with_events(librarian, {'module': 'modules'}, {})
        evapi.add_dep(librarian.name, node_sdn.name, actions=('run', 'update'))

    return r
def setup_neutron_agent(node, neutron_server_puppet):
    """Deploy the neutron ML2/OVS, DHCP, L3 and metadata agents on ``node``.

    All agents run only after the neutron server has run.
    :returns: dict of the created agent resources.
    """
    # NEUTRON ML2 PLUGIN & ML2-OVS AGENT WITH GRE
    neutron_agents_ml2 = cr.create(
        'neutron_agents_ml2', 'resources/neutron_agents_ml2_ovs_puppet', {
            # TODO(bogdando) these should come from the node network resource
            'enable_tunneling': True,
            'tunnel_types': ['gre'],
            'local_ip': '10.1.0.13'  # should be the IP addr of the br-mesh int.
        })[0]
    node.connect(neutron_agents_ml2)
    evapi.add_dep(neutron_server_puppet.name, neutron_agents_ml2.name,
                  actions=('run',))

    # NEUTRON DHCP, L3, metadata agents
    neutron_agents_dhcp = cr.create(
        'neutron_agents_dhcp', 'resources/neutron_agents_dhcp_puppet', {})[0]
    node.connect(neutron_agents_dhcp)
    evapi.add_dep(neutron_server_puppet.name, neutron_agents_dhcp.name,
                  actions=('run',))

    neutron_agents_l3 = cr.create(
        'neutron_agents_l3', 'resources/neutron_agents_l3_puppet', {
            # TODO(bogdando) these should come from the node network resource
            'metadata_port': 8775,
            'external_network_bridge': 'br-floating',
        })[0]
    node.connect(neutron_agents_l3)
    evapi.add_dep(neutron_server_puppet.name, neutron_agents_l3.name,
                  actions=('run',))

    neutron_agents_metadata = cr.create(
        'neutron_agents_metadata', 'resources/neutron_agents_metadata_puppet', {
            # BUG FIX: key was misspelled 'sh2ared_secret', so the metadata
            # agent never received its shared secret input.
            'shared_secret': 'secret',
        })[0]
    node.connect(neutron_agents_metadata)
    neutron_server_puppet.connect(neutron_agents_metadata, {
        'auth_host', 'auth_port', 'auth_password', 'auth_tenant', 'auth_user',
    })

    return {'neutron_agents_ml2': neutron_agents_ml2,
            'neutron_agents_dhcp': neutron_agents_dhcp,
            'neutron_agents_metadata': neutron_agents_metadata}
def create_master():
    """Create the 'nodemaster'-tagged master resource if it is missing."""
    master = source.master()
    try:
        resource.load('nodemaster')
    except solar.dblayer.model.DBLayerNotFound:
        # not in the DB yet -- create it from the fuel master info
        cr.create('master', 'f2s/fuel_node',
                  {'index': master[0], 'ip': master[1]},
                  tags=['nodemaster'])
def setup_base(node, librarian):
    """Wire MariaDB and RabbitMQ (with vhost + user) services onto ``node``.

    :returns: dict of the created base-service resources.
    """
    # MARIADB
    mariadb_service = cr.create(
        'mariadb_service1', 'resources/mariadb_service',
        {'image': 'mariadb', 'port': 3306})[0]
    node.connect(mariadb_service)

    # RABBIT
    rabbitmq_service = cr.create(
        'rabbitmq_service1', 'resources/rabbitmq_service/',
        {'management_port': 15672, 'port': 5672})[0]
    openstack_vhost = cr.create(
        'openstack_vhost', 'resources/rabbitmq_vhost/',
        {'vhost_name': 'openstack'})[0]
    openstack_rabbitmq_user = cr.create(
        'openstack_rabbitmq_user', 'resources/rabbitmq_user/',
        {'user_name': 'openstack', 'password': '******'})[0]

    node.connect(rabbitmq_service)
    # rabbit needs puppet modules delivered by the librarian first
    rabbitmq_service.connect_with_events(librarian, {'module': 'modules'}, {})
    evapi.add_dep(librarian.name, rabbitmq_service.name,
                  actions=('run', 'update'))
    rabbitmq_service.connect(openstack_vhost)
    rabbitmq_service.connect(openstack_rabbitmq_user)
    openstack_vhost.connect(openstack_rabbitmq_user, {'vhost_name'})

    return {'mariadb_service': mariadb_service,
            'rabbitmq_service1': rabbitmq_service,
            'openstack_vhost': openstack_vhost,
            'openstack_rabbitmq_user': openstack_rabbitmq_user}
def setup_master(config, user_config):
    """Create the k8s master node and wire docker, kubelet and calico to it.

    Connection credentials are taken from ``user_config``; cluster-wide
    settings come from the ``config`` resource.
    """
    master = cr.create('kube-node-master', 'k8s/node',
                       {'name': 'kube-node-master',
                        'ip': user_config['ip'],
                        'ssh_user': user_config['username'],
                        'ssh_password': user_config['password'],
                        'ssh_key': user_config['ssh_key']})['kube-node-master']
    master.connect(config, {})

    docker = cr.create('kube-docker-master', 'k8s/docker')['kube-docker-master']
    master.connect(docker, {})

    kubelet = cr.create('kubelet-master', 'k8s/kubelet_master')['kubelet-master']
    config.connect(kubelet, {'k8s_version': 'k8s_version'})

    calico = cr.create('calico-master', 'k8s/calico_master',
                       {'options': "--nat-outgoing --ipip"})['calico-master']
    master.connect(calico, {'ip': ['ip', 'etcd_host']})
    config.connect(calico, {'network': 'network',
                            'prefix': 'prefix',
                            'calico_version': 'version'})
    # self-connection builds the etcd authority strings from host/port
    calico.connect(calico, {'etcd_host': 'etcd_authority',
                            'etcd_port': 'etcd_authority',
                            'etcd_authority': 'etcd_authority_internal'})

    config.connect(kubelet,
                   {'service_cluster_ip_range': "service_cluster_ip_range"})
    master.connect(kubelet, {'name': 'master_host'})
    kubelet.connect(kubelet, {'master_host': 'master_address',
                              'master_port': 'master_address'})

    # order: docker -> kubelet -> calico
    add_event(Dep(docker.name, 'run', 'success', kubelet.name, 'run'))
    add_event(Dep(kubelet.name, 'run', 'success', calico.name, 'run'))
def setup_resources():
    """Bootstrap a second solar node and validate every resource created here."""
    ModelMeta.remove_all()

    node2 = cr.create('node2', 'resources/ro_node/', {
        'ip': '10.0.0.4',
        'ssh_key': '/vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key',
        'ssh_user': '******'
    })[0]

    solar_bootstrap2 = cr.create('solar_bootstrap2', 'resources/solar_bootstrap',
                                 {'master_ip': '10.0.0.2'})[0]
    signals.connect(node2, solar_bootstrap2)

    has_errors = False
    # NOTE: scans locals() to pick up every Resource created above, so any
    # resource bound to a local before this point gets validated; adding
    # resources after this loop would silently skip validation.
    for r in locals().values():
        if not isinstance(r, resource.Resource):
            continue

        print 'Validating {}'.format(r.name)
        errors = validation.validate_resource(r)
        if errors:
            has_errors = True
            print 'ERROR: %s: %s' % (r.name, errors)

    # abort the deployment if any resource failed validation
    if has_errors:
        sys.exit(1)
def add_solar_agent(i):
    """Install the solar-agent transport on node ``i`` and register it."""
    solar_agent_transport = cr.create(
        'solar_agent_transport%s' % i, 'resources/transport_solar_agent',
        {'solar_agent_user': '******',
         'solar_agent_password': '******'})[0]

    transports = resource.load('transports%s' % i)
    ssh_transport = resource.load('ssh_transport%s' % i)
    transports_for_solar_agent = cr.create(
        'transports_for_solar_agent%s' % i, 'resources/transports')[0]

    # install solar_agent with ssh
    signals.connect(transports_for_solar_agent, solar_agent_transport, {})
    signals.connect(ssh_transport, transports_for_solar_agent,
                    {'key': 'transports:key',
                     'user': '******',
                     'port': 'transports:port',
                     'name': 'transports:name'})

    # add solar_agent to transports on this node
    signals.connect(solar_agent_transport, transports,
                    {'solar_agent_user': '******',
                     'solar_agent_port': 'transports:port',
                     'solar_agent_password': '******',
                     'name': 'transports:name'})
def node(nobj):
    """Create a fuel_node resource for the given node object, tagged by id."""
    node_id = nobj.data['id']
    cr.create('fuel_node', 'f2s/fuel_node',
              {'index': node_id, 'ip': nobj.data['ip']},
              tags=['node%s' % node_id])
def master(env):
    """Create the fuel master node resource plus its key-generation resource."""
    master_info = source.master()
    cr.create('master', 'vrs/fuel_node',
              {'index': master_info[0], 'ip': master_info[1]})
    # keys are generated against the master node of environment ``env``
    cr.create('genkeys', 'vrs/genkeys',
              {'node': 'node' + master_info[0],
               'index': int(env)})
def setup_riak(nodes_num=None, hosts_mapping=False):
    """Create a riak cluster of ``nodes_num`` nodes and its join/commit events.

    :param nodes_num: cluster size; defaults to the module-level NODES.
    :param hosts_mapping: also wire riak hostnames into the hosts files.
    Exits the process: 1 on validation errors, 0 on success.
    """
    if nodes_num is None:
        nodes_num = NODES
    db.clear()

    resources = cr.create('nodes', 'templates/nodes', {'count': nodes_num})
    nodes = [x for x in resources if x.name.startswith('node')]
    hosts_services = [x for x in resources if x.name.startswith('hosts_file')]

    riak_services = []
    for i in xrange(nodes_num):
        num = i + 1
        r = cr.create('riak_service%d' % num, 'resources/riak_node',
                      {'riak_self_name': 'riak%d' % num,
                       'riak_hostname': 'riak_server%d.solar' % num,
                       'riak_name': 'riak%d@riak_server%d.solar' % (num, num)})[0]
        riak_services.append(r)

    for i, riak in enumerate(riak_services):
        nodes[i].connect(riak)

    # every non-first node joins the first one
    for riak in riak_services[1:]:
        riak_services[0].connect(riak, {'riak_name': 'join_to'})

    if hosts_mapping:
        for riak in riak_services:
            for hosts_file in hosts_services:
                riak.connect_with_events(
                    hosts_file,
                    {'riak_hostname': 'hosts:name', 'ip': 'hosts:ip'})

    res_errors = resource.validate_resources()
    has_errors = False
    for r, error in res_errors:
        click.echo('ERROR: %s: %s' % (r.name, error))
        # BUG FIX: the original reset has_errors to False AFTER this loop,
        # so validation failures never triggered the exit below.
        has_errors = True
    if has_errors:
        click.echo("ERRORS")
        sys.exit(1)

    events = []
    for x in xrange(nodes_num):
        i = x + 1
        if hosts_mapping:
            events.append(Dep('hosts_file%d' % i, 'run', 'success',
                              'riak_service%d' % i, 'run'))
        if i >= 2:
            events.append(React('riak_service%d' % i, 'run', 'success',
                                'riak_service%d' % i, 'join'))
            events.append(React('riak_service%d' % i, 'join', 'success',
                                'riak_service1', 'commit'))
    for event in events:
        add_event(event)

    click.echo('Use solar changes process & orch')
    sys.exit(0)
def create_master():
    """Ensure the 'nodemaster' resource exists, creating it on first call."""
    index, ip = source.master()[0], source.master()[1] if False else (None, None)
    master = source.master()
    try:
        resource.load('nodemaster')
    except solar.dblayer.model.DBLayerNotFound:
        cr.create('master', 'f2s/fuel_node',
                  {'index': master[0],
                   'ip': master[1]},
                  tags=['nodemaster'])
def setup_cinder_volume(node, cinder_puppet):
    """Create the cinder volume group and volume service on ``node``."""
    # CINDER VOLUME
    volume_group = cr.create(
        'cinder_volume_{}'.format(node.name), 'resources/volume_group',
        {'path': '/root/cinder.img', 'volume_name': 'cinder-volume'})[0]
    node.connect(volume_group)

    volume_puppet = cr.create(
        'cinder_volume_puppet', 'resources/cinder_volume_puppet', {})[0]
    node.connect(volume_puppet)
    cinder_puppet.connect(volume_puppet)
    # re-run the volume service whenever base cinder updates
    evapi.add_react(cinder_puppet.name, volume_puppet.name,
                    actions=('update',))
    volume_group.connect(volume_puppet, {'volume_name': 'volume_group'})
    return {'cinder_volume_puppet': volume_puppet}
def setup_master():
    """Create the global config plus a master node with docker/kubelet/calico.

    (A disabled experimental etcd wiring block was removed as dead code.)
    """
    config = cr.create('kube-config', 'k8s/global_config',
                       {'cluster_dns': '10.254.0.10',
                        'cluster_domain': 'cluster.local'})[0]
    master = cr.create('kube-node-master', 'k8s/node',
                       {'name': 'kube-node-master',
                        'ip': '10.0.0.3',
                        'ssh_user': '******',
                        'ssh_password': '******',
                        'ssh_key': None})['kube-node-master']
    master.connect(config, {})

    docker = cr.create('kube-docker-master', 'k8s/docker')['kube-docker-master']
    master.connect(docker, {})

    kubelet = cr.create('kubelet-master', 'k8s/kubelet_master')['kubelet-master']
    calico = cr.create('calico-master', 'k8s/calico_master',
                       {'options': "--nat-outgoing --ipip"})['calico-master']
    master.connect(calico, {'ip': ['ip', 'etcd_host']})
    config.connect(calico, {'network': 'network', 'prefix': 'prefix'})
    # self-connection derives the etcd authority strings from host/port
    calico.connect(calico, {'etcd_host': 'etcd_authority',
                            'etcd_port': 'etcd_authority',
                            'etcd_authority': 'etcd_authority_internal'})

    config.connect(kubelet,
                   {'service_cluster_ip_range': "service_cluster_ip_range"})
    master.connect(kubelet, {'name': 'master_host'})
    kubelet.connect(kubelet, {'master_host': 'master_address',
                              'master_port': 'master_address'})

    add_event(Dep(docker.name, 'run', 'success', kubelet.name, 'run'))
    add_event(Dep(kubelet.name, 'run', 'success', calico.name, 'run'))
def test_create_resource():
    """cr.create on a concrete resource dir yields one resource, named as asked."""
    fixtures_dir = os.path.dirname(os.path.realpath(__file__))
    node_path = os.path.join(fixtures_dir, 'resource_fixtures', 'node')
    created = cr.create('node1', node_path)
    assert len(created) == 1
    assert created[0].name == 'node1'
def run():
    """Create two nodes and wire each node's name/ip into both hosts files."""
    ModelMeta.remove_all()
    resources = cr.create('nodes', 'templates/nodes', {'count': 2})
    node1, node2 = [x for x in resources if x.name.startswith('node')]
    hosts1, hosts2 = [x for x in resources if x.name.startswith('hosts_file')]

    # every node contributes an entry to every hosts file
    for hosts in (hosts1, hosts2):
        for n in (node1, node2):
            n.connect(hosts, {'name': 'hosts:name', 'ip': 'hosts:ip'})
def setup_cinder_scheduler(node, cinder_puppet):
    """Deploy the cinder scheduler, reacting to base cinder updates."""
    # CINDER SCHEDULER
    scheduler = cr.create('cinder_scheduler_puppet',
                          'resources/cinder_scheduler_puppet', {})[0]
    node.connect(scheduler)
    cinder_puppet.connect(scheduler)
    evapi.add_react(cinder_puppet.name, scheduler.name, actions=('update',))
    return {'cinder_scheduler_puppet': scheduler}
def create(cls, count, resource_path, name='{resource_path_name}-{num}', args=None):
    """Create a number of resources of the same type, with optional args.

    name -- optional resource name template
    args -- an optional dict with create arguments. You can use
            "{num}" -- index of resource in the list
            "{resource_path_name}" -- name of resource from `resource_path`
    """
    args = args or {}
    resource_path_name = os.path.split(resource_path)[-1]

    created_resources = []
    for num in range(count):
        fmt_kwargs = {'num': num, 'resource_path_name': resource_path_name}
        fmt_kwargs['name'] = name.format(**fmt_kwargs)
        rendered_args = cls.args_fmt(args, fmt_kwargs)
        created_resources.append(
            cr.create(fmt_kwargs['name'], resource_path, rendered_args)[0])

    return ResourceListTemplate(created_resources)
def setup_openrc(node, keystone_puppet, admin_user):
    """Create the openrc file fed by keystone endpoint and admin credentials."""
    # OPENRC
    openrc = cr.create('openrc_file', 'resources/openrc_file', {})[0]
    node.connect(openrc)
    keystone_puppet.connect(openrc, {'ip': 'keystone_host',
                                     'admin_port': 'keystone_port'})
    admin_user.connect(openrc, {'user_name': 'user_name',
                                'user_password': '******',
                                'tenant_name': 'tenant'})
    return {'openrc_file': openrc}
def setup_nova_conductor(node, nova_puppet, nova_api_puppet):
    """Deploy the nova conductor service; runs after the API is in place."""
    # NOVA CONDUCTOR
    conductor = cr.create('nova_conductor_puppet',
                          'resources/nova_conductor_puppet', {})[0]
    node.connect(conductor)
    nova_puppet.connect(conductor)
    # API must run first; updates to base nova re-trigger the conductor
    evapi.add_dep(nova_api_puppet.name, conductor.name, actions=('run',))
    evapi.add_react(nova_puppet.name, conductor.name, actions=('update',))
    return {'nova_conductor': conductor}
def add_dns(args, *_):
    """Create kube-dns wired to the master api endpoint and cluster config."""
    config = rs.load('kube-config')
    kube_master = rs.load(MASTER_NODE_RESOURCE_NAME)
    kubelet_master = rs.load('kubelet-master')

    kube_dns = cr.create('kube-dns', 'k8s/kubedns', {})[0]
    kubelet_master.connect(kube_dns, {'master_port': 'api_port'})
    kube_master.connect(kube_dns, {'ip': 'api_host'})
    config.connect(kube_dns, {'cluster_domain': 'cluster_domain',
                              'cluster_dns': 'cluster_dns'})
def setup_master(config, user_config):
    """Create the master node; attach docker, kubelet-master and calico-master."""
    master_node = cr.create(
        'kube-node-master', 'k8s/node',
        {'name': 'kube-node-master',
         'ip': user_config['ip'],
         'ssh_user': user_config['username'],
         'ssh_password': user_config['password'],
         'ssh_key': user_config['ssh_key']})['kube-node-master']
    master_node.connect(config, {})

    docker_res = cr.create('kube-docker-master',
                           'k8s/docker')['kube-docker-master']
    master_node.connect(docker_res, {})

    kubelet_res = cr.create('kubelet-master',
                            'k8s/kubelet_master')['kubelet-master']
    config.connect(kubelet_res, {'k8s_version': 'k8s_version'})

    calico_res = cr.create('calico-master', 'k8s/calico_master',
                           {'options': "--nat-outgoing --ipip"})['calico-master']
    master_node.connect(calico_res, {'ip': ['ip', 'etcd_host']})
    config.connect(calico_res, {'network': 'network',
                                'prefix': 'prefix',
                                'calico_version': 'version'})
    # self-connection derives etcd authority strings from host/port
    calico_res.connect(calico_res,
                       {'etcd_host': 'etcd_authority',
                        'etcd_port': 'etcd_authority',
                        'etcd_authority': 'etcd_authority_internal'})

    config.connect(kubelet_res,
                   {'service_cluster_ip_range': "service_cluster_ip_range"})
    master_node.connect(kubelet_res, {'name': 'master_host'})
    kubelet_res.connect(kubelet_res, {'master_host': 'master_address',
                                      'master_port': 'master_address'})

    add_event(Dep(docker_res.name, 'run', 'success', kubelet_res.name, 'run'))
    add_event(Dep(kubelet_res.name, 'run', 'success', calico_res.name, 'run'))
def test_update(tmpdir):
    """Applying the 'update' composer file overrides the node's ip input."""
    # XXX: make helper for it
    fixtures = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'resource_fixtures')
    node_tmpl = os.path.join(fixtures, 'nodes.yaml.tmpl')
    update_tmpl = os.path.join(fixtures, 'update.yaml.tmpl')
    update_path = os.path.join(fixtures, 'update')
    node_path = os.path.join(fixtures, 'node')

    with open(node_tmpl) as f:
        nodes_yaml = f.read().format(resource_path=node_path)
    with open(update_tmpl) as f:
        update_yaml = f.read().format(resource_path=update_path)

    nodes_file = tmpdir.join('nodes.yaml')
    nodes_file.write(nodes_yaml)
    update_file = tmpdir.join('update.yaml')
    update_file.write(update_yaml)

    resources = cr.create('nodes', str(nodes_file))
    cr.create('updates', str(update_file))
    assert resources[0].args['ip'] == '10.0.0.4'
def setup_cinder_api(node, cinder_puppet):
    """Deploy cinder API wired to keystone credentials from cinder_puppet."""
    # CINDER API
    api = cr.create('cinder_api_puppet', 'resources/cinder_api_puppet', {})[0]
    node.connect(api)
    # same-name inputs pass straight through (set form)...
    cinder_puppet.connect(api, {'keystone_password',
                                'keystone_tenant',
                                'keystone_user'})
    # ...while these are renamed on the way in (dict form)
    cinder_puppet.connect(api, {'keystone_host': 'keystone_auth_host',
                                'keystone_port': 'keystone_auth_port'})
    evapi.add_react(cinder_puppet.name, api.name, actions=('update',))
    return {'cinder_api_puppet': api}
def add_dns(args, *_):
    """Create kube-dns and connect the master api plus cluster DNS settings."""
    config = rs.load('kube-config')
    kube_master = rs.load('kube-node-master')
    kubelet = rs.load('kubelet-master')

    kube_dns = cr.create('kube-dns', 'k8s/kubedns', {})[0]
    kubelet.connect(kube_dns, {'master_port': 'api_port'})
    kube_master.connect(kube_dns, {'ip': 'api_host'})
    config.connect(kube_dns, {'cluster_domain': 'cluster_domain',
                              'cluster_dns': 'cluster_dns'})
def test_setting_location(tmpdir):
    """A composer file with a location clause tags the resource accordingly."""
    # XXX: make helper for it
    fixtures = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'resource_fixtures')
    node_tmpl = os.path.join(fixtures, 'nodes.yaml.tmpl')
    location_tmpl = os.path.join(fixtures, 'with_location.yaml.tmpl')
    base_service = os.path.join(fixtures, 'base_service')
    node_path = os.path.join(fixtures, 'node')

    with open(node_tmpl) as f:
        nodes_yaml = f.read().format(resource_path=node_path)
    with open(location_tmpl) as f:
        location_yaml = f.read().format(resource_path=base_service)

    nodes_file = tmpdir.join('nodes.yaml')
    nodes_file.write(nodes_yaml)
    location_file = tmpdir.join('with_location.yaml')
    location_file.write(location_yaml)

    cr.create('nodes', str(nodes_file))
    created = cr.create('updates', str(location_file))
    assert 'location=node1' in created[0].tags
def setup_nova_scheduler(node, nova_puppet, nova_api_puppet):
    """Deploy nova-scheduler as a generic puppet service."""
    # NOVA SCHEDULER
    # NOTE(bogdando) Generic service is used. Package and service names for
    # the Ubuntu case come from
    # https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/params.pp
    scheduler = cr.create('nova_scheduler_puppet',
                          'resources/nova_generic_service_puppet',
                          {'title': 'scheduler',
                           'package_name': 'nova-scheduler',
                           'service_name': 'nova-scheduler'})[0]
    node.connect(scheduler)
    evapi.add_dep(nova_puppet.name, scheduler.name, actions=('run',))
    evapi.add_dep(nova_api_puppet.name, scheduler.name, actions=('run',))
    evapi.add_react(nova_puppet.name, scheduler.name, actions=('update',))
    return {'nova_scheduler_puppet': scheduler}
def setup_master():
    """Create the master node with etcd, the kubernetes master and calico."""
    master = cr.create('kube-node-master', 'k8s/node',
                       {'name': 'kube-node-master',
                        'ip': '10.0.0.3',
                        'ssh_user': '******',
                        'ssh_password': '******',
                        'ssh_key': None})['kube-node-master']

    etcd = cr.create('etcd', 'k8s/etcd',
                     {'listen_client_port': 4001})['etcd']
    master.connect(etcd, {'name': 'listen_client_host'})
    # build the full client URL from host + port
    etcd.connect(etcd, {'listen_client_host': 'listen_client_url',
                        'listen_client_port': 'listen_client_url'})

    kubernetes = cr.create('k8s-master', 'k8s/kubernetes',
                           {'master_port': 8080})['k8s-master']
    master.connect(kubernetes, {'name': 'master_host'})
    etcd.connect(kubernetes, {'listen_client_url': 'etcd_servers'})
    kubernetes.connect(kubernetes, {'master_port': 'master_address',
                                    'master_host': 'master_address'})

    calico = cr.create('calico-master', 'k8s/calico', {})['calico-master']
    master.connect(calico, {'ip': 'ip'})
    etcd.connect(calico, {'listen_client_url': 'etcd_authority'})
    calico.connect(calico, {'etcd_authority': 'etcd_authority_internal'})
def add_solar_agent(i):
    """Install the solar-agent transport on node ``i`` over ssh, then register it."""
    solar_agent_transport = cr.create(
        'solar_agent_transport%s' % i, 'resources/transport_solar_agent',
        {'solar_agent_user': '******',
         'solar_agent_password': '******'})[0]

    transports = resource.load('transports%s' % i)
    ssh_transport = resource.load('ssh_transport%s' % i)
    transports_for_solar_agent = cr.create(
        'transports_for_solar_agent%s' % i, 'resources/transports')[0]

    # install solar_agent with ssh
    signals.connect(transports_for_solar_agent, solar_agent_transport, {})
    signals.connect(ssh_transport, transports_for_solar_agent,
                    {'ssh_key': 'transports:key',
                     'ssh_user': '******',
                     'ssh_port': 'transports:port',
                     'name': 'transports:name'})

    # add solar_agent to transports on this node
    signals.connect(solar_agent_transport, transports,
                    {'solar_agent_user': '******',
                     'solar_agent_port': 'transports:port',
                     'solar_agent_password': '******',
                     'name': 'transports:name'})
def test_create_from_composer_file(tmpdir):
    """A rendered nodes.yaml composer file yields two resources."""
    fixtures = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'resource_fixtures')
    tmpl_path = os.path.join(fixtures, 'nodes.yaml.tmpl')
    node_path = os.path.join(fixtures, 'node')

    with open(tmpl_path) as f:
        rendered = f.read().format(resource_path=node_path)
    vr_file = tmpdir.join('nodes.yaml')
    vr_file.write(rendered)

    assert len(cr.create('nodes', str(vr_file))) == 2
def setup_slave_node(config, user_config, kubernetes_master, calico_master,
                     internal_network, i):
    """Create slave node #(i+1) with bridge iface, calico, docker and kubelet.

    :returns: the created node resource.
    """
    num = i + 1
    node_name = 'kube-node-%d' % num
    slave = cr.create(node_name, 'k8s/node',
                      {'name': node_name,
                       'ip': get_free_slave_ip(user_config['ips']),
                       'ssh_user': user_config['username'],
                       'ssh_password': user_config['password'],
                       'ssh_key': user_config['ssh_key']})[node_name]

    iface_name = 'kube-node-%d-iface' % num
    bridge = cr.create(iface_name, 'k8s/virt_iface',
                       {'name': 'cbr0',
                        'ipaddr': str(internal_network + 256 * num + 1),
                        'onboot': 'yes',
                        'bootproto': 'static',
                        'type': 'Bridge'})[iface_name]
    slave.connect(bridge, {})
    config.connect(bridge, {'netmask': 'netmask'})

    calico = cr.create('calico-node-%d' % num, 'k8s/calico', {})[0]
    slave.connect(calico, {'ip': 'ip'})
    config.connect(calico, {'calico_version': 'version'})
    calico_master.connect(calico, {'etcd_authority': 'etcd_authority'})
    calico.connect(calico, {'etcd_authority': 'etcd_authority_internal'})

    cni = cr.create('calico-cni-node-%d' % num, 'k8s/cni', {})[0]
    calico.connect(cni, {'etcd_authority_internal': 'etcd_authority'})

    docker_name = 'kube-docker-%d' % num
    docker = cr.create(docker_name, 'k8s/docker')[docker_name]
    slave.connect(docker, {})
    bridge.connect(docker, {'name': 'iface'})

    kubelet_name = 'kubelet-node-%d' % num
    kubelet = cr.create(kubelet_name, 'k8s/kubelet',
                        {'kubelet_args': '--v=5'})[kubelet_name]
    slave.connect(kubelet, {'name': 'kubelet_hostname'})
    kubernetes_master.connect(kubelet, {'master_address': 'master_api'})
    config.connect(kubelet, {'cluster_domain': 'cluster_domain',
                             'cluster_dns': 'cluster_dns',
                             'k8s_version': 'k8s_version'})

    # ordering: docker first, then calico, then kubelet
    add_event(Dep(docker.name, 'run', 'success', calico.name, 'run'))
    add_event(Dep(docker.name, 'run', 'success', kubelet.name, 'run'))
    add_event(Dep(calico.name, 'run', 'success', kubelet.name, 'run'))
    return slave
def setup_neutron_compute(node, librarian, neutron_puppet, neutron_server_puppet):
    """Deploy neutron plus its ML2 plugin and OVS agent for a compute node.

    Deploy chain: neutron -> (plugins) -> (agents).
    """
    # NEUTRON FOR COMPUTE (node1)
    name = node.name
    neutron_puppet2 = cr.create('neutron_puppet_{}'.format(name),
                                'resources/neutron_puppet', {})[0]

    neutron_puppet2.connect_with_events(librarian, {'module': 'modules'}, {})
    evapi.add_dep(librarian.name, neutron_puppet2.name,
                  actions=('run', 'update'))
    # BUG FIX: state was misspelled 'SUCESS'; every other event in this
    # file uses the 'success' state, so this dep could never match.
    dep = evapi.Dep(librarian.name, 'update', state='success',
                    child=neutron_puppet2.name, child_action='run')
    evapi.add_event(dep)

    node.connect(neutron_puppet2)
    neutron_puppet.connect(neutron_puppet2, {
        'rabbit_host', 'rabbit_port', 'rabbit_user', 'rabbit_password',
        'rabbit_virtual_host', 'package_ensure', 'core_plugin',
    })

    # NEUTRON OVS PLUGIN & AGENT WITH GRE FOR COMPUTE (node1)
    neutron_plugins_ml22 = cr.create('neutron_plugins_ml_{}'.format(name),
                                     'resources/neutron_plugins_ml2_puppet',
                                     {})[0]
    node.connect(neutron_plugins_ml22)
    evapi.add_dep(neutron_puppet2.name, neutron_plugins_ml22.name,
                  actions=('run',))
    evapi.add_dep(neutron_server_puppet.name, neutron_plugins_ml22.name,
                  actions=('run',))

    neutron_agents_ml22 = cr.create(
        'neutron_agents_ml_{}'.format(name),
        'resources/neutron_agents_ml2_ovs_puppet', {
            # TODO(bogdando) these should come from the node network resource
            'enable_tunneling': True,
            'tunnel_types': ['gre'],
            'local_ip': '10.1.0.14'  # Should be the IP addr of the br-mesh int.
        })[0]
    node.connect(neutron_agents_ml22)
    evapi.add_dep(neutron_puppet2.name, neutron_agents_ml22.name,
                  actions=('run',))
    evapi.add_dep(neutron_server_puppet.name, neutron_agents_ml22.name,
                  actions=('run',))

    return {'neutron_puppet2': neutron_puppet2,
            'neutron_plugins_ml22': neutron_plugins_ml22,
            'neutron_agents_ml22': neutron_agents_ml22}
def setup_slave_node(config, user_config, kubernetes_master, calico_master,
                     internal_network, i):
    """Create and wire slave node #(i+1); return the node resource."""
    j = i + 1
    kube_node = cr.create(
        'kube-node-%d' % j, 'k8s/node',
        {'name': 'kube-node-%d' % j,
         'ip': get_free_slave_ip(user_config['ips']),
         'ssh_user': user_config['username'],
         'ssh_password': user_config['password'],
         'ssh_key': user_config['ssh_key']})['kube-node-%d' % j]

    iface_node = cr.create(
        'kube-node-%d-iface' % j, 'k8s/virt_iface',
        {'name': 'cbr0',
         'ipaddr': str(internal_network + 256 * j + 1),
         'onboot': 'yes',
         'bootproto': 'static',
         'type': 'Bridge'})['kube-node-%d-iface' % j]
    kube_node.connect(iface_node, {})
    config.connect(iface_node, {'netmask': 'netmask'})

    calico_node = cr.create('calico-node-%d' % j, 'k8s/calico', {})[0]
    kube_node.connect(calico_node, {'ip': 'ip'})
    config.connect(calico_node, {'calico_version': 'version'})
    calico_master.connect(calico_node, {'etcd_authority': 'etcd_authority'})
    calico_node.connect(calico_node,
                        {'etcd_authority': 'etcd_authority_internal'})

    calico_cni = cr.create('calico-cni-node-%d' % j, 'k8s/cni', {})[0]
    calico_node.connect(calico_cni,
                        {'etcd_authority_internal': 'etcd_authority'})

    docker = cr.create('kube-docker-%d' % j, 'k8s/docker')['kube-docker-%d' % j]
    kube_node.connect(docker, {})
    iface_node.connect(docker, {'name': 'iface'})

    kubelet = cr.create('kubelet-node-%d' % j, 'k8s/kubelet',
                        {'kubelet_args': '--v=5'})['kubelet-node-%d' % j]
    kube_node.connect(kubelet, {'name': 'kubelet_hostname'})
    kubernetes_master.connect(kubelet, {'master_address': 'master_api'})
    config.connect(kubelet, {'cluster_domain': 'cluster_domain',
                             'cluster_dns': 'cluster_dns',
                             'k8s_version': 'k8s_version'})

    # docker must run before calico and kubelet; calico before kubelet
    add_event(Dep(docker.name, 'run', 'success', calico_node.name, 'run'))
    add_event(Dep(docker.name, 'run', 'success', kubelet.name, 'run'))
    add_event(Dep(calico_node.name, 'run', 'success', kubelet.name, 'run'))
    return kube_node
def setup_nova_api(node, nova_puppet, neutron_agents_metadata):
    """Deploy the nova API; feed its metadata ip to the neutron metadata agent."""
    # NOVA API
    api = cr.create('nova_api_puppet', 'resources/nova_api_puppet', {})[0]
    node.connect(api)
    nova_puppet.connect(api, {'keystone_tenant': 'admin_tenant_name',
                              'keystone_user': '******',
                              'keystone_password': '******',
                              'keystone_host': 'auth_host',
                              'keystone_port': 'auth_port'})
    evapi.add_react(nova_puppet.name, api.name, actions=('update',))
    api.connect(neutron_agents_metadata, {'ip': 'metadata_ip'})
    return {'nova_api_puppet': api}
def test_create_from_composer_file_with_dict(tmpdir):
    """Dict-valued inputs in a composer file survive resource creation."""
    fixtures = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'resource_fixtures')
    tmpl = os.path.join(fixtures, 'resource_with_dict.yaml.tmpl')
    base_resource = os.path.join(fixtures, 'base_service')

    with open(tmpl) as f:
        rendered = f.read().format(resource_path=base_resource)
    vr_file = tmpdir.join('base.yaml')
    vr_file.write(rendered)

    created = cr.create('base', str(vr_file))
    assert len(created) == 1
    assert created[0].args['servers'] == {'a': 1, 'b': 2}
def create(args, base_path, name):
    """CLI handler: create resources named ``name`` from ``base_path``.

    Each item in ``args`` is either a JSON object or a ``key=value`` pair;
    all of them are merged into the inputs dict.
    """
    if base_path.startswith('./'):
        base_path = os.path.abspath(base_path)
    elif base_path.endswith('.yaml'):
        base_path = os.path.abspath(base_path)

    args_parsed = {}
    click.echo('create {} {} {}'.format(name, base_path, args))
    for arg in args:
        try:
            args_parsed.update(json.loads(arg))
        except ValueError:
            # BUG FIX: split on the first '=' only, so values that
            # themselves contain '=' (e.g. base64 strings) no longer
            # crash with "too many values to unpack".
            k, v = arg.split('=', 1)
            args_parsed.update({k: v})

    resources = cr.create(name, base_path, inputs=args_parsed)
    for res in resources:
        click.echo(res.color_repr())
def setup_glance_registry(node, glance_api_puppet):
    """Deploy glance registry, decoupled from the API's port/log/pipeline."""
    # GLANCE REGISTRY
    registry = cr.create('glance_registry_puppet',
                         'resources/glance_registry_puppet', {})[0]
    node.connect(registry)
    glance_api_puppet.connect(registry)
    evapi.add_react(glance_api_puppet.name, registry.name,
                    actions=('update',))
    # API and registry should not listen on the same ports nor share a log
    # destination or pipeline, so disconnect those inputs and restore the
    # registry defaults.
    for input_name in ('bind_port', 'log_file', 'pipeline'):
        signals.disconnect_receiver_by_input(registry, input_name)
    registry.update({'bind_port': 9191,
                     'log_file': '/var/log/glance/registry.log',
                     'pipeline': 'keystone'})
    return {'glance_registry_puppet': registry}
def test_correct_types(tmpdir):
    """Composer input values keep their declared types (int/str/None)."""
    fixtures = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'resource_fixtures')
    types_tmpl = os.path.join(fixtures, 'types.yaml.tmpl')
    sub_tmpl = os.path.join(fixtures, 'type.yaml.tmpl')
    target = os.path.join(fixtures, 'types_test')

    with open(sub_tmpl) as f:
        sub_data = f.read().replace('{type_path}', target)
    sub_file = tmpdir.join('type.yaml')
    sub_file.write(sub_data)

    with open(types_tmpl) as f:
        types_data = f.read().format(sub_path=str(sub_file))
    types_file = tmpdir.join('types.yaml')
    types_file.write(types_data)

    created = cr.create('types', str(types_file))[0]
    for input_name, expected_type in (('as_int', int),
                                      ('as_string', str),
                                      ('as_null', NoneType)):
        assert isinstance(created.args[input_name], expected_type)
def nodes_from(template_path):
    """Return ResourceListTemplate for nodes read from template_path."""
    return ResourceListTemplate(cr.create('nodes', template_path, {}))
def add_dashboard(args, *_):
    """Create the kubernetes dashboard wired to the master api endpoint."""
    kube_master = rs.load('kube-node-master')
    kubelet = rs.load('kubelet-master')

    dashboard = cr.create('kubernetes-dashboard', 'k8s/dashboard', {})[0]
    kubelet.connect(dashboard, {'master_port': 'api_port'})
    kube_master.connect(dashboard, {'ip': 'api_host'})
def create_config(dns_config):
    """Create the global k8s config resource from the DNS settings dict."""
    inputs = {'cluster_dns': dns_config['ip'],
              'cluster_domain': dns_config['domain']}
    return cr.create('kube-config', 'k8s/global_config', inputs)[0]
def setup_nodes(num=1):
    """Create ``num`` slave nodes with calico/docker/kubelet and hosts wiring."""
    kube_nodes = []
    kubernetes_master = rs.load('kubelet-master')
    calico_master = rs.load('calico-master')
    config = rs.load('kube-config')

    for i in xrange(num):
        j = i + 1
        node_name = 'kube-node-%d' % j
        kube_node = cr.create(node_name, 'k8s/node',
                              {'name': node_name,
                               'ip': '10.0.0.%d' % (3 + j),
                               'ssh_user': '******',
                               'ssh_password': '******',
                               'ssh_key': None})[node_name]

        iface_name = 'kube-node-%d-iface' % j
        iface_node = cr.create(iface_name, 'k8s/virt_iface',
                               {'name': 'cbr0',
                                # TODO(jnowak) support config for it
                                'ipaddr': '172.20.%d.1' % (i + 1),
                                'onboot': 'yes',
                                'bootproto': 'static',
                                'type': 'Bridge'})[iface_name]
        kube_node.connect(iface_node, {})
        config.connect(iface_node, {'netmask': 'netmask'})

        calico_node = cr.create('calico-node-%d' % j, 'k8s/calico', {})[0]
        kube_node.connect(calico_node, {'ip': 'ip'})
        calico_master.connect(calico_node,
                              {'etcd_authority': 'etcd_authority'})
        calico_node.connect(calico_node,
                            {'etcd_authority': 'etcd_authority_internal'})

        calico_cni = cr.create('calico-cni-node-%d' % j, 'k8s/cni', {})[0]
        calico_node.connect(calico_cni,
                            {'etcd_authority_internal': 'etcd_authority'})

        docker_name = 'kube-docker-%d' % j
        docker = cr.create(docker_name, 'k8s/docker')[docker_name]
        kube_node.connect(docker, {})
        iface_node.connect(docker, {'name': 'iface'})

        kubelet_name = 'kubelet-node-%d' % j
        kubelet = cr.create(kubelet_name, 'k8s/kubelet',
                            {'kubelet_args': '--v=5'})[kubelet_name]
        kube_node.connect(kubelet, {'name': 'kubelet_hostname'})
        kubernetes_master.connect(kubelet, {'master_address': 'master_api'})
        config.connect(kubelet, {'cluster_domain': 'cluster_domain',
                                 'cluster_dns': 'cluster_dns'})

        # docker before calico and kubelet; calico before kubelet
        add_event(Dep(docker.name, 'run', 'success', calico_node.name, 'run'))
        add_event(Dep(docker.name, 'run', 'success', kubelet.name, 'run'))
        add_event(Dep(calico_node.name, 'run', 'success', kubelet.name, 'run'))
        kube_nodes.append(kube_node)

    kube_master = rs.load('kube-node-master')
    all_nodes = kube_nodes[:] + [kube_master]
    hosts_files = rs.load_all(startswith='hosts_file_node_kube-')
    # every node contributes an entry to every hosts file
    for node in all_nodes:
        for host_file in hosts_files:
            node.connect(host_file, {'name': 'hosts:name', 'ip': 'hosts:ip'})
def resources(request, sequence_vr):
    """Fixture: create ``scale`` sequence resources from the composer file."""
    scale = request.getfuncargvalue('scale')
    for i in range(scale):
        composer.create('sequence_%s' % i, sequence_vr, inputs={'idx': i})
def test_create_path_does_not_exists():
    """Creating from a missing path raises an error that carries the bad path."""
    missing = '/path/does/not/exists'
    with pytest.raises(Exception) as excinfo:
        cr.create('node1', missing)
    # BUG FIX: pytest's ExceptionInfo object has no `filename` attribute;
    # the raised exception is reached through `.value`.
    assert excinfo.value.filename == missing
def create_resource(self, name, src, args=None):
    """Create a single resource from ``src`` with optional inputs; return it."""
    inputs = args or {}
    return cr.create(name, src, inputs=inputs)[0]