def install_dcluster(sandbox_dir):
    # e.g. /etc/dcluster/config.yml
    sandboxed_config_file = install_production_config(sandbox_dir)

    # for debugging/testing purposes
    print(sandboxed_config_file)

    # install the templates, ansible_static and bootstrap folders
    install_source_artifact('templates', main_config.paths('templates'))
    install_source_artifact('ansible_static', main_config.paths('ansible_static'))
    install_source_artifact('bootstrap', main_config.paths('bootstrap'))

    # install cluster profiles; note that the entry is a list,
    # assuming the first element has the dcluster-supplied path
    install_source_artifact('config/profiles', main_config.paths('profiles')[0])
def configure_parser(create_parser):
    '''
    Configure the argument parser for the create subcommand.
    '''
    create_parser.add_argument('cluster_name', help='name of the virtual cluster')
    create_parser.add_argument('compute_count', help='number of compute nodes in the cluster')

    # optional arguments
    help_msg = 'cluster profile, see configuration file (default: %(default)s)'
    create_parser.add_argument('-p', '--profile', default='simple', help=help_msg)

    msg = 'directory where cluster files are created (default: %(default)s)'
    create_parser.add_argument('--workpath', help=msg, default=main_config.paths('work'))

    msg = 'additional directories with profiles in YAML files (can be specified multiple times)'
    create_parser.add_argument('--profile-path', help=msg, action='append')

    msg = 'run these ansible playbooks immediately after creating the cluster (list)'
    create_parser.add_argument('--playbooks', help=msg, nargs='+')

    msg = 'extra-vars passed to ansible-playbook as-is'
    create_parser.add_argument('-e', '--extra-vars', help=msg, nargs='+')

    # default function to call
    create_parser.set_defaults(func=process_cli_call)
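# A minimal sketch of how configure_parser() might be wired into an
# argparse-based CLI; the 'dcluster' program name and the 'create'
# subcommand name are assumptions for illustration, not taken from the source.
import argparse

def build_cli_parser():
    parser = argparse.ArgumentParser(prog='dcluster')
    subparsers = parser.add_subparsers(dest='subcommand')
    create_parser = subparsers.add_parser('create', help='create a virtual cluster')
    configure_parser(create_parser)
    return parser

# usage sketch:
# args = build_cli_parser().parse_args(['create', 'mycluster', '3', '--profile', 'simple'])
# args.func(args)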
def get_renderer(creation_request):
    '''
    Used for template rendering.
    '''
    # only one type for now
    templates_dir = main_config.paths('templates')
    return JinjaRenderer(templates_dir)
def create_default_cluster(creation_request):
    '''
    Creates a new default cluster. The request must have:
    - name
    - compute_count
    - profile

    Other optional arguments:
    - playbooks
    - extra_vars_list
    '''
    # ensure that user-specified profile paths exist before attempting anything
    fs_util.check_directories_exist(creation_request.profile_paths)

    # go ahead and create the network using Docker
    cluster_network = networking.create(creation_request.name)

    # develop the cluster plan given the request
    cluster_plan = cluster.create_plan(creation_request, cluster_network)

    # get the blueprints with plans for all nodes
    cluster_blueprints = cluster_plan.create_blueprints()

    # deploy the cluster
    renderer = runtime.get_renderer(creation_request)
    composer_workpath = main_config.composer_workpath(creation_request.name)
    deployer = runtime.DockerComposeDeployer(composer_workpath)
    cluster_blueprints.deploy(renderer, deployer)

    # create the Ansible inventory now, it is much harder to do later
    cluster_name = creation_request.name
    inventory_workpath = dansible_config.inventory_workpath(cluster_name)
    (_, inventory_file) = dansible.create_inventory(cluster_blueprints.as_dict(),
                                                    inventory_workpath)

    # show the newly created cluster
    live_cluster = display.show_cluster(creation_request.name)

    if main_config.prefs('inject_ssh_public_keys_to_root'):
        # inject SSH public keys into all containers for password-less SSH
        public_key_paths = main_config.paths('ssh_public_keys')
        for public_key_path in public_key_paths:
            live_cluster.inject_public_ssh_key(public_key_path)

    # fix for containers running /sbin/init
    live_cluster.fix_init_if_needed()

    # run requested Ansible playbooks with optional extra vars
    for playbook in creation_request.playbooks:
        dansible.run_playbook(cluster_name, playbook, inventory_file,
                              creation_request.extra_vars_list)
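# A minimal sketch of a creation request that carries the attributes
# create_default_cluster() reads; this namedtuple is hypothetical, the
# actual request type in dcluster may be a richer object.
import collections

SketchCreationRequest = collections.namedtuple(
    'SketchCreationRequest',
    ['name', 'compute_count', 'profile', 'profile_paths', 'playbooks', 'extra_vars_list'])

# usage sketch:
# request = SketchCreationRequest(name='mycluster', compute_count=3, profile='simple',
#                                 profile_paths=[], playbooks=[], extra_vars_list=[])
# create_default_cluster(request)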
def test_config_composer_workpath(self):
    # given
    cluster_name = 'mycluster'
    workpath = main_config.paths('work')

    # when
    composer_workpath = main_config.composer_workpath(cluster_name)

    # then
    expected = os.path.join(workpath, 'clusters/mycluster')
    self.assertEqual(composer_workpath, expected)
def get_workpath(args):
    '''
    Determine the work path of dcluster. By default it comes from the
    configuration (paths:work), but the user can override it with the
    --workpath optional argument.
    '''
    log = logger.logger_for_me(get_workpath)

    if args.workpath is not None:
        workpath = args.workpath
    else:
        workpath = main_config.paths('work')

    # create the directory, it may not exist but we need it now
    log.debug('workpath %s' % workpath)
    fs_util.create_dir_dont_complain(workpath)
    return workpath
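# Illustrative only: with an argparse.Namespace where workpath is None,
# get_workpath() falls back to the configured paths:work entry and
# creates the directory if needed.
import argparse

# usage sketch:
# args = argparse.Namespace(workpath=None)
# workpath = get_workpath(args)  # -> main_config.paths('work')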
def build_specs(self):
    '''
    Creates a dictionary of node specs that will be used for the cluster blueprint.

    A 'basic' cluster always has a single head, and zero or more compute nodes,
    depending on compute_count.

    Example output for compute_count = 3 (a head node and 3 compute nodes):

    cluster_specs = {
        'profile': 'basic',
        'name': 'mycluster',
        'nodes': {
            '172.30.0.253': BasicPlannedNode(
                hostname='head',
                container='mycluster-head',
                image='centos7:ssh',
                ip_address='172.30.0.253',
                role='head'),
            '172.30.0.1': BasicPlannedNode(
                hostname='node001',
                container='mycluster-node001',
                image='centos7:ssh',
                ip_address='172.30.0.1',
                role='compute'),
            '172.30.0.2': BasicPlannedNode(
                hostname='node002',
                container='mycluster-node002',
                image='centos7:ssh',
                ip_address='172.30.0.2',
                role='compute'),
            '172.30.0.3': BasicPlannedNode(
                hostname='node003',
                container='mycluster-node003',
                image='centos7:ssh',
                ip_address='172.30.0.3',
                role='compute')
        },
        'network': {
            'name': 'dcluster-mycluster',
            'address': '172.30.0.0/24',
            'subnet': '172.30.0.0',
            'gateway': 'gateway',
            'gateway_ip': '172.30.0.254'
        },
        'template': 'cluster-basic.yml.j2',
        'volumes': ['my_first_volume', 'my_second_volume']
    }
    '''
    plan_data = self.plan_data
    cluster_network = self.cluster_network
    node_planner = self.node_planner

    # initialize with profile, name, template, then add the network
    cluster_specs = collection_util.defensive_subset(
        plan_data, ('profile', 'name', 'template'))
    cluster_specs['network'] = cluster_network.as_dict()

    # always have a head
    head_plan = node_planner.create_head_plan(plan_data)
    cluster_specs['nodes'] = {head_plan.ip_address: head_plan}

    # create <compute_count> compute nodes
    compute_ips = cluster_network.compute_ips(plan_data['compute_count'])
    for index, compute_ip in enumerate(compute_ips):
        compute_plan = node_planner.create_compute_plan(plan_data, index, compute_ip)
        cluster_specs['nodes'][compute_plan.ip_address] = compute_plan

    # will be used to set the entrypoint to the injected bootstrap script
    cluster_specs['bootstrap_dir'] = main_config.paths('bootstrap')

    self.__handle_volume_specs(cluster_specs)
    return cluster_specs
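# Minimal sketch of consuming the cluster_specs dictionary above, e.g. to
# list the planned containers; the node attributes follow the
# BasicPlannedNode example in the docstring, and 'planner' is a
# hypothetical instance of the surrounding class.
#
# cluster_specs = planner.build_specs()
# for ip_address, node in cluster_specs['nodes'].items():
#     print('%s -> %s (%s)' % (ip_address, node.container, node.role))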
def setUp(self):
    self.resources = test_resources.ResourcesForTest()
    self.maxDiff = None
    templates_dir = main_config.paths('templates')
    self.renderer = render.JinjaRenderer(templates_dir)