def create_cluster(values):
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)

    # validating cluster
    try:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Validating"})
        LOG.info(g.format_cluster_status(cluster))
        plugin.validate(cluster)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            cluster = conductor.cluster_update(
                ctx, cluster, {"status": "Error",
                               "status_description": str(e)})
            LOG.info(g.format_cluster_status(cluster))

    context.spawn("cluster-creating-%s" % cluster.id,
                  _provision_cluster, cluster.id)
    if CONF.use_identity_api_v3 and cluster.is_transient:
        trusts.create_trust(cluster)

    return conductor.cluster_get(ctx, cluster.id)
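# For illustration: a plausible `values` payload for the create_cluster
# variants in this section. Only plugin_name is read directly by the code
# above; the remaining field names (name, hadoop_version, default_image_id,
# cluster_template_id) are assumptions based on typical cluster-create
# requests and may differ between releases.
example_cluster_values = {
    "name": "my-cluster",
    "plugin_name": "vanilla",
    "hadoop_version": "1.2.1",
    "default_image_id": "<image-id>",
    "cluster_template_id": "<template-id>",
}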
def scale_cluster(id, data):
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get("resize_node_groups", [])
    additional_node_groups = data.get("add_node_groups", [])

    # the next map is the main object we will work with
    # to_be_enlarged : {node_group_id: desired_amount_of_instances}
    to_be_enlarged = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng["name"])["id"]
        to_be_enlarged.update({ng_id: ng["count"]})

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)

    try:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Validating"})
        LOG.info(g.format_cluster_status(cluster))
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception:
        with excutils.save_and_reraise_exception():
            i.clean_cluster_from_empty_ng(cluster)
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Active"})
            LOG.info(g.format_cluster_status(cluster))

    # If we are here validation is successful.
    # So let's update to_be_enlarged map:
    to_be_enlarged.update(additional)

    context.spawn("cluster-scaling-%s" % id,
                  _provision_nodes, id, to_be_enlarged)
    return conductor.cluster_get(ctx, id)
def scale_cluster(cluster_id, data):
    cluster = get_cluster(id=cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get("resize_node_groups", [])
    additional_node_groups = data.get("add_node_groups", [])

    # the next map is the main object we will work with
    # to_be_enlarged : {node_group_name: desired_amount_of_instances}
    to_be_enlarged = {}
    for ng in existing_node_groups:
        to_be_enlarged.update({ng["name"]: ng["count"]})

    additional = construct_ngs_for_scaling(additional_node_groups)

    try:
        context.model_update(cluster, status="Validating")
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception:
        with excutils.save_and_reraise_exception():
            context.model_update(cluster, status="Active")

    # If we are here validation is successful.
    # So let's update db and to_be_enlarged map:
    for add_n_g in additional:
        cluster.node_groups.append(add_n_g)
        to_be_enlarged.update({add_n_g.name: additional[add_n_g]})
    context.model_save(cluster)

    context.spawn(_provision_nodes, cluster_id, to_be_enlarged)

    return cluster
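# For illustration: a plausible `data` payload for the scale_cluster
# variants above. The "name" and "count" keys of "resize_node_groups"
# entries are read directly by the code; the exact shape expected inside
# "add_node_groups" depends on construct_ngs_for_scaling, so the
# "node_group_template_id" field here is a hypothetical example.
example_scale_data = {
    "resize_node_groups": [
        {"name": "worker", "count": 5},
    ],
    "add_node_groups": [
        {"name": "new-worker", "count": 2,
         "node_group_template_id": "<template-id>"},
    ],
}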
def execute_job(job_id, data):

    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})

    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    instance = plugin.get_oozie_server(cluster)

    extra = {}
    info = None
    if CONF.use_namespaces and not CONF.use_floating_ips:
        info = instance.remote().get_neutron_info()
        extra['neutron'] = info

    # Not in Java job types but present for all others
    input_id = data.get('input_id', None)
    output_id = data.get('output_id', None)

    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {'input_id': input_id, 'output_id': output_id,
                   'job_id': job_id, 'cluster_id': cluster_id,
                   'info': {'status': 'Pending'}, 'job_configs': configs,
                   'extra': extra}
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)

    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
def execute_job(job_id, input_id, output_id, cluster_id, configs):
    # Create the job execution record in "Pending" state and hand it off
    # to the manager to run asynchronously.
    job_ex_dict = {"input_id": input_id, "output_id": output_id,
                   "job_id": job_id, "cluster_id": cluster_id,
                   "info": {"status": "Pending"}, "job_configs": configs}
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)

    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
def scale_cluster(id, data):
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])

    # the next map is the main object we will work with
    # to_be_enlarged : {node_group_id: desired_amount_of_instances}
    to_be_enlarged = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
        to_be_enlarged.update({ng_id: ng['count']})

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    cluster = conductor.cluster_get(ctx, cluster)

    # update nodegroup image usernames
    for nodegroup in cluster.node_groups:
        if additional.get(nodegroup.id):
            image_username = INFRA.get_node_group_image_username(nodegroup)
            conductor.node_group_update(
                ctx, nodegroup, {"image_username": image_username})
    cluster = conductor.cluster_get(ctx, cluster)

    try:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Validating"})
        LOG.info(g.format_cluster_status(cluster))
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception:
        with excutils.save_and_reraise_exception():
            g.clean_cluster_from_empty_ng(cluster)
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Active"})
            LOG.info(g.format_cluster_status(cluster))

    # If we are here validation is successful.
    # So let's update to_be_enlarged map:
    to_be_enlarged.update(additional)

    for node_group in cluster.node_groups:
        if node_group.id not in to_be_enlarged:
            to_be_enlarged[node_group.id] = node_group.count

    context.spawn("cluster-scaling-%s" % id,
                  _provision_scaled_cluster, id, to_be_enlarged)
    return conductor.cluster_get(ctx, id)
def scale_cluster(id, data):
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])

    # the next map is the main object we will work with
    # to_be_enlarged : {node_group_id: desired_amount_of_instances}
    to_be_enlarged = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
        to_be_enlarged.update({ng_id: ng['count']})

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    cluster = conductor.cluster_get(ctx, cluster)

    # update nodegroup image usernames
    for nodegroup in cluster.node_groups:
        if additional.get(nodegroup.id):
            image_username = INFRA.get_node_group_image_username(nodegroup)
            conductor.node_group_update(
                ctx, nodegroup, {"image_username": image_username})
    cluster = conductor.cluster_get(ctx, cluster)

    try:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Validating"})
        LOG.info(g.format_cluster_status(cluster))
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception:
        with excutils.save_and_reraise_exception():
            INFRA.clean_cluster_from_empty_ng(cluster)
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Active"})
            LOG.info(g.format_cluster_status(cluster))

    # If we are here validation is successful.
    # So let's update to_be_enlarged map:
    to_be_enlarged.update(additional)

    for node_group in cluster.node_groups:
        if node_group.id not in to_be_enlarged:
            to_be_enlarged[node_group.id] = node_group.count

    context.spawn("cluster-scaling-%s" % id,
                  _provision_scaled_cluster, id, to_be_enlarged)
    return conductor.cluster_get(ctx, id)
def create_cluster(values):
    cluster = s.create_cluster(values)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)

    # validating cluster
    try:
        context.model_update(cluster, status="Validating")
        plugin.validate(cluster)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            context.model_update(cluster, status="Error",
                                 status_description=str(ex))

    context.spawn(_provision_cluster, cluster.id)

    return cluster
def execute_job(job_id, input_id, output_id, cluster_id, configs):
    job_ex_dict = {'input_id': input_id, 'output_id': output_id,
                   'job_id': job_id, 'cluster_id': cluster_id,
                   'info': {'status': 'Pending'}, 'job_configs': configs}
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)

    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
def execute_job(job_id, data):

    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})

    # Squash args if it is a dict.
    # TODO(tmckay): remove this after bug #1269968 is fixed on the UI side
    # (tracked in bug #1270882)
    if "args" in configs and type(configs["args"]) is dict:
        configs["args"] = []

    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    instance = plugin.get_oozie_server(cluster)

    extra = {}
    info = None
    if CONF.use_namespaces and not CONF.use_floating_ips:
        info = instance.remote().get_neutron_info()
        extra['neutron'] = info

    # Not in Java job types but present for all others
    input_id = data.get('input_id', None)
    output_id = data.get('output_id', None)

    # Present for Java job types
    main_class = data.get('main_class', '')
    java_opts = data.get('java_opts', '')

    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {'main_class': main_class, 'java_opts': java_opts,
                   'input_id': input_id, 'output_id': output_id,
                   'job_id': job_id, 'cluster_id': cluster_id,
                   'info': {'status': 'Pending'}, 'job_configs': configs,
                   'extra': extra}
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)

    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
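# For illustration: a `data` payload covering the superset of fields read by
# the execute_job(job_id, data) variants above. Per the code, input_id and
# output_id apply to non-Java job types while main_class and java_opts apply
# to Java ones; the placeholder ids and the inner layout of job_configs
# beyond "args" are assumptions.
example_job_data = {
    'cluster_id': '<cluster-id>',
    'job_configs': {'args': []},
    'input_id': '<data-source-id>',    # non-Java job types
    'output_id': '<data-source-id>',   # non-Java job types
    'main_class': 'org.example.Main',  # Java job types
    'java_opts': '-Xmx512m',           # Java job types
}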
def _spawn(self, func, args):
    context.spawn(func, args)
def _spawn(self, description, func, *args, **kwargs):
    context.spawn(description, func, *args, **kwargs)
def _spawn(self, description, func, args):
    context.spawn(description, func, args)
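# For illustration: a minimal sketch of what a spawn helper such as
# context.spawn could look like on top of eventlet green threads. This is an
# assumption, not the implementation used above; in particular, the real
# helper presumably also propagates the current request context into the
# child thread, which this sketch omits.
import logging

import eventlet

LOG = logging.getLogger(__name__)


def spawn(description, func, *args, **kwargs):
    def _wrapper():
        try:
            func(*args, **kwargs)
        except Exception:
            # The description makes failures of fire-and-forget
            # threads identifiable in the logs.
            LOG.exception("Thread '%s' failed", description)

    eventlet.spawn(_wrapper)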