Exemple #1
0
def execute_job(job_id, data):
    """Create a job execution for *job_id* and launch it asynchronously.

    :param job_id: id of the job to run
    :param data: request payload; must contain 'cluster_id' and may
                 contain 'job_configs', 'input_id' and 'output_id'
    :returns: the newly created job_execution object (status 'Pending')
    """
    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})

    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    instance = plugin.get_oozie_server(cluster)

    extra = {}
    # With namespaces enabled and no floating IPs the Oozie server is
    # only reachable through its network namespace, so record that info.
    # (Dead "info = None" initialization removed; the value was only
    # ever used to populate extra['neutron'].)
    if CONF.use_namespaces and not CONF.use_floating_ips:
        extra['neutron'] = instance.remote().get_neutron_info()

    # Not in Java job types but present for all others
    input_id = data.get('input_id')
    output_id = data.get('output_id')

    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {'input_id': input_id, 'output_id': output_id,
                   'job_id': job_id, 'cluster_id': cluster_id,
                   'info': {'status': 'Pending'}, 'job_configs': configs,
                   'extra': extra}
    # Reuse the context fetched above instead of mixing context.current()
    # and context.ctx() accessors in one function.
    job_execution = conductor.job_execution_create(ctx, job_ex_dict)

    # Run the job in a background green thread; return immediately.
    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
Exemple #2
0
def create_cluster(values):
    """Create a cluster from *values*, validate it and start provisioning.

    :param values: dict of cluster properties for conductor.cluster_create
    :returns: the cluster object freshly read from the database
    :raises: re-raises any validation error after marking the cluster
             as "Error"
    """
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)

    # update nodegroup image usernames
    for nodegroup in cluster.node_groups:
        conductor.node_group_update(
            ctx, nodegroup,
            {"image_username": INFRA.get_node_group_image_username(nodegroup)})
    # Re-read so the local object reflects the node group updates; pass
    # the id explicitly, consistent with the other cluster_get calls.
    cluster = conductor.cluster_get(ctx, cluster.id)

    # validating cluster
    try:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Validating"})
        LOG.info(g.format_cluster_status(cluster))

        plugin.validate(cluster)
    except Exception as e:
        # Mark the cluster as failed, but let the original error propagate.
        with excutils.save_and_reraise_exception():
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Error",
                                                "status_description": str(e)})
            LOG.info(g.format_cluster_status(cluster))

    # Provision in a background green thread.
    context.spawn("cluster-creating-%s" % cluster.id,
                  _provision_cluster, cluster.id)
    if CONF.use_identity_api_v3 and cluster.is_transient:
        trusts.create_trust(cluster)

    return conductor.cluster_get(ctx, cluster.id)
Exemple #3
0
def create_cluster(values):
    """Create, validate and asynchronously provision a cluster.

    :param values: dict of cluster properties for conductor.cluster_create
    :returns: the cluster object freshly read from the database
    :raises: re-raises any validation error after setting status "Error"
    """
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)

    # update nodegroup image usernames
    for nodegroup in cluster.node_groups:
        conductor.node_group_update(
            ctx, nodegroup,
            {"image_username": INFRA.get_node_group_image_username(nodegroup)})
    # Refresh the local object after the node group updates; use the id
    # explicitly for consistency with the other cluster_get calls.
    cluster = conductor.cluster_get(ctx, cluster.id)

    # validating cluster
    try:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Validating"})
        LOG.info(g.format_cluster_status(cluster))

        plugin.validate(cluster)
    except Exception as e:
        # Record the failure but let the original exception propagate.
        with excutils.save_and_reraise_exception():
            cluster = conductor.cluster_update(ctx, cluster, {
                "status": "Error",
                "status_description": str(e)
            })
            LOG.info(g.format_cluster_status(cluster))

    context.spawn("cluster-creating-%s" % cluster.id, _provision_cluster,
                  cluster.id)
    if CONF.use_identity_api_v3 and cluster.is_transient:
        trusts.create_trust(cluster)

    return conductor.cluster_get(ctx, cluster.id)
Exemple #4
0
 def provision_scaled_cluster(self,
                              cluster_id,
                              node_group_id_map,
                              node_group_instance_map=None):
     """Start scaling of the cluster in a background green thread."""
     description = "cluster-scaling-%s" % cluster_id
     context.spawn(description, _provision_scaled_cluster, cluster_id,
                   node_group_id_map, node_group_instance_map)
Exemple #5
0
def scale_cluster(id, data):
    """Validate a scaling request and start scaling asynchronously.

    NOTE: parameter ``id`` shadows the builtin; kept for interface
    compatibility with existing callers.

    :param id: id of the cluster to scale
    :param data: request payload; may contain 'resize_node_groups' and
                 'add_node_groups'
    :returns: the cluster object freshly read from the database
    :raises: re-raises validation errors after restoring status "Active"
    """
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])

    # the next map is the main object we will work with
    # to_be_enlarged : {node_group_id: desired_amount_of_instances}
    to_be_enlarged = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
        # Direct assignment instead of update({...}) — no throwaway dict.
        to_be_enlarged[ng_id] = ng['count']

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    cluster = conductor.cluster_get(ctx, cluster)

    # update nodegroup image usernames
    for nodegroup in cluster.node_groups:
        if additional.get(nodegroup.id):
            image_username = INFRA.get_node_group_image_username(nodegroup)
            conductor.node_group_update(
                ctx, nodegroup, {"image_username": image_username})
    cluster = conductor.cluster_get(ctx, cluster)

    try:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Validating"})
        LOG.info(g.format_cluster_status(cluster))
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception:
        # Roll back: drop empty node groups and restore "Active" status,
        # then let the validation error propagate.
        with excutils.save_and_reraise_exception():
            g.clean_cluster_from_empty_ng(cluster)
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Active"})
            LOG.info(g.format_cluster_status(cluster))

    # If we are here validation is successful.
    # So let's update to_be_enlarged map:
    to_be_enlarged.update(additional)

    # Node groups not mentioned in the request keep their current count.
    for node_group in cluster.node_groups:
        to_be_enlarged.setdefault(node_group.id, node_group.count)

    context.spawn("cluster-scaling-%s" % id,
                  _provision_scaled_cluster, id, to_be_enlarged)
    return conductor.cluster_get(ctx, id)
Exemple #6
0
def scale_cluster(id, data):
    """Validate a scaling request and kick off asynchronous scaling.

    NOTE: parameter ``id`` shadows the builtin; kept for interface
    compatibility with existing callers.

    :param id: id of the cluster to scale
    :param data: request payload; may contain 'resize_node_groups' and
                 'add_node_groups'
    :returns: the cluster object freshly read from the database
    :raises: re-raises validation errors after restoring status "Active"
    """
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])

    # the next map is the main object we will work with
    # to_be_enlarged : {node_group_id: desired_amount_of_instances}
    to_be_enlarged = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
        # Direct assignment instead of update({...}) — no throwaway dict.
        to_be_enlarged[ng_id] = ng['count']

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    cluster = conductor.cluster_get(ctx, cluster)

    # update nodegroup image usernames
    for nodegroup in cluster.node_groups:
        if additional.get(nodegroup.id):
            image_username = INFRA.get_node_group_image_username(nodegroup)
            conductor.node_group_update(ctx, nodegroup,
                                        {"image_username": image_username})
    cluster = conductor.cluster_get(ctx, cluster)

    try:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Validating"})
        LOG.info(g.format_cluster_status(cluster))
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception:
        # Roll back: drop empty node groups and restore "Active" status,
        # then let the validation error propagate.
        with excutils.save_and_reraise_exception():
            g.clean_cluster_from_empty_ng(cluster)
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Active"})
            LOG.info(g.format_cluster_status(cluster))

    # If we are here validation is successful.
    # So let's update to_be_enlarged map:
    to_be_enlarged.update(additional)

    # Node groups not mentioned in the request keep their current count.
    for node_group in cluster.node_groups:
        to_be_enlarged.setdefault(node_group.id, node_group.count)

    context.spawn("cluster-scaling-%s" % id, _provision_scaled_cluster, id,
                  to_be_enlarged)
    return conductor.cluster_get(ctx, id)
Exemple #7
0
def execute_job(job_id, data):
    """Create and asynchronously launch a job execution.

    :param job_id: id of the job to run
    :param data: request payload; must contain 'cluster_id' and may
                 contain 'job_configs', 'input_id' and 'output_id'
    :returns: the created job_execution object with status 'Pending'
    """
    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})

    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    instance = plugin.get_oozie_server(cluster)

    extra = {}
    # With namespaces enabled and no floating IPs, the Oozie server must
    # be reached through its network namespace; record that info.
    # (Dead "info = None" initialization removed.)
    if CONF.use_namespaces and not CONF.use_floating_ips:
        extra['neutron'] = instance.remote().get_neutron_info()

    # Not in Java job types but present for all others
    input_id = data.get('input_id')
    output_id = data.get('output_id')

    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {
        'input_id': input_id,
        'output_id': output_id,
        'job_id': job_id,
        'cluster_id': cluster_id,
        'info': {
            'status': 'Pending'
        },
        'job_configs': configs,
        'extra': extra
    }
    # Reuse the ctx fetched above rather than mixing context.current()
    # and context.ctx() accessors in one function.
    job_execution = conductor.job_execution_create(ctx, job_ex_dict)

    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
Exemple #8
0
 def handle_verification(self, cluster_id, values):
     """Run cluster verification in a background green thread."""
     description = 'Handling Verification for cluster %s' % cluster_id
     context.spawn(description, _handle_verification, cluster_id, values)
Exemple #9
0
 def delete_job_execution(self, job_execution_id):
     """Delete the job execution in a background green thread."""
     description = "Deleting Job Execution %s" % job_execution_id
     context.spawn(description, _delete_job_execution, job_execution_id)
Exemple #10
0
 def terminate_cluster(self, cluster_id, force=False):
     """Terminate the cluster asynchronously; ``force`` is forwarded as-is."""
     description = "cluster-terminating-%s" % cluster_id
     context.spawn(description, terminate_cluster, cluster_id, force)
Exemple #11
0
 def job_execution_suspend(self, job_execution_id):
     """Suspend the job execution in a background green thread."""
     description = "Suspend Job Execution %s" % job_execution_id
     context.spawn(description, _suspend_job_execution, job_execution_id)
Exemple #12
0
 def delete_job_execution(self, job_execution_id):
     """Asynchronously delete the given job execution."""
     context.spawn(
         "Deleting Job Execution %s" % job_execution_id,
         _delete_job_execution,
         job_execution_id)
Exemple #13
0
 def run_edp_job(self, job_execution_id):
     """Start the EDP job execution in a background green thread."""
     description = "Starting Job Execution %s" % job_execution_id
     context.spawn(description, _run_edp_job, job_execution_id)
Exemple #14
0
 def provision_scaled_cluster(self, cluster_id, node_group_id_map,
                              node_group_instance_map=None):
     """Asynchronously scale the cluster per the given node group maps."""
     context.spawn(
         "cluster-scaling-%s" % cluster_id,
         _provision_scaled_cluster,
         cluster_id,
         node_group_id_map,
         node_group_instance_map)
Exemple #15
0
 def _spawn(self, description, func, *args, **kwargs):
     """Thin pass-through wrapper around context.spawn."""
     context.spawn(description, func, *args, **kwargs)
Exemple #16
0
 def job_execution_suspend(self, job_execution_id):
     """Asynchronously suspend the given job execution."""
     context.spawn(
         "Suspend Job Execution %s" % job_execution_id,
         _suspend_job_execution,
         job_execution_id)
Exemple #17
0
 def provision_cluster(self, cluster_id):
     """Start cluster provisioning in a background green thread."""
     description = "cluster-creating-%s" % cluster_id
     context.spawn(description, _provision_cluster, cluster_id)
Exemple #18
0
 def terminate_cluster(self, cluster_id):
     """Terminate the cluster in a background green thread."""
     description = "cluster-terminating-%s" % cluster_id
     context.spawn(description, _terminate_cluster, cluster_id)
Exemple #19
0
 def terminate_cluster(self, cluster_id, force=False):
     """Asynchronously terminate the cluster, forwarding ``force`` as-is."""
     context.spawn(
         "cluster-terminating-%s" % cluster_id,
         terminate_cluster,
         cluster_id,
         force)
Exemple #20
0
 def provision_cluster(self, cluster_id):
     """Asynchronously provision the cluster."""
     context.spawn(
         "cluster-creating-%s" % cluster_id,
         _provision_cluster,
         cluster_id)
Exemple #21
0
 def cancel_job_execution(self, job_execution_id):
     """Cancel the job execution in a background green thread."""
     description = "Canceling Job Execution %s" % job_execution_id
     context.spawn(description, _cancel_job_execution, job_execution_id)
Exemple #22
0
 def terminate_cluster(self, cluster_id):
     """Asynchronously terminate the cluster."""
     context.spawn(
         "cluster-terminating-%s" % cluster_id,
         terminate_cluster,
         cluster_id)
Exemple #23
0
 def handle_verification(self, cluster_id, values):
     """Asynchronously run verification for the cluster."""
     context.spawn(
         'Handling Verification for cluster %s' % cluster_id,
         _handle_verification,
         cluster_id,
         values)
Exemple #24
0
 def run_edp_job(self, job_execution_id):
     """Asynchronously start the EDP job execution."""
     context.spawn(
         "Starting Job Execution %s" % job_execution_id,
         _run_edp_job,
         job_execution_id)
Exemple #25
0
 def _spawn(self, description, func, *args, **kwargs):
     """Thin pass-through wrapper around context.spawn."""
     context.spawn(description, func, *args, **kwargs)
Exemple #26
0
 def cancel_job_execution(self, job_execution_id):
     """Asynchronously cancel the given job execution."""
     context.spawn(
         "Canceling Job Execution %s" % job_execution_id,
         _cancel_job_execution,
         job_execution_id)