Example #1
def j_e_i_wrapper(data):
    # Build a mock job whose interface holds three arguments constructed from
    # the _configs(), _args() and _params() helpers, then validate the
    # execution data against that interface.
    job = mock.Mock(
        interface=[int_arg(**_configs()),
                   int_arg(**_args()),
                   int_arg(**_params())]
    )
    j_i.check_execution_interface(data, job)
Example #2
def j_e_i_wrapper(data):
    job = mock.Mock(interface=[
        int_arg(**_configs()),
        int_arg(**_args()),
        int_arg(**_params())
    ])
    j_i.check_execution_interface(data, job)
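
Examples #1 and #2 are the same test wrapper formatted two ways. Both assume that mock, the job-interface validation module j_i, and the int_arg, _configs, _args and _params helpers are already defined in the surrounding test module; none of those are shown here. The snippet below is only a hypothetical sketch of that scaffolding (the import path, helper bodies and field values are assumptions, not taken from the source):

from unittest import mock

# Assumed import path for the validator used above; adjust it to the actual
# project layout if it differs.
from sahara.service.validations.edp import job_interface as j_i


def int_arg(**fields):
    # Assumed helper: expose the interface-argument fields as attributes.
    # configure_mock is used because 'name' cannot be set through the Mock
    # constructor.
    arg = mock.Mock()
    arg.configure_mock(**fields)
    return arg


def _configs():
    # Assumed example payload for a config-mapped interface argument.
    return {"name": "Reducer Count", "mapping_type": "configs",
            "location": "mapred.reduce.tasks", "value_type": "number",
            "required": True, "default": "1"}

Under the same assumption, _args() and _params() would return similar dicts with mapping_type set to "args" and "params" respectively.
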
Example #3
def check_job_execution(data, job_id):
    # Validate a job execution request: the cluster must exist, its plugin
    # labels must allow use, the plugin must provide an EDP engine for the
    # job's type, and the supplied data must satisfy the job's interface and
    # the engine's own checks; scheduled executions get an extra check of
    # their scheduling info.
    ctx = context.ctx()
    job_execution_info = data.get('job_execution_info', {})

    cluster = conductor.cluster_get(ctx, data['cluster_id'])
    if not cluster:
        raise ex.InvalidReferenceException(
            _("Cluster with id '%s' doesn't exist") % data['cluster_id'])

    val_base.check_plugin_labels(cluster.plugin_name, cluster.hadoop_version)
    job = conductor.job_get(ctx, job_id)

    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    edp_engine = plugin.get_edp_engine(cluster, job.type)
    if not edp_engine:
        raise ex.InvalidReferenceException(
            _("Cluster with id '%(cluster_id)s' doesn't support job type "
              "'%(job_type)s'") % {
                  "cluster_id": cluster.id,
                  "job_type": job.type
              })

    j_i.check_execution_interface(data, job)
    edp_engine.validate_job_execution(cluster, job, data)

    if 'job_execution_type' in job_execution_info:
        j_type = job_execution_info.get('job_execution_type', 'workflow')
        if j_type == 'scheduled':
            check_scheduled_job_execution_info(job_execution_info)
Example #4
def check_job_execution(data, job_id):
    ctx = context.ctx()
    job_execution_info = data.get('job_execution_info', {})

    cluster = conductor.cluster_get(ctx, data['cluster_id'])
    if not cluster:
        raise ex.InvalidReferenceException(
            _("Cluster with id '%s' doesn't exist") % data['cluster_id'])

    val_base.check_plugin_labels(cluster.plugin_name, cluster.hadoop_version)
    job = conductor.job_get(ctx, job_id)

    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    edp_engine = plugin.get_edp_engine(cluster, job.type)
    if not edp_engine:
        raise ex.InvalidReferenceException(
            _("Cluster with id '%(cluster_id)s' doesn't support job type "
              "'%(job_type)s'") % {"cluster_id": cluster.id,
                                   "job_type": job.type})

    j_i.check_execution_interface(data, job)
    edp_engine.validate_job_execution(cluster, job, data)

    if 'job_execution_type' in job_execution_info:
        j_type = job_execution_info.get('job_execution_type', 'workflow')
        if j_type == 'scheduled':
            check_scheduled_job_execution_info(job_execution_info)
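
check_scheduled_job_execution_info is called in Examples #3 and #4 but never defined in these snippets. The sketch below is only a guess at its shape, assuming it merely verifies that a parseable start time was supplied; the field name, time format and exception type are assumptions, and ex and _ are reused from the surrounding module as in the examples above.

import time


def check_scheduled_job_execution_info(job_execution_info):
    # Hypothetical check: a scheduled execution must carry a parseable
    # 'start' timestamp. The real implementation is not shown above.
    start = job_execution_info.get('start')
    if start is None:
        raise ex.InvalidDataException(
            _("Scheduled job executions require a 'start' time"))
    try:
        time.strptime(start, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        raise ex.InvalidDataException(
            _("Invalid 'start' time format: %s") % start)
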
Example #5
def check_job_execution(data, job_id):
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, data['cluster_id'])
    if not cluster:
        raise ex.InvalidReferenceException(
            _("Cluster with id '%s' doesn't exist") % data['cluster_id'])

    job = conductor.job_get(ctx, job_id)

    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    edp_engine = plugin.get_edp_engine(cluster, job.type)
    if not edp_engine:
        raise ex.InvalidReferenceException(
            _("Cluster with id '%(cluster_id)s' doesn't support job type "
              "'%(job_type)s'") % {"cluster_id": cluster.id,
                                   "job_type": job.type})

    j_i.check_execution_interface(data, job)
    edp_engine.validate_job_execution(cluster, job, data)
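
For reference, a caller would hand check_job_execution the validated request body plus the job id, roughly along these lines (the ids and values below are placeholders, not taken from the source; the keys mirror the fields read in the fuller variants above):

request_data = {
    'cluster_id': '11111111-2222-3333-4444-555555555555',  # placeholder id
    'job_execution_info': {'job_execution_type': 'workflow'}
}
check_job_execution(request_data, '66666666-7777-8888-9999-000000000000')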